combine: Do not throw away unneeded arms of parallels (PR83156)
[gcc.git] / gcc / combine.c
1 /* Optimize by combining instructions for GNU compiler.
2 Copyright (C) 1987-2017 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 /* This module is essentially the "combiner" phase of the U. of Arizona
21 Portable Optimizer, but redone to work on our list-structured
22 representation for RTL instead of their string representation.
23
24 The LOG_LINKS of each insn identify the most recent assignment
25 to each REG used in the insn. It is a list of previous insns,
26 each of which contains a SET for a REG that is used in this insn
27 and not used or set in between. LOG_LINKs never cross basic blocks.
28 They were set up by the preceding pass (lifetime analysis).
29
30 We try to combine each pair of insns joined by a logical link.
31 We also try to combine triplets of insns A, B and C when C has
32 a link back to B and B has a link back to A. Likewise for a
33 small number of quadruplets of insns A, B, C and D for which
34 there's high likelihood of success.
35
36 LOG_LINKS does not have links for use of the CC0. They don't
37 need to, because the insn that sets the CC0 is always immediately
38 before the insn that tests it. So we always regard a branch
39 insn as having a logical link to the preceding insn. The same is true
40 for an insn explicitly using CC0.
41
42 We check (with use_crosses_set_p) to avoid combining in such a way
43 as to move a computation to a place where its value would be different.
44
45 Combination is done by mathematically substituting the previous
46 insn(s) values for the regs they set into the expressions in
47 the later insns that refer to these regs. If the result is a valid insn
48 for our target machine, according to the machine description,
49 we install it, delete the earlier insns, and update the data flow
50 information (LOG_LINKS and REG_NOTES) for what we did.
51
52 There are a few exceptions where the dataflow information isn't
53 completely updated (however this is only a local issue since it is
54 regenerated before the next pass that uses it):
55
56 - reg_live_length is not updated
57 - reg_n_refs is not adjusted in the rare case when a register is
58 no longer required in a computation
59 - there are extremely rare cases (see distribute_notes) when a
60 REG_DEAD note is lost
61 - a LOG_LINKS entry that refers to an insn with multiple SETs may be
62 removed because there is no way to know which register it was
63 linking
64
65 To simplify substitution, we combine only when the earlier insn(s)
66 consist of only a single assignment. To simplify updating afterward,
67 we never combine when a subroutine call appears in the middle.
68
69 Since we do not represent assignments to CC0 explicitly except when that
70 is all an insn does, there is no LOG_LINKS entry in an insn that uses
71 the condition code for the insn that set the condition code.
72 Fortunately, these two insns must be consecutive.
73 Therefore, every JUMP_INSN is taken to have an implicit logical link
74 to the preceding insn. This is not quite right, since non-jumps can
75 also use the condition code; but in practice such insns would not
76 combine anyway. */
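/* An illustrative sketch of the substitution described above (register
   numbers are made up): if i3 has a LOG_LINK back to i2,

	i2:  (set (reg 116) (plus:SI (reg 115) (const_int 4)))
	i3:  (set (mem:SI (reg 116)) (reg 117))

   try_combine substitutes i2's value for (reg 116) in i3, giving the single
   insn

	(set (mem:SI (plus:SI (reg 115) (const_int 4))) (reg 117))

   which is installed, and i2 deleted, only if the machine description
   recognizes the result and (reg 116) is not needed elsewhere.  */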
77
78 #include "config.h"
79 #include "system.h"
80 #include "coretypes.h"
81 #include "backend.h"
82 #include "target.h"
83 #include "rtl.h"
84 #include "tree.h"
85 #include "cfghooks.h"
86 #include "predict.h"
87 #include "df.h"
88 #include "memmodel.h"
89 #include "tm_p.h"
90 #include "optabs.h"
91 #include "regs.h"
92 #include "emit-rtl.h"
93 #include "recog.h"
94 #include "cgraph.h"
95 #include "stor-layout.h"
96 #include "cfgrtl.h"
97 #include "cfgcleanup.h"
98 /* Include expr.h after insn-config.h so we get HAVE_conditional_move. */
99 #include "explow.h"
100 #include "insn-attr.h"
101 #include "rtlhooks-def.h"
102 #include "params.h"
103 #include "tree-pass.h"
104 #include "valtrack.h"
105 #include "rtl-iter.h"
106 #include "print-rtl.h"
107
108 /* Number of attempts to combine instructions in this function. */
109
110 static int combine_attempts;
111
112 /* Number of attempts that got as far as substitution in this function. */
113
114 static int combine_merges;
115
116 /* Number of instructions combined with added SETs in this function. */
117
118 static int combine_extras;
119
120 /* Number of instructions combined in this function. */
121
122 static int combine_successes;
123
124 /* Totals over entire compilation. */
125
126 static int total_attempts, total_merges, total_extras, total_successes;
127
128 /* combine_instructions may try to replace the right hand side of the
129 second instruction with the value of an associated REG_EQUAL note
130 before throwing it at try_combine. That is problematic when there
131 is a REG_DEAD note for a register used in the old right hand side
132 and can cause distribute_notes to do wrong things. This is the
133 second instruction if it has been so modified, null otherwise. */
134
135 static rtx_insn *i2mod;
136
137 /* When I2MOD is nonnull, this is a copy of the old right hand side. */
138
139 static rtx i2mod_old_rhs;
140
141 /* When I2MOD is nonnull, this is a copy of the new right hand side. */
142
143 static rtx i2mod_new_rhs;
144 \f
145 struct reg_stat_type {
146 /* Record last point of death of (hard or pseudo) register n. */
147 rtx_insn *last_death;
148
149 /* Record last point of modification of (hard or pseudo) register n. */
150 rtx_insn *last_set;
151
152 /* The next group of fields allows the recording of the last value assigned
153 to (hard or pseudo) register n. We use this information to see if an
154 operation being processed is redundant given a prior operation performed
155 on the register. For example, an `and' with a constant is redundant if
156 all the zero bits are already known to be turned off.
157
158 We use an approach similar to that used by cse, but change it in the
159 following ways:
160
161 (1) We do not want to reinitialize at each label.
162 (2) It is useful, but not critical, to know the actual value assigned
163 to a register. Often just its form is helpful.
164
165 Therefore, we maintain the following fields:
166
167 last_set_value the last value assigned
168 last_set_label records the value of label_tick when the
169 register was assigned
170 last_set_table_tick records the value of label_tick when a
171 value using the register is assigned
172 last_set_invalid set to nonzero when it is not valid
173 to use the value of this register in some
174 register's value
175
176 To understand the usage of these tables, it is important to understand
177 the distinction between the value in last_set_value being valid and
178 the register being validly contained in some other expression in the
179 table.
180
181 (The next two parameters are out of date).
182
183 reg_stat[i].last_set_value is valid if it is nonzero, and either
184 reg_n_sets[i] is 1 or reg_stat[i].last_set_label == label_tick.
185
186 Register I may validly appear in any expression returned for the value
187 of another register if reg_n_sets[i] is 1. It may also appear in the
188 value for register J if reg_stat[j].last_set_invalid is zero, or
189 reg_stat[i].last_set_label < reg_stat[j].last_set_label.
190
191 If an expression is found in the table containing a register which may
192 not validly appear in an expression, the register is replaced by
193 something that won't match, (clobber (const_int 0)). */
194
195 /* Record last value assigned to (hard or pseudo) register n. */
196
197 rtx last_set_value;
198
199 /* Record the value of label_tick when an expression involving register n
200 is placed in last_set_value. */
201
202 int last_set_table_tick;
203
204 /* Record the value of label_tick when the value for register n is placed in
205 last_set_value. */
206
207 int last_set_label;
208
209 /* These fields are maintained in parallel with last_set_value and are
210 used to store the mode in which the register was last set, the bits
211 that were known to be zero when it was last set, and the number of
212 sign bit copies it was known to have when it was last set. */
213
214 unsigned HOST_WIDE_INT last_set_nonzero_bits;
215 char last_set_sign_bit_copies;
216 ENUM_BITFIELD(machine_mode) last_set_mode : 8;
217
218 /* Set nonzero if references to register n in expressions should not be
219 used. last_set_invalid is set nonzero when this register is being
220 assigned to and last_set_table_tick == label_tick. */
221
222 char last_set_invalid;
223
224 /* Some registers that are set more than once and used in more than one
225 basic block are nevertheless always set in similar ways. For example,
226 a QImode register may be loaded from memory in two places on a machine
227 where byte loads zero extend.
228
229 We record in the following fields if a register has some leading bits
230 that are always equal to the sign bit, and what we know about the
231 nonzero bits of a register, specifically which bits are known to be
232 zero.
233
234 If an entry is zero, it means that we don't know anything special. */
235
236 unsigned char sign_bit_copies;
237
238 unsigned HOST_WIDE_INT nonzero_bits;
239
240 /* Record the value of the label_tick when the last truncation
241 happened. The field truncated_to_mode is only valid if
242 truncation_label == label_tick. */
243
244 int truncation_label;
245
246 /* Record the last truncation seen for this register. If truncation
247 is not a nop to this mode we might be able to save an explicit
248 truncation if we know that value already contains a truncated
249 value. */
250
251 ENUM_BITFIELD(machine_mode) truncated_to_mode : 8;
252 };
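/* An illustrative example of how these fields are used (register number made
   up): if every SET of (reg 117) in the function has the form

	(set (reg:SI 117) (zero_extend:SI (mem:QI ...)))

   then reg_stat[117].nonzero_bits ends up as 0xff, so a later
   (and:SI (reg:SI 117) (const_int 255)) is known to be redundant and can be
   replaced by (reg:SI 117), as described for `and' in the comment above.  */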
253
254
255 static vec<reg_stat_type> reg_stat;
256
257 /* One plus the highest pseudo for which we track REG_N_SETS.
258 regstat_init_n_sets_and_refs allocates the array for REG_N_SETS just once,
259 but during combine_split_insns new pseudos can be created. As we don't have
260 updated DF information in that case, it is hard to initialize the array
261 after growing. The combiner only cares about REG_N_SETS (regno) == 1,
262 so instead of growing the arrays, just assume all newly created pseudos
263 during combine might be set multiple times. */
264
265 static unsigned int reg_n_sets_max;
266
267 /* Record the luid of the last insn that invalidated memory
268 (anything that writes memory, and subroutine calls, but not pushes). */
269
270 static int mem_last_set;
271
272 /* Record the luid of the last CALL_INSN
273 so we can tell whether a potential combination crosses any calls. */
274
275 static int last_call_luid;
276
277 /* When `subst' is called, this is the insn that is being modified
278 (by combining in a previous insn). The PATTERN of this insn
279 is still the old pattern partially modified and it should not be
280 looked at, but this may be used to examine the successors of the insn
281 to judge whether a simplification is valid. */
282
283 static rtx_insn *subst_insn;
284
285 /* This is the lowest LUID that `subst' is currently dealing with.
286 get_last_value will not return a value if the register was set at or
287 after this LUID. If not for this mechanism, we could get confused if
288 I2 or I1 in try_combine were an insn that used the old value of a register
289 to obtain a new value. In that case, we might erroneously get the
290 new value of the register when we wanted the old one. */
291
292 static int subst_low_luid;
293
294 /* This contains any hard registers that are used in newpat; reg_dead_at_p
295 must consider all these registers to be always live. */
296
297 static HARD_REG_SET newpat_used_regs;
298
299 /* This is an insn to which a LOG_LINKS entry has been added. If this
300 insn is earlier than I2 or I3, combine should rescan starting at
301 that location. */
302
303 static rtx_insn *added_links_insn;
304
305 /* And similarly, for notes. */
306
307 static rtx_insn *added_notes_insn;
308
309 /* Basic block in which we are performing combines. */
310 static basic_block this_basic_block;
311 static bool optimize_this_for_speed_p;
312
313 \f
314 /* Length of the currently allocated uid_insn_cost array. */
315
316 static int max_uid_known;
317
318 /* The following array records the insn_cost for every insn
319 in the instruction stream. */
320
321 static int *uid_insn_cost;
322
323 /* The following array records the LOG_LINKS for every insn in the
324 instruction stream as struct insn_link pointers. */
325
326 struct insn_link {
327 rtx_insn *insn;
328 unsigned int regno;
329 struct insn_link *next;
330 };
331
332 static struct insn_link **uid_log_links;
333
334 static inline int
335 insn_uid_check (const_rtx insn)
336 {
337 int uid = INSN_UID (insn);
338 gcc_checking_assert (uid <= max_uid_known);
339 return uid;
340 }
341
342 #define INSN_COST(INSN) (uid_insn_cost[insn_uid_check (INSN)])
343 #define LOG_LINKS(INSN) (uid_log_links[insn_uid_check (INSN)])
344
345 #define FOR_EACH_LOG_LINK(L, INSN) \
346 for ((L) = LOG_LINKS (INSN); (L); (L) = (L)->next)
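/* A typical (illustrative) use of this iterator; the same pattern appears in
   find_single_use and create_log_links below, and use_insn, def_insn and dest
   are placeholders here:

	struct insn_link *link;
	FOR_EACH_LOG_LINK (link, use_insn)
	  if (link->insn == def_insn && link->regno == REGNO (dest))
	    break;

   After the loop, LINK is nonnull exactly when a link records that DEF_INSN
   feeds USE_INSN through the register DEST.  */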
347
348 /* Links for LOG_LINKS are allocated from this obstack. */
349
350 static struct obstack insn_link_obstack;
351
352 /* Allocate a link. */
353
354 static inline struct insn_link *
355 alloc_insn_link (rtx_insn *insn, unsigned int regno, struct insn_link *next)
356 {
357 struct insn_link *l
358 = (struct insn_link *) obstack_alloc (&insn_link_obstack,
359 sizeof (struct insn_link));
360 l->insn = insn;
361 l->regno = regno;
362 l->next = next;
363 return l;
364 }
365
366 /* Incremented for each basic block. */
367
368 static int label_tick;
369
370 /* Reset to label_tick for each extended basic block in scanning order. */
371
372 static int label_tick_ebb_start;
373
374 /* Mode used to compute significance in reg_stat[].nonzero_bits. It is the
375 largest integer mode that can fit in HOST_BITS_PER_WIDE_INT. */
376
377 static scalar_int_mode nonzero_bits_mode;
378
379 /* Nonzero when reg_stat[].nonzero_bits and reg_stat[].sign_bit_copies can
380 be safely used. It is zero while computing them and after combine has
381 completed. This former test prevents propagating values based on
382 previously set values, which can be incorrect if a variable is modified
383 in a loop. */
384
385 static int nonzero_sign_valid;
386
387 \f
388 /* Record one modification to rtl structure
389 to be undone by storing old_contents into *where. */
390
391 enum undo_kind { UNDO_RTX, UNDO_INT, UNDO_MODE, UNDO_LINKS };
392
393 struct undo
394 {
395 struct undo *next;
396 enum undo_kind kind;
397 union { rtx r; int i; machine_mode m; struct insn_link *l; } old_contents;
398 union { rtx *r; int *i; struct insn_link **l; } where;
399 };
400
401 /* Record a bunch of changes to be undone, chained through their `next'
402 fields; undo entries no longer in use are kept on the `frees' list for reuse.
403
404 other_insn is nonzero if we have modified some other insn in the process
405 of working on subst_insn. It must be verified too. */
406
407 struct undobuf
408 {
409 struct undo *undos;
410 struct undo *frees;
411 rtx_insn *other_insn;
412 };
413
414 static struct undobuf undobuf;
415
416 /* Number of times the pseudo being substituted for
417 was found and replaced. */
418
419 static int n_occurrences;
420
421 static rtx reg_nonzero_bits_for_combine (const_rtx, scalar_int_mode,
422 scalar_int_mode,
423 unsigned HOST_WIDE_INT *);
424 static rtx reg_num_sign_bit_copies_for_combine (const_rtx, scalar_int_mode,
425 scalar_int_mode,
426 unsigned int *);
427 static void do_SUBST (rtx *, rtx);
428 static void do_SUBST_INT (int *, int);
429 static void init_reg_last (void);
430 static void setup_incoming_promotions (rtx_insn *);
431 static void set_nonzero_bits_and_sign_copies (rtx, const_rtx, void *);
432 static int cant_combine_insn_p (rtx_insn *);
433 static int can_combine_p (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
434 rtx_insn *, rtx_insn *, rtx *, rtx *);
435 static int combinable_i3pat (rtx_insn *, rtx *, rtx, rtx, rtx, int, int, rtx *);
436 static int contains_muldiv (rtx);
437 static rtx_insn *try_combine (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
438 int *, rtx_insn *);
439 static void undo_all (void);
440 static void undo_commit (void);
441 static rtx *find_split_point (rtx *, rtx_insn *, bool);
442 static rtx subst (rtx, rtx, rtx, int, int, int);
443 static rtx combine_simplify_rtx (rtx, machine_mode, int, int);
444 static rtx simplify_if_then_else (rtx);
445 static rtx simplify_set (rtx);
446 static rtx simplify_logical (rtx);
447 static rtx expand_compound_operation (rtx);
448 static const_rtx expand_field_assignment (const_rtx);
449 static rtx make_extraction (machine_mode, rtx, HOST_WIDE_INT,
450 rtx, unsigned HOST_WIDE_INT, int, int, int);
451 static int get_pos_from_mask (unsigned HOST_WIDE_INT,
452 unsigned HOST_WIDE_INT *);
453 static rtx canon_reg_for_combine (rtx, rtx);
454 static rtx force_int_to_mode (rtx, scalar_int_mode, scalar_int_mode,
455 scalar_int_mode, unsigned HOST_WIDE_INT, int);
456 static rtx force_to_mode (rtx, machine_mode,
457 unsigned HOST_WIDE_INT, int);
458 static rtx if_then_else_cond (rtx, rtx *, rtx *);
459 static rtx known_cond (rtx, enum rtx_code, rtx, rtx);
460 static int rtx_equal_for_field_assignment_p (rtx, rtx, bool = false);
461 static rtx make_field_assignment (rtx);
462 static rtx apply_distributive_law (rtx);
463 static rtx distribute_and_simplify_rtx (rtx, int);
464 static rtx simplify_and_const_int_1 (scalar_int_mode, rtx,
465 unsigned HOST_WIDE_INT);
466 static rtx simplify_and_const_int (rtx, scalar_int_mode, rtx,
467 unsigned HOST_WIDE_INT);
468 static int merge_outer_ops (enum rtx_code *, HOST_WIDE_INT *, enum rtx_code,
469 HOST_WIDE_INT, machine_mode, int *);
470 static rtx simplify_shift_const_1 (enum rtx_code, machine_mode, rtx, int);
471 static rtx simplify_shift_const (rtx, enum rtx_code, machine_mode, rtx,
472 int);
473 static int recog_for_combine (rtx *, rtx_insn *, rtx *);
474 static rtx gen_lowpart_for_combine (machine_mode, rtx);
475 static enum rtx_code simplify_compare_const (enum rtx_code, machine_mode,
476 rtx, rtx *);
477 static enum rtx_code simplify_comparison (enum rtx_code, rtx *, rtx *);
478 static void update_table_tick (rtx);
479 static void record_value_for_reg (rtx, rtx_insn *, rtx);
480 static void check_promoted_subreg (rtx_insn *, rtx);
481 static void record_dead_and_set_regs_1 (rtx, const_rtx, void *);
482 static void record_dead_and_set_regs (rtx_insn *);
483 static int get_last_value_validate (rtx *, rtx_insn *, int, int);
484 static rtx get_last_value (const_rtx);
485 static int use_crosses_set_p (const_rtx, int);
486 static void reg_dead_at_p_1 (rtx, const_rtx, void *);
487 static int reg_dead_at_p (rtx, rtx_insn *);
488 static void move_deaths (rtx, rtx, int, rtx_insn *, rtx *);
489 static int reg_bitfield_target_p (rtx, rtx);
490 static void distribute_notes (rtx, rtx_insn *, rtx_insn *, rtx_insn *, rtx, rtx, rtx);
491 static void distribute_links (struct insn_link *);
492 static void mark_used_regs_combine (rtx);
493 static void record_promoted_value (rtx_insn *, rtx);
494 static bool unmentioned_reg_p (rtx, rtx);
495 static void record_truncated_values (rtx *, void *);
496 static bool reg_truncated_to_mode (machine_mode, const_rtx);
497 static rtx gen_lowpart_or_truncate (machine_mode, rtx);
498 \f
499
500 /* It is not safe to use ordinary gen_lowpart in combine.
501 See comments in gen_lowpart_for_combine. */
502 #undef RTL_HOOKS_GEN_LOWPART
503 #define RTL_HOOKS_GEN_LOWPART gen_lowpart_for_combine
504
505 /* Our implementation of gen_lowpart never emits a new pseudo. */
506 #undef RTL_HOOKS_GEN_LOWPART_NO_EMIT
507 #define RTL_HOOKS_GEN_LOWPART_NO_EMIT gen_lowpart_for_combine
508
509 #undef RTL_HOOKS_REG_NONZERO_REG_BITS
510 #define RTL_HOOKS_REG_NONZERO_REG_BITS reg_nonzero_bits_for_combine
511
512 #undef RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES
513 #define RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES reg_num_sign_bit_copies_for_combine
514
515 #undef RTL_HOOKS_REG_TRUNCATED_TO_MODE
516 #define RTL_HOOKS_REG_TRUNCATED_TO_MODE reg_truncated_to_mode
517
518 static const struct rtl_hooks combine_rtl_hooks = RTL_HOOKS_INITIALIZER;
519
520 \f
521 /* Convenience wrapper for the canonicalize_comparison target hook.
522 Target hooks cannot use enum rtx_code. */
523 static inline void
524 target_canonicalize_comparison (enum rtx_code *code, rtx *op0, rtx *op1,
525 bool op0_preserve_value)
526 {
527 int code_int = (int)*code;
528 targetm.canonicalize_comparison (&code_int, op0, op1, op0_preserve_value);
529 *code = (enum rtx_code)code_int;
530 }
531
532 /* Try to split PATTERN found in INSN. This returns NULL_RTX if
533 PATTERN cannot be split. Otherwise, it returns an insn sequence.
534 This is a wrapper around split_insns which ensures that the
535 reg_stat vector is made larger if the splitter creates a new
536 register. */
537
538 static rtx_insn *
539 combine_split_insns (rtx pattern, rtx_insn *insn)
540 {
541 rtx_insn *ret;
542 unsigned int nregs;
543
544 ret = split_insns (pattern, insn);
545 nregs = max_reg_num ();
546 if (nregs > reg_stat.length ())
547 reg_stat.safe_grow_cleared (nregs);
548 return ret;
549 }
550
551 /* This is used by find_single_use to locate an rtx in LOC that
552 contains exactly one use of DEST, which is typically either a REG
553 or CC0. It returns a pointer to the innermost rtx expression
554 containing DEST. Appearances of DEST that are being used to
555 totally replace it are not counted. */
556
557 static rtx *
558 find_single_use_1 (rtx dest, rtx *loc)
559 {
560 rtx x = *loc;
561 enum rtx_code code = GET_CODE (x);
562 rtx *result = NULL;
563 rtx *this_result;
564 int i;
565 const char *fmt;
566
567 switch (code)
568 {
569 case CONST:
570 case LABEL_REF:
571 case SYMBOL_REF:
572 CASE_CONST_ANY:
573 case CLOBBER:
574 return 0;
575
576 case SET:
577 /* If the destination is anything other than CC0, PC, a REG or a SUBREG
578 of a REG that occupies all of the REG, the insn uses DEST if
579 it is mentioned in the destination or the source. Otherwise, we
580 need only check the source. */
581 if (GET_CODE (SET_DEST (x)) != CC0
582 && GET_CODE (SET_DEST (x)) != PC
583 && !REG_P (SET_DEST (x))
584 && ! (GET_CODE (SET_DEST (x)) == SUBREG
585 && REG_P (SUBREG_REG (SET_DEST (x)))
586 && !read_modify_subreg_p (SET_DEST (x))))
587 break;
588
589 return find_single_use_1 (dest, &SET_SRC (x));
590
591 case MEM:
592 case SUBREG:
593 return find_single_use_1 (dest, &XEXP (x, 0));
594
595 default:
596 break;
597 }
598
599 /* If it wasn't one of the common cases above, check each expression and
600 vector of this code. Look for a unique usage of DEST. */
601
602 fmt = GET_RTX_FORMAT (code);
603 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
604 {
605 if (fmt[i] == 'e')
606 {
607 if (dest == XEXP (x, i)
608 || (REG_P (dest) && REG_P (XEXP (x, i))
609 && REGNO (dest) == REGNO (XEXP (x, i))))
610 this_result = loc;
611 else
612 this_result = find_single_use_1 (dest, &XEXP (x, i));
613
614 if (result == NULL)
615 result = this_result;
616 else if (this_result)
617 /* Duplicate usage. */
618 return NULL;
619 }
620 else if (fmt[i] == 'E')
621 {
622 int j;
623
624 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
625 {
626 if (XVECEXP (x, i, j) == dest
627 || (REG_P (dest)
628 && REG_P (XVECEXP (x, i, j))
629 && REGNO (XVECEXP (x, i, j)) == REGNO (dest)))
630 this_result = loc;
631 else
632 this_result = find_single_use_1 (dest, &XVECEXP (x, i, j));
633
634 if (result == NULL)
635 result = this_result;
636 else if (this_result)
637 return NULL;
638 }
639 }
640 }
641
642 return result;
643 }
644
645
646 /* See if DEST, produced in INSN, is used only a single time in the
647 sequel. If so, return a pointer to the innermost rtx expression in which
648 it is used.
649
650 If PLOC is nonzero, *PLOC is set to the insn containing the single use.
651
652 If DEST is cc0_rtx, we look only at the next insn. In that case, we don't
653 care about REG_DEAD notes or LOG_LINKS.
654
655 Otherwise, we find the single use by finding an insn that has a
656 LOG_LINKS pointing at INSN and has a REG_DEAD note for DEST. If DEST is
657 only referenced once in that insn, we know that it must be the first
658 and last insn referencing DEST. */
659
660 static rtx *
661 find_single_use (rtx dest, rtx_insn *insn, rtx_insn **ploc)
662 {
663 basic_block bb;
664 rtx_insn *next;
665 rtx *result;
666 struct insn_link *link;
667
668 if (dest == cc0_rtx)
669 {
670 next = NEXT_INSN (insn);
671 if (next == 0
672 || (!NONJUMP_INSN_P (next) && !JUMP_P (next)))
673 return 0;
674
675 result = find_single_use_1 (dest, &PATTERN (next));
676 if (result && ploc)
677 *ploc = next;
678 return result;
679 }
680
681 if (!REG_P (dest))
682 return 0;
683
684 bb = BLOCK_FOR_INSN (insn);
685 for (next = NEXT_INSN (insn);
686 next && BLOCK_FOR_INSN (next) == bb;
687 next = NEXT_INSN (next))
688 if (NONDEBUG_INSN_P (next) && dead_or_set_p (next, dest))
689 {
690 FOR_EACH_LOG_LINK (link, next)
691 if (link->insn == insn && link->regno == REGNO (dest))
692 break;
693
694 if (link)
695 {
696 result = find_single_use_1 (dest, &PATTERN (next));
697 if (ploc)
698 *ploc = next;
699 return result;
700 }
701 }
702
703 return 0;
704 }
705 \f
706 /* Substitute NEWVAL, an rtx expression, into INTO, a place in some
707 insn. The substitution can be undone by undo_all. If INTO is already
708 set to NEWVAL, do not record this change. Because computing NEWVAL might
709 also call SUBST, we have to compute it before we put anything into
710 the undo table. */
711
712 static void
713 do_SUBST (rtx *into, rtx newval)
714 {
715 struct undo *buf;
716 rtx oldval = *into;
717
718 if (oldval == newval)
719 return;
720
721 /* We'd like to catch as many invalid transformations here as
722 possible. Unfortunately, there are way too many mode changes
723 that are perfectly valid, so we'd waste too much effort for
724 little gain doing the checks here. Focus on catching invalid
725 transformations involving integer constants. */
726 if (GET_MODE_CLASS (GET_MODE (oldval)) == MODE_INT
727 && CONST_INT_P (newval))
728 {
729 /* Sanity check that we're replacing oldval with a CONST_INT
730 that is a valid sign-extension for the original mode. */
731 gcc_assert (INTVAL (newval)
732 == trunc_int_for_mode (INTVAL (newval), GET_MODE (oldval)));
733
734 /* Replacing the operand of a SUBREG or a ZERO_EXTEND with a
735 CONST_INT is not valid, because after the replacement, the
736 original mode would be gone. Unfortunately, we can't tell
737 when do_SUBST is called to replace the operand thereof, so we
738 perform this test on oldval instead, checking whether an
739 invalid replacement took place before we got here. */
740 gcc_assert (!(GET_CODE (oldval) == SUBREG
741 && CONST_INT_P (SUBREG_REG (oldval))));
742 gcc_assert (!(GET_CODE (oldval) == ZERO_EXTEND
743 && CONST_INT_P (XEXP (oldval, 0))));
744 }
745
746 if (undobuf.frees)
747 buf = undobuf.frees, undobuf.frees = buf->next;
748 else
749 buf = XNEW (struct undo);
750
751 buf->kind = UNDO_RTX;
752 buf->where.r = into;
753 buf->old_contents.r = oldval;
754 *into = newval;
755
756 buf->next = undobuf.undos, undobuf.undos = buf;
757 }
758
759 #define SUBST(INTO, NEWVAL) do_SUBST (&(INTO), (NEWVAL))
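/* An illustrative, condensed sketch of how these helpers fit together; the
   names mirror try_combine below, whose real control flow is more involved.
   Substitutions stay tentative until the rewritten pattern is recognized:

	SUBST (SET_SRC (newpat), new_src);
	insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
	if (insn_code_number < 0)
	  undo_all ();
	else
	  undo_commit ();

   undo_all rolls back every recorded change; undo_commit accepts them and
   moves the undo entries to the free list.  */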
760
761 /* Similar to SUBST, but NEWVAL is an int expression. Note that substituting
762 a HOST_WIDE_INT value (including the value of a CONST_INT) this way is
763 not safe. */
764
765 static void
766 do_SUBST_INT (int *into, int newval)
767 {
768 struct undo *buf;
769 int oldval = *into;
770
771 if (oldval == newval)
772 return;
773
774 if (undobuf.frees)
775 buf = undobuf.frees, undobuf.frees = buf->next;
776 else
777 buf = XNEW (struct undo);
778
779 buf->kind = UNDO_INT;
780 buf->where.i = into;
781 buf->old_contents.i = oldval;
782 *into = newval;
783
784 buf->next = undobuf.undos, undobuf.undos = buf;
785 }
786
787 #define SUBST_INT(INTO, NEWVAL) do_SUBST_INT (&(INTO), (NEWVAL))
788
789 /* Similar to SUBST, but just substitute the mode. This is used when
790 changing the mode of a pseudo-register, so that any other
791 references to the entry in the regno_reg_rtx array will change as
792 well. */
793
794 static void
795 do_SUBST_MODE (rtx *into, machine_mode newval)
796 {
797 struct undo *buf;
798 machine_mode oldval = GET_MODE (*into);
799
800 if (oldval == newval)
801 return;
802
803 if (undobuf.frees)
804 buf = undobuf.frees, undobuf.frees = buf->next;
805 else
806 buf = XNEW (struct undo);
807
808 buf->kind = UNDO_MODE;
809 buf->where.r = into;
810 buf->old_contents.m = oldval;
811 adjust_reg_mode (*into, newval);
812
813 buf->next = undobuf.undos, undobuf.undos = buf;
814 }
815
816 #define SUBST_MODE(INTO, NEWVAL) do_SUBST_MODE (&(INTO), (NEWVAL))
817
818 /* Similar to SUBST, but NEWVAL is a LOG_LINKS expression. */
819
820 static void
821 do_SUBST_LINK (struct insn_link **into, struct insn_link *newval)
822 {
823 struct undo *buf;
824 struct insn_link * oldval = *into;
825
826 if (oldval == newval)
827 return;
828
829 if (undobuf.frees)
830 buf = undobuf.frees, undobuf.frees = buf->next;
831 else
832 buf = XNEW (struct undo);
833
834 buf->kind = UNDO_LINKS;
835 buf->where.l = into;
836 buf->old_contents.l = oldval;
837 *into = newval;
838
839 buf->next = undobuf.undos, undobuf.undos = buf;
840 }
841
842 #define SUBST_LINK(oldval, newval) do_SUBST_LINK (&oldval, newval)
843 \f
844 /* Subroutine of try_combine. Determine whether the replacement patterns
845 NEWPAT, NEWI2PAT and NEWOTHERPAT are cheaper according to insn_cost
846 than the original sequence I0, I1, I2, I3 and undobuf.other_insn. Note
847 that I0, I1 and/or NEWI2PAT may be NULL_RTX. Similarly, NEWOTHERPAT and
848 undobuf.other_insn may also both be NULL_RTX. Return false if the cost
849 of all the instructions can be estimated and the replacements are more
850 expensive than the original sequence. */
851
852 static bool
853 combine_validate_cost (rtx_insn *i0, rtx_insn *i1, rtx_insn *i2, rtx_insn *i3,
854 rtx newpat, rtx newi2pat, rtx newotherpat)
855 {
856 int i0_cost, i1_cost, i2_cost, i3_cost;
857 int new_i2_cost, new_i3_cost;
858 int old_cost, new_cost;
859
860 /* Lookup the original insn_costs. */
861 i2_cost = INSN_COST (i2);
862 i3_cost = INSN_COST (i3);
863
864 if (i1)
865 {
866 i1_cost = INSN_COST (i1);
867 if (i0)
868 {
869 i0_cost = INSN_COST (i0);
870 old_cost = (i0_cost > 0 && i1_cost > 0 && i2_cost > 0 && i3_cost > 0
871 ? i0_cost + i1_cost + i2_cost + i3_cost : 0);
872 }
873 else
874 {
875 old_cost = (i1_cost > 0 && i2_cost > 0 && i3_cost > 0
876 ? i1_cost + i2_cost + i3_cost : 0);
877 i0_cost = 0;
878 }
879 }
880 else
881 {
882 old_cost = (i2_cost > 0 && i3_cost > 0) ? i2_cost + i3_cost : 0;
883 i1_cost = i0_cost = 0;
884 }
885
886 /* If we have split a PARALLEL I2 to I1,I2, we have counted its cost twice;
887 correct that. */
888 if (old_cost && i1 && INSN_UID (i1) == INSN_UID (i2))
889 old_cost -= i1_cost;
890
891
892 /* Calculate the replacement insn_costs. */
893 rtx tmp = PATTERN (i3);
894 PATTERN (i3) = newpat;
895 int tmpi = INSN_CODE (i3);
896 INSN_CODE (i3) = -1;
897 new_i3_cost = insn_cost (i3, optimize_this_for_speed_p);
898 PATTERN (i3) = tmp;
899 INSN_CODE (i3) = tmpi;
900 if (newi2pat)
901 {
902 tmp = PATTERN (i2);
903 PATTERN (i2) = newi2pat;
904 tmpi = INSN_CODE (i2);
905 INSN_CODE (i2) = -1;
906 new_i2_cost = insn_cost (i2, optimize_this_for_speed_p);
907 PATTERN (i2) = tmp;
908 INSN_CODE (i2) = tmpi;
909 new_cost = (new_i2_cost > 0 && new_i3_cost > 0)
910 ? new_i2_cost + new_i3_cost : 0;
911 }
912 else
913 {
914 new_cost = new_i3_cost;
915 new_i2_cost = 0;
916 }
917
918 if (undobuf.other_insn)
919 {
920 int old_other_cost, new_other_cost;
921
922 old_other_cost = INSN_COST (undobuf.other_insn);
923 tmp = PATTERN (undobuf.other_insn);
924 PATTERN (undobuf.other_insn) = newotherpat;
925 tmpi = INSN_CODE (undobuf.other_insn);
926 INSN_CODE (undobuf.other_insn) = -1;
927 new_other_cost = insn_cost (undobuf.other_insn,
928 optimize_this_for_speed_p);
929 PATTERN (undobuf.other_insn) = tmp;
930 INSN_CODE (undobuf.other_insn) = tmpi;
931 if (old_other_cost > 0 && new_other_cost > 0)
932 {
933 old_cost += old_other_cost;
934 new_cost += new_other_cost;
935 }
936 else
937 old_cost = 0;
938 }
939
940 /* Disallow this combination if both new_cost and old_cost are greater than
941 zero, and new_cost is greater than old_cost. */
942 int reject = old_cost > 0 && new_cost > old_cost;
943
944 if (dump_file)
945 {
946 fprintf (dump_file, "%s combination of insns ",
947 reject ? "rejecting" : "allowing");
948 if (i0)
949 fprintf (dump_file, "%d, ", INSN_UID (i0));
950 if (i1 && INSN_UID (i1) != INSN_UID (i2))
951 fprintf (dump_file, "%d, ", INSN_UID (i1));
952 fprintf (dump_file, "%d and %d\n", INSN_UID (i2), INSN_UID (i3));
953
954 fprintf (dump_file, "original costs ");
955 if (i0)
956 fprintf (dump_file, "%d + ", i0_cost);
957 if (i1 && INSN_UID (i1) != INSN_UID (i2))
958 fprintf (dump_file, "%d + ", i1_cost);
959 fprintf (dump_file, "%d + %d = %d\n", i2_cost, i3_cost, old_cost);
960
961 if (newi2pat)
962 fprintf (dump_file, "replacement costs %d + %d = %d\n",
963 new_i2_cost, new_i3_cost, new_cost);
964 else
965 fprintf (dump_file, "replacement cost %d\n", new_cost);
966 }
967
968 if (reject)
969 return false;
970
971 /* Update the uid_insn_cost array with the replacement costs. */
972 INSN_COST (i2) = new_i2_cost;
973 INSN_COST (i3) = new_i3_cost;
974 if (i1)
975 {
976 INSN_COST (i1) = 0;
977 if (i0)
978 INSN_COST (i0) = 0;
979 }
980
981 return true;
982 }
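/* A small worked example of the test above (all costs made up): if i2 and i3
   each cost 4, old_cost is 8; a replacement pattern of cost 8 or less is
   accepted, while one of cost 10 is rejected.  Whenever one of the costs
   involved is unknown (zero), old_cost or new_cost ends up as 0 and the
   combination is allowed, because rejection requires both totals to be
   positive with new_cost strictly greater than old_cost.  */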
983
984
985 /* Delete any insns that copy a register to itself. */
986
987 static void
988 delete_noop_moves (void)
989 {
990 rtx_insn *insn, *next;
991 basic_block bb;
992
993 FOR_EACH_BB_FN (bb, cfun)
994 {
995 for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = next)
996 {
997 next = NEXT_INSN (insn);
998 if (INSN_P (insn) && noop_move_p (insn))
999 {
1000 if (dump_file)
1001 fprintf (dump_file, "deleting noop move %d\n", INSN_UID (insn));
1002
1003 delete_insn_and_edges (insn);
1004 }
1005 }
1006 }
1007 }
1008
1009 \f
1010 /* Return false if we do not want to (or cannot) combine DEF. */
1011 static bool
1012 can_combine_def_p (df_ref def)
1013 {
1014 /* Do not consider the def if it is a pre/post modification in a MEM. */
1015 if (DF_REF_FLAGS (def) & DF_REF_PRE_POST_MODIFY)
1016 return false;
1017
1018 unsigned int regno = DF_REF_REGNO (def);
1019
1020 /* Do not combine frame pointer adjustments. */
1021 if ((regno == FRAME_POINTER_REGNUM
1022 && (!reload_completed || frame_pointer_needed))
1023 || (!HARD_FRAME_POINTER_IS_FRAME_POINTER
1024 && regno == HARD_FRAME_POINTER_REGNUM
1025 && (!reload_completed || frame_pointer_needed))
1026 || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1027 && regno == ARG_POINTER_REGNUM && fixed_regs[regno]))
1028 return false;
1029
1030 return true;
1031 }
1032
1033 /* Return false if we do not want to (or cannot) combine USE. */
1034 static bool
1035 can_combine_use_p (df_ref use)
1036 {
1037 /* Do not consider the use of the stack pointer by a function call. */
1038 if (DF_REF_FLAGS (use) & DF_REF_CALL_STACK_USAGE)
1039 return false;
1040
1041 return true;
1042 }
1043
1044 /* Fill in log links field for all insns. */
1045
1046 static void
1047 create_log_links (void)
1048 {
1049 basic_block bb;
1050 rtx_insn **next_use;
1051 rtx_insn *insn;
1052 df_ref def, use;
1053
1054 next_use = XCNEWVEC (rtx_insn *, max_reg_num ());
1055
1056 /* Pass through each block from the end, recording the uses of each
1057 register and establishing log links when def is encountered.
1058 Note that we do not clear next_use array in order to save time,
1059 so we have to test whether the use is in the same basic block as def.
1060
1061 There are a few cases below when we do not consider the definition or
1062 usage -- these are taken from what the original flow.c did. Don't ask me why it is
1063 done this way; I don't know and if it works, I don't want to know. */
1064
1065 FOR_EACH_BB_FN (bb, cfun)
1066 {
1067 FOR_BB_INSNS_REVERSE (bb, insn)
1068 {
1069 if (!NONDEBUG_INSN_P (insn))
1070 continue;
1071
1072 /* Log links are created only once. */
1073 gcc_assert (!LOG_LINKS (insn));
1074
1075 FOR_EACH_INSN_DEF (def, insn)
1076 {
1077 unsigned int regno = DF_REF_REGNO (def);
1078 rtx_insn *use_insn;
1079
1080 if (!next_use[regno])
1081 continue;
1082
1083 if (!can_combine_def_p (def))
1084 continue;
1085
1086 use_insn = next_use[regno];
1087 next_use[regno] = NULL;
1088
1089 if (BLOCK_FOR_INSN (use_insn) != bb)
1090 continue;
1091
1092 /* flow.c claimed:
1093
1094 We don't build a LOG_LINK for hard registers contained
1095 in ASM_OPERANDs. If these registers get replaced,
1096 we might wind up changing the semantics of the insn,
1097 even if reload can make what appear to be valid
1098 assignments later. */
1099 if (regno < FIRST_PSEUDO_REGISTER
1100 && asm_noperands (PATTERN (use_insn)) >= 0)
1101 continue;
1102
1103 /* Don't add duplicate links between instructions. */
1104 struct insn_link *links;
1105 FOR_EACH_LOG_LINK (links, use_insn)
1106 if (insn == links->insn && regno == links->regno)
1107 break;
1108
1109 if (!links)
1110 LOG_LINKS (use_insn)
1111 = alloc_insn_link (insn, regno, LOG_LINKS (use_insn));
1112 }
1113
1114 FOR_EACH_INSN_USE (use, insn)
1115 if (can_combine_use_p (use))
1116 next_use[DF_REF_REGNO (use)] = insn;
1117 }
1118 }
1119
1120 free (next_use);
1121 }
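/* An illustrative trace of the scheme above (insn UIDs and the register
   number are made up): scanning a block bottom-up, a use of (reg 120) in
   insn 30 records next_use[120] = insn 30.  When the scan reaches insn 25,
   which sets (reg 120), LOG_LINKS (insn 30) gains the link {insn 25, 120}
   and next_use[120] is cleared, so a definition links only to the nearest
   later use within the same basic block.  */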
1122
1123 /* Walk the LOG_LINKS of insn B to see if we find a reference to A. Return
1124 true if we found a LOG_LINK that proves that A feeds B. This only works
1125 if there are no instructions between A and B which could have a link
1126 depending on A, since in that case we would not record a link for B.
1127 We also check the implicit dependency created by a cc0 setter/user
1128 pair. */
1129
1130 static bool
1131 insn_a_feeds_b (rtx_insn *a, rtx_insn *b)
1132 {
1133 struct insn_link *links;
1134 FOR_EACH_LOG_LINK (links, b)
1135 if (links->insn == a)
1136 return true;
1137 if (HAVE_cc0 && sets_cc0_p (a))
1138 return true;
1139 return false;
1140 }
1141 \f
1142 /* Main entry point for combiner. F is the first insn of the function.
1143 NREGS is the first unused pseudo-reg number.
1144
1145 Return nonzero if the combiner has turned an indirect jump
1146 instruction into a direct jump. */
1147 static int
1148 combine_instructions (rtx_insn *f, unsigned int nregs)
1149 {
1150 rtx_insn *insn, *next;
1151 rtx_insn *prev;
1152 struct insn_link *links, *nextlinks;
1153 rtx_insn *first;
1154 basic_block last_bb;
1155
1156 int new_direct_jump_p = 0;
1157
1158 for (first = f; first && !NONDEBUG_INSN_P (first); )
1159 first = NEXT_INSN (first);
1160 if (!first)
1161 return 0;
1162
1163 combine_attempts = 0;
1164 combine_merges = 0;
1165 combine_extras = 0;
1166 combine_successes = 0;
1167
1168 rtl_hooks = combine_rtl_hooks;
1169
1170 reg_stat.safe_grow_cleared (nregs);
1171
1172 init_recog_no_volatile ();
1173
1174 /* Allocate array for insn info. */
1175 max_uid_known = get_max_uid ();
1176 uid_log_links = XCNEWVEC (struct insn_link *, max_uid_known + 1);
1177 uid_insn_cost = XCNEWVEC (int, max_uid_known + 1);
1178 gcc_obstack_init (&insn_link_obstack);
1179
1180 nonzero_bits_mode = int_mode_for_size (HOST_BITS_PER_WIDE_INT, 0).require ();
1181
1182 /* Don't use reg_stat[].nonzero_bits when computing it. This can cause
1183 problems when, for example, we have j <<= 1 in a loop. */
1184
1185 nonzero_sign_valid = 0;
1186 label_tick = label_tick_ebb_start = 1;
1187
1188 /* Scan all SETs and see if we can deduce anything about what
1189 bits are known to be zero for some registers and how many copies
1190 of the sign bit are known to exist for those registers.
1191
1192 Also set any known values so that we can use it while searching
1193 for what bits are known to be set. */
1194
1195 setup_incoming_promotions (first);
1196 /* Allow the entry block and the first block to fall into the same EBB.
1197 Conceptually the incoming promotions are assigned to the entry block. */
1198 last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
1199
1200 create_log_links ();
1201 FOR_EACH_BB_FN (this_basic_block, cfun)
1202 {
1203 optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
1204 last_call_luid = 0;
1205 mem_last_set = -1;
1206
1207 label_tick++;
1208 if (!single_pred_p (this_basic_block)
1209 || single_pred (this_basic_block) != last_bb)
1210 label_tick_ebb_start = label_tick;
1211 last_bb = this_basic_block;
1212
1213 FOR_BB_INSNS (this_basic_block, insn)
1214 if (INSN_P (insn) && BLOCK_FOR_INSN (insn))
1215 {
1216 rtx links;
1217
1218 subst_low_luid = DF_INSN_LUID (insn);
1219 subst_insn = insn;
1220
1221 note_stores (PATTERN (insn), set_nonzero_bits_and_sign_copies,
1222 insn);
1223 record_dead_and_set_regs (insn);
1224
1225 if (AUTO_INC_DEC)
1226 for (links = REG_NOTES (insn); links; links = XEXP (links, 1))
1227 if (REG_NOTE_KIND (links) == REG_INC)
1228 set_nonzero_bits_and_sign_copies (XEXP (links, 0), NULL_RTX,
1229 insn);
1230
1231 /* Record the current insn_cost of this instruction. */
1232 if (NONJUMP_INSN_P (insn))
1233 INSN_COST (insn) = insn_cost (insn, optimize_this_for_speed_p);
1234 if (dump_file)
1235 {
1236 fprintf (dump_file, "insn_cost %d for ", INSN_COST (insn));
1237 dump_insn_slim (dump_file, insn);
1238 }
1239 }
1240 }
1241
1242 nonzero_sign_valid = 1;
1243
1244 /* Now scan all the insns in forward order. */
1245 label_tick = label_tick_ebb_start = 1;
1246 init_reg_last ();
1247 setup_incoming_promotions (first);
1248 last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
1249 int max_combine = PARAM_VALUE (PARAM_MAX_COMBINE_INSNS);
1250
1251 FOR_EACH_BB_FN (this_basic_block, cfun)
1252 {
1253 rtx_insn *last_combined_insn = NULL;
1254
1255 /* Ignore instruction combination in basic blocks that are going to
1256 be removed as unreachable anyway. See PR82386. */
1257 if (EDGE_COUNT (this_basic_block->preds) == 0)
1258 continue;
1259
1260 optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
1261 last_call_luid = 0;
1262 mem_last_set = -1;
1263
1264 label_tick++;
1265 if (!single_pred_p (this_basic_block)
1266 || single_pred (this_basic_block) != last_bb)
1267 label_tick_ebb_start = label_tick;
1268 last_bb = this_basic_block;
1269
1270 rtl_profile_for_bb (this_basic_block);
1271 for (insn = BB_HEAD (this_basic_block);
1272 insn != NEXT_INSN (BB_END (this_basic_block));
1273 insn = next ? next : NEXT_INSN (insn))
1274 {
1275 next = 0;
1276 if (!NONDEBUG_INSN_P (insn))
1277 continue;
1278
1279 while (last_combined_insn
1280 && (!NONDEBUG_INSN_P (last_combined_insn)
1281 || last_combined_insn->deleted ()))
1282 last_combined_insn = PREV_INSN (last_combined_insn);
1283 if (last_combined_insn == NULL_RTX
1284 || BLOCK_FOR_INSN (last_combined_insn) != this_basic_block
1285 || DF_INSN_LUID (last_combined_insn) <= DF_INSN_LUID (insn))
1286 last_combined_insn = insn;
1287
1288 /* See if we know about function return values before this
1289 insn based upon SUBREG flags. */
1290 check_promoted_subreg (insn, PATTERN (insn));
1291
1292 /* See if we can find hardregs and subregs of pseudos in
1293 narrower modes. This could help turn TRUNCATEs
1294 into SUBREGs. */
1295 note_uses (&PATTERN (insn), record_truncated_values, NULL);
1296
1297 /* Try this insn with each insn it links back to. */
1298
1299 FOR_EACH_LOG_LINK (links, insn)
1300 if ((next = try_combine (insn, links->insn, NULL,
1301 NULL, &new_direct_jump_p,
1302 last_combined_insn)) != 0)
1303 {
1304 statistics_counter_event (cfun, "two-insn combine", 1);
1305 goto retry;
1306 }
1307
1308 /* Try each sequence of three linked insns ending with this one. */
1309
1310 if (max_combine >= 3)
1311 FOR_EACH_LOG_LINK (links, insn)
1312 {
1313 rtx_insn *link = links->insn;
1314
1315 /* If the linked insn has been replaced by a note, then there
1316 is no point in pursuing this chain any further. */
1317 if (NOTE_P (link))
1318 continue;
1319
1320 FOR_EACH_LOG_LINK (nextlinks, link)
1321 if ((next = try_combine (insn, link, nextlinks->insn,
1322 NULL, &new_direct_jump_p,
1323 last_combined_insn)) != 0)
1324 {
1325 statistics_counter_event (cfun, "three-insn combine", 1);
1326 goto retry;
1327 }
1328 }
1329
1330 /* Try to combine a jump insn that uses CC0
1331 with a preceding insn that sets CC0, and maybe with its
1332 logical predecessor as well.
1333 This is how we make decrement-and-branch insns.
1334 We need this special code because data flow connections
1335 via CC0 do not get entered in LOG_LINKS. */
1336
1337 if (HAVE_cc0
1338 && JUMP_P (insn)
1339 && (prev = prev_nonnote_insn (insn)) != 0
1340 && NONJUMP_INSN_P (prev)
1341 && sets_cc0_p (PATTERN (prev)))
1342 {
1343 if ((next = try_combine (insn, prev, NULL, NULL,
1344 &new_direct_jump_p,
1345 last_combined_insn)) != 0)
1346 goto retry;
1347
1348 FOR_EACH_LOG_LINK (nextlinks, prev)
1349 if ((next = try_combine (insn, prev, nextlinks->insn,
1350 NULL, &new_direct_jump_p,
1351 last_combined_insn)) != 0)
1352 goto retry;
1353 }
1354
1355 /* Do the same for an insn that explicitly references CC0. */
1356 if (HAVE_cc0 && NONJUMP_INSN_P (insn)
1357 && (prev = prev_nonnote_insn (insn)) != 0
1358 && NONJUMP_INSN_P (prev)
1359 && sets_cc0_p (PATTERN (prev))
1360 && GET_CODE (PATTERN (insn)) == SET
1361 && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (insn))))
1362 {
1363 if ((next = try_combine (insn, prev, NULL, NULL,
1364 &new_direct_jump_p,
1365 last_combined_insn)) != 0)
1366 goto retry;
1367
1368 FOR_EACH_LOG_LINK (nextlinks, prev)
1369 if ((next = try_combine (insn, prev, nextlinks->insn,
1370 NULL, &new_direct_jump_p,
1371 last_combined_insn)) != 0)
1372 goto retry;
1373 }
1374
1375 /* Finally, see if any of the insns that this insn links to
1376 explicitly references CC0. If so, try this insn, that insn,
1377 and its predecessor if it sets CC0. */
1378 if (HAVE_cc0)
1379 {
1380 FOR_EACH_LOG_LINK (links, insn)
1381 if (NONJUMP_INSN_P (links->insn)
1382 && GET_CODE (PATTERN (links->insn)) == SET
1383 && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (links->insn)))
1384 && (prev = prev_nonnote_insn (links->insn)) != 0
1385 && NONJUMP_INSN_P (prev)
1386 && sets_cc0_p (PATTERN (prev))
1387 && (next = try_combine (insn, links->insn,
1388 prev, NULL, &new_direct_jump_p,
1389 last_combined_insn)) != 0)
1390 goto retry;
1391 }
1392
1393 /* Try combining an insn with two different insns whose results it
1394 uses. */
1395 if (max_combine >= 3)
1396 FOR_EACH_LOG_LINK (links, insn)
1397 for (nextlinks = links->next; nextlinks;
1398 nextlinks = nextlinks->next)
1399 if ((next = try_combine (insn, links->insn,
1400 nextlinks->insn, NULL,
1401 &new_direct_jump_p,
1402 last_combined_insn)) != 0)
1403
1404 {
1405 statistics_counter_event (cfun, "three-insn combine", 1);
1406 goto retry;
1407 }
1408
1409 /* Try four-instruction combinations. */
1410 if (max_combine >= 4)
1411 FOR_EACH_LOG_LINK (links, insn)
1412 {
1413 struct insn_link *next1;
1414 rtx_insn *link = links->insn;
1415
1416 /* If the linked insn has been replaced by a note, then there
1417 is no point in pursuing this chain any further. */
1418 if (NOTE_P (link))
1419 continue;
1420
1421 FOR_EACH_LOG_LINK (next1, link)
1422 {
1423 rtx_insn *link1 = next1->insn;
1424 if (NOTE_P (link1))
1425 continue;
1426 /* I0 -> I1 -> I2 -> I3. */
1427 FOR_EACH_LOG_LINK (nextlinks, link1)
1428 if ((next = try_combine (insn, link, link1,
1429 nextlinks->insn,
1430 &new_direct_jump_p,
1431 last_combined_insn)) != 0)
1432 {
1433 statistics_counter_event (cfun, "four-insn combine", 1);
1434 goto retry;
1435 }
1436 /* I0, I1 -> I2, I2 -> I3. */
1437 for (nextlinks = next1->next; nextlinks;
1438 nextlinks = nextlinks->next)
1439 if ((next = try_combine (insn, link, link1,
1440 nextlinks->insn,
1441 &new_direct_jump_p,
1442 last_combined_insn)) != 0)
1443 {
1444 statistics_counter_event (cfun, "four-insn combine", 1);
1445 goto retry;
1446 }
1447 }
1448
1449 for (next1 = links->next; next1; next1 = next1->next)
1450 {
1451 rtx_insn *link1 = next1->insn;
1452 if (NOTE_P (link1))
1453 continue;
1454 /* I0 -> I2; I1, I2 -> I3. */
1455 FOR_EACH_LOG_LINK (nextlinks, link)
1456 if ((next = try_combine (insn, link, link1,
1457 nextlinks->insn,
1458 &new_direct_jump_p,
1459 last_combined_insn)) != 0)
1460 {
1461 statistics_counter_event (cfun, "four-insn combine", 1);
1462 goto retry;
1463 }
1464 /* I0 -> I1; I1, I2 -> I3. */
1465 FOR_EACH_LOG_LINK (nextlinks, link1)
1466 if ((next = try_combine (insn, link, link1,
1467 nextlinks->insn,
1468 &new_direct_jump_p,
1469 last_combined_insn)) != 0)
1470 {
1471 statistics_counter_event (cfun, "four-insn combine", 1);
1472 goto retry;
1473 }
1474 }
1475 }
1476
1477 /* Try this insn with each REG_EQUAL note it links back to. */
1478 FOR_EACH_LOG_LINK (links, insn)
1479 {
1480 rtx set, note;
1481 rtx_insn *temp = links->insn;
1482 if ((set = single_set (temp)) != 0
1483 && (note = find_reg_equal_equiv_note (temp)) != 0
1484 && (note = XEXP (note, 0), GET_CODE (note)) != EXPR_LIST
1485 /* Avoid using a register that may have already been marked
1486 dead by an earlier instruction. */
1487 && ! unmentioned_reg_p (note, SET_SRC (set))
1488 && (GET_MODE (note) == VOIDmode
1489 ? SCALAR_INT_MODE_P (GET_MODE (SET_DEST (set)))
1490 : (GET_MODE (SET_DEST (set)) == GET_MODE (note)
1491 && (GET_CODE (SET_DEST (set)) != ZERO_EXTRACT
1492 || (GET_MODE (XEXP (SET_DEST (set), 0))
1493 == GET_MODE (note))))))
1494 {
1495 /* Temporarily replace the set's source with the
1496 contents of the REG_EQUAL note. The insn will
1497 be deleted or recognized by try_combine. */
1498 rtx orig_src = SET_SRC (set);
1499 rtx orig_dest = SET_DEST (set);
1500 if (GET_CODE (SET_DEST (set)) == ZERO_EXTRACT)
1501 SET_DEST (set) = XEXP (SET_DEST (set), 0);
1502 SET_SRC (set) = note;
1503 i2mod = temp;
1504 i2mod_old_rhs = copy_rtx (orig_src);
1505 i2mod_new_rhs = copy_rtx (note);
1506 next = try_combine (insn, i2mod, NULL, NULL,
1507 &new_direct_jump_p,
1508 last_combined_insn);
1509 i2mod = NULL;
1510 if (next)
1511 {
1512 statistics_counter_event (cfun, "insn-with-note combine", 1);
1513 goto retry;
1514 }
1515 SET_SRC (set) = orig_src;
1516 SET_DEST (set) = orig_dest;
1517 }
1518 }
1519
1520 if (!NOTE_P (insn))
1521 record_dead_and_set_regs (insn);
1522
1523 retry:
1524 ;
1525 }
1526 }
1527
1528 default_rtl_profile ();
1529 clear_bb_flags ();
1530 new_direct_jump_p |= purge_all_dead_edges ();
1531 delete_noop_moves ();
1532
1533 /* Clean up. */
1534 obstack_free (&insn_link_obstack, NULL);
1535 free (uid_log_links);
1536 free (uid_insn_cost);
1537 reg_stat.release ();
1538
1539 {
1540 struct undo *undo, *next;
1541 for (undo = undobuf.frees; undo; undo = next)
1542 {
1543 next = undo->next;
1544 free (undo);
1545 }
1546 undobuf.frees = 0;
1547 }
1548
1549 total_attempts += combine_attempts;
1550 total_merges += combine_merges;
1551 total_extras += combine_extras;
1552 total_successes += combine_successes;
1553
1554 nonzero_sign_valid = 0;
1555 rtl_hooks = general_rtl_hooks;
1556
1557 /* Make recognizer allow volatile MEMs again. */
1558 init_recog ();
1559
1560 return new_direct_jump_p;
1561 }
1562
1563 /* Wipe the last_xxx fields of reg_stat in preparation for another pass. */
1564
1565 static void
1566 init_reg_last (void)
1567 {
1568 unsigned int i;
1569 reg_stat_type *p;
1570
1571 FOR_EACH_VEC_ELT (reg_stat, i, p)
1572 memset (p, 0, offsetof (reg_stat_type, sign_bit_copies));
1573 }
1574 \f
1575 /* Set up any promoted values for incoming argument registers. */
1576
1577 static void
1578 setup_incoming_promotions (rtx_insn *first)
1579 {
1580 tree arg;
1581 bool strictly_local = false;
1582
1583 for (arg = DECL_ARGUMENTS (current_function_decl); arg;
1584 arg = DECL_CHAIN (arg))
1585 {
1586 rtx x, reg = DECL_INCOMING_RTL (arg);
1587 int uns1, uns3;
1588 machine_mode mode1, mode2, mode3, mode4;
1589
1590 /* Only continue if the incoming argument is in a register. */
1591 if (!REG_P (reg))
1592 continue;
1593
1594 /* Determine, if possible, whether all call sites of the current
1595 function lie within the current compilation unit. (This does
1596 take into account the exporting of a function via taking its
1597 address, and so forth.) */
1598 strictly_local = cgraph_node::local_info (current_function_decl)->local;
1599
1600 /* The mode and signedness of the argument before any promotions happen
1601 (equal to the mode of the pseudo holding it at that stage). */
1602 mode1 = TYPE_MODE (TREE_TYPE (arg));
1603 uns1 = TYPE_UNSIGNED (TREE_TYPE (arg));
1604
1605 /* The mode and signedness of the argument after any source language and
1606 TARGET_PROMOTE_PROTOTYPES-driven promotions. */
1607 mode2 = TYPE_MODE (DECL_ARG_TYPE (arg));
1608 uns3 = TYPE_UNSIGNED (DECL_ARG_TYPE (arg));
1609
1610 /* The mode and signedness of the argument as it is actually passed,
1611 see assign_parm_setup_reg in function.c. */
1612 mode3 = promote_function_mode (TREE_TYPE (arg), mode1, &uns3,
1613 TREE_TYPE (cfun->decl), 0);
1614
1615 /* The mode of the register in which the argument is being passed. */
1616 mode4 = GET_MODE (reg);
1617
1618 /* Eliminate sign extensions in the callee when:
1619 (a) A mode promotion has occurred; */
1620 if (mode1 == mode3)
1621 continue;
1622 /* (b) The mode of the register is the same as the mode of
1623 the argument as it is passed; */
1624 if (mode3 != mode4)
1625 continue;
1626 /* (c) There's no language level extension; */
1627 if (mode1 == mode2)
1628 ;
1629 /* (c.1) All callers are from the current compilation unit. If that's
1630 the case we don't have to rely on an ABI, we only have to know
1631 what we're generating right now, and we know that we will do the
1632 mode1 to mode2 promotion with the given sign. */
1633 else if (!strictly_local)
1634 continue;
1635 /* (c.2) The combination of the two promotions is useful. This is
1636 true when the signs match, or if the first promotion is unsigned.
1637 In the latter case, (sign_extend (zero_extend x)) is the same as
1638 (zero_extend (zero_extend x)), so make sure to force UNS3 true. */
1639 else if (uns1)
1640 uns3 = true;
1641 else if (uns3)
1642 continue;
1643
1644 /* Record that the value was promoted from mode1 to mode3,
1645 so that any sign extension at the head of the current
1646 function may be eliminated. */
1647 x = gen_rtx_CLOBBER (mode1, const0_rtx);
1648 x = gen_rtx_fmt_e ((uns3 ? ZERO_EXTEND : SIGN_EXTEND), mode3, x);
1649 record_value_for_reg (reg, first, x);
1650 }
1651 }
1652
1653 /* If MODE has a precision lower than PREC and SRC is a non-negative constant
1654 that would appear negative in MODE, sign-extend SRC for use in nonzero_bits
1655 because some machines (maybe most) will actually do the sign-extension and
1656 this is the conservative approach.
1657
1658 ??? For 2.5, try to tighten up the MD files in this regard instead of this
1659 kludge. */
1660
1661 static rtx
1662 sign_extend_short_imm (rtx src, machine_mode mode, unsigned int prec)
1663 {
1664 scalar_int_mode int_mode;
1665 if (CONST_INT_P (src)
1666 && is_a <scalar_int_mode> (mode, &int_mode)
1667 && GET_MODE_PRECISION (int_mode) < prec
1668 && INTVAL (src) > 0
1669 && val_signbit_known_set_p (int_mode, INTVAL (src)))
1670 src = GEN_INT (INTVAL (src) | ~GET_MODE_MASK (int_mode));
1671
1672 return src;
1673 }
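/* A minimal standalone sketch of the adjustment above, in plain C and not
   using any GCC internals; it assumes a 64-bit HOST_WIDE_INT and a QImode
   constant 0x80 (positive, but with the QImode sign bit set):

	#include <stdio.h>
	#include <stdint.h>

	int
	main (void)
	{
	  int64_t src = 0x80;            // INTVAL (src)
	  int64_t mode_mask = 0xff;      // GET_MODE_MASK (QImode)
	  int64_t extended = src | ~mode_mask;
	  printf ("%llx\n", (unsigned long long) extended);
	  return 0;
	}

   Compiled on its own this prints ffffffffffffff80, i.e. the constant as a
   sign-extending machine would actually load it.  */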
1674
1675 /* Update RSP for pseudo-register X from INSN's REG_EQUAL note (if one exists)
1676 and SET. */
1677
1678 static void
1679 update_rsp_from_reg_equal (reg_stat_type *rsp, rtx_insn *insn, const_rtx set,
1680 rtx x)
1681 {
1682 rtx reg_equal_note = insn ? find_reg_equal_equiv_note (insn) : NULL_RTX;
1683 unsigned HOST_WIDE_INT bits = 0;
1684 rtx reg_equal = NULL, src = SET_SRC (set);
1685 unsigned int num = 0;
1686
1687 if (reg_equal_note)
1688 reg_equal = XEXP (reg_equal_note, 0);
1689
1690 if (SHORT_IMMEDIATES_SIGN_EXTEND)
1691 {
1692 src = sign_extend_short_imm (src, GET_MODE (x), BITS_PER_WORD);
1693 if (reg_equal)
1694 reg_equal = sign_extend_short_imm (reg_equal, GET_MODE (x), BITS_PER_WORD);
1695 }
1696
1697 /* Don't call nonzero_bits if it cannot change anything. */
1698 if (rsp->nonzero_bits != HOST_WIDE_INT_M1U)
1699 {
1700 bits = nonzero_bits (src, nonzero_bits_mode);
1701 if (reg_equal && bits)
1702 bits &= nonzero_bits (reg_equal, nonzero_bits_mode);
1703 rsp->nonzero_bits |= bits;
1704 }
1705
1706 /* Don't call num_sign_bit_copies if it cannot change anything. */
1707 if (rsp->sign_bit_copies != 1)
1708 {
1709 num = num_sign_bit_copies (SET_SRC (set), GET_MODE (x));
1710 if (reg_equal && num != GET_MODE_PRECISION (GET_MODE (x)))
1711 {
1712 unsigned int numeq = num_sign_bit_copies (reg_equal, GET_MODE (x));
1713 if (num == 0 || numeq > num)
1714 num = numeq;
1715 }
1716 if (rsp->sign_bit_copies == 0 || num < rsp->sign_bit_copies)
1717 rsp->sign_bit_copies = num;
1718 }
1719 }
1720
1721 /* Called via note_stores. If X is a pseudo that is narrower than
1722 HOST_BITS_PER_WIDE_INT and is being set, record what bits are known zero.
1723
1724 If we are setting only a portion of X and we can't figure out what
1725 portion, assume all bits will be used since we don't know what will
1726 be happening.
1727
1728 Similarly, set how many bits of X are known to be copies of the sign bit
1729 at all locations in the function. This is the smallest number implied
1730 by any set of X. */
1731
1732 static void
1733 set_nonzero_bits_and_sign_copies (rtx x, const_rtx set, void *data)
1734 {
1735 rtx_insn *insn = (rtx_insn *) data;
1736 scalar_int_mode mode;
1737
1738 if (REG_P (x)
1739 && REGNO (x) >= FIRST_PSEUDO_REGISTER
1740 /* If this register is undefined at the start of the function, we can't
1741 say what its contents were. */
1742 && ! REGNO_REG_SET_P
1743 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), REGNO (x))
1744 && is_a <scalar_int_mode> (GET_MODE (x), &mode)
1745 && HWI_COMPUTABLE_MODE_P (mode))
1746 {
1747 reg_stat_type *rsp = &reg_stat[REGNO (x)];
1748
1749 if (set == 0 || GET_CODE (set) == CLOBBER)
1750 {
1751 rsp->nonzero_bits = GET_MODE_MASK (mode);
1752 rsp->sign_bit_copies = 1;
1753 return;
1754 }
1755
1756 /* If this register is being initialized using itself, and the
1757 register is uninitialized in this basic block, and there are
1758 no LOG_LINKS which set the register, then part of the
1759 register is uninitialized. In that case we can't assume
1760 anything about the number of nonzero bits.
1761
1762 ??? We could do better if we checked this in
1763 reg_{nonzero_bits,num_sign_bit_copies}_for_combine. Then we
1764 could avoid making assumptions about the insn which initially
1765 sets the register, while still using the information in other
1766 insns. We would have to be careful to check every insn
1767 involved in the combination. */
1768
1769 if (insn
1770 && reg_referenced_p (x, PATTERN (insn))
1771 && !REGNO_REG_SET_P (DF_LR_IN (BLOCK_FOR_INSN (insn)),
1772 REGNO (x)))
1773 {
1774 struct insn_link *link;
1775
1776 FOR_EACH_LOG_LINK (link, insn)
1777 if (dead_or_set_p (link->insn, x))
1778 break;
1779 if (!link)
1780 {
1781 rsp->nonzero_bits = GET_MODE_MASK (mode);
1782 rsp->sign_bit_copies = 1;
1783 return;
1784 }
1785 }
1786
1787 /* If this is a complex assignment, see if we can convert it into a
1788 simple assignment. */
1789 set = expand_field_assignment (set);
1790
1791 /* If this is a simple assignment, or we have a paradoxical SUBREG,
1792 set what we know about X. */
1793
1794 if (SET_DEST (set) == x
1795 || (paradoxical_subreg_p (SET_DEST (set))
1796 && SUBREG_REG (SET_DEST (set)) == x))
1797 update_rsp_from_reg_equal (rsp, insn, set, x);
1798 else
1799 {
1800 rsp->nonzero_bits = GET_MODE_MASK (mode);
1801 rsp->sign_bit_copies = 1;
1802 }
1803 }
1804 }
1805 \f
1806 /* See if INSN can be combined into I3. PRED, PRED2, SUCC and SUCC2 are
1807 optionally insns that were previously combined into I3 or that will be
1808 combined into the merger of INSN and I3. The order is PRED, PRED2,
1809 INSN, SUCC, SUCC2, I3.
1810
1811 Return 0 if the combination is not allowed for any reason.
1812
1813 If the combination is allowed, *PDEST will be set to the single
1814 destination of INSN and *PSRC to the single source, and this function
1815 will return 1. */
1816
1817 static int
1818 can_combine_p (rtx_insn *insn, rtx_insn *i3, rtx_insn *pred ATTRIBUTE_UNUSED,
1819 rtx_insn *pred2 ATTRIBUTE_UNUSED, rtx_insn *succ, rtx_insn *succ2,
1820 rtx *pdest, rtx *psrc)
1821 {
1822 int i;
1823 const_rtx set = 0;
1824 rtx src, dest;
1825 rtx_insn *p;
1826 rtx link;
1827 bool all_adjacent = true;
1828 int (*is_volatile_p) (const_rtx);
1829
1830 if (succ)
1831 {
1832 if (succ2)
1833 {
1834 if (next_active_insn (succ2) != i3)
1835 all_adjacent = false;
1836 if (next_active_insn (succ) != succ2)
1837 all_adjacent = false;
1838 }
1839 else if (next_active_insn (succ) != i3)
1840 all_adjacent = false;
1841 if (next_active_insn (insn) != succ)
1842 all_adjacent = false;
1843 }
1844 else if (next_active_insn (insn) != i3)
1845 all_adjacent = false;
1846
1847 /* Can combine only if previous insn is a SET of a REG, a SUBREG or CC0,
1848 or a PARALLEL consisting of such a SET and CLOBBERs.
1849
1850 If INSN has CLOBBER parallel parts, ignore them for our processing.
1851 By definition, these happen during the execution of the insn. When it
1852 is merged with another insn, all bets are off. If they are, in fact,
1853 needed and aren't also supplied in I3, they may be added by
1854 recog_for_combine. Otherwise, it won't match.
1855
1856 We can also ignore a SET whose SET_DEST is mentioned in a REG_UNUSED
1857 note.
1858
1859 Get the source and destination of INSN. If more than one, can't
1860 combine. */
1861
1862 if (GET_CODE (PATTERN (insn)) == SET)
1863 set = PATTERN (insn);
1864 else if (GET_CODE (PATTERN (insn)) == PARALLEL
1865 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET)
1866 {
1867 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
1868 {
1869 rtx elt = XVECEXP (PATTERN (insn), 0, i);
1870
1871 switch (GET_CODE (elt))
1872 {
1873 /* This is important to combine floating point insns
1874 for the SH4 port. */
1875 case USE:
1876 /* Combining an isolated USE doesn't make sense.
1877 We depend here on combinable_i3pat to reject them. */
1878 /* The code below this loop only verifies that the inputs of
1879 the SET in INSN do not change. We call reg_set_between_p
1880 to verify that the REG in the USE does not change between
1881 I3 and INSN.
1882 If the USE in INSN was for a pseudo register, the matching
1883 insn pattern will likely match any register; combining this
1884 with any other USE would only be safe if we knew that the
1885 used registers have identical values, or if there was
1886 something to tell them apart, e.g. different modes. For
1887 now, we forgo such complicated tests and simply disallow
1888 combining of USES of pseudo registers with any other USE. */
1889 if (REG_P (XEXP (elt, 0))
1890 && GET_CODE (PATTERN (i3)) == PARALLEL)
1891 {
1892 rtx i3pat = PATTERN (i3);
1893 int i = XVECLEN (i3pat, 0) - 1;
1894 unsigned int regno = REGNO (XEXP (elt, 0));
1895
1896 do
1897 {
1898 rtx i3elt = XVECEXP (i3pat, 0, i);
1899
1900 if (GET_CODE (i3elt) == USE
1901 && REG_P (XEXP (i3elt, 0))
1902 && (REGNO (XEXP (i3elt, 0)) == regno
1903 ? reg_set_between_p (XEXP (elt, 0),
1904 PREV_INSN (insn), i3)
1905 : regno >= FIRST_PSEUDO_REGISTER))
1906 return 0;
1907 }
1908 while (--i >= 0);
1909 }
1910 break;
1911
1912 /* We can ignore CLOBBERs. */
1913 case CLOBBER:
1914 break;
1915
1916 case SET:
1917 /* Ignore SETs whose result isn't used but not those that
1918 have side-effects. */
1919 if (find_reg_note (insn, REG_UNUSED, SET_DEST (elt))
1920 && insn_nothrow_p (insn)
1921 && !side_effects_p (elt))
1922 break;
1923
1924 /* If we have already found a SET, this is a second one and
1925 so we cannot combine with this insn. */
1926 if (set)
1927 return 0;
1928
1929 set = elt;
1930 break;
1931
1932 default:
1933 /* Anything else means we can't combine. */
1934 return 0;
1935 }
1936 }
1937
1938 if (set == 0
1939 /* If SET_SRC is an ASM_OPERANDS we can't throw away these CLOBBERs,
1940 so don't do anything with it. */
1941 || GET_CODE (SET_SRC (set)) == ASM_OPERANDS)
1942 return 0;
1943 }
1944 else
1945 return 0;
1946
1947 if (set == 0)
1948 return 0;
1949
1950 /* The simplification in expand_field_assignment may call back to
1951 get_last_value, so set safe guard here. */
1952 subst_low_luid = DF_INSN_LUID (insn);
1953
1954 set = expand_field_assignment (set);
1955 src = SET_SRC (set), dest = SET_DEST (set);
1956
1957 /* Do not eliminate a user-specified register if it is in an
1958 asm input, because doing so may break the register asm usage
1959 defined in the GCC manual.
1960 Be aware that this may cover more cases than we expect, but this
1961 should be harmless. */
1962 if (REG_P (dest) && REG_USERVAR_P (dest) && HARD_REGISTER_P (dest)
1963 && extract_asm_operands (PATTERN (i3)))
1964 return 0;
1965
1966 /* Don't eliminate a store in the stack pointer. */
1967 if (dest == stack_pointer_rtx
1968 /* Don't combine with an insn that sets a register to itself if it has
1969 a REG_EQUAL note. This may be part of a LIBCALL sequence. */
1970 || (rtx_equal_p (src, dest) && find_reg_note (insn, REG_EQUAL, NULL_RTX))
1971 /* Can't merge an ASM_OPERANDS. */
1972 || GET_CODE (src) == ASM_OPERANDS
1973 /* Can't merge a function call. */
1974 || GET_CODE (src) == CALL
1975 /* Don't eliminate a function call argument. */
1976 || (CALL_P (i3)
1977 && (find_reg_fusage (i3, USE, dest)
1978 || (REG_P (dest)
1979 && REGNO (dest) < FIRST_PSEUDO_REGISTER
1980 && global_regs[REGNO (dest)])))
1981 /* Don't substitute into an incremented register. */
1982 || FIND_REG_INC_NOTE (i3, dest)
1983 || (succ && FIND_REG_INC_NOTE (succ, dest))
1984 || (succ2 && FIND_REG_INC_NOTE (succ2, dest))
1985 /* Don't substitute into a non-local goto, this confuses CFG. */
1986 || (JUMP_P (i3) && find_reg_note (i3, REG_NON_LOCAL_GOTO, NULL_RTX))
1987 /* Make sure that DEST is not used after INSN but before SUCC, or
1988 after SUCC and before SUCC2, or after SUCC2 but before I3. */
1989 || (!all_adjacent
1990 && ((succ2
1991 && (reg_used_between_p (dest, succ2, i3)
1992 || reg_used_between_p (dest, succ, succ2)))
1993 || (!succ2 && succ && reg_used_between_p (dest, succ, i3))
1994 || (succ
1995 /* SUCC and SUCC2 can be split halves from a PARALLEL; in
1996 that case SUCC is not in the insn stream, so use SUCC2
1997 instead for this test. */
1998 && reg_used_between_p (dest, insn,
1999 succ2
2000 && INSN_UID (succ) == INSN_UID (succ2)
2001 ? succ2 : succ))))
2002 /* Make sure that the value that is to be substituted for the register
2003 does not use any registers whose values alter in between. However,
2004 if the insns are adjacent, a use can't cross a set even though we
2005 think it might (this can happen for a sequence of insns each setting
2006 the same destination; last_set of that register might point to
2007 a NOTE). If INSN has a REG_EQUIV note, the register is always
2008 equivalent to the memory so the substitution is valid even if there
2009 are intervening stores. Also, don't move a volatile asm or
2010 UNSPEC_VOLATILE across any other insns. */
2011 || (! all_adjacent
2012 && (((!MEM_P (src)
2013 || ! find_reg_note (insn, REG_EQUIV, src))
2014 && use_crosses_set_p (src, DF_INSN_LUID (insn)))
2015 || (GET_CODE (src) == ASM_OPERANDS && MEM_VOLATILE_P (src))
2016 || GET_CODE (src) == UNSPEC_VOLATILE))
2017 /* Don't combine across a CALL_INSN, because that would possibly
2018 change whether the life span of some REGs crosses calls or not,
2019 and it is a pain to update that information.
2020 Exception: if source is a constant, moving it later can't hurt.
2021 Accept that as a special case. */
2022 || (DF_INSN_LUID (insn) < last_call_luid && ! CONSTANT_P (src)))
2023 return 0;
2024
2025 /* DEST must either be a REG or CC0. */
2026 if (REG_P (dest))
2027 {
2028 /* If register alignment is being enforced for multi-word items in all
2029 cases except for parameters, it is possible to have a register copy
2030 insn referencing a hard register that is not allowed to contain the
2031 mode being copied and which would not be valid as an operand of most
2032 insns. Eliminate this problem by not combining with such an insn.
2033
2034 Also, on some machines we don't want to extend the life of a hard
2035 register. */
2036
2037 if (REG_P (src)
2038 && ((REGNO (dest) < FIRST_PSEUDO_REGISTER
2039 && !targetm.hard_regno_mode_ok (REGNO (dest), GET_MODE (dest)))
2040 /* Don't extend the life of a hard register unless it is
2041 a user variable (if we have few registers) or it can't
2042 fit into the desired register (meaning something special
2043 is going on).
2044 Also avoid substituting a return register into I3, because
2045 reload can't handle a conflict with constraints of other
2046 inputs. */
2047 || (REGNO (src) < FIRST_PSEUDO_REGISTER
2048 && !targetm.hard_regno_mode_ok (REGNO (src),
2049 GET_MODE (src)))))
2050 return 0;
2051 }
2052 else if (GET_CODE (dest) != CC0)
2053 return 0;
2054
2055
2056 if (GET_CODE (PATTERN (i3)) == PARALLEL)
2057 for (i = XVECLEN (PATTERN (i3), 0) - 1; i >= 0; i--)
2058 if (GET_CODE (XVECEXP (PATTERN (i3), 0, i)) == CLOBBER)
2059 {
2060 rtx reg = XEXP (XVECEXP (PATTERN (i3), 0, i), 0);
2061
2062 /* If the clobber represents an earlyclobber operand, we must not
2063 substitute an expression containing the clobbered register.
2064 As we do not analyze the constraint strings here, we have to
2065 make the conservative assumption. However, if the register is
2066 a fixed hard reg, the clobber cannot represent any operand;
2067 we leave it up to the machine description to either accept or
2068 reject use-and-clobber patterns. */
2069 if (!REG_P (reg)
2070 || REGNO (reg) >= FIRST_PSEUDO_REGISTER
2071 || !fixed_regs[REGNO (reg)])
2072 if (reg_overlap_mentioned_p (reg, src))
2073 return 0;
2074 }
2075
2076 /* If INSN contains anything volatile, or is an `asm' (whether volatile
2077 or not), reject, unless nothing volatile comes between it and I3. */
2078
2079 if (GET_CODE (src) == ASM_OPERANDS || volatile_refs_p (src))
2080 {
2081 /* Make sure neither succ nor succ2 contains a volatile reference. */
2082 if (succ2 != 0 && volatile_refs_p (PATTERN (succ2)))
2083 return 0;
2084 if (succ != 0 && volatile_refs_p (PATTERN (succ)))
2085 return 0;
2086 /* We'll check insns between INSN and I3 below. */
2087 }
2088
2089 /* If INSN is an asm, and DEST is a hard register, reject, since it has
2090 to be an explicit register variable, and was chosen for a reason. */
2091
2092 if (GET_CODE (src) == ASM_OPERANDS
2093 && REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER)
2094 return 0;
2095
2096 /* If INSN contains volatile references (specifically volatile MEMs),
2097 we cannot combine across any other volatile references.
2098 Even if INSN doesn't contain volatile references, any intervening
2099 volatile insn might affect machine state. */
2100
2101 is_volatile_p = volatile_refs_p (PATTERN (insn))
2102 ? volatile_refs_p
2103 : volatile_insn_p;
2104
2105 for (p = NEXT_INSN (insn); p != i3; p = NEXT_INSN (p))
2106 if (INSN_P (p) && p != succ && p != succ2 && is_volatile_p (PATTERN (p)))
2107 return 0;
2108
2109 /* If INSN contains an autoincrement or autodecrement, make sure that
2110 register is not used between there and I3, and not already used in
2111 I3 either. Neither must it be used in PRED or SUCC, if they exist.
2112 Also insist that I3 not be a jump; if it were one
2113 and the incremented register were spilled, we would lose. */
2114
2115 if (AUTO_INC_DEC)
2116 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2117 if (REG_NOTE_KIND (link) == REG_INC
2118 && (JUMP_P (i3)
2119 || reg_used_between_p (XEXP (link, 0), insn, i3)
2120 || (pred != NULL_RTX
2121 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred)))
2122 || (pred2 != NULL_RTX
2123 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred2)))
2124 || (succ != NULL_RTX
2125 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ)))
2126 || (succ2 != NULL_RTX
2127 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ2)))
2128 || reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i3))))
2129 return 0;
2130
2131 /* Don't combine an insn that follows a CC0-setting insn.
2132 An insn that uses CC0 must not be separated from the one that sets it.
2133 We do, however, allow I2 to follow a CC0-setting insn if that insn
2134 is passed as I1; in that case it will be deleted also.
2135 We also allow combining in this case if all the insns are adjacent
2136 because that would leave the two CC0 insns adjacent as well.
2137 It would be more logical to test whether CC0 occurs inside I1 or I2,
2138 but that would be much slower, and this ought to be equivalent. */
2139
2140 if (HAVE_cc0)
2141 {
2142 p = prev_nonnote_insn (insn);
2143 if (p && p != pred && NONJUMP_INSN_P (p) && sets_cc0_p (PATTERN (p))
2144 && ! all_adjacent)
2145 return 0;
2146 }
2147
2148 /* If we get here, we have passed all the tests and the combination is
2149 to be allowed. */
2150
2151 *pdest = dest;
2152 *psrc = src;
2153
2154 return 1;
2155 }
2156 \f
2157 /* LOC is the location within I3 that contains its pattern or the component
2158 of a PARALLEL of the pattern. We validate that it is valid for combining.
2159
2160 One problem is that if I3 modifies its output, as opposed to replacing it
2161 entirely, we can't allow the output to contain I2DEST, I1DEST or I0DEST as
2162 doing so would produce an insn that is not equivalent to the original insns.
2163
2164 Consider:
2165
2166 (set (reg:DI 101) (reg:DI 100))
2167 (set (subreg:SI (reg:DI 101) 0) <foo>)
2168
2169 This is NOT equivalent to:
2170
2171 (parallel [(set (subreg:SI (reg:DI 100) 0) <foo>)
2172 (set (reg:DI 101) (reg:DI 100))])
2173
2174 Not only does this modify 100 (in which case it might still be valid
2175 if 100 were dead in I2), it sets 101 to the ORIGINAL value of 100.
2176
2177 We can also run into a problem if I2 sets a register that I1
2178 uses and I1 gets directly substituted into I3 (not via I2). In that
2179 case, we would be getting the wrong value of I2DEST into I3, so we
2180 must reject the combination. This case occurs when I2 and I1 both
2181 feed into I3, rather than when I1 feeds into I2, which feeds into I3.
2182 If I1_NOT_IN_SRC is nonzero, it means that finding I1 in the source
2183 of a SET must prevent combination from occurring. The same situation
2184 can occur for I0, in which case I0_NOT_IN_SRC is set.
2185
2186 Before doing the above check, we first try to expand a field assignment
2187 into a set of logical operations.
2188
2189 If PI3_DEST_KILLED is nonzero, it is a pointer to a location in which
2190 we place a register that is both set and used within I3. If more than one
2191 such register is detected, we fail.
2192
2193 Return 1 if the combination is valid, zero otherwise. */
2194
2195 static int
2196 combinable_i3pat (rtx_insn *i3, rtx *loc, rtx i2dest, rtx i1dest, rtx i0dest,
2197 int i1_not_in_src, int i0_not_in_src, rtx *pi3dest_killed)
2198 {
2199 rtx x = *loc;
2200
2201 if (GET_CODE (x) == SET)
2202 {
2203 rtx set = x;
2204 rtx dest = SET_DEST (set);
2205 rtx src = SET_SRC (set);
2206 rtx inner_dest = dest;
2207 rtx subdest;
2208
2209 while (GET_CODE (inner_dest) == STRICT_LOW_PART
2210 || GET_CODE (inner_dest) == SUBREG
2211 || GET_CODE (inner_dest) == ZERO_EXTRACT)
2212 inner_dest = XEXP (inner_dest, 0);
2213
2214 /* Check for the case where I3 modifies its output, as discussed
2215 above. We don't want to prevent pseudos from being combined
2216 into the address of a MEM, so only prevent the combination if
2217 i1 or i2 set the same MEM. */
2218 if ((inner_dest != dest &&
2219 (!MEM_P (inner_dest)
2220 || rtx_equal_p (i2dest, inner_dest)
2221 || (i1dest && rtx_equal_p (i1dest, inner_dest))
2222 || (i0dest && rtx_equal_p (i0dest, inner_dest)))
2223 && (reg_overlap_mentioned_p (i2dest, inner_dest)
2224 || (i1dest && reg_overlap_mentioned_p (i1dest, inner_dest))
2225 || (i0dest && reg_overlap_mentioned_p (i0dest, inner_dest))))
2226
2227 /* This is the same test done in can_combine_p except we can't test
2228 all_adjacent; we don't have to, since this instruction will stay
2229 in place, thus we are not considering increasing the lifetime of
2230 INNER_DEST.
2231
2232 Also, if this insn sets a function argument, combining it with
2233 something that might need a spill could clobber a previous
2234 function argument; the all_adjacent test in can_combine_p also
2235 checks this; here, we do a more specific test for this case. */
2236
2237 || (REG_P (inner_dest)
2238 && REGNO (inner_dest) < FIRST_PSEUDO_REGISTER
2239 && !targetm.hard_regno_mode_ok (REGNO (inner_dest),
2240 GET_MODE (inner_dest)))
2241 || (i1_not_in_src && reg_overlap_mentioned_p (i1dest, src))
2242 || (i0_not_in_src && reg_overlap_mentioned_p (i0dest, src)))
2243 return 0;
2244
2245 /* If DEST is used in I3, it is being killed in this insn, so
2246 record that for later. We have to consider paradoxical
2247 subregs here, since they kill the whole register, but we
2248 ignore partial subregs, STRICT_LOW_PART, etc.
2249 Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
2250 STACK_POINTER_REGNUM, since these are always considered to be
2251 live. Similarly for ARG_POINTER_REGNUM if it is fixed. */
2252 subdest = dest;
2253 if (GET_CODE (subdest) == SUBREG && !partial_subreg_p (subdest))
2254 subdest = SUBREG_REG (subdest);
2255 if (pi3dest_killed
2256 && REG_P (subdest)
2257 && reg_referenced_p (subdest, PATTERN (i3))
2258 && REGNO (subdest) != FRAME_POINTER_REGNUM
2259 && (HARD_FRAME_POINTER_IS_FRAME_POINTER
2260 || REGNO (subdest) != HARD_FRAME_POINTER_REGNUM)
2261 && (FRAME_POINTER_REGNUM == ARG_POINTER_REGNUM
2262 || (REGNO (subdest) != ARG_POINTER_REGNUM
2263 || ! fixed_regs [REGNO (subdest)]))
2264 && REGNO (subdest) != STACK_POINTER_REGNUM)
2265 {
2266 if (*pi3dest_killed)
2267 return 0;
2268
2269 *pi3dest_killed = subdest;
2270 }
2271 }
2272
2273 else if (GET_CODE (x) == PARALLEL)
2274 {
2275 int i;
2276
2277 for (i = 0; i < XVECLEN (x, 0); i++)
2278 if (! combinable_i3pat (i3, &XVECEXP (x, 0, i), i2dest, i1dest, i0dest,
2279 i1_not_in_src, i0_not_in_src, pi3dest_killed))
2280 return 0;
2281 }
2282
2283 return 1;
2284 }
2285 \f
2286 /* Return 1 if X is an arithmetic expression that contains a multiplication
2287 or division. We don't count multiplications by powers of two here. */
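/* For example, (plus:SI (mult:SI (reg:SI 100) (const_int 5)) (reg:SI 101))
counts, while (mult:SI (reg:SI 100) (const_int 8)) does not, since 8 is a
power of two.  (The register numbers here are purely illustrative.)  */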
2288
2289 static int
2290 contains_muldiv (rtx x)
2291 {
2292 switch (GET_CODE (x))
2293 {
2294 case MOD: case DIV: case UMOD: case UDIV:
2295 return 1;
2296
2297 case MULT:
2298 return ! (CONST_INT_P (XEXP (x, 1))
2299 && pow2p_hwi (UINTVAL (XEXP (x, 1))));
2300 default:
2301 if (BINARY_P (x))
2302 return contains_muldiv (XEXP (x, 0))
2303 || contains_muldiv (XEXP (x, 1));
2304
2305 if (UNARY_P (x))
2306 return contains_muldiv (XEXP (x, 0));
2307
2308 return 0;
2309 }
2310 }
2311 \f
2312 /* Determine whether INSN can be used in a combination. Return nonzero if
2313 not. This is used in try_combine to detect early some cases where we
2314 can't perform combinations. */
2315
2316 static int
2317 cant_combine_insn_p (rtx_insn *insn)
2318 {
2319 rtx set;
2320 rtx src, dest;
2321
2322 /* If this isn't really an insn, we can't do anything.
2323 This can occur when flow deletes an insn that it has merged into an
2324 auto-increment address. */
2325 if (!NONDEBUG_INSN_P (insn))
2326 return 1;
2327
2328 /* Never combine loads and stores involving hard regs that are likely
2329 to be spilled. The register allocator can usually handle such
2330 reg-reg moves by tying. If we allow the combiner to make
2331 substitutions of likely-spilled regs, reload might die.
2332 As an exception, we allow combinations involving fixed regs; these are
2333 not available to the register allocator so there's no risk involved. */
2334
2335 set = single_set (insn);
2336 if (! set)
2337 return 0;
2338 src = SET_SRC (set);
2339 dest = SET_DEST (set);
2340 if (GET_CODE (src) == SUBREG)
2341 src = SUBREG_REG (src);
2342 if (GET_CODE (dest) == SUBREG)
2343 dest = SUBREG_REG (dest);
2344 if (REG_P (src) && REG_P (dest)
2345 && ((HARD_REGISTER_P (src)
2346 && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (src))
2347 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (src))))
2348 || (HARD_REGISTER_P (dest)
2349 && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (dest))
2350 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (dest))))))
2351 return 1;
2352
2353 return 0;
2354 }
2355
2356 struct likely_spilled_retval_info
2357 {
2358 unsigned regno, nregs;
2359 unsigned mask;
2360 };
2361
2362 /* Called via note_stores by likely_spilled_retval_p. Remove from info->mask
2363 hard registers that are known to be written to / clobbered in full. */
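/* For example, if INFO covers hard registers 8..11 (regno 8, nregs 4) and X
is a two-register value starting at register 10, new_mask is computed as
(2U << 1) - 1 == 0b11 and then shifted left by 10 - 8, so bits 2 and 3 are
cleared from info->mask.  */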
2364 static void
2365 likely_spilled_retval_1 (rtx x, const_rtx set, void *data)
2366 {
2367 struct likely_spilled_retval_info *const info =
2368 (struct likely_spilled_retval_info *) data;
2369 unsigned regno, nregs;
2370 unsigned new_mask;
2371
2372 if (!REG_P (XEXP (set, 0)))
2373 return;
2374 regno = REGNO (x);
2375 if (regno >= info->regno + info->nregs)
2376 return;
2377 nregs = REG_NREGS (x);
2378 if (regno + nregs <= info->regno)
2379 return;
2380 new_mask = (2U << (nregs - 1)) - 1;
2381 if (regno < info->regno)
2382 new_mask >>= info->regno - regno;
2383 else
2384 new_mask <<= regno - info->regno;
2385 info->mask &= ~new_mask;
2386 }
2387
2388 /* Return nonzero iff part of the return value is live during INSN, and
2389 it is likely spilled. This can happen when more than one insn is needed
2390 to copy the return value, e.g. when we consider combining into the
2391 second copy insn for a complex value. */
2392
2393 static int
2394 likely_spilled_retval_p (rtx_insn *insn)
2395 {
2396 rtx_insn *use = BB_END (this_basic_block);
2397 rtx reg;
2398 rtx_insn *p;
2399 unsigned regno, nregs;
2400 /* We assume here that no machine mode needs more than
2401 32 hard registers when the value overlaps with a register
2402 for which TARGET_FUNCTION_VALUE_REGNO_P is true. */
2403 unsigned mask;
2404 struct likely_spilled_retval_info info;
2405
2406 if (!NONJUMP_INSN_P (use) || GET_CODE (PATTERN (use)) != USE || insn == use)
2407 return 0;
2408 reg = XEXP (PATTERN (use), 0);
2409 if (!REG_P (reg) || !targetm.calls.function_value_regno_p (REGNO (reg)))
2410 return 0;
2411 regno = REGNO (reg);
2412 nregs = REG_NREGS (reg);
2413 if (nregs == 1)
2414 return 0;
2415 mask = (2U << (nregs - 1)) - 1;
2416
2417 /* Disregard parts of the return value that are set later. */
2418 info.regno = regno;
2419 info.nregs = nregs;
2420 info.mask = mask;
2421 for (p = PREV_INSN (use); info.mask && p != insn; p = PREV_INSN (p))
2422 if (INSN_P (p))
2423 note_stores (PATTERN (p), likely_spilled_retval_1, &info);
2424 mask = info.mask;
2425
2426 /* Check if any of the (probably) live return value registers is
2427 likely spilled. */
2428 nregs--;
2429 do
2430 {
2431 if ((mask & 1 << nregs)
2432 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (regno + nregs)))
2433 return 1;
2434 } while (nregs--);
2435 return 0;
2436 }
2437
2438 /* Adjust INSN after we made a change to its destination.
2439
2440 Changing the destination can invalidate notes that say something about
2441 the results of the insn and a LOG_LINK pointing to the insn. */
2442
2443 static void
2444 adjust_for_new_dest (rtx_insn *insn)
2445 {
2446 /* For notes, be conservative and simply remove them. */
2447 remove_reg_equal_equiv_notes (insn);
2448
2449 /* The new insn will have a destination that was previously the destination
2450 of an insn just above it. Call distribute_links to make a LOG_LINK from
2451 the next use of that destination. */
2452
2453 rtx set = single_set (insn);
2454 gcc_assert (set);
2455
2456 rtx reg = SET_DEST (set);
2457
2458 while (GET_CODE (reg) == ZERO_EXTRACT
2459 || GET_CODE (reg) == STRICT_LOW_PART
2460 || GET_CODE (reg) == SUBREG)
2461 reg = XEXP (reg, 0);
2462 gcc_assert (REG_P (reg));
2463
2464 distribute_links (alloc_insn_link (insn, REGNO (reg), NULL));
2465
2466 df_insn_rescan (insn);
2467 }
2468
2469 /* Return TRUE if combine can reuse reg X in mode MODE.
2470 ADDED_SETS is nonzero if the original set is still required. */
2471 static bool
2472 can_change_dest_mode (rtx x, int added_sets, machine_mode mode)
2473 {
2474 unsigned int regno;
2475
2476 if (!REG_P (x))
2477 return false;
2478
2479 /* Don't change between modes with different underlying register sizes,
2480 since this could lead to invalid subregs. */
2481 if (REGMODE_NATURAL_SIZE (mode)
2482 != REGMODE_NATURAL_SIZE (GET_MODE (x)))
2483 return false;
2484
2485 regno = REGNO (x);
2486 /* Allow hard registers if the new mode is legal, and occupies no more
2487 registers than the old mode. */
2488 if (regno < FIRST_PSEUDO_REGISTER)
2489 return (targetm.hard_regno_mode_ok (regno, mode)
2490 && REG_NREGS (x) >= hard_regno_nregs (regno, mode));
2491
2492 /* Or a pseudo that is only used once. */
2493 return (regno < reg_n_sets_max
2494 && REG_N_SETS (regno) == 1
2495 && !added_sets
2496 && !REG_USERVAR_P (x));
2497 }
2498
2499
2500 /* Check whether X, the destination of a set, refers to part of
2501 the register specified by REG. */
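/* For example, a destination of (strict_low_part (subreg:HI (reg:SI 100) 0))
refers to part of (reg:SI 100), whereas (reg:SI 100) itself does not: the
full register is not considered a subword.  (Register numbers are purely
illustrative.)  */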
2502
2503 static bool
2504 reg_subword_p (rtx x, rtx reg)
2505 {
2506 /* Check that reg is an integer mode register. */
2507 if (!REG_P (reg) || GET_MODE_CLASS (GET_MODE (reg)) != MODE_INT)
2508 return false;
2509
2510 if (GET_CODE (x) == STRICT_LOW_PART
2511 || GET_CODE (x) == ZERO_EXTRACT)
2512 x = XEXP (x, 0);
2513
2514 return GET_CODE (x) == SUBREG
2515 && SUBREG_REG (x) == reg
2516 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT;
2517 }
2518
2519 /* Delete the unconditional jump INSN and adjust the CFG correspondingly.
2520 Note that the INSN should be deleted *after* removing dead edges, so
2521 that the kept edge is the fallthrough edge for a (set (pc) (pc))
2522 but not for a (set (pc) (label_ref FOO)). */
2523
2524 static void
2525 update_cfg_for_uncondjump (rtx_insn *insn)
2526 {
2527 basic_block bb = BLOCK_FOR_INSN (insn);
2528 gcc_assert (BB_END (bb) == insn);
2529
2530 purge_dead_edges (bb);
2531
2532 delete_insn (insn);
2533 if (EDGE_COUNT (bb->succs) == 1)
2534 {
2535 rtx_insn *insn;
2536
2537 single_succ_edge (bb)->flags |= EDGE_FALLTHRU;
2538
2539 /* Remove barriers from the footer if there are any. */
2540 for (insn = BB_FOOTER (bb); insn; insn = NEXT_INSN (insn))
2541 if (BARRIER_P (insn))
2542 {
2543 if (PREV_INSN (insn))
2544 SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
2545 else
2546 BB_FOOTER (bb) = NEXT_INSN (insn);
2547 if (NEXT_INSN (insn))
2548 SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
2549 }
2550 else if (LABEL_P (insn))
2551 break;
2552 }
2553 }
2554
2555 /* Return whether PAT is a PARALLEL of exactly N register SETs followed
2556 by an arbitrary number of CLOBBERs. */
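/* For N == 2, a typical match looks like
(parallel [(set (reg:SI 100) ...)
(set (reg:SI 101) ...)
(clobber (scratch:SI))])
while a trailing (clobber (const_int 0)) makes the PARALLEL unacceptable.  */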
2557 static bool
2558 is_parallel_of_n_reg_sets (rtx pat, int n)
2559 {
2560 if (GET_CODE (pat) != PARALLEL)
2561 return false;
2562
2563 int len = XVECLEN (pat, 0);
2564 if (len < n)
2565 return false;
2566
2567 int i;
2568 for (i = 0; i < n; i++)
2569 if (GET_CODE (XVECEXP (pat, 0, i)) != SET
2570 || !REG_P (SET_DEST (XVECEXP (pat, 0, i))))
2571 return false;
2572 for ( ; i < len; i++)
2573 if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER
2574 || XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
2575 return false;
2576
2577 return true;
2578 }
2579
2580 /* Return whether INSN, a PARALLEL of N register SETs (and maybe some
2581 CLOBBERs), can be split into individual SETs in that order, without
2582 changing semantics. */
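/* For example, (parallel [(set (reg:SI 100) (reg:SI 101))
(set (reg:SI 102) (plus:SI (reg:SI 100) (const_int 1)))])
cannot be split in order: the second SET reads register 100, which the
first SET has just changed.  (Register numbers are purely illustrative.)  */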
2583 static bool
2584 can_split_parallel_of_n_reg_sets (rtx_insn *insn, int n)
2585 {
2586 if (!insn_nothrow_p (insn))
2587 return false;
2588
2589 rtx pat = PATTERN (insn);
2590
2591 int i, j;
2592 for (i = 0; i < n; i++)
2593 {
2594 if (side_effects_p (SET_SRC (XVECEXP (pat, 0, i))))
2595 return false;
2596
2597 rtx reg = SET_DEST (XVECEXP (pat, 0, i));
2598
2599 for (j = i + 1; j < n; j++)
2600 if (reg_referenced_p (reg, XVECEXP (pat, 0, j)))
2601 return false;
2602 }
2603
2604 return true;
2605 }
2606
2607 /* Try to combine the insns I0, I1 and I2 into I3.
2608 Here I0, I1 and I2 appear earlier than I3.
2609 I0 and I1 can be zero; then we combine just I2 into I3, or I1 and I2 into
2610 I3.
2611
2612 If we are combining more than two insns and the resulting insn is not
2613 recognized, try splitting it into two insns. If that happens, I2 and I3
2614 are retained and I1/I0 are pseudo-deleted by turning them into a NOTE.
2615 Otherwise, I0, I1 and I2 are pseudo-deleted.
2616
2617 Return 0 if the combination does not work. Then nothing is changed.
2618 If we did the combination, return the insn at which combine should
2619 resume scanning.
2620
2621 Set NEW_DIRECT_JUMP_P to a nonzero value if try_combine creates a
2622 new direct jump instruction.
2623
2624 LAST_COMBINED_INSN is either I3, or some insn after I3 that has
2625 been I3 passed to an earlier try_combine within the same basic
2626 block. */
2627
2628 static rtx_insn *
2629 try_combine (rtx_insn *i3, rtx_insn *i2, rtx_insn *i1, rtx_insn *i0,
2630 int *new_direct_jump_p, rtx_insn *last_combined_insn)
2631 {
2632 /* New patterns for I3 and I2, respectively. */
2633 rtx newpat, newi2pat = 0;
2634 rtvec newpat_vec_with_clobbers = 0;
2635 int substed_i2 = 0, substed_i1 = 0, substed_i0 = 0;
2636 /* Indicates need to preserve SET in I0, I1 or I2 in I3 if it is not
2637 dead. */
2638 int added_sets_0, added_sets_1, added_sets_2;
2639 /* Total number of SETs to put into I3. */
2640 int total_sets;
2641 /* Nonzero if I2's or I1's body now appears in I3. */
2642 int i2_is_used = 0, i1_is_used = 0;
2643 /* INSN_CODEs for new I3, new I2, and user of condition code. */
2644 int insn_code_number, i2_code_number = 0, other_code_number = 0;
2645 /* Contains I3 if the destination of I3 is used in its source, which means
2646 that the old life of I3 is being killed. If that usage is placed into
2647 I2 and not in I3, a REG_DEAD note must be made. */
2648 rtx i3dest_killed = 0;
2649 /* SET_DEST and SET_SRC of I2, I1 and I0. */
2650 rtx i2dest = 0, i2src = 0, i1dest = 0, i1src = 0, i0dest = 0, i0src = 0;
2651 /* Copy of SET_SRC of I1 and I0, if needed. */
2652 rtx i1src_copy = 0, i0src_copy = 0, i0src_copy2 = 0;
2653 /* Set if I2DEST was reused as a scratch register. */
2654 bool i2scratch = false;
2655 /* The PATTERNs of I0, I1, and I2, or a copy of them in certain cases. */
2656 rtx i0pat = 0, i1pat = 0, i2pat = 0;
2657 /* Indicates if I2DEST or I1DEST is in I2SRC or I1SRC. */
2658 int i2dest_in_i2src = 0, i1dest_in_i1src = 0, i2dest_in_i1src = 0;
2659 int i0dest_in_i0src = 0, i1dest_in_i0src = 0, i2dest_in_i0src = 0;
2660 int i2dest_killed = 0, i1dest_killed = 0, i0dest_killed = 0;
2661 int i1_feeds_i2_n = 0, i0_feeds_i2_n = 0, i0_feeds_i1_n = 0;
2662 /* Notes that must be added to REG_NOTES in I3 and I2. */
2663 rtx new_i3_notes, new_i2_notes;
2664 /* Notes that we substituted I3 into I2 instead of the normal case. */
2665 int i3_subst_into_i2 = 0;
2666 /* Notes that I1, I2 or I3 is a MULT operation. */
2667 int have_mult = 0;
2668 int swap_i2i3 = 0;
2669 int changed_i3_dest = 0;
2670
2671 int maxreg;
2672 rtx_insn *temp_insn;
2673 rtx temp_expr;
2674 struct insn_link *link;
2675 rtx other_pat = 0;
2676 rtx new_other_notes;
2677 int i;
2678 scalar_int_mode dest_mode, temp_mode;
2679
2680 /* Immediately return if any of I0, I1, I2 are the same insn (I3 can
2681 never be). */
2682 if (i1 == i2 || i0 == i2 || (i0 && i0 == i1))
2683 return 0;
2684
2685 /* Only try four-insn combinations when there's high likelihood of
2686 success. Look for simple insns, such as loads of constants or
2687 binary operations involving a constant. */
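/* For example, (set (reg:SI 100) (const_int 5)) is immediately good enough
on its own (ngood += 2), a binary operation with a constant operand such as
(plus:SI (reg:SI 101) (const_int 4)) adds one point, and a shift by a
non-constant amount only counts towards the separate shift counter.  */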
2688 if (i0)
2689 {
2690 int i;
2691 int ngood = 0;
2692 int nshift = 0;
2693 rtx set0, set3;
2694
2695 if (!flag_expensive_optimizations)
2696 return 0;
2697
2698 for (i = 0; i < 4; i++)
2699 {
2700 rtx_insn *insn = i == 0 ? i0 : i == 1 ? i1 : i == 2 ? i2 : i3;
2701 rtx set = single_set (insn);
2702 rtx src;
2703 if (!set)
2704 continue;
2705 src = SET_SRC (set);
2706 if (CONSTANT_P (src))
2707 {
2708 ngood += 2;
2709 break;
2710 }
2711 else if (BINARY_P (src) && CONSTANT_P (XEXP (src, 1)))
2712 ngood++;
2713 else if (GET_CODE (src) == ASHIFT || GET_CODE (src) == ASHIFTRT
2714 || GET_CODE (src) == LSHIFTRT)
2715 nshift++;
2716 }
2717
2718 /* If I0 loads from memory and I3 stores to the same memory, then I1 and I2
2719 are likely manipulating its value. Ideally we'll be able to combine
2720 all four insns into a bitfield insertion of some kind.
2721
2722 Note the source in I0 might be inside a sign/zero extension and the
2723 memory modes in I0 and I3 might be different. So extract the address
2724 from the destination of I3 and search for it in the source of I0.
2725
2726 In the event that there's a match but the source/dest do not actually
2727 refer to the same memory, the worst that happens is we try some
2728 combinations that we wouldn't have otherwise. */
2729 if ((set0 = single_set (i0))
2730 /* Ensure the source of SET0 is a MEM, possibly buried inside
2731 an extension. */
2732 && (GET_CODE (SET_SRC (set0)) == MEM
2733 || ((GET_CODE (SET_SRC (set0)) == ZERO_EXTEND
2734 || GET_CODE (SET_SRC (set0)) == SIGN_EXTEND)
2735 && GET_CODE (XEXP (SET_SRC (set0), 0)) == MEM))
2736 && (set3 = single_set (i3))
2737 /* Ensure the destination of SET3 is a MEM. */
2738 && GET_CODE (SET_DEST (set3)) == MEM
2739 /* Would it be better to extract the base address for the MEM
2740 in SET3 and look for that? I don't have cases where it matters
2741 but I could envision such cases. */
2742 && rtx_referenced_p (XEXP (SET_DEST (set3), 0), SET_SRC (set0)))
2743 ngood += 2;
2744
2745 if (ngood < 2 && nshift < 2)
2746 return 0;
2747 }
2748
2749 /* Exit early if one of the insns involved can't be used for
2750 combinations. */
2751 if (CALL_P (i2)
2752 || (i1 && CALL_P (i1))
2753 || (i0 && CALL_P (i0))
2754 || cant_combine_insn_p (i3)
2755 || cant_combine_insn_p (i2)
2756 || (i1 && cant_combine_insn_p (i1))
2757 || (i0 && cant_combine_insn_p (i0))
2758 || likely_spilled_retval_p (i3))
2759 return 0;
2760
2761 combine_attempts++;
2762 undobuf.other_insn = 0;
2763
2764 /* Reset the hard register usage information. */
2765 CLEAR_HARD_REG_SET (newpat_used_regs);
2766
2767 if (dump_file && (dump_flags & TDF_DETAILS))
2768 {
2769 if (i0)
2770 fprintf (dump_file, "\nTrying %d, %d, %d -> %d:\n",
2771 INSN_UID (i0), INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
2772 else if (i1)
2773 fprintf (dump_file, "\nTrying %d, %d -> %d:\n",
2774 INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
2775 else
2776 fprintf (dump_file, "\nTrying %d -> %d:\n",
2777 INSN_UID (i2), INSN_UID (i3));
2778
2779 if (i0)
2780 dump_insn_slim (dump_file, i0);
2781 if (i1)
2782 dump_insn_slim (dump_file, i1);
2783 dump_insn_slim (dump_file, i2);
2784 dump_insn_slim (dump_file, i3);
2785 }
2786
2787 /* If multiple insns feed into one of I2 or I3, they can be in any
2788 order. To simplify the code below, reorder them in sequence. */
2789 if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i2))
2790 std::swap (i0, i2);
2791 if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i1))
2792 std::swap (i0, i1);
2793 if (i1 && DF_INSN_LUID (i1) > DF_INSN_LUID (i2))
2794 std::swap (i1, i2);
2795
2796 added_links_insn = 0;
2797 added_notes_insn = 0;
2798
2799 /* First check for one important special case that the code below will
2800 not handle. Namely, the case where I1 is zero, I2 is a PARALLEL
2801 and I3 is a SET whose SET_SRC is a SET_DEST in I2. In that case,
2802 we may be able to replace that destination with the destination of I3.
2803 This occurs in the common code where we compute both a quotient and
2804 remainder into a structure, in which case we want to do the computation
2805 directly into the structure to avoid register-register copies.
2806
2807 Note that this case handles both multiple sets in I2 and also cases
2808 where I2 has a number of CLOBBERs inside the PARALLEL.
2809
2810 We make very conservative checks below and only try to handle the
2811 most common cases of this. For example, we only handle the case
2812 where I2 and I3 are adjacent to avoid making difficult register
2813 usage tests. */
2814
2815 if (i1 == 0 && NONJUMP_INSN_P (i3) && GET_CODE (PATTERN (i3)) == SET
2816 && REG_P (SET_SRC (PATTERN (i3)))
2817 && REGNO (SET_SRC (PATTERN (i3))) >= FIRST_PSEUDO_REGISTER
2818 && find_reg_note (i3, REG_DEAD, SET_SRC (PATTERN (i3)))
2819 && GET_CODE (PATTERN (i2)) == PARALLEL
2820 && ! side_effects_p (SET_DEST (PATTERN (i3)))
2821 /* If the dest of I3 is a ZERO_EXTRACT or STRICT_LOW_PART, the code
2822 below would need to check what is inside (and reg_overlap_mentioned_p
2823 doesn't support those codes anyway). Don't allow those destinations;
2824 the resulting insn isn't likely to be recognized anyway. */
2825 && GET_CODE (SET_DEST (PATTERN (i3))) != ZERO_EXTRACT
2826 && GET_CODE (SET_DEST (PATTERN (i3))) != STRICT_LOW_PART
2827 && ! reg_overlap_mentioned_p (SET_SRC (PATTERN (i3)),
2828 SET_DEST (PATTERN (i3)))
2829 && next_active_insn (i2) == i3)
2830 {
2831 rtx p2 = PATTERN (i2);
2832
2833 /* Make sure that the destination of I3,
2834 which we are going to substitute into one output of I2,
2835 is not used within another output of I2. We must avoid making this:
2836 (parallel [(set (mem (reg 69)) ...)
2837 (set (reg 69) ...)])
2838 which is not well-defined as to order of actions.
2839 (Besides, reload can't handle output reloads for this.)
2840
2841 The problem can also happen if the dest of I3 is a memory ref,
2842 if another dest in I2 is an indirect memory ref.
2843
2844 Neither can this PARALLEL be an asm. We do not allow combining
2845 that usually (see can_combine_p), so do not here either. */
2846 bool ok = true;
2847 for (i = 0; ok && i < XVECLEN (p2, 0); i++)
2848 {
2849 if ((GET_CODE (XVECEXP (p2, 0, i)) == SET
2850 || GET_CODE (XVECEXP (p2, 0, i)) == CLOBBER)
2851 && reg_overlap_mentioned_p (SET_DEST (PATTERN (i3)),
2852 SET_DEST (XVECEXP (p2, 0, i))))
2853 ok = false;
2854 else if (GET_CODE (XVECEXP (p2, 0, i)) == SET
2855 && GET_CODE (SET_SRC (XVECEXP (p2, 0, i))) == ASM_OPERANDS)
2856 ok = false;
2857 }
2858
2859 if (ok)
2860 for (i = 0; i < XVECLEN (p2, 0); i++)
2861 if (GET_CODE (XVECEXP (p2, 0, i)) == SET
2862 && SET_DEST (XVECEXP (p2, 0, i)) == SET_SRC (PATTERN (i3)))
2863 {
2864 combine_merges++;
2865
2866 subst_insn = i3;
2867 subst_low_luid = DF_INSN_LUID (i2);
2868
2869 added_sets_2 = added_sets_1 = added_sets_0 = 0;
2870 i2src = SET_SRC (XVECEXP (p2, 0, i));
2871 i2dest = SET_DEST (XVECEXP (p2, 0, i));
2872 i2dest_killed = dead_or_set_p (i2, i2dest);
2873
2874 /* Replace the dest in I2 with our dest and make the resulting
2875 insn the new pattern for I3. Then skip to where we validate
2876 the pattern. Everything was set up above. */
2877 SUBST (SET_DEST (XVECEXP (p2, 0, i)), SET_DEST (PATTERN (i3)));
2878 newpat = p2;
2879 i3_subst_into_i2 = 1;
2880 goto validate_replacement;
2881 }
2882 }
2883
2884 /* If I2 is setting a pseudo to a constant and I3 is setting some
2885 sub-part of it to another constant, merge them by making a new
2886 constant. */
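/* For example, if I2 is (set (reg:SI 100) (const_int 0)) and I3 stores
(const_int 0x1234) into the low HImode subword of register 100, the two are
merged below (via wi::insert) into a single set of register 100 to
(const_int 0x1234).  (The register and constants are purely illustrative.)  */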
2887 if (i1 == 0
2888 && (temp_expr = single_set (i2)) != 0
2889 && is_a <scalar_int_mode> (GET_MODE (SET_DEST (temp_expr)), &temp_mode)
2890 && CONST_SCALAR_INT_P (SET_SRC (temp_expr))
2891 && GET_CODE (PATTERN (i3)) == SET
2892 && CONST_SCALAR_INT_P (SET_SRC (PATTERN (i3)))
2893 && reg_subword_p (SET_DEST (PATTERN (i3)), SET_DEST (temp_expr)))
2894 {
2895 rtx dest = SET_DEST (PATTERN (i3));
2896 rtx temp_dest = SET_DEST (temp_expr);
2897 int offset = -1;
2898 int width = 0;
2899
2900 if (GET_CODE (dest) == ZERO_EXTRACT)
2901 {
2902 if (CONST_INT_P (XEXP (dest, 1))
2903 && CONST_INT_P (XEXP (dest, 2))
2904 && is_a <scalar_int_mode> (GET_MODE (XEXP (dest, 0)),
2905 &dest_mode))
2906 {
2907 width = INTVAL (XEXP (dest, 1));
2908 offset = INTVAL (XEXP (dest, 2));
2909 dest = XEXP (dest, 0);
2910 if (BITS_BIG_ENDIAN)
2911 offset = GET_MODE_PRECISION (dest_mode) - width - offset;
2912 }
2913 }
2914 else
2915 {
2916 if (GET_CODE (dest) == STRICT_LOW_PART)
2917 dest = XEXP (dest, 0);
2918 if (is_a <scalar_int_mode> (GET_MODE (dest), &dest_mode))
2919 {
2920 width = GET_MODE_PRECISION (dest_mode);
2921 offset = 0;
2922 }
2923 }
2924
2925 if (offset >= 0)
2926 {
2927 /* If this is the low part, we're done. */
2928 if (subreg_lowpart_p (dest))
2929 ;
2930 /* Handle the case where inner is twice the size of outer. */
2931 else if (GET_MODE_PRECISION (temp_mode)
2932 == 2 * GET_MODE_PRECISION (dest_mode))
2933 offset += GET_MODE_PRECISION (dest_mode);
2934 /* Otherwise give up for now. */
2935 else
2936 offset = -1;
2937 }
2938
2939 if (offset >= 0)
2940 {
2941 rtx inner = SET_SRC (PATTERN (i3));
2942 rtx outer = SET_SRC (temp_expr);
2943
2944 wide_int o = wi::insert (rtx_mode_t (outer, temp_mode),
2945 rtx_mode_t (inner, dest_mode),
2946 offset, width);
2947
2948 combine_merges++;
2949 subst_insn = i3;
2950 subst_low_luid = DF_INSN_LUID (i2);
2951 added_sets_2 = added_sets_1 = added_sets_0 = 0;
2952 i2dest = temp_dest;
2953 i2dest_killed = dead_or_set_p (i2, i2dest);
2954
2955 /* Replace the source in I2 with the new constant and make the
2956 resulting insn the new pattern for I3. Then skip to where we
2957 validate the pattern. Everything was set up above. */
2958 SUBST (SET_SRC (temp_expr),
2959 immed_wide_int_const (o, temp_mode));
2960
2961 newpat = PATTERN (i2);
2962
2963 /* The dest of I3 has been replaced with the dest of I2. */
2964 changed_i3_dest = 1;
2965 goto validate_replacement;
2966 }
2967 }
2968
2969 /* If we have no I1 and I2 looks like:
2970 (parallel [(set (reg:CC X) (compare:CC OP (const_int 0)))
2971 (set Y OP)])
2972 make up a dummy I1 that is
2973 (set Y OP)
2974 and change I2 to be
2975 (set (reg:CC X) (compare:CC Y (const_int 0)))
2976
2977 (We can ignore any trailing CLOBBERs.)
2978
2979 This undoes a previous combination and allows us to match a branch-and-
2980 decrement insn. */
2981
2982 if (!HAVE_cc0 && i1 == 0
2983 && is_parallel_of_n_reg_sets (PATTERN (i2), 2)
2984 && (GET_MODE_CLASS (GET_MODE (SET_DEST (XVECEXP (PATTERN (i2), 0, 0))))
2985 == MODE_CC)
2986 && GET_CODE (SET_SRC (XVECEXP (PATTERN (i2), 0, 0))) == COMPARE
2987 && XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 1) == const0_rtx
2988 && rtx_equal_p (XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 0),
2989 SET_SRC (XVECEXP (PATTERN (i2), 0, 1)))
2990 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
2991 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3))
2992 {
2993 /* We make I1 with the same INSN_UID as I2. This gives it
2994 the same DF_INSN_LUID for value tracking. Our fake I1 will
2995 never appear in the insn stream so giving it the same INSN_UID
2996 as I2 will not cause a problem. */
2997
2998 i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
2999 XVECEXP (PATTERN (i2), 0, 1), INSN_LOCATION (i2),
3000 -1, NULL_RTX);
3001 INSN_UID (i1) = INSN_UID (i2);
3002
3003 SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 0));
3004 SUBST (XEXP (SET_SRC (PATTERN (i2)), 0),
3005 SET_DEST (PATTERN (i1)));
3006 unsigned int regno = REGNO (SET_DEST (PATTERN (i1)));
3007 SUBST_LINK (LOG_LINKS (i2),
3008 alloc_insn_link (i1, regno, LOG_LINKS (i2)));
3009 }
3010
3011 /* If I2 is a PARALLEL of two SETs of REGs (and perhaps some CLOBBERs),
3012 make those two SETs separate I1 and I2 insns, and make an I0 that is
3013 the original I1. */
3014 if (!HAVE_cc0 && i0 == 0
3015 && is_parallel_of_n_reg_sets (PATTERN (i2), 2)
3016 && can_split_parallel_of_n_reg_sets (i2, 2)
3017 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
3018 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3)
3019 && !reg_set_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
3020 && !reg_set_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3))
3021 {
3022 /* If there is no I1, there is no I0 either. */
3023 i0 = i1;
3024
3025 /* We make I1 with the same INSN_UID as I2. This gives it
3026 the same DF_INSN_LUID for value tracking. Our fake I1 will
3027 never appear in the insn stream so giving it the same INSN_UID
3028 as I2 will not cause a problem. */
3029
3030 i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
3031 XVECEXP (PATTERN (i2), 0, 0), INSN_LOCATION (i2),
3032 -1, NULL_RTX);
3033 INSN_UID (i1) = INSN_UID (i2);
3034
3035 SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 1));
3036 }
3037
3038 /* Verify that I2 and I1 are valid for combining. */
3039 if (! can_combine_p (i2, i3, i0, i1, NULL, NULL, &i2dest, &i2src)
3040 || (i1 && ! can_combine_p (i1, i3, i0, NULL, i2, NULL,
3041 &i1dest, &i1src))
3042 || (i0 && ! can_combine_p (i0, i3, NULL, NULL, i1, i2,
3043 &i0dest, &i0src)))
3044 {
3045 undo_all ();
3046 return 0;
3047 }
3048
3049 /* Record whether I2DEST is used in I2SRC and similarly for the other
3050 cases. Knowing this will help in register status updating below. */
3051 i2dest_in_i2src = reg_overlap_mentioned_p (i2dest, i2src);
3052 i1dest_in_i1src = i1 && reg_overlap_mentioned_p (i1dest, i1src);
3053 i2dest_in_i1src = i1 && reg_overlap_mentioned_p (i2dest, i1src);
3054 i0dest_in_i0src = i0 && reg_overlap_mentioned_p (i0dest, i0src);
3055 i1dest_in_i0src = i0 && reg_overlap_mentioned_p (i1dest, i0src);
3056 i2dest_in_i0src = i0 && reg_overlap_mentioned_p (i2dest, i0src);
3057 i2dest_killed = dead_or_set_p (i2, i2dest);
3058 i1dest_killed = i1 && dead_or_set_p (i1, i1dest);
3059 i0dest_killed = i0 && dead_or_set_p (i0, i0dest);
3060
3061 /* For the earlier insns, determine which of the subsequent ones they
3062 feed. */
3063 i1_feeds_i2_n = i1 && insn_a_feeds_b (i1, i2);
3064 i0_feeds_i1_n = i0 && insn_a_feeds_b (i0, i1);
3065 i0_feeds_i2_n = (i0 && (!i0_feeds_i1_n ? insn_a_feeds_b (i0, i2)
3066 : (!reg_overlap_mentioned_p (i1dest, i0dest)
3067 && reg_overlap_mentioned_p (i0dest, i2src))));
3068
3069 /* Ensure that I3's pattern can be the destination of combines. */
3070 if (! combinable_i3pat (i3, &PATTERN (i3), i2dest, i1dest, i0dest,
3071 i1 && i2dest_in_i1src && !i1_feeds_i2_n,
3072 i0 && ((i2dest_in_i0src && !i0_feeds_i2_n)
3073 || (i1dest_in_i0src && !i0_feeds_i1_n)),
3074 &i3dest_killed))
3075 {
3076 undo_all ();
3077 return 0;
3078 }
3079
3080 /* See if any of the insns is a MULT operation. Unless one is, we will
3081 reject a combination that is, since it must be slower. Be conservative
3082 here. */
3083 if (GET_CODE (i2src) == MULT
3084 || (i1 != 0 && GET_CODE (i1src) == MULT)
3085 || (i0 != 0 && GET_CODE (i0src) == MULT)
3086 || (GET_CODE (PATTERN (i3)) == SET
3087 && GET_CODE (SET_SRC (PATTERN (i3))) == MULT))
3088 have_mult = 1;
3089
3090 /* If I3 has an inc, then give up if I1 or I2 uses the reg that is inc'd.
3091 We used to do this EXCEPT in one case: I3 has a post-inc in an
3092 output operand. However, that exception can give rise to insns like
3093 mov r3,(r3)+
3094 which is a famous insn on the PDP-11 where the value of r3 used as the
3095 source was model-dependent. Avoid this sort of thing. */
3096
3097 #if 0
3098 if (!(GET_CODE (PATTERN (i3)) == SET
3099 && REG_P (SET_SRC (PATTERN (i3)))
3100 && MEM_P (SET_DEST (PATTERN (i3)))
3101 && (GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_INC
3102 || GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_DEC)))
3103 /* It's not the exception. */
3104 #endif
3105 if (AUTO_INC_DEC)
3106 {
3107 rtx link;
3108 for (link = REG_NOTES (i3); link; link = XEXP (link, 1))
3109 if (REG_NOTE_KIND (link) == REG_INC
3110 && (reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i2))
3111 || (i1 != 0
3112 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i1)))))
3113 {
3114 undo_all ();
3115 return 0;
3116 }
3117 }
3118
3119 /* See if the SETs in I1 or I2 need to be kept around in the merged
3120 instruction: whenever the value set there is still needed past I3.
3121 For the SET in I2, this is easy: we see if I2DEST dies or is set in I3.
3122
3123 For the SET in I1, we have two cases: if I1 and I2 independently feed
3124 into I3, the set in I1 needs to be kept around unless I1DEST dies
3125 or is set in I3. Otherwise (if I1 feeds I2 which feeds I3), the set
3126 in I1 needs to be kept around unless I1DEST dies or is set in either
3127 I2 or I3. The same considerations apply to I0. */
3128
3129 added_sets_2 = !dead_or_set_p (i3, i2dest);
3130
3131 if (i1)
3132 added_sets_1 = !(dead_or_set_p (i3, i1dest)
3133 || (i1_feeds_i2_n && dead_or_set_p (i2, i1dest)));
3134 else
3135 added_sets_1 = 0;
3136
3137 if (i0)
3138 added_sets_0 = !(dead_or_set_p (i3, i0dest)
3139 || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest))
3140 || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
3141 && dead_or_set_p (i2, i0dest)));
3142 else
3143 added_sets_0 = 0;
3144
3145 /* We are about to copy insns for the case where they need to be kept
3146 around. Check that they can be copied in the merged instruction. */
3147
3148 if (targetm.cannot_copy_insn_p
3149 && ((added_sets_2 && targetm.cannot_copy_insn_p (i2))
3150 || (i1 && added_sets_1 && targetm.cannot_copy_insn_p (i1))
3151 || (i0 && added_sets_0 && targetm.cannot_copy_insn_p (i0))))
3152 {
3153 undo_all ();
3154 return 0;
3155 }
3156
3157 /* If the set in I2 needs to be kept around, we must make a copy of
3158 PATTERN (I2), so that when we substitute I1SRC for I1DEST in
3159 PATTERN (I2), we are only substituting for the original I1DEST, not into
3160 an already-substituted copy. This also prevents making self-referential
3161 rtx. If I2 is a PARALLEL, we just need the piece that assigns I2SRC to
3162 I2DEST. */
3163
3164 if (added_sets_2)
3165 {
3166 if (GET_CODE (PATTERN (i2)) == PARALLEL)
3167 i2pat = gen_rtx_SET (i2dest, copy_rtx (i2src));
3168 else
3169 i2pat = copy_rtx (PATTERN (i2));
3170 }
3171
3172 if (added_sets_1)
3173 {
3174 if (GET_CODE (PATTERN (i1)) == PARALLEL)
3175 i1pat = gen_rtx_SET (i1dest, copy_rtx (i1src));
3176 else
3177 i1pat = copy_rtx (PATTERN (i1));
3178 }
3179
3180 if (added_sets_0)
3181 {
3182 if (GET_CODE (PATTERN (i0)) == PARALLEL)
3183 i0pat = gen_rtx_SET (i0dest, copy_rtx (i0src));
3184 else
3185 i0pat = copy_rtx (PATTERN (i0));
3186 }
3187
3188 combine_merges++;
3189
3190 /* Substitute in the latest insn for the regs set by the earlier ones. */
3191
3192 maxreg = max_reg_num ();
3193
3194 subst_insn = i3;
3195
3196 /* Many machines that don't use CC0 have insns that can both perform an
3197 arithmetic operation and set the condition code. These operations will
3198 be represented as a PARALLEL with the first element of the vector
3199 being a COMPARE of an arithmetic operation with the constant zero.
3200 The second element of the vector will set some pseudo to the result
3201 of the same arithmetic operation. If we simplify the COMPARE, we won't
3202 match such a pattern and so will generate an extra insn. Here we test
3203 for this case, where both the comparison and the operation result are
3204 needed, and make the PARALLEL by just replacing I2DEST in I3SRC with
3205 I2SRC. Later we will make the PARALLEL that contains I2. */
3206
3207 if (!HAVE_cc0 && i1 == 0 && added_sets_2 && GET_CODE (PATTERN (i3)) == SET
3208 && GET_CODE (SET_SRC (PATTERN (i3))) == COMPARE
3209 && CONST_INT_P (XEXP (SET_SRC (PATTERN (i3)), 1))
3210 && rtx_equal_p (XEXP (SET_SRC (PATTERN (i3)), 0), i2dest))
3211 {
3212 rtx newpat_dest;
3213 rtx *cc_use_loc = NULL;
3214 rtx_insn *cc_use_insn = NULL;
3215 rtx op0 = i2src, op1 = XEXP (SET_SRC (PATTERN (i3)), 1);
3216 machine_mode compare_mode, orig_compare_mode;
3217 enum rtx_code compare_code = UNKNOWN, orig_compare_code = UNKNOWN;
3218 scalar_int_mode mode;
3219
3220 newpat = PATTERN (i3);
3221 newpat_dest = SET_DEST (newpat);
3222 compare_mode = orig_compare_mode = GET_MODE (newpat_dest);
3223
3224 if (undobuf.other_insn == 0
3225 && (cc_use_loc = find_single_use (SET_DEST (newpat), i3,
3226 &cc_use_insn)))
3227 {
3228 compare_code = orig_compare_code = GET_CODE (*cc_use_loc);
3229 if (is_a <scalar_int_mode> (GET_MODE (i2dest), &mode))
3230 compare_code = simplify_compare_const (compare_code, mode,
3231 op0, &op1);
3232 target_canonicalize_comparison (&compare_code, &op0, &op1, 1);
3233 }
3234
3235 /* Do the rest only if op1 is const0_rtx, which may be the
3236 result of simplification. */
3237 if (op1 == const0_rtx)
3238 {
3239 /* If a single use of the CC is found, prepare to modify it
3240 when SELECT_CC_MODE returns a new CC-class mode, or when
3241 the above simplify_compare_const() returned a new comparison
3242 operator. undobuf.other_insn is assigned the CC use insn
3243 when modifying it. */
3244 if (cc_use_loc)
3245 {
3246 #ifdef SELECT_CC_MODE
3247 machine_mode new_mode
3248 = SELECT_CC_MODE (compare_code, op0, op1);
3249 if (new_mode != orig_compare_mode
3250 && can_change_dest_mode (SET_DEST (newpat),
3251 added_sets_2, new_mode))
3252 {
3253 unsigned int regno = REGNO (newpat_dest);
3254 compare_mode = new_mode;
3255 if (regno < FIRST_PSEUDO_REGISTER)
3256 newpat_dest = gen_rtx_REG (compare_mode, regno);
3257 else
3258 {
3259 SUBST_MODE (regno_reg_rtx[regno], compare_mode);
3260 newpat_dest = regno_reg_rtx[regno];
3261 }
3262 }
3263 #endif
3264 /* Cases for modifying the CC-using comparison. */
3265 if (compare_code != orig_compare_code
3266 /* ??? Do we need to verify the zero rtx? */
3267 && XEXP (*cc_use_loc, 1) == const0_rtx)
3268 {
3269 /* Replace cc_use_loc with entire new RTX. */
3270 SUBST (*cc_use_loc,
3271 gen_rtx_fmt_ee (compare_code, compare_mode,
3272 newpat_dest, const0_rtx));
3273 undobuf.other_insn = cc_use_insn;
3274 }
3275 else if (compare_mode != orig_compare_mode)
3276 {
3277 /* Just replace the CC reg with a new mode. */
3278 SUBST (XEXP (*cc_use_loc, 0), newpat_dest);
3279 undobuf.other_insn = cc_use_insn;
3280 }
3281 }
3282
3283 /* Now we modify the current newpat:
3284 First, SET_DEST(newpat) is updated if the CC mode has been
3285 altered. For targets without SELECT_CC_MODE, this should be
3286 optimized away. */
3287 if (compare_mode != orig_compare_mode)
3288 SUBST (SET_DEST (newpat), newpat_dest);
3289 /* This is always done to propagate i2src into newpat. */
3290 SUBST (SET_SRC (newpat),
3291 gen_rtx_COMPARE (compare_mode, op0, op1));
3292 /* Create new version of i2pat if needed; the below PARALLEL
3293 creation needs this to work correctly. */
3294 if (! rtx_equal_p (i2src, op0))
3295 i2pat = gen_rtx_SET (i2dest, op0);
3296 i2_is_used = 1;
3297 }
3298 }
3299
3300 if (i2_is_used == 0)
3301 {
3302 /* It is possible that the source of I2 or I1 may be performing
3303 an unneeded operation, such as a ZERO_EXTEND of something
3304 that is known to have the high part zero. Handle that case
3305 by letting subst look at the inner insns.
3306
3307 Another way to do this would be to have a function that tries
3308 to simplify a single insn instead of merging two or more
3309 insns. We don't do this because of the potential of infinite
3310 loops and because of the potential extra memory required.
3311 However, doing it the way we are is a bit of a kludge and
3312 doesn't catch all cases.
3313
3314 But only do this if -fexpensive-optimizations since it slows
3315 things down and doesn't usually win.
3316
3317 This is not done in the COMPARE case above because the
3318 unmodified I2PAT is used in the PARALLEL and so a pattern
3319 with a modified I2SRC would not match. */
3320
3321 if (flag_expensive_optimizations)
3322 {
3323 /* Pass pc_rtx so no substitutions are done, just
3324 simplifications. */
3325 if (i1)
3326 {
3327 subst_low_luid = DF_INSN_LUID (i1);
3328 i1src = subst (i1src, pc_rtx, pc_rtx, 0, 0, 0);
3329 }
3330
3331 subst_low_luid = DF_INSN_LUID (i2);
3332 i2src = subst (i2src, pc_rtx, pc_rtx, 0, 0, 0);
3333 }
3334
3335 n_occurrences = 0; /* `subst' counts here */
3336 subst_low_luid = DF_INSN_LUID (i2);
3337
3338 /* If I1 feeds into I2 and I1DEST is in I1SRC, we need to make a unique
3339 copy of I2SRC each time we substitute it, in order to avoid creating
3340 self-referential RTL when we will be substituting I1SRC for I1DEST
3341 later. Likewise if I0 feeds into I2, either directly or indirectly
3342 through I1, and I0DEST is in I0SRC. */
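      /* A hypothetical illustration of the hazard: if I1 is
	   (set (reg 90) (plus (reg 90) (const_int 4)))
	 then I1SRC contains I1DEST, so substituting I1SRC for (reg 90)
	 inside a single shared copy of I2SRC could produce an rtx that
	 refers to itself; a fresh copy per substitution site avoids that.  */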
3343 newpat = subst (PATTERN (i3), i2dest, i2src, 0, 0,
3344 (i1_feeds_i2_n && i1dest_in_i1src)
3345 || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
3346 && i0dest_in_i0src));
3347 substed_i2 = 1;
3348
3349 /* Record whether I2's body now appears within I3's body. */
3350 i2_is_used = n_occurrences;
3351 }
3352
3353 /* If we already got a failure, don't try to do more. Otherwise, try to
3354 substitute I1 if we have it. */
3355
3356 if (i1 && GET_CODE (newpat) != CLOBBER)
3357 {
3358 /* Check that an autoincrement side-effect on I1 has not been lost.
3359 This happens if I1DEST is mentioned in I2 and dies there, and
3360 has disappeared from the new pattern. */
3361 if ((FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
3362 && i1_feeds_i2_n
3363 && dead_or_set_p (i2, i1dest)
3364 && !reg_overlap_mentioned_p (i1dest, newpat))
3365 /* Before we can do this substitution, we must redo the test done
3366 above (see detailed comments there) that ensures I1DEST isn't
3367 mentioned in any SETs in NEWPAT that are field assignments. */
3368 || !combinable_i3pat (NULL, &newpat, i1dest, NULL_RTX, NULL_RTX,
3369 0, 0, 0))
3370 {
3371 undo_all ();
3372 return 0;
3373 }
3374
3375 n_occurrences = 0;
3376 subst_low_luid = DF_INSN_LUID (i1);
3377
3378 /* If the following substitution will modify I1SRC, make a copy of it
3379 for the case where it is substituted for I1DEST in I2PAT later. */
3380 if (added_sets_2 && i1_feeds_i2_n)
3381 i1src_copy = copy_rtx (i1src);
3382
3383 /* If I0 feeds into I1 and I0DEST is in I0SRC, we need to make a unique
3384 copy of I1SRC each time we substitute it, in order to avoid creating
3385 self-referential RTL when we will be substituting I0SRC for I0DEST
3386 later. */
3387 newpat = subst (newpat, i1dest, i1src, 0, 0,
3388 i0_feeds_i1_n && i0dest_in_i0src);
3389 substed_i1 = 1;
3390
3391 /* Record whether I1's body now appears within I3's body. */
3392 i1_is_used = n_occurrences;
3393 }
3394
3395 /* Likewise for I0 if we have it. */
3396
3397 if (i0 && GET_CODE (newpat) != CLOBBER)
3398 {
3399 if ((FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
3400 && ((i0_feeds_i2_n && dead_or_set_p (i2, i0dest))
3401 || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest)))
3402 && !reg_overlap_mentioned_p (i0dest, newpat))
3403 || !combinable_i3pat (NULL, &newpat, i0dest, NULL_RTX, NULL_RTX,
3404 0, 0, 0))
3405 {
3406 undo_all ();
3407 return 0;
3408 }
3409
3410 /* If the following substitution will modify I0SRC, make a copy of it
3411 for the case where it is substituted for I0DEST in I1PAT later. */
3412 if (added_sets_1 && i0_feeds_i1_n)
3413 i0src_copy = copy_rtx (i0src);
3414 /* And a copy for I0DEST in I2PAT substitution. */
3415 if (added_sets_2 && ((i0_feeds_i1_n && i1_feeds_i2_n)
3416 || (i0_feeds_i2_n)))
3417 i0src_copy2 = copy_rtx (i0src);
3418
3419 n_occurrences = 0;
3420 subst_low_luid = DF_INSN_LUID (i0);
3421 newpat = subst (newpat, i0dest, i0src, 0, 0, 0);
3422 substed_i0 = 1;
3423 }
3424
3425 /* Fail if an autoincrement side-effect has been duplicated. Be careful
3426 to count all the ways that I2SRC and I1SRC can be used. */
3427 if ((FIND_REG_INC_NOTE (i2, NULL_RTX) != 0
3428 && i2_is_used + added_sets_2 > 1)
3429 || (i1 != 0 && FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
3430 && (i1_is_used + added_sets_1 + (added_sets_2 && i1_feeds_i2_n)
3431 > 1))
3432 || (i0 != 0 && FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
3433 && (n_occurrences + added_sets_0
3434 + (added_sets_1 && i0_feeds_i1_n)
3435 + (added_sets_2 && i0_feeds_i2_n)
3436 > 1))
3437 /* Fail if we tried to make a new register. */
3438 || max_reg_num () != maxreg
3439 /* Fail if we couldn't do something and have a CLOBBER. */
3440 || GET_CODE (newpat) == CLOBBER
3441 /* Fail if this new pattern is a MULT and we didn't have one before
3442 at the outer level. */
3443 || (GET_CODE (newpat) == SET && GET_CODE (SET_SRC (newpat)) == MULT
3444 && ! have_mult))
3445 {
3446 undo_all ();
3447 return 0;
3448 }
3449
3450 /* If the actions of the earlier insns must be kept
3451 in addition to substituting them into the latest one,
3452 we must make a new PARALLEL for the latest insn
3453 to hold the additional SETs. */
3454
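  /* An illustrative sketch only: if NEWPAT is a single SET and both
     ADDED_SETS_2 and ADDED_SETS_1 are nonzero (ADDED_SETS_0 zero), the
     code below produces
       (parallel [(set ... the substituted NEWPAT ...)
                  (set I2DEST I2SRC)
                  (set I1DEST I1SRC)])
     filling the extra SETs in from the back of the vector.  */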
3455 if (added_sets_0 || added_sets_1 || added_sets_2)
3456 {
3457 int extra_sets = added_sets_0 + added_sets_1 + added_sets_2;
3458 combine_extras++;
3459
3460 if (GET_CODE (newpat) == PARALLEL)
3461 {
3462 rtvec old = XVEC (newpat, 0);
3463 total_sets = XVECLEN (newpat, 0) + extra_sets;
3464 newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
3465 memcpy (XVEC (newpat, 0)->elem, &old->elem[0],
3466 sizeof (old->elem[0]) * old->num_elem);
3467 }
3468 else
3469 {
3470 rtx old = newpat;
3471 total_sets = 1 + extra_sets;
3472 newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
3473 XVECEXP (newpat, 0, 0) = old;
3474 }
3475
3476 if (added_sets_0)
3477 XVECEXP (newpat, 0, --total_sets) = i0pat;
3478
3479 if (added_sets_1)
3480 {
3481 rtx t = i1pat;
3482 if (i0_feeds_i1_n)
3483 t = subst (t, i0dest, i0src_copy ? i0src_copy : i0src, 0, 0, 0);
3484
3485 XVECEXP (newpat, 0, --total_sets) = t;
3486 }
3487 if (added_sets_2)
3488 {
3489 rtx t = i2pat;
3490 if (i1_feeds_i2_n)
3491 t = subst (t, i1dest, i1src_copy ? i1src_copy : i1src, 0, 0,
3492 i0_feeds_i1_n && i0dest_in_i0src);
3493 if ((i0_feeds_i1_n && i1_feeds_i2_n) || i0_feeds_i2_n)
3494 t = subst (t, i0dest, i0src_copy2 ? i0src_copy2 : i0src, 0, 0, 0);
3495
3496 XVECEXP (newpat, 0, --total_sets) = t;
3497 }
3498 }
3499
3500 validate_replacement:
3501
3502 /* Note which hard regs this insn has as inputs. */
3503 mark_used_regs_combine (newpat);
3504
3505 /* If recog_for_combine fails, it strips existing clobbers. If we'll
3506 consider splitting this pattern, we might need these clobbers. */
3507 if (i1 && GET_CODE (newpat) == PARALLEL
3508 && GET_CODE (XVECEXP (newpat, 0, XVECLEN (newpat, 0) - 1)) == CLOBBER)
3509 {
3510 int len = XVECLEN (newpat, 0);
3511
3512 newpat_vec_with_clobbers = rtvec_alloc (len);
3513 for (i = 0; i < len; i++)
3514 RTVEC_ELT (newpat_vec_with_clobbers, i) = XVECEXP (newpat, 0, i);
3515 }
3516
3517 /* We have recognized nothing yet. */
3518 insn_code_number = -1;
3519
3520 /* See if this is a PARALLEL of two SETs where one SET's destination is
3521 a register that is unused and this isn't marked as an instruction that
3522 might trap in an EH region. In that case, we just need the other SET.
3523 We prefer this over the PARALLEL.
3524
3525 This can occur when simplifying a divmod insn. We *must* test for this
3526 case here because the code below that splits two independent SETs doesn't
3527 handle this case correctly when it updates the register status.
3528
3529 It's pointless doing this if we originally had two sets, one from
3530 i3, and one from i2. Combining then splitting the parallel results
3531 in the original i2 again plus an invalid insn (which we delete).
3532 The net effect is only to move instructions around, which makes
3533 debug info less accurate.
3534
3535 If the remaining SET came from I2 its destination should not be used
3536 between I2 and I3. See PR82024. */
3537
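  /* A typical instance (hypothetical register numbers): a divmod
     PARALLEL such as
       (parallel [(set (reg:SI 95) (div:SI (reg:SI 96) (reg:SI 97)))
                  (set (reg:SI 98) (mod:SI (reg:SI 96) (reg:SI 97)))])
     where I3 has a REG_UNUSED note for (reg:SI 98); only the DIV set
     is kept.  */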
3538 if (!(added_sets_2 && i1 == 0)
3539 && is_parallel_of_n_reg_sets (newpat, 2)
3540 && asm_noperands (newpat) < 0)
3541 {
3542 rtx set0 = XVECEXP (newpat, 0, 0);
3543 rtx set1 = XVECEXP (newpat, 0, 1);
3544 rtx oldpat = newpat;
3545
3546 if (((REG_P (SET_DEST (set1))
3547 && find_reg_note (i3, REG_UNUSED, SET_DEST (set1)))
3548 || (GET_CODE (SET_DEST (set1)) == SUBREG
3549 && find_reg_note (i3, REG_UNUSED, SUBREG_REG (SET_DEST (set1)))))
3550 && insn_nothrow_p (i3)
3551 && !side_effects_p (SET_SRC (set1)))
3552 {
3553 newpat = set0;
3554 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3555 }
3556
3557 else if (((REG_P (SET_DEST (set0))
3558 && find_reg_note (i3, REG_UNUSED, SET_DEST (set0)))
3559 || (GET_CODE (SET_DEST (set0)) == SUBREG
3560 && find_reg_note (i3, REG_UNUSED,
3561 SUBREG_REG (SET_DEST (set0)))))
3562 && insn_nothrow_p (i3)
3563 && !side_effects_p (SET_SRC (set0)))
3564 {
3565 rtx dest = SET_DEST (set1);
3566 if (GET_CODE (dest) == SUBREG)
3567 dest = SUBREG_REG (dest);
3568 if (!reg_used_between_p (dest, i2, i3))
3569 {
3570 newpat = set1;
3571 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3572
3573 if (insn_code_number >= 0)
3574 changed_i3_dest = 1;
3575 }
3576 }
3577
3578 if (insn_code_number < 0)
3579 newpat = oldpat;
3580 }
3581
3582 /* Is the result of combination a valid instruction? */
3583 if (insn_code_number < 0)
3584 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3585
3586 /* If we were combining three insns and the result is a simple SET
3587 with no ASM_OPERANDS that wasn't recognized, try to split it into two
3588 insns. There are two ways to do this. It can be split using a
3589 machine-specific method (like when you have an addition of a large
3590 constant) or by combine in the function find_split_point. */
3591
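  /* A classic example (constants invented): on many RISC-style targets
       (set (reg:SI 100) (plus:SI (reg:SI 101) (const_int 0x12345)))
     does not match directly, but the backend may be able to split it into
       (set (reg:SI 100) (plus:SI (reg:SI 101) (const_int 0x12000)))
       (set (reg:SI 100) (plus:SI (reg:SI 100) (const_int 0x345)))  */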
3592 if (i1 && insn_code_number < 0 && GET_CODE (newpat) == SET
3593 && asm_noperands (newpat) < 0)
3594 {
3595 rtx parallel, *split;
3596 rtx_insn *m_split_insn;
3597
3598 /* See if the MD file can split NEWPAT. If it can't, see if letting it
3599 use I2DEST as a scratch register will help. In the latter case,
3600 convert I2DEST to the mode of the source of NEWPAT if we can. */
3601
3602 m_split_insn = combine_split_insns (newpat, i3);
3603
3604 /* We can only use I2DEST as a scratch reg if it doesn't overlap any
3605 inputs of NEWPAT. */
3606
3607 /* ??? If I2DEST is not safe, and I1DEST exists, then it would be
3608 possible to try that as a scratch reg. This would require adding
3609 more code to make it work though. */
3610
3611 if (m_split_insn == 0 && ! reg_overlap_mentioned_p (i2dest, newpat))
3612 {
3613 machine_mode new_mode = GET_MODE (SET_DEST (newpat));
3614
3615 /* ??? Reusing i2dest without resetting the reg_stat entry for it
3616 (temporarily, until we are committed to this instruction
3617 combination) does not work: for example, any call to nonzero_bits
3618 on the register (from a splitter in the MD file, for example)
3619 will get the old information, which is invalid.
3620
3621 Since nowadays we can create registers during combine just fine,
3622 we should just create a new one here, not reuse i2dest. */
3623
3624 /* First try to split using the original register as a
3625 scratch register. */
3626 parallel = gen_rtx_PARALLEL (VOIDmode,
3627 gen_rtvec (2, newpat,
3628 gen_rtx_CLOBBER (VOIDmode,
3629 i2dest)));
3630 m_split_insn = combine_split_insns (parallel, i3);
3631
3632 /* If that didn't work, try changing the mode of I2DEST if
3633 we can. */
3634 if (m_split_insn == 0
3635 && new_mode != GET_MODE (i2dest)
3636 && new_mode != VOIDmode
3637 && can_change_dest_mode (i2dest, added_sets_2, new_mode))
3638 {
3639 machine_mode old_mode = GET_MODE (i2dest);
3640 rtx ni2dest;
3641
3642 if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
3643 ni2dest = gen_rtx_REG (new_mode, REGNO (i2dest));
3644 else
3645 {
3646 SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], new_mode);
3647 ni2dest = regno_reg_rtx[REGNO (i2dest)];
3648 }
3649
3650 parallel = (gen_rtx_PARALLEL
3651 (VOIDmode,
3652 gen_rtvec (2, newpat,
3653 gen_rtx_CLOBBER (VOIDmode,
3654 ni2dest))));
3655 m_split_insn = combine_split_insns (parallel, i3);
3656
3657 if (m_split_insn == 0
3658 && REGNO (i2dest) >= FIRST_PSEUDO_REGISTER)
3659 {
3660 struct undo *buf;
3661
3662 adjust_reg_mode (regno_reg_rtx[REGNO (i2dest)], old_mode);
3663 buf = undobuf.undos;
3664 undobuf.undos = buf->next;
3665 buf->next = undobuf.frees;
3666 undobuf.frees = buf;
3667 }
3668 }
3669
3670 i2scratch = m_split_insn != 0;
3671 }
3672
3673 /* If recog_for_combine has discarded clobbers, try to use them
3674 again for the split. */
3675 if (m_split_insn == 0 && newpat_vec_with_clobbers)
3676 {
3677 parallel = gen_rtx_PARALLEL (VOIDmode, newpat_vec_with_clobbers);
3678 m_split_insn = combine_split_insns (parallel, i3);
3679 }
3680
3681 if (m_split_insn && NEXT_INSN (m_split_insn) == NULL_RTX)
3682 {
3683 rtx m_split_pat = PATTERN (m_split_insn);
3684 insn_code_number = recog_for_combine (&m_split_pat, i3, &new_i3_notes);
3685 if (insn_code_number >= 0)
3686 newpat = m_split_pat;
3687 }
3688 else if (m_split_insn && NEXT_INSN (NEXT_INSN (m_split_insn)) == NULL_RTX
3689 && (next_nonnote_nondebug_insn (i2) == i3
3690 || ! use_crosses_set_p (PATTERN (m_split_insn), DF_INSN_LUID (i2))))
3691 {
3692 rtx i2set, i3set;
3693 rtx newi3pat = PATTERN (NEXT_INSN (m_split_insn));
3694 newi2pat = PATTERN (m_split_insn);
3695
3696 i3set = single_set (NEXT_INSN (m_split_insn));
3697 i2set = single_set (m_split_insn);
3698
3699 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3700
3701 /* If I2 or I3 has multiple SETs, we won't know how to track
3702 register status, so don't use these insns. If I2's destination
3703 is used between I2 and I3, we also can't use these insns. */
3704
3705 if (i2_code_number >= 0 && i2set && i3set
3706 && (next_nonnote_nondebug_insn (i2) == i3
3707 || ! reg_used_between_p (SET_DEST (i2set), i2, i3)))
3708 insn_code_number = recog_for_combine (&newi3pat, i3,
3709 &new_i3_notes);
3710 if (insn_code_number >= 0)
3711 newpat = newi3pat;
3712
3713 /* It is possible that both insns now set the destination of I3.
3714 If so, we must show an extra use of it. */
3715
3716 if (insn_code_number >= 0)
3717 {
3718 rtx new_i3_dest = SET_DEST (i3set);
3719 rtx new_i2_dest = SET_DEST (i2set);
3720
3721 while (GET_CODE (new_i3_dest) == ZERO_EXTRACT
3722 || GET_CODE (new_i3_dest) == STRICT_LOW_PART
3723 || GET_CODE (new_i3_dest) == SUBREG)
3724 new_i3_dest = XEXP (new_i3_dest, 0);
3725
3726 while (GET_CODE (new_i2_dest) == ZERO_EXTRACT
3727 || GET_CODE (new_i2_dest) == STRICT_LOW_PART
3728 || GET_CODE (new_i2_dest) == SUBREG)
3729 new_i2_dest = XEXP (new_i2_dest, 0);
3730
3731 if (REG_P (new_i3_dest)
3732 && REG_P (new_i2_dest)
3733 && REGNO (new_i3_dest) == REGNO (new_i2_dest)
3734 && REGNO (new_i2_dest) < reg_n_sets_max)
3735 INC_REG_N_SETS (REGNO (new_i2_dest), 1);
3736 }
3737 }
3738
3739 /* If we can split it and use I2DEST, go ahead and see if that
3740 helps things be recognized. Verify that none of the registers
3741 are set between I2 and I3. */
3742 if (insn_code_number < 0
3743 && (split = find_split_point (&newpat, i3, false)) != 0
3744 && (!HAVE_cc0 || REG_P (i2dest))
3745 /* We need I2DEST in the proper mode. If it is a hard register
3746 or the only use of a pseudo, we can change its mode.
3747 Make sure we don't change a hard register to have a mode that
3748 isn't valid for it, or change the number of registers. */
3749 && (GET_MODE (*split) == GET_MODE (i2dest)
3750 || GET_MODE (*split) == VOIDmode
3751 || can_change_dest_mode (i2dest, added_sets_2,
3752 GET_MODE (*split)))
3753 && (next_nonnote_nondebug_insn (i2) == i3
3754 || ! use_crosses_set_p (*split, DF_INSN_LUID (i2)))
3755 /* We can't overwrite I2DEST if its value is still used by
3756 NEWPAT. */
3757 && ! reg_referenced_p (i2dest, newpat))
3758 {
3759 rtx newdest = i2dest;
3760 enum rtx_code split_code = GET_CODE (*split);
3761 machine_mode split_mode = GET_MODE (*split);
3762 bool subst_done = false;
3763 newi2pat = NULL_RTX;
3764
3765 i2scratch = true;
3766
3767 /* *SPLIT may be part of I2SRC, so make sure we have the
3768 original expression around for later debug processing.
3769 We should not need I2SRC any more in other cases. */
3770 if (MAY_HAVE_DEBUG_INSNS)
3771 i2src = copy_rtx (i2src);
3772 else
3773 i2src = NULL;
3774
3775 /* Get NEWDEST as a register in the proper mode. We have already
3776 validated that we can do this. */
3777 if (GET_MODE (i2dest) != split_mode && split_mode != VOIDmode)
3778 {
3779 if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
3780 newdest = gen_rtx_REG (split_mode, REGNO (i2dest));
3781 else
3782 {
3783 SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], split_mode);
3784 newdest = regno_reg_rtx[REGNO (i2dest)];
3785 }
3786 }
3787
3788 /* If *SPLIT is a (mult FOO (const_int pow2)), convert it to
3789 an ASHIFT. This can occur if it was inside a PLUS and hence
3790 appeared to be a memory address. This is a kludge. */
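	  /* E.g. (register number invented) (mult (reg:SI 99) (const_int 8))
	     becomes (ashift (reg:SI 99) (const_int 3)).  */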
3791 if (split_code == MULT
3792 && CONST_INT_P (XEXP (*split, 1))
3793 && INTVAL (XEXP (*split, 1)) > 0
3794 && (i = exact_log2 (UINTVAL (XEXP (*split, 1)))) >= 0)
3795 {
3796 SUBST (*split, gen_rtx_ASHIFT (split_mode,
3797 XEXP (*split, 0), GEN_INT (i)));
3798 /* Update split_code because we may not have a multiply
3799 anymore. */
3800 split_code = GET_CODE (*split);
3801 }
3802
3803 /* Similarly for (plus (mult FOO (const_int pow2))). */
3804 if (split_code == PLUS
3805 && GET_CODE (XEXP (*split, 0)) == MULT
3806 && CONST_INT_P (XEXP (XEXP (*split, 0), 1))
3807 && INTVAL (XEXP (XEXP (*split, 0), 1)) > 0
3808 && (i = exact_log2 (UINTVAL (XEXP (XEXP (*split, 0), 1)))) >= 0)
3809 {
3810 rtx nsplit = XEXP (*split, 0);
3811 SUBST (XEXP (*split, 0), gen_rtx_ASHIFT (GET_MODE (nsplit),
3812 XEXP (nsplit, 0), GEN_INT (i)));
3813 /* Update split_code because we may not have a multiply
3814 anymore. */
3815 split_code = GET_CODE (*split);
3816 }
3817
3818 #ifdef INSN_SCHEDULING
3819 /* If *SPLIT is a paradoxical SUBREG, when we split it, it should
3820 be written as a ZERO_EXTEND. */
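	  /* For instance (hypothetical), (subreg:SI (mem:QI ...) 0) becomes
	     (zero_extend:SI (mem:QI ...)), or a SIGN_EXTEND on targets
	     whose QImode loads sign-extend.  */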
3821 if (split_code == SUBREG && MEM_P (SUBREG_REG (*split)))
3822 {
3823 /* Or as a SIGN_EXTEND if LOAD_EXTEND_OP says that that's
3824 what it really is. */
3825 if (load_extend_op (GET_MODE (SUBREG_REG (*split)))
3826 == SIGN_EXTEND)
3827 SUBST (*split, gen_rtx_SIGN_EXTEND (split_mode,
3828 SUBREG_REG (*split)));
3829 else
3830 SUBST (*split, gen_rtx_ZERO_EXTEND (split_mode,
3831 SUBREG_REG (*split)));
3832 }
3833 #endif
3834
3835 /* Attempt to split binary operators using arithmetic identities. */
3836 if (BINARY_P (SET_SRC (newpat))
3837 && split_mode == GET_MODE (SET_SRC (newpat))
3838 && ! side_effects_p (SET_SRC (newpat)))
3839 {
3840 rtx setsrc = SET_SRC (newpat);
3841 machine_mode mode = GET_MODE (setsrc);
3842 enum rtx_code code = GET_CODE (setsrc);
3843 rtx src_op0 = XEXP (setsrc, 0);
3844 rtx src_op1 = XEXP (setsrc, 1);
3845
3846 /* Split "X = Y op Y" as "Z = Y; X = Z op Z". */
3847 if (rtx_equal_p (src_op0, src_op1))
3848 {
3849 newi2pat = gen_rtx_SET (newdest, src_op0);
3850 SUBST (XEXP (setsrc, 0), newdest);
3851 SUBST (XEXP (setsrc, 1), newdest);
3852 subst_done = true;
3853 }
3854 /* Split "((P op Q) op R) op S" where op is PLUS or MULT. */
3855 else if ((code == PLUS || code == MULT)
3856 && GET_CODE (src_op0) == code
3857 && GET_CODE (XEXP (src_op0, 0)) == code
3858 && (INTEGRAL_MODE_P (mode)
3859 || (FLOAT_MODE_P (mode)
3860 && flag_unsafe_math_optimizations)))
3861 {
3862 rtx p = XEXP (XEXP (src_op0, 0), 0);
3863 rtx q = XEXP (XEXP (src_op0, 0), 1);
3864 rtx r = XEXP (src_op0, 1);
3865 rtx s = src_op1;
3866
3867 /* Split both "((X op Y) op X) op Y" and
3868 "((X op Y) op Y) op X" as "T op T" where T is
3869 "X op Y". */
3870 if ((rtx_equal_p (p,r) && rtx_equal_p (q,s))
3871 || (rtx_equal_p (p,s) && rtx_equal_p (q,r)))
3872 {
3873 newi2pat = gen_rtx_SET (newdest, XEXP (src_op0, 0));
3874 SUBST (XEXP (setsrc, 0), newdest);
3875 SUBST (XEXP (setsrc, 1), newdest);
3876 subst_done = true;
3877 }
3878 /* Split "((X op X) op Y) op Y" as "T op T" where
3879 T is "X op Y". */
3880 else if (rtx_equal_p (p,q) && rtx_equal_p (r,s))
3881 {
3882 rtx tmp = simplify_gen_binary (code, mode, p, r);
3883 newi2pat = gen_rtx_SET (newdest, tmp);
3884 SUBST (XEXP (setsrc, 0), newdest);
3885 SUBST (XEXP (setsrc, 1), newdest);
3886 subst_done = true;
3887 }
3888 }
3889 }
3890
3891 if (!subst_done)
3892 {
3893 newi2pat = gen_rtx_SET (newdest, *split);
3894 SUBST (*split, newdest);
3895 }
3896
3897 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3898
3899 /* recog_for_combine might have added CLOBBERs to newi2pat.
3900 Make sure NEWPAT does not depend on the clobbered regs. */
3901 if (GET_CODE (newi2pat) == PARALLEL)
3902 for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
3903 if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
3904 {
3905 rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
3906 if (reg_overlap_mentioned_p (reg, newpat))
3907 {
3908 undo_all ();
3909 return 0;
3910 }
3911 }
3912
3913 /* If the split point was a MULT and we didn't have one before,
3914 don't use one now. */
3915 if (i2_code_number >= 0 && ! (split_code == MULT && ! have_mult))
3916 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3917 }
3918 }
3919
3920 /* Check for a case where we loaded from memory in a narrow mode and
3921 then sign extended it, but we need both registers. In that case,
3922 we have a PARALLEL with both loads from the same memory location.
3923 We can split this into a load from memory followed by a register-register
3924 copy. This saves at least one insn, more if register allocation can
3925 eliminate the copy.
3926
3927 We cannot do this if the destination of the first assignment is a
3928 condition code register or cc0. We eliminate this case by making sure
3929 the SET_DEST and SET_SRC have the same mode.
3930
3931 We cannot do this if the destination of the second assignment is
3932 a register that we have already assumed is zero-extended. Similarly
3933 for a SUBREG of such a register. */
3934
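  /* A hypothetical shape of such a pattern:
       (parallel [(set (reg:SI 100) (sign_extend:SI (mem:HI (reg:SI 101))))
                  (set (reg:HI 102) (mem:HI (reg:SI 101)))])
     which is split into the extending load (as NEWI2PAT) followed by a
     copy of the low part of (reg:SI 100) into (reg:HI 102).  */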
3935 else if (i1 && insn_code_number < 0 && asm_noperands (newpat) < 0
3936 && GET_CODE (newpat) == PARALLEL
3937 && XVECLEN (newpat, 0) == 2
3938 && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
3939 && GET_CODE (SET_SRC (XVECEXP (newpat, 0, 0))) == SIGN_EXTEND
3940 && (GET_MODE (SET_DEST (XVECEXP (newpat, 0, 0)))
3941 == GET_MODE (SET_SRC (XVECEXP (newpat, 0, 0))))
3942 && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
3943 && rtx_equal_p (SET_SRC (XVECEXP (newpat, 0, 1)),
3944 XEXP (SET_SRC (XVECEXP (newpat, 0, 0)), 0))
3945 && ! use_crosses_set_p (SET_SRC (XVECEXP (newpat, 0, 1)),
3946 DF_INSN_LUID (i2))
3947 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
3948 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
3949 && ! (temp_expr = SET_DEST (XVECEXP (newpat, 0, 1)),
3950 (REG_P (temp_expr)
3951 && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
3952 && GET_MODE_PRECISION (GET_MODE (temp_expr)) < BITS_PER_WORD
3953 && GET_MODE_PRECISION (GET_MODE (temp_expr)) < HOST_BITS_PER_INT
3954 && (reg_stat[REGNO (temp_expr)].nonzero_bits
3955 != GET_MODE_MASK (word_mode))))
3956 && ! (GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) == SUBREG
3957 && (temp_expr = SUBREG_REG (SET_DEST (XVECEXP (newpat, 0, 1))),
3958 (REG_P (temp_expr)
3959 && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
3960 && GET_MODE_PRECISION (GET_MODE (temp_expr)) < BITS_PER_WORD
3961 && GET_MODE_PRECISION (GET_MODE (temp_expr)) < HOST_BITS_PER_INT
3962 && (reg_stat[REGNO (temp_expr)].nonzero_bits
3963 != GET_MODE_MASK (word_mode)))))
3964 && ! reg_overlap_mentioned_p (SET_DEST (XVECEXP (newpat, 0, 1)),
3965 SET_SRC (XVECEXP (newpat, 0, 1)))
3966 && ! find_reg_note (i3, REG_UNUSED,
3967 SET_DEST (XVECEXP (newpat, 0, 0))))
3968 {
3969 rtx ni2dest;
3970
3971 newi2pat = XVECEXP (newpat, 0, 0);
3972 ni2dest = SET_DEST (XVECEXP (newpat, 0, 0));
3973 newpat = XVECEXP (newpat, 0, 1);
3974 SUBST (SET_SRC (newpat),
3975 gen_lowpart (GET_MODE (SET_SRC (newpat)), ni2dest));
3976 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3977
3978 if (i2_code_number >= 0)
3979 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3980
3981 if (insn_code_number >= 0)
3982 swap_i2i3 = 1;
3983 }
3984
3985 /* Similarly, check for a case where we have a PARALLEL of two independent
3986 SETs but we started with three insns. In this case, we can do the sets
3987 as two separate insns. This case occurs when some SET allows two
3988 other insns to combine, but the destination of that SET is still live.
3989
3990 Also do this if we started with two insns and (at least) one of the
3991 resulting sets is a noop; this noop will be deleted later. */
3992
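  /* For example (hypothetical), a combined pattern such as
       (parallel [(set (reg:SI 100) (plus:SI (reg:SI 101) (reg:SI 102)))
                  (set (reg:SI 103) (neg:SI (reg:SI 101)))])
     where neither SET's destination is referenced by the other SET can
     simply be emitted as two separate insns, one becoming NEWI2PAT and
     the other NEWPAT.  */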
3993 else if (insn_code_number < 0 && asm_noperands (newpat) < 0
3994 && GET_CODE (newpat) == PARALLEL
3995 && XVECLEN (newpat, 0) == 2
3996 && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
3997 && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
3998 && (i1 || set_noop_p (XVECEXP (newpat, 0, 0))
3999 || set_noop_p (XVECEXP (newpat, 0, 1)))
4000 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != ZERO_EXTRACT
4001 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != STRICT_LOW_PART
4002 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
4003 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
4004 && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 1)),
4005 XVECEXP (newpat, 0, 0))
4006 && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 0)),
4007 XVECEXP (newpat, 0, 1))
4008 && ! (contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 0)))
4009 && contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 1)))))
4010 {
4011 rtx set0 = XVECEXP (newpat, 0, 0);
4012 rtx set1 = XVECEXP (newpat, 0, 1);
4013
4014 /* Normally, it doesn't matter which of the two is done first,
4015 but the one that references cc0 can't be the second, and
4016 one which uses any regs/memory set in between i2 and i3 can't
4017 be first. The PARALLEL might also have been pre-existing in i3,
4018 so we need to make sure that we won't wrongly hoist a SET to i2
4019 that would conflict with a death note already present there. */
4020 if (!use_crosses_set_p (SET_SRC (set1), DF_INSN_LUID (i2))
4021 && !(REG_P (SET_DEST (set1))
4022 && find_reg_note (i2, REG_DEAD, SET_DEST (set1)))
4023 && !(GET_CODE (SET_DEST (set1)) == SUBREG
4024 && find_reg_note (i2, REG_DEAD,
4025 SUBREG_REG (SET_DEST (set1))))
4026 && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set0))
4027 /* If I3 is a jump, ensure that set0 is a jump so that
4028 we do not create invalid RTL. */
4029 && (!JUMP_P (i3) || SET_DEST (set0) == pc_rtx)
4030 )
4031 {
4032 newi2pat = set1;
4033 newpat = set0;
4034 }
4035 else if (!use_crosses_set_p (SET_SRC (set0), DF_INSN_LUID (i2))
4036 && !(REG_P (SET_DEST (set0))
4037 && find_reg_note (i2, REG_DEAD, SET_DEST (set0)))
4038 && !(GET_CODE (SET_DEST (set0)) == SUBREG
4039 && find_reg_note (i2, REG_DEAD,
4040 SUBREG_REG (SET_DEST (set0))))
4041 && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set1))
4042 /* If I3 is a jump, ensure that set1 is a jump so that
4043 we do not create invalid RTL. */
4044 && (!JUMP_P (i3) || SET_DEST (set1) == pc_rtx)
4045 )
4046 {
4047 newi2pat = set0;
4048 newpat = set1;
4049 }
4050 else
4051 {
4052 undo_all ();
4053 return 0;
4054 }
4055
4056 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
4057
4058 if (i2_code_number >= 0)
4059 {
4060 /* recog_for_combine might have added CLOBBERs to newi2pat.
4061 Make sure NEWPAT does not depend on the clobbered regs. */
4062 if (GET_CODE (newi2pat) == PARALLEL)
4063 {
4064 for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
4065 if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
4066 {
4067 rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
4068 if (reg_overlap_mentioned_p (reg, newpat))
4069 {
4070 undo_all ();
4071 return 0;
4072 }
4073 }
4074 }
4075
4076 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
4077 }
4078 }
4079
4080 /* If it still isn't recognized, fail and change things back the way they
4081 were. */
4082 if ((insn_code_number < 0
4083 /* Is the result a reasonable ASM_OPERANDS? */
4084 && (! check_asm_operands (newpat) || added_sets_1 || added_sets_2)))
4085 {
4086 undo_all ();
4087 return 0;
4088 }
4089
4090 /* If we had to change another insn, make sure it is valid also. */
4091 if (undobuf.other_insn)
4092 {
4093 CLEAR_HARD_REG_SET (newpat_used_regs);
4094
4095 other_pat = PATTERN (undobuf.other_insn);
4096 other_code_number = recog_for_combine (&other_pat, undobuf.other_insn,
4097 &new_other_notes);
4098
4099 if (other_code_number < 0 && ! check_asm_operands (other_pat))
4100 {
4101 undo_all ();
4102 return 0;
4103 }
4104 }
4105
4106 /* If I2 is the CC0 setter and I3 is the CC0 user then check whether
4107 they are adjacent to each other or not. */
4108 if (HAVE_cc0)
4109 {
4110 rtx_insn *p = prev_nonnote_insn (i3);
4111 if (p && p != i2 && NONJUMP_INSN_P (p) && newi2pat
4112 && sets_cc0_p (newi2pat))
4113 {
4114 undo_all ();
4115 return 0;
4116 }
4117 }
4118
4119 /* Only allow this combination if insn_cost reports that the
4120 replacement instructions are cheaper than the originals. */
4121 if (!combine_validate_cost (i0, i1, i2, i3, newpat, newi2pat, other_pat))
4122 {
4123 undo_all ();
4124 return 0;
4125 }
4126
4127 if (MAY_HAVE_DEBUG_INSNS)
4128 {
4129 struct undo *undo;
4130
4131 for (undo = undobuf.undos; undo; undo = undo->next)
4132 if (undo->kind == UNDO_MODE)
4133 {
4134 rtx reg = *undo->where.r;
4135 machine_mode new_mode = GET_MODE (reg);
4136 machine_mode old_mode = undo->old_contents.m;
4137
4138 /* Temporarily revert mode back. */
4139 adjust_reg_mode (reg, old_mode);
4140
4141 if (reg == i2dest && i2scratch)
4142 {
4143 /* If we used i2dest as a scratch register with a
4144 different mode, substitute it for the original
4145 i2src while its original mode is temporarily
4146 restored, and then clear i2scratch so that we don't
4147 do it again later. */
4148 propagate_for_debug (i2, last_combined_insn, reg, i2src,
4149 this_basic_block);
4150 i2scratch = false;
4151 /* Put back the new mode. */
4152 adjust_reg_mode (reg, new_mode);
4153 }
4154 else
4155 {
4156 rtx tempreg = gen_raw_REG (old_mode, REGNO (reg));
4157 rtx_insn *first, *last;
4158
4159 if (reg == i2dest)
4160 {
4161 first = i2;
4162 last = last_combined_insn;
4163 }
4164 else
4165 {
4166 first = i3;
4167 last = undobuf.other_insn;
4168 gcc_assert (last);
4169 if (DF_INSN_LUID (last)
4170 < DF_INSN_LUID (last_combined_insn))
4171 last = last_combined_insn;
4172 }
4173
4174 /* We're dealing with a reg that changed mode but not
4175 meaning, so we want to turn it into a subreg for
4176 the new mode. However, because of REG sharing and
4177 because its mode had already changed, we have to do
4178 it in two steps. First, replace any debug uses of
4179 reg, with its original mode temporarily restored,
4180 with this copy we have created; then, replace the
4181 copy with the SUBREG of the original shared reg,
4182 once again changed to the new mode. */
4183 propagate_for_debug (first, last, reg, tempreg,
4184 this_basic_block);
4185 adjust_reg_mode (reg, new_mode);
4186 propagate_for_debug (first, last, tempreg,
4187 lowpart_subreg (old_mode, reg, new_mode),
4188 this_basic_block);
4189 }
4190 }
4191 }
4192
4193 /* If we will be able to accept this, we have made a
4194 change to the destination of I3. This requires us to
4195 do a few adjustments. */
4196
4197 if (changed_i3_dest)
4198 {
4199 PATTERN (i3) = newpat;
4200 adjust_for_new_dest (i3);
4201 }
4202
4203 /* We now know that we can do this combination. Merge the insns and
4204 update the status of registers and LOG_LINKS. */
4205
4206 if (undobuf.other_insn)
4207 {
4208 rtx note, next;
4209
4210 PATTERN (undobuf.other_insn) = other_pat;
4211
4212 /* If any of the notes in OTHER_INSN were REG_DEAD or REG_UNUSED,
4213 ensure that they are still valid. Then add any non-duplicate
4214 notes added by recog_for_combine. */
4215 for (note = REG_NOTES (undobuf.other_insn); note; note = next)
4216 {
4217 next = XEXP (note, 1);
4218
4219 if ((REG_NOTE_KIND (note) == REG_DEAD
4220 && !reg_referenced_p (XEXP (note, 0),
4221 PATTERN (undobuf.other_insn)))
4222 || (REG_NOTE_KIND (note) == REG_UNUSED
4223 && !reg_set_p (XEXP (note, 0),
4224 PATTERN (undobuf.other_insn)))
4225 /* Simply drop equal notes since they may no longer be valid
4226 for other_insn. It may be possible to record that the CC
4227 register has changed and only discard those notes, but
4228 in practice it's unnecessary complication and doesn't
4229 give any meaningful improvement.
4230
4231 See PR78559. */
4232 || REG_NOTE_KIND (note) == REG_EQUAL
4233 || REG_NOTE_KIND (note) == REG_EQUIV)
4234 remove_note (undobuf.other_insn, note);
4235 }
4236
4237 distribute_notes (new_other_notes, undobuf.other_insn,
4238 undobuf.other_insn, NULL, NULL_RTX, NULL_RTX,
4239 NULL_RTX);
4240 }
4241
4242 if (swap_i2i3)
4243 {
4244 rtx_insn *insn;
4245 struct insn_link *link;
4246 rtx ni2dest;
4247
4248 /* I3 now uses what used to be its destination and which is now
4249 I2's destination. This requires us to do a few adjustments. */
4250 PATTERN (i3) = newpat;
4251 adjust_for_new_dest (i3);
4252
4253 /* We need a LOG_LINK from I3 to I2. But we used to have one,
4254 so we still will.
4255
4256 However, some later insn might be using I2's dest and have
4257 a LOG_LINK pointing at I3. We must remove this link.
4258 The simplest way to remove the link is to point it at I1,
4259 which we know will be a NOTE. */
4260
4261 /* newi2pat is usually a SET here; however, recog_for_combine might
4262 have added some clobbers. */
4263 if (GET_CODE (newi2pat) == PARALLEL)
4264 ni2dest = SET_DEST (XVECEXP (newi2pat, 0, 0));
4265 else
4266 ni2dest = SET_DEST (newi2pat);
4267
4268 for (insn = NEXT_INSN (i3);
4269 insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
4270 || insn != BB_HEAD (this_basic_block->next_bb));
4271 insn = NEXT_INSN (insn))
4272 {
4273 if (NONDEBUG_INSN_P (insn)
4274 && reg_referenced_p (ni2dest, PATTERN (insn)))
4275 {
4276 FOR_EACH_LOG_LINK (link, insn)
4277 if (link->insn == i3)
4278 link->insn = i1;
4279
4280 break;
4281 }
4282 }
4283 }
4284
4285 {
4286 rtx i3notes, i2notes, i1notes = 0, i0notes = 0;
4287 struct insn_link *i3links, *i2links, *i1links = 0, *i0links = 0;
4288 rtx midnotes = 0;
4289 int from_luid;
4290 /* Compute which registers we expect to eliminate. newi2pat may be setting
4291 either i3dest or i2dest, so we must check it. */
4292 rtx elim_i2 = ((newi2pat && reg_set_p (i2dest, newi2pat))
4293 || i2dest_in_i2src || i2dest_in_i1src || i2dest_in_i0src
4294 || !i2dest_killed
4295 ? 0 : i2dest);
4296 /* For i1, we need to compute both local elimination and global
4297 elimination information with respect to newi2pat because i1dest
4298 may be the same as i3dest, in which case newi2pat may be setting
4299 i1dest. Global information is used when distributing REG_DEAD
4300 note for i2 and i3, in which case it does matter if newi2pat sets
4301 i1dest or not.
4302
4303 Local information is used when distributing REG_DEAD note for i1,
4304 in which case it doesn't matter if newi2pat sets i1dest or not.
4305 See PR62151, if we have four insns combination:
4306 i0: r0 <- i0src
4307 i1: r1 <- i1src (using r0)
4308 REG_DEAD (r0)
4309 i2: r0 <- i2src (using r1)
4310 i3: r3 <- i3src (using r0)
4311 ix: using r0
4312 From i1's point of view, r0 is eliminated, no matter if it is set
4313 by newi2pat or not. In other words, REG_DEAD info for r0 in i1
4314 should be discarded.
4315
4316 Note local information only affects cases in forms like "I1->I2->I3",
4317 "I0->I1->I2->I3" or "I0&I1->I2, I2->I3". For other cases like
4318 "I0->I1, I1&I2->I3" or "I1&I2->I3", newi2pat won't set i1dest or
4319 i0dest anyway. */
4320 rtx local_elim_i1 = (i1 == 0 || i1dest_in_i1src || i1dest_in_i0src
4321 || !i1dest_killed
4322 ? 0 : i1dest);
4323 rtx elim_i1 = (local_elim_i1 == 0
4324 || (newi2pat && reg_set_p (i1dest, newi2pat))
4325 ? 0 : i1dest);
4326 /* Same case as i1. */
4327 rtx local_elim_i0 = (i0 == 0 || i0dest_in_i0src || !i0dest_killed
4328 ? 0 : i0dest);
4329 rtx elim_i0 = (local_elim_i0 == 0
4330 || (newi2pat && reg_set_p (i0dest, newi2pat))
4331 ? 0 : i0dest);
4332
4333 /* Get the old REG_NOTES and LOG_LINKS from all our insns and
4334 clear them. */
4335 i3notes = REG_NOTES (i3), i3links = LOG_LINKS (i3);
4336 i2notes = REG_NOTES (i2), i2links = LOG_LINKS (i2);
4337 if (i1)
4338 i1notes = REG_NOTES (i1), i1links = LOG_LINKS (i1);
4339 if (i0)
4340 i0notes = REG_NOTES (i0), i0links = LOG_LINKS (i0);
4341
4342 /* Ensure that we do not have something that should not be shared but
4343 occurs multiple times in the new insns. Check this by first
4344 resetting all the `used' flags and then copying anything that is shared. */
4345
4346 reset_used_flags (i3notes);
4347 reset_used_flags (i2notes);
4348 reset_used_flags (i1notes);
4349 reset_used_flags (i0notes);
4350 reset_used_flags (newpat);
4351 reset_used_flags (newi2pat);
4352 if (undobuf.other_insn)
4353 reset_used_flags (PATTERN (undobuf.other_insn));
4354
4355 i3notes = copy_rtx_if_shared (i3notes);
4356 i2notes = copy_rtx_if_shared (i2notes);
4357 i1notes = copy_rtx_if_shared (i1notes);
4358 i0notes = copy_rtx_if_shared (i0notes);
4359 newpat = copy_rtx_if_shared (newpat);
4360 newi2pat = copy_rtx_if_shared (newi2pat);
4361 if (undobuf.other_insn)
4362 reset_used_flags (PATTERN (undobuf.other_insn));
4363
4364 INSN_CODE (i3) = insn_code_number;
4365 PATTERN (i3) = newpat;
4366
4367 if (CALL_P (i3) && CALL_INSN_FUNCTION_USAGE (i3))
4368 {
4369 for (rtx link = CALL_INSN_FUNCTION_USAGE (i3); link;
4370 link = XEXP (link, 1))
4371 {
4372 if (substed_i2)
4373 {
4374 /* I2SRC must still be meaningful at this point. Some
4375 splitting operations can invalidate I2SRC, but those
4376 operations do not apply to calls. */
4377 gcc_assert (i2src);
4378 XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
4379 i2dest, i2src);
4380 }
4381 if (substed_i1)
4382 XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
4383 i1dest, i1src);
4384 if (substed_i0)
4385 XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
4386 i0dest, i0src);
4387 }
4388 }
4389
4390 if (undobuf.other_insn)
4391 INSN_CODE (undobuf.other_insn) = other_code_number;
4392
4393 /* We had one special case above where I2 had more than one set and
4394 we replaced a destination of one of those sets with the destination
4395 of I3. In that case, we have to update LOG_LINKS of insns later
4396 in this basic block. Note that this (expensive) case is rare.
4397
4398 Also, in this case, we must pretend that all REG_NOTEs for I2
4399 actually came from I3, so that REG_UNUSED notes from I2 will be
4400 properly handled. */
4401
4402 if (i3_subst_into_i2)
4403 {
4404 for (i = 0; i < XVECLEN (PATTERN (i2), 0); i++)
4405 if ((GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == SET
4406 || GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == CLOBBER)
4407 && REG_P (SET_DEST (XVECEXP (PATTERN (i2), 0, i)))
4408 && SET_DEST (XVECEXP (PATTERN (i2), 0, i)) != i2dest
4409 && ! find_reg_note (i2, REG_UNUSED,
4410 SET_DEST (XVECEXP (PATTERN (i2), 0, i))))
4411 for (temp_insn = NEXT_INSN (i2);
4412 temp_insn
4413 && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
4414 || BB_HEAD (this_basic_block) != temp_insn);
4415 temp_insn = NEXT_INSN (temp_insn))
4416 if (temp_insn != i3 && NONDEBUG_INSN_P (temp_insn))
4417 FOR_EACH_LOG_LINK (link, temp_insn)
4418 if (link->insn == i2)
4419 link->insn = i3;
4420
4421 if (i3notes)
4422 {
4423 rtx link = i3notes;
4424 while (XEXP (link, 1))
4425 link = XEXP (link, 1);
4426 XEXP (link, 1) = i2notes;
4427 }
4428 else
4429 i3notes = i2notes;
4430 i2notes = 0;
4431 }
4432
4433 LOG_LINKS (i3) = NULL;
4434 REG_NOTES (i3) = 0;
4435 LOG_LINKS (i2) = NULL;
4436 REG_NOTES (i2) = 0;
4437
4438 if (newi2pat)
4439 {
4440 if (MAY_HAVE_DEBUG_INSNS && i2scratch)
4441 propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
4442 this_basic_block);
4443 INSN_CODE (i2) = i2_code_number;
4444 PATTERN (i2) = newi2pat;
4445 }
4446 else
4447 {
4448 if (MAY_HAVE_DEBUG_INSNS && i2src)
4449 propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
4450 this_basic_block);
4451 SET_INSN_DELETED (i2);
4452 }
4453
4454 if (i1)
4455 {
4456 LOG_LINKS (i1) = NULL;
4457 REG_NOTES (i1) = 0;
4458 if (MAY_HAVE_DEBUG_INSNS)
4459 propagate_for_debug (i1, last_combined_insn, i1dest, i1src,
4460 this_basic_block);
4461 SET_INSN_DELETED (i1);
4462 }
4463
4464 if (i0)
4465 {
4466 LOG_LINKS (i0) = NULL;
4467 REG_NOTES (i0) = 0;
4468 if (MAY_HAVE_DEBUG_INSNS)
4469 propagate_for_debug (i0, last_combined_insn, i0dest, i0src,
4470 this_basic_block);
4471 SET_INSN_DELETED (i0);
4472 }
4473
4474 /* Get death notes for everything that is now used in either I3 or
4475 I2 and used to die in a previous insn. If we built two new
4476 patterns, move from I1 to I2 then I2 to I3 so that we get the
4477 proper movement on registers that I2 modifies. */
4478
4479 if (i0)
4480 from_luid = DF_INSN_LUID (i0);
4481 else if (i1)
4482 from_luid = DF_INSN_LUID (i1);
4483 else
4484 from_luid = DF_INSN_LUID (i2);
4485 if (newi2pat)
4486 move_deaths (newi2pat, NULL_RTX, from_luid, i2, &midnotes);
4487 move_deaths (newpat, newi2pat, from_luid, i3, &midnotes);
4488
4489 /* Distribute all the LOG_LINKS and REG_NOTES from I1, I2, and I3. */
4490 if (i3notes)
4491 distribute_notes (i3notes, i3, i3, newi2pat ? i2 : NULL,
4492 elim_i2, elim_i1, elim_i0);
4493 if (i2notes)
4494 distribute_notes (i2notes, i2, i3, newi2pat ? i2 : NULL,
4495 elim_i2, elim_i1, elim_i0);
4496 if (i1notes)
4497 distribute_notes (i1notes, i1, i3, newi2pat ? i2 : NULL,
4498 elim_i2, local_elim_i1, local_elim_i0);
4499 if (i0notes)
4500 distribute_notes (i0notes, i0, i3, newi2pat ? i2 : NULL,
4501 elim_i2, elim_i1, local_elim_i0);
4502 if (midnotes)
4503 distribute_notes (midnotes, NULL, i3, newi2pat ? i2 : NULL,
4504 elim_i2, elim_i1, elim_i0);
4505
4506 /* Distribute any notes added to I2 or I3 by recog_for_combine. We
4507 know these are REG_UNUSED and want them to go to the desired insn,
4508 so we always pass it as i3. */
4509
4510 if (newi2pat && new_i2_notes)
4511 distribute_notes (new_i2_notes, i2, i2, NULL, NULL_RTX, NULL_RTX,
4512 NULL_RTX);
4513
4514 if (new_i3_notes)
4515 distribute_notes (new_i3_notes, i3, i3, NULL, NULL_RTX, NULL_RTX,
4516 NULL_RTX);
4517
4518 /* If I3DEST was used in I3SRC, it really died in I3. We may need to
4519 put a REG_DEAD note for it somewhere. If NEWI2PAT exists and sets
4520 I3DEST, the death must be somewhere before I2, not I3. If we passed I3
4521 in that case, it might delete I2. Similarly for I2 and I1.
4522 Show an additional death due to the REG_DEAD note we make here. If
4523 we discard it in distribute_notes, we will decrement it again. */
4524
4525 if (i3dest_killed)
4526 {
4527 rtx new_note = alloc_reg_note (REG_DEAD, i3dest_killed, NULL_RTX);
4528 if (newi2pat && reg_set_p (i3dest_killed, newi2pat))
4529 distribute_notes (new_note, NULL, i2, NULL, elim_i2,
4530 elim_i1, elim_i0);
4531 else
4532 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4533 elim_i2, elim_i1, elim_i0);
4534 }
4535
4536 if (i2dest_in_i2src)
4537 {
4538 rtx new_note = alloc_reg_note (REG_DEAD, i2dest, NULL_RTX);
4539 if (newi2pat && reg_set_p (i2dest, newi2pat))
4540 distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4541 NULL_RTX, NULL_RTX);
4542 else
4543 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4544 NULL_RTX, NULL_RTX, NULL_RTX);
4545 }
4546
4547 if (i1dest_in_i1src)
4548 {
4549 rtx new_note = alloc_reg_note (REG_DEAD, i1dest, NULL_RTX);
4550 if (newi2pat && reg_set_p (i1dest, newi2pat))
4551 distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4552 NULL_RTX, NULL_RTX);
4553 else
4554 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4555 NULL_RTX, NULL_RTX, NULL_RTX);
4556 }
4557
4558 if (i0dest_in_i0src)
4559 {
4560 rtx new_note = alloc_reg_note (REG_DEAD, i0dest, NULL_RTX);
4561 if (newi2pat && reg_set_p (i0dest, newi2pat))
4562 distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4563 NULL_RTX, NULL_RTX);
4564 else
4565 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4566 NULL_RTX, NULL_RTX, NULL_RTX);
4567 }
4568
4569 distribute_links (i3links);
4570 distribute_links (i2links);
4571 distribute_links (i1links);
4572 distribute_links (i0links);
4573
4574 if (REG_P (i2dest))
4575 {
4576 struct insn_link *link;
4577 rtx_insn *i2_insn = 0;
4578 rtx i2_val = 0, set;
4579
4580 /* The insn that used to set this register doesn't exist, and
4581 this life of the register may not exist either. See if one of
4582 I3's links points to an insn that sets I2DEST. If it does,
4583 that is now the last known value for I2DEST. If we don't update
4584 this and I2 set the register to a value that depended on its old
4585 contents, we will get confused. If this insn is used, things
4586 will be set correctly in combine_instructions. */
4587 FOR_EACH_LOG_LINK (link, i3)
4588 if ((set = single_set (link->insn)) != 0
4589 && rtx_equal_p (i2dest, SET_DEST (set)))
4590 i2_insn = link->insn, i2_val = SET_SRC (set);
4591
4592 record_value_for_reg (i2dest, i2_insn, i2_val);
4593
4594 /* If the reg formerly set in I2 died only once and that was in I3,
4595 zero its use count so it won't make `reload' do any work. */
4596 if (! added_sets_2
4597 && (newi2pat == 0 || ! reg_mentioned_p (i2dest, newi2pat))
4598 && ! i2dest_in_i2src
4599 && REGNO (i2dest) < reg_n_sets_max)
4600 INC_REG_N_SETS (REGNO (i2dest), -1);
4601 }
4602
4603 if (i1 && REG_P (i1dest))
4604 {
4605 struct insn_link *link;
4606 rtx_insn *i1_insn = 0;
4607 rtx i1_val = 0, set;
4608
4609 FOR_EACH_LOG_LINK (link, i3)
4610 if ((set = single_set (link->insn)) != 0
4611 && rtx_equal_p (i1dest, SET_DEST (set)))
4612 i1_insn = link->insn, i1_val = SET_SRC (set);
4613
4614 record_value_for_reg (i1dest, i1_insn, i1_val);
4615
4616 if (! added_sets_1
4617 && ! i1dest_in_i1src
4618 && REGNO (i1dest) < reg_n_sets_max)
4619 INC_REG_N_SETS (REGNO (i1dest), -1);
4620 }
4621
4622 if (i0 && REG_P (i0dest))
4623 {
4624 struct insn_link *link;
4625 rtx_insn *i0_insn = 0;
4626 rtx i0_val = 0, set;
4627
4628 FOR_EACH_LOG_LINK (link, i3)
4629 if ((set = single_set (link->insn)) != 0
4630 && rtx_equal_p (i0dest, SET_DEST (set)))
4631 i0_insn = link->insn, i0_val = SET_SRC (set);
4632
4633 record_value_for_reg (i0dest, i0_insn, i0_val);
4634
4635 if (! added_sets_0
4636 && ! i0dest_in_i0src
4637 && REGNO (i0dest) < reg_n_sets_max)
4638 INC_REG_N_SETS (REGNO (i0dest), -1);
4639 }
4640
4641 /* Update reg_stat[].nonzero_bits et al for any changes that may have
4642 been made to this insn. The order is important, because newi2pat
4643 can affect nonzero_bits of newpat. */
4644 if (newi2pat)
4645 note_stores (newi2pat, set_nonzero_bits_and_sign_copies, NULL);
4646 note_stores (newpat, set_nonzero_bits_and_sign_copies, NULL);
4647 }
4648
4649 if (undobuf.other_insn != NULL_RTX)
4650 {
4651 if (dump_file)
4652 {
4653 fprintf (dump_file, "modifying other_insn ");
4654 dump_insn_slim (dump_file, undobuf.other_insn);
4655 }
4656 df_insn_rescan (undobuf.other_insn);
4657 }
4658
4659 if (i0 && !(NOTE_P (i0) && (NOTE_KIND (i0) == NOTE_INSN_DELETED)))
4660 {
4661 if (dump_file)
4662 {
4663 fprintf (dump_file, "modifying insn i0 ");
4664 dump_insn_slim (dump_file, i0);
4665 }
4666 df_insn_rescan (i0);
4667 }
4668
4669 if (i1 && !(NOTE_P (i1) && (NOTE_KIND (i1) == NOTE_INSN_DELETED)))
4670 {
4671 if (dump_file)
4672 {
4673 fprintf (dump_file, "modifying insn i1 ");
4674 dump_insn_slim (dump_file, i1);
4675 }
4676 df_insn_rescan (i1);
4677 }
4678
4679 if (i2 && !(NOTE_P (i2) && (NOTE_KIND (i2) == NOTE_INSN_DELETED)))
4680 {
4681 if (dump_file)
4682 {
4683 fprintf (dump_file, "modifying insn i2 ");
4684 dump_insn_slim (dump_file, i2);
4685 }
4686 df_insn_rescan (i2);
4687 }
4688
4689 if (i3 && !(NOTE_P (i3) && (NOTE_KIND (i3) == NOTE_INSN_DELETED)))
4690 {
4691 if (dump_file)
4692 {
4693 fprintf (dump_file, "modifying insn i3 ");
4694 dump_insn_slim (dump_file, i3);
4695 }
4696 df_insn_rescan (i3);
4697 }
4698
4699 /* Set new_direct_jump_p if a new return or simple jump instruction
4700 has been created. Adjust the CFG accordingly. */
4701 if (returnjump_p (i3) || any_uncondjump_p (i3))
4702 {
4703 *new_direct_jump_p = 1;
4704 mark_jump_label (PATTERN (i3), i3, 0);
4705 update_cfg_for_uncondjump (i3);
4706 }
4707
4708 if (undobuf.other_insn != NULL_RTX
4709 && (returnjump_p (undobuf.other_insn)
4710 || any_uncondjump_p (undobuf.other_insn)))
4711 {
4712 *new_direct_jump_p = 1;
4713 update_cfg_for_uncondjump (undobuf.other_insn);
4714 }
4715
4716 if (GET_CODE (PATTERN (i3)) == TRAP_IF
4717 && XEXP (PATTERN (i3), 0) == const1_rtx)
4718 {
4719 basic_block bb = BLOCK_FOR_INSN (i3);
4720 gcc_assert (bb);
4721 remove_edge (split_block (bb, i3));
4722 emit_barrier_after_bb (bb);
4723 *new_direct_jump_p = 1;
4724 }
4725
4726 if (undobuf.other_insn
4727 && GET_CODE (PATTERN (undobuf.other_insn)) == TRAP_IF
4728 && XEXP (PATTERN (undobuf.other_insn), 0) == const1_rtx)
4729 {
4730 basic_block bb = BLOCK_FOR_INSN (undobuf.other_insn);
4731 gcc_assert (bb);
4732 remove_edge (split_block (bb, undobuf.other_insn));
4733 emit_barrier_after_bb (bb);
4734 *new_direct_jump_p = 1;
4735 }
4736
4737 /* A noop might also need cleaning up of CFG, if it comes from the
4738 simplification of a jump. */
4739 if (JUMP_P (i3)
4740 && GET_CODE (newpat) == SET
4741 && SET_SRC (newpat) == pc_rtx
4742 && SET_DEST (newpat) == pc_rtx)
4743 {
4744 *new_direct_jump_p = 1;
4745 update_cfg_for_uncondjump (i3);
4746 }
4747
4748 if (undobuf.other_insn != NULL_RTX
4749 && JUMP_P (undobuf.other_insn)
4750 && GET_CODE (PATTERN (undobuf.other_insn)) == SET
4751 && SET_SRC (PATTERN (undobuf.other_insn)) == pc_rtx
4752 && SET_DEST (PATTERN (undobuf.other_insn)) == pc_rtx)
4753 {
4754 *new_direct_jump_p = 1;
4755 update_cfg_for_uncondjump (undobuf.other_insn);
4756 }
4757
4758 combine_successes++;
4759 undo_commit ();
4760
4761 rtx_insn *ret = newi2pat ? i2 : i3;
4762 if (added_links_insn && DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (ret))
4763 ret = added_links_insn;
4764 if (added_notes_insn && DF_INSN_LUID (added_notes_insn) < DF_INSN_LUID (ret))
4765 ret = added_notes_insn;
4766
4767 return ret;
4768 }
4769 \f
4770 /* Get a marker for undoing to the current state. */
4771
4772 static void *
4773 get_undo_marker (void)
4774 {
4775 return undobuf.undos;
4776 }
4777
4778 /* Undo the modifications up to the marker. */
4779
4780 static void
4781 undo_to_marker (void *marker)
4782 {
4783 struct undo *undo, *next;
4784
4785 for (undo = undobuf.undos; undo != marker; undo = next)
4786 {
4787 gcc_assert (undo);
4788
4789 next = undo->next;
4790 switch (undo->kind)
4791 {
4792 case UNDO_RTX:
4793 *undo->where.r = undo->old_contents.r;
4794 break;
4795 case UNDO_INT:
4796 *undo->where.i = undo->old_contents.i;
4797 break;
4798 case UNDO_MODE:
4799 adjust_reg_mode (*undo->where.r, undo->old_contents.m);
4800 break;
4801 case UNDO_LINKS:
4802 *undo->where.l = undo->old_contents.l;
4803 break;
4804 default:
4805 gcc_unreachable ();
4806 }
4807
4808 undo->next = undobuf.frees;
4809 undobuf.frees = undo;
4810 }
4811
4812 undobuf.undos = (struct undo *) marker;
4813 }
4814
4815 /* Undo all the modifications recorded in undobuf. */
4816
4817 static void
4818 undo_all (void)
4819 {
4820 undo_to_marker (0);
4821 }
4822
4823 /* We've committed to accepting the changes we made. Move all
4824 of the undos to the free list. */
4825
4826 static void
4827 undo_commit (void)
4828 {
4829 struct undo *undo, *next;
4830
4831 for (undo = undobuf.undos; undo; undo = next)
4832 {
4833 next = undo->next;
4834 undo->next = undobuf.frees;
4835 undobuf.frees = undo;
4836 }
4837 undobuf.undos = 0;
4838 }
4839 \f
4840 /* Find the innermost point within the rtx at LOC, possibly LOC itself,
4841 where we have an arithmetic expression and return that point. LOC will
4842 be inside INSN.
4843
4844 try_combine will call this function to see if an insn can be split into
4845 two insns. */
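/* For example (hypothetical), given
     (set (reg 100) (plus (mult (reg 101) (reg 102)) (reg 103)))
   a natural split point is the inner MULT: computing it into a separate
   register first may allow both resulting insns to match.  */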
4846
4847 static rtx *
4848 find_split_point (rtx *loc, rtx_insn *insn, bool set_src)
4849 {
4850 rtx x = *loc;
4851 enum rtx_code code = GET_CODE (x);
4852 rtx *split;
4853 unsigned HOST_WIDE_INT len = 0;
4854 HOST_WIDE_INT pos = 0;
4855 int unsignedp = 0;
4856 rtx inner = NULL_RTX;
4857 scalar_int_mode mode, inner_mode;
4858
4859 /* First special-case some codes. */
4860 switch (code)
4861 {
4862 case SUBREG:
4863 #ifdef INSN_SCHEDULING
4864 /* If we are making a paradoxical SUBREG invalid, it becomes a split
4865 point. */
4866 if (MEM_P (SUBREG_REG (x)))
4867 return loc;
4868 #endif
4869 return find_split_point (&SUBREG_REG (x), insn, false);
4870
4871 case MEM:
4872 /* If we have (mem (const ..)) or (mem (symbol_ref ...)), split it
4873 using LO_SUM and HIGH. */
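      /* Illustrative only: (mem (symbol_ref "x")) becomes
	   (mem (lo_sum (high (symbol_ref "x")) (symbol_ref "x")))
	 and the HIGH term is returned as the split point.  */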
4874 if (HAVE_lo_sum && (GET_CODE (XEXP (x, 0)) == CONST
4875 || GET_CODE (XEXP (x, 0)) == SYMBOL_REF))
4876 {
4877 machine_mode address_mode = get_address_mode (x);
4878
4879 SUBST (XEXP (x, 0),
4880 gen_rtx_LO_SUM (address_mode,
4881 gen_rtx_HIGH (address_mode, XEXP (x, 0)),
4882 XEXP (x, 0)));
4883 return &XEXP (XEXP (x, 0), 0);
4884 }
4885
4886 /* If we have a PLUS whose second operand is a constant and the
4887 address is not valid, perhaps we can split it up using
4888 the machine-specific way to split large constants. We use
4889 the first pseudo-reg (one of the virtual regs) as a placeholder;
4890 it will not remain in the result. */
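      /* A sketch of how this can play out (constants and behaviour are
	 hypothetical): for an address (plus (reg 100) (const_int 0x12345))
	 the backend might split the placeholder SET into
	   (set P (plus (reg 100) (const_int 0x12000)))
	   (set P (plus P (const_int 0x345)))
	 where P is the placeholder register; SRC1 is then spliced back
	 into SRC2 below and the inner PLUS becomes the split point.  */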
4891 if (GET_CODE (XEXP (x, 0)) == PLUS
4892 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
4893 && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
4894 MEM_ADDR_SPACE (x)))
4895 {
4896 rtx reg = regno_reg_rtx[FIRST_PSEUDO_REGISTER];
4897 rtx_insn *seq = combine_split_insns (gen_rtx_SET (reg, XEXP (x, 0)),
4898 subst_insn);
4899
4900 /* This should have produced two insns, each of which sets our
4901 placeholder. If the source of the second is a valid address,
4902 we can put both sources together and make a split point
4903 in the middle. */
4904
4905 if (seq
4906 && NEXT_INSN (seq) != NULL_RTX
4907 && NEXT_INSN (NEXT_INSN (seq)) == NULL_RTX
4908 && NONJUMP_INSN_P (seq)
4909 && GET_CODE (PATTERN (seq)) == SET
4910 && SET_DEST (PATTERN (seq)) == reg
4911 && ! reg_mentioned_p (reg,
4912 SET_SRC (PATTERN (seq)))
4913 && NONJUMP_INSN_P (NEXT_INSN (seq))
4914 && GET_CODE (PATTERN (NEXT_INSN (seq))) == SET
4915 && SET_DEST (PATTERN (NEXT_INSN (seq))) == reg
4916 && memory_address_addr_space_p
4917 (GET_MODE (x), SET_SRC (PATTERN (NEXT_INSN (seq))),
4918 MEM_ADDR_SPACE (x)))
4919 {
4920 rtx src1 = SET_SRC (PATTERN (seq));
4921 rtx src2 = SET_SRC (PATTERN (NEXT_INSN (seq)));
4922
4923 /* Replace the placeholder in SRC2 with SRC1. If we can
4924 find where in SRC2 it was placed, that can become our
4925 split point and we can replace this address with SRC2.
4926 Just try two obvious places. */
4927
4928 src2 = replace_rtx (src2, reg, src1);
4929 split = 0;
4930 if (XEXP (src2, 0) == src1)
4931 split = &XEXP (src2, 0);
4932 else if (GET_RTX_FORMAT (GET_CODE (XEXP (src2, 0)))[0] == 'e'
4933 && XEXP (XEXP (src2, 0), 0) == src1)
4934 split = &XEXP (XEXP (src2, 0), 0);
4935
4936 if (split)
4937 {
4938 SUBST (XEXP (x, 0), src2);
4939 return split;
4940 }
4941 }
4942
4943 /* If that didn't work, perhaps the first operand is complex and
4944 needs to be computed separately, so make a split point there.
4945 This will occur on machines that just support REG + CONST
4946 and have a constant moved through some previous computation. */
4947
4948 else if (!OBJECT_P (XEXP (XEXP (x, 0), 0))
4949 && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
4950 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
4951 return &XEXP (XEXP (x, 0), 0);
4952 }
4953
4954 /* If we have a PLUS whose first operand is complex, try computing it
4955 separately by making a split there. */
4956 if (GET_CODE (XEXP (x, 0)) == PLUS
4957 && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
4958 MEM_ADDR_SPACE (x))
4959 && ! OBJECT_P (XEXP (XEXP (x, 0), 0))
4960 && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
4961 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
4962 return &XEXP (XEXP (x, 0), 0);
4963 break;
4964
4965 case SET:
4966 /* If SET_DEST is CC0 and SET_SRC is not an operand, a COMPARE, or a
4967 ZERO_EXTRACT, the most likely reason why this doesn't match is that
4968 we need to put the operand into a register. So split at that
4969 point. */
4970
4971 if (SET_DEST (x) == cc0_rtx
4972 && GET_CODE (SET_SRC (x)) != COMPARE
4973 && GET_CODE (SET_SRC (x)) != ZERO_EXTRACT
4974 && !OBJECT_P (SET_SRC (x))
4975 && ! (GET_CODE (SET_SRC (x)) == SUBREG
4976 && OBJECT_P (SUBREG_REG (SET_SRC (x)))))
4977 return &SET_SRC (x);
4978
4979 /* See if we can split SET_SRC as it stands. */
4980 split = find_split_point (&SET_SRC (x), insn, true);
4981 if (split && split != &SET_SRC (x))
4982 return split;
4983
4984 /* See if we can split SET_DEST as it stands. */
4985 split = find_split_point (&SET_DEST (x), insn, false);
4986 if (split && split != &SET_DEST (x))
4987 return split;
4988
4989 /* See if this is a bitfield assignment with everything constant. If
4990 so, this is an IOR of an AND, so split it into that. */
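/* A hypothetical example of the rewrite below, assuming BITS_BIG_ENDIAN
   is 0:
       (set (zero_extract:SI (reg:SI 100) (const_int 4) (const_int 8))
            (const_int 5))
   becomes
       (set (reg:SI 100)
            (ior:SI (and:SI (reg:SI 100) (const_int -3841))
                    (const_int 1280)))
   since ~(0xf << 8) == -3841 and 5 << 8 == 1280.  If the source had been
   15 (all mask bits set), the AND would be omitted.  */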
4991 if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
4992 && is_a <scalar_int_mode> (GET_MODE (XEXP (SET_DEST (x), 0)),
4993 &inner_mode)
4994 && HWI_COMPUTABLE_MODE_P (inner_mode)
4995 && CONST_INT_P (XEXP (SET_DEST (x), 1))
4996 && CONST_INT_P (XEXP (SET_DEST (x), 2))
4997 && CONST_INT_P (SET_SRC (x))
4998 && ((INTVAL (XEXP (SET_DEST (x), 1))
4999 + INTVAL (XEXP (SET_DEST (x), 2)))
5000 <= GET_MODE_PRECISION (inner_mode))
5001 && ! side_effects_p (XEXP (SET_DEST (x), 0)))
5002 {
5003 HOST_WIDE_INT pos = INTVAL (XEXP (SET_DEST (x), 2));
5004 unsigned HOST_WIDE_INT len = INTVAL (XEXP (SET_DEST (x), 1));
5005 unsigned HOST_WIDE_INT src = INTVAL (SET_SRC (x));
5006 rtx dest = XEXP (SET_DEST (x), 0);
5007 unsigned HOST_WIDE_INT mask
5008 = (HOST_WIDE_INT_1U << len) - 1;
5009 rtx or_mask;
5010
5011 if (BITS_BIG_ENDIAN)
5012 pos = GET_MODE_PRECISION (inner_mode) - len - pos;
5013
5014 or_mask = gen_int_mode (src << pos, inner_mode);
5015 if (src == mask)
5016 SUBST (SET_SRC (x),
5017 simplify_gen_binary (IOR, inner_mode, dest, or_mask));
5018 else
5019 {
5020 rtx negmask = gen_int_mode (~(mask << pos), inner_mode);
5021 SUBST (SET_SRC (x),
5022 simplify_gen_binary (IOR, inner_mode,
5023 simplify_gen_binary (AND, inner_mode,
5024 dest, negmask),
5025 or_mask));
5026 }
5027
5028 SUBST (SET_DEST (x), dest);
5029
5030 split = find_split_point (&SET_SRC (x), insn, true);
5031 if (split && split != &SET_SRC (x))
5032 return split;
5033 }
5034
5035 /* Otherwise, see if this is an operation that we can split into two.
5036 If so, try to split that. */
5037 code = GET_CODE (SET_SRC (x));
5038
5039 switch (code)
5040 {
5041 case AND:
5042 /* If we are AND'ing with a large constant that is only a single
5043 bit and the result is only being used in a context where we
5044 need to know if it is zero or nonzero, replace it with a bit
5045 extraction. This will avoid the large constant, which might
5046 have taken more than one insn to make. If the constant were
5047 not a valid argument to the AND but took only one insn to make,
5048 this is no worse, but if it took more than one insn, it will
5049 be better. */
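/* A hypothetical example: if the only use of
       (set (reg:SI 101) (and:SI (reg:SI 100) (const_int 4096)))
   is (ne (reg:SI 101) (const_int 0)), the AND is replaced by an
   extraction of bit 12, e.g.
       (zero_extract:SI (reg:SI 100) (const_int 1) (const_int 12))
   so the 0x1000 constant never needs to be loaded.  */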
5050
5051 if (CONST_INT_P (XEXP (SET_SRC (x), 1))
5052 && REG_P (XEXP (SET_SRC (x), 0))
5053 && (pos = exact_log2 (UINTVAL (XEXP (SET_SRC (x), 1)))) >= 7
5054 && REG_P (SET_DEST (x))
5055 && (split = find_single_use (SET_DEST (x), insn, NULL)) != 0
5056 && (GET_CODE (*split) == EQ || GET_CODE (*split) == NE)
5057 && XEXP (*split, 0) == SET_DEST (x)
5058 && XEXP (*split, 1) == const0_rtx)
5059 {
5060 rtx extraction = make_extraction (GET_MODE (SET_DEST (x)),
5061 XEXP (SET_SRC (x), 0),
5062 pos, NULL_RTX, 1, 1, 0, 0);
5063 if (extraction != 0)
5064 {
5065 SUBST (SET_SRC (x), extraction);
5066 return find_split_point (loc, insn, false);
5067 }
5068 }
5069 break;
5070
5071 case NE:
5072 /* If STORE_FLAG_VALUE is -1, this is (NE X 0); if only one bit of X
5073 is known to be on, it can be converted into a NEG of a shift. */
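/* A hypothetical example, assuming STORE_FLAG_VALUE is -1: if only bit 3
   of (reg:SI 100) can be nonzero, then
       (ne:SI (reg:SI 100) (const_int 0))
   becomes
       (neg:SI (lshiftrt:SI (reg:SI 100) (const_int 3)))
   which yields 0 or -1 as required.  */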
5074 if (STORE_FLAG_VALUE == -1 && XEXP (SET_SRC (x), 1) == const0_rtx
5075 && GET_MODE (SET_SRC (x)) == GET_MODE (XEXP (SET_SRC (x), 0))
5076 && 1 <= (pos = exact_log2
5077 (nonzero_bits (XEXP (SET_SRC (x), 0),
5078 GET_MODE (XEXP (SET_SRC (x), 0))))))
5079 {
5080 machine_mode mode = GET_MODE (XEXP (SET_SRC (x), 0));
5081
5082 SUBST (SET_SRC (x),
5083 gen_rtx_NEG (mode,
5084 gen_rtx_LSHIFTRT (mode,
5085 XEXP (SET_SRC (x), 0),
5086 GEN_INT (pos))));
5087
5088 split = find_split_point (&SET_SRC (x), insn, true);
5089 if (split && split != &SET_SRC (x))
5090 return split;
5091 }
5092 break;
5093
5094 case SIGN_EXTEND:
5095 inner = XEXP (SET_SRC (x), 0);
5096
5097 /* We can't optimize if either mode is a partial integer
5098 mode as we don't know how many bits are significant
5099 in those modes. */
5100 if (!is_int_mode (GET_MODE (inner), &inner_mode)
5101 || GET_MODE_CLASS (GET_MODE (SET_SRC (x))) == MODE_PARTIAL_INT)
5102 break;
5103
5104 pos = 0;
5105 len = GET_MODE_PRECISION (inner_mode);
5106 unsignedp = 0;
5107 break;
5108
5109 case SIGN_EXTRACT:
5110 case ZERO_EXTRACT:
5111 if (is_a <scalar_int_mode> (GET_MODE (XEXP (SET_SRC (x), 0)),
5112 &inner_mode)
5113 && CONST_INT_P (XEXP (SET_SRC (x), 1))
5114 && CONST_INT_P (XEXP (SET_SRC (x), 2)))
5115 {
5116 inner = XEXP (SET_SRC (x), 0);
5117 len = INTVAL (XEXP (SET_SRC (x), 1));
5118 pos = INTVAL (XEXP (SET_SRC (x), 2));
5119
5120 if (BITS_BIG_ENDIAN)
5121 pos = GET_MODE_PRECISION (inner_mode) - len - pos;
5122 unsignedp = (code == ZERO_EXTRACT);
5123 }
5124 break;
5125
5126 default:
5127 break;
5128 }
5129
5130 if (len && pos >= 0
5131 && pos + len <= GET_MODE_PRECISION (GET_MODE (inner))
5132 && is_a <scalar_int_mode> (GET_MODE (SET_SRC (x)), &mode))
5133 {
5134 /* For unsigned, we have a choice of a shift followed by an
5135 AND or two shifts. Use two shifts for field sizes where the
5136 constant might be too large. We assume here that we can
5137 always at least get 8-bit constants in an AND insn, which is
5138 true for every current RISC. */
5139
5140 if (unsignedp && len <= 8)
5141 {
5142 unsigned HOST_WIDE_INT mask
5143 = (HOST_WIDE_INT_1U << len) - 1;
5144 SUBST (SET_SRC (x),
5145 gen_rtx_AND (mode,
5146 gen_rtx_LSHIFTRT
5147 (mode, gen_lowpart (mode, inner),
5148 GEN_INT (pos)),
5149 gen_int_mode (mask, mode)));
5150
5151 split = find_split_point (&SET_SRC (x), insn, true);
5152 if (split && split != &SET_SRC (x))
5153 return split;
5154 }
5155 else
5156 {
5157 SUBST (SET_SRC (x),
5158 gen_rtx_fmt_ee
5159 (unsignedp ? LSHIFTRT : ASHIFTRT, mode,
5160 gen_rtx_ASHIFT (mode,
5161 gen_lowpart (mode, inner),
5162 GEN_INT (GET_MODE_PRECISION (mode)
5163 - len - pos)),
5164 GEN_INT (GET_MODE_PRECISION (mode) - len)));
5165
5166 split = find_split_point (&SET_SRC (x), insn, true);
5167 if (split && split != &SET_SRC (x))
5168 return split;
5169 }
5170 }
5171
5172 /* See if this is a simple operation with a constant as the second
5173 operand. The constant might be out of range for the insn, in which
5174 case it can serve as a split point. */
5175 if (BINARY_P (SET_SRC (x))
5176 && CONSTANT_P (XEXP (SET_SRC (x), 1))
5177 && (OBJECT_P (XEXP (SET_SRC (x), 0))
5178 || (GET_CODE (XEXP (SET_SRC (x), 0)) == SUBREG
5179 && OBJECT_P (SUBREG_REG (XEXP (SET_SRC (x), 0))))))
5180 return &XEXP (SET_SRC (x), 1);
5181
5182 /* Finally, see if this is a simple operation with its first operand
5183 not in a register. The operation might require this operand in a
5184 register, so return it as a split point. We can always do this
5185 because if the first operand were another operation, we would have
5186 already found it as a split point. */
5187 if ((BINARY_P (SET_SRC (x)) || UNARY_P (SET_SRC (x)))
5188 && ! register_operand (XEXP (SET_SRC (x), 0), VOIDmode))
5189 return &XEXP (SET_SRC (x), 0);
5190
5191 return 0;
5192
5193 case AND:
5194 case IOR:
5195 /* We write NOR as (and (not A) (not B)), but if we don't have a NOR,
5196 it is better to write this as (not (ior A B)) so we can split it.
5197 Similarly for IOR. */
5198 if (GET_CODE (XEXP (x, 0)) == NOT && GET_CODE (XEXP (x, 1)) == NOT)
5199 {
5200 SUBST (*loc,
5201 gen_rtx_NOT (GET_MODE (x),
5202 gen_rtx_fmt_ee (code == IOR ? AND : IOR,
5203 GET_MODE (x),
5204 XEXP (XEXP (x, 0), 0),
5205 XEXP (XEXP (x, 1), 0))));
5206 return find_split_point (loc, insn, set_src);
5207 }
5208
5209 /* Many RISC machines have a large set of logical insns. If the
5210 second operand is a NOT, put it first so we will try to split the
5211 other operand first. */
5212 if (GET_CODE (XEXP (x, 1)) == NOT)
5213 {
5214 rtx tem = XEXP (x, 0);
5215 SUBST (XEXP (x, 0), XEXP (x, 1));
5216 SUBST (XEXP (x, 1), tem);
5217 }
5218 break;
5219
5220 case PLUS:
5221 case MINUS:
5222 /* Canonicalization can produce (minus A (mult B C)), where C is a
5223 constant. It may be better to try splitting (plus (mult B -C) A)
5224 instead if this isn't a multiply by a power of two. */
5225 if (set_src && code == MINUS && GET_CODE (XEXP (x, 1)) == MULT
5226 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
5227 && !pow2p_hwi (INTVAL (XEXP (XEXP (x, 1), 1))))
5228 {
5229 machine_mode mode = GET_MODE (x);
5230 unsigned HOST_WIDE_INT this_int = INTVAL (XEXP (XEXP (x, 1), 1));
5231 HOST_WIDE_INT other_int = trunc_int_for_mode (-this_int, mode);
5232 SUBST (*loc, gen_rtx_PLUS (mode,
5233 gen_rtx_MULT (mode,
5234 XEXP (XEXP (x, 1), 0),
5235 gen_int_mode (other_int,
5236 mode)),
5237 XEXP (x, 0)));
5238 return find_split_point (loc, insn, set_src);
5239 }
5240
5241 /* Split at a multiply-accumulate instruction. However if this is
5242 the SET_SRC, we likely do not have such an instruction and it's
5243 worthless to try this split. */
5244 if (!set_src
5245 && (GET_CODE (XEXP (x, 0)) == MULT
5246 || (GET_CODE (XEXP (x, 0)) == ASHIFT
5247 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
5248 return loc;
5249
5250 default:
5251 break;
5252 }
5253
5254 /* Otherwise, select our actions depending on our rtx class. */
5255 switch (GET_RTX_CLASS (code))
5256 {
5257 case RTX_BITFIELD_OPS: /* This is ZERO_EXTRACT and SIGN_EXTRACT. */
5258 case RTX_TERNARY:
5259 split = find_split_point (&XEXP (x, 2), insn, false);
5260 if (split)
5261 return split;
5262 /* fall through */
5263 case RTX_BIN_ARITH:
5264 case RTX_COMM_ARITH:
5265 case RTX_COMPARE:
5266 case RTX_COMM_COMPARE:
5267 split = find_split_point (&XEXP (x, 1), insn, false);
5268 if (split)
5269 return split;
5270 /* fall through */
5271 case RTX_UNARY:
5272 /* Some machines have (and (shift ...) ...) insns. If X is not
5273 an AND, but XEXP (X, 0) is, use it as our split point. */
5274 if (GET_CODE (x) != AND && GET_CODE (XEXP (x, 0)) == AND)
5275 return &XEXP (x, 0);
5276
5277 split = find_split_point (&XEXP (x, 0), insn, false);
5278 if (split)
5279 return split;
5280 return loc;
5281
5282 default:
5283 /* Otherwise, we don't have a split point. */
5284 return 0;
5285 }
5286 }
5287 \f
5288 /* Throughout X, replace FROM with TO, and return the result.
5289 The result is TO if X is FROM;
5290 otherwise the result is X, but its contents may have been modified.
5291 If they were modified, a record was made in undobuf so that
5292 undo_all will (among other things) return X to its original state.
5293
5294 If the number of changes necessary is too great to record for undoing,
5295 the excess changes are not made, so the result is invalid.
5296 The changes already made can still be undone.
5297 undobuf.num_undo is incremented for such changes, so by testing that
5298 the caller can tell whether the result is valid.
5299
5300 `n_occurrences' is incremented each time FROM is replaced.
5301
5302 IN_DEST is nonzero if we are processing the SET_DEST of a SET.
5303
5304 IN_COND is nonzero if we are at the top level of a condition.
5305
5306 UNIQUE_COPY is nonzero if each substitution must be unique. We do this
5307 by copying if `n_occurrences' is nonzero. */
5308
5309 static rtx
5310 subst (rtx x, rtx from, rtx to, int in_dest, int in_cond, int unique_copy)
5311 {
5312 enum rtx_code code = GET_CODE (x);
5313 machine_mode op0_mode = VOIDmode;
5314 const char *fmt;
5315 int len, i;
5316 rtx new_rtx;
5317
5318 /* Two expressions are equal if they are identical copies of a shared
5319 RTX or if they are both registers with the same register number
5320 and mode. */
5321
5322 #define COMBINE_RTX_EQUAL_P(X,Y) \
5323 ((X) == (Y) \
5324 || (REG_P (X) && REG_P (Y) \
5325 && REGNO (X) == REGNO (Y) && GET_MODE (X) == GET_MODE (Y)))
5326
5327 /* Do not substitute into clobbers of regs -- this will never result in
5328 valid RTL. */
5329 if (GET_CODE (x) == CLOBBER && REG_P (XEXP (x, 0)))
5330 return x;
5331
5332 if (! in_dest && COMBINE_RTX_EQUAL_P (x, from))
5333 {
5334 n_occurrences++;
5335 return (unique_copy && n_occurrences > 1 ? copy_rtx (to) : to);
5336 }
5337
5338 /* If X and FROM are the same register but different modes, they
5339 will not have been seen as equal above. However, the log links code
5340 will make a LOG_LINKS entry for that case. If we do nothing, we
5341 will try to rerecognize our original insn and, when it succeeds,
5342 we will delete the feeding insn, which is incorrect.
5343
5344 So force this insn not to match in this (rare) case. */
5345 if (! in_dest && code == REG && REG_P (from)
5346 && reg_overlap_mentioned_p (x, from))
5347 return gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
5348
5349 /* If this is an object, we are done unless it is a MEM or LO_SUM, both
5350 of which may contain things that can be combined. */
5351 if (code != MEM && code != LO_SUM && OBJECT_P (x))
5352 return x;
5353
5354 /* It is possible to have a subexpression appear twice in the insn.
5355 Suppose that FROM is a register that appears within TO.
5356 Then, after that subexpression has been scanned once by `subst',
5357 the second time it is scanned, TO may be found. If we were
5358 to scan TO here, we would find FROM within it and create a
5359 self-referential rtl structure, which is completely wrong. */
5360 if (COMBINE_RTX_EQUAL_P (x, to))
5361 return to;
5362
5363 /* Parallel asm_operands need special attention because all of the
5364 inputs are shared across the arms. Furthermore, unsharing the
5365 rtl results in recognition failures. Failure to handle this case
5366 specially can result in circular rtl.
5367
5368 Solve this by doing a normal pass across the first entry of the
5369 parallel, and only processing the SET_DESTs of the subsequent
5370 entries. Ug. */
5371
5372 if (code == PARALLEL
5373 && GET_CODE (XVECEXP (x, 0, 0)) == SET
5374 && GET_CODE (SET_SRC (XVECEXP (x, 0, 0))) == ASM_OPERANDS)
5375 {
5376 new_rtx = subst (XVECEXP (x, 0, 0), from, to, 0, 0, unique_copy);
5377
5378 /* If this substitution failed, this whole thing fails. */
5379 if (GET_CODE (new_rtx) == CLOBBER
5380 && XEXP (new_rtx, 0) == const0_rtx)
5381 return new_rtx;
5382
5383 SUBST (XVECEXP (x, 0, 0), new_rtx);
5384
5385 for (i = XVECLEN (x, 0) - 1; i >= 1; i--)
5386 {
5387 rtx dest = SET_DEST (XVECEXP (x, 0, i));
5388
5389 if (!REG_P (dest)
5390 && GET_CODE (dest) != CC0
5391 && GET_CODE (dest) != PC)
5392 {
5393 new_rtx = subst (dest, from, to, 0, 0, unique_copy);
5394
5395 /* If this substitution failed, this whole thing fails. */
5396 if (GET_CODE (new_rtx) == CLOBBER
5397 && XEXP (new_rtx, 0) == const0_rtx)
5398 return new_rtx;
5399
5400 SUBST (SET_DEST (XVECEXP (x, 0, i)), new_rtx);
5401 }
5402 }
5403 }
5404 else
5405 {
5406 len = GET_RTX_LENGTH (code);
5407 fmt = GET_RTX_FORMAT (code);
5408
5409 /* We don't need to process a SET_DEST that is a register, CC0,
5410 or PC, so set up to skip this common case. All other cases
5411 where we want to suppress replacing something inside a
5412 SET_SRC are handled via the IN_DEST operand. */
5413 if (code == SET
5414 && (REG_P (SET_DEST (x))
5415 || GET_CODE (SET_DEST (x)) == CC0
5416 || GET_CODE (SET_DEST (x)) == PC))
5417 fmt = "ie";
5418
5419 /* Trying to simplify the operands of a widening MULT is not likely
5420 to create RTL matching a machine insn. */
5421 if (code == MULT
5422 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
5423 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
5424 && (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND
5425 || GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)
5426 && REG_P (XEXP (XEXP (x, 0), 0))
5427 && REG_P (XEXP (XEXP (x, 1), 0))
5428 && from == to)
5429 return x;
5430
5431
5432 /* Get the mode of operand 0 in case X is now a SIGN_EXTEND of a
5433 constant. */
5434 if (fmt[0] == 'e')
5435 op0_mode = GET_MODE (XEXP (x, 0));
5436
5437 for (i = 0; i < len; i++)
5438 {
5439 if (fmt[i] == 'E')
5440 {
5441 int j;
5442 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5443 {
5444 if (COMBINE_RTX_EQUAL_P (XVECEXP (x, i, j), from))
5445 {
5446 new_rtx = (unique_copy && n_occurrences
5447 ? copy_rtx (to) : to);
5448 n_occurrences++;
5449 }
5450 else
5451 {
5452 new_rtx = subst (XVECEXP (x, i, j), from, to, 0, 0,
5453 unique_copy);
5454
5455 /* If this substitution failed, this whole thing
5456 fails. */
5457 if (GET_CODE (new_rtx) == CLOBBER
5458 && XEXP (new_rtx, 0) == const0_rtx)
5459 return new_rtx;
5460 }
5461
5462 SUBST (XVECEXP (x, i, j), new_rtx);
5463 }
5464 }
5465 else if (fmt[i] == 'e')
5466 {
5467 /* If this is a register being set, ignore it. */
5468 new_rtx = XEXP (x, i);
5469 if (in_dest
5470 && i == 0
5471 && (((code == SUBREG || code == ZERO_EXTRACT)
5472 && REG_P (new_rtx))
5473 || code == STRICT_LOW_PART))
5474 ;
5475
5476 else if (COMBINE_RTX_EQUAL_P (XEXP (x, i), from))
5477 {
5478 /* In general, don't install a subreg involving two
5479 modes that are not tieable. It can worsen register
5480 allocation, and can even make invalid reload
5481 insns, since the reg inside may need to be copied
5482 from in the outside mode, and that may be invalid
5483 if it is an fp reg copied in integer mode.
5484
5485 We allow two exceptions to this: It is valid if
5486 it is inside another SUBREG and the mode of that
5487 SUBREG and the mode of the inside of TO is
5488 tieable and it is valid if X is a SET that copies
5489 FROM to CC0. */
5490
5491 if (GET_CODE (to) == SUBREG
5492 && !targetm.modes_tieable_p (GET_MODE (to),
5493 GET_MODE (SUBREG_REG (to)))
5494 && ! (code == SUBREG
5495 && (targetm.modes_tieable_p
5496 (GET_MODE (x), GET_MODE (SUBREG_REG (to)))))
5497 && (!HAVE_cc0
5498 || (! (code == SET
5499 && i == 1
5500 && XEXP (x, 0) == cc0_rtx))))
5501 return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5502
5503 if (code == SUBREG
5504 && REG_P (to)
5505 && REGNO (to) < FIRST_PSEUDO_REGISTER
5506 && simplify_subreg_regno (REGNO (to), GET_MODE (to),
5507 SUBREG_BYTE (x),
5508 GET_MODE (x)) < 0)
5509 return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5510
5511 new_rtx = (unique_copy && n_occurrences ? copy_rtx (to) : to);
5512 n_occurrences++;
5513 }
5514 else
5515 /* If we are in a SET_DEST, suppress most cases unless we
5516 have gone inside a MEM, in which case we want to
5517 simplify the address. We assume here that things that
5518 are actually part of the destination have their inner
5519 parts in the first expression. This is true for SUBREG,
5520 STRICT_LOW_PART, and ZERO_EXTRACT, which are the only
5521 things aside from REG and MEM that should appear in a
5522 SET_DEST. */
5523 new_rtx = subst (XEXP (x, i), from, to,
5524 (((in_dest
5525 && (code == SUBREG || code == STRICT_LOW_PART
5526 || code == ZERO_EXTRACT))
5527 || code == SET)
5528 && i == 0),
5529 code == IF_THEN_ELSE && i == 0,
5530 unique_copy);
5531
5532 /* If we found that we will have to reject this combination,
5533 indicate that by returning the CLOBBER ourselves, rather than
5534 an expression containing it. This will speed things up as
5535 well as prevent accidents where two CLOBBERs are considered
5536 to be equal, thus producing an incorrect simplification. */
5537
5538 if (GET_CODE (new_rtx) == CLOBBER && XEXP (new_rtx, 0) == const0_rtx)
5539 return new_rtx;
5540
5541 if (GET_CODE (x) == SUBREG && CONST_SCALAR_INT_P (new_rtx))
5542 {
5543 machine_mode mode = GET_MODE (x);
5544
5545 x = simplify_subreg (GET_MODE (x), new_rtx,
5546 GET_MODE (SUBREG_REG (x)),
5547 SUBREG_BYTE (x));
5548 if (! x)
5549 x = gen_rtx_CLOBBER (mode, const0_rtx);
5550 }
5551 else if (CONST_SCALAR_INT_P (new_rtx)
5552 && GET_CODE (x) == ZERO_EXTEND)
5553 {
5554 x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
5555 new_rtx, GET_MODE (XEXP (x, 0)));
5556 gcc_assert (x);
5557 }
5558 else
5559 SUBST (XEXP (x, i), new_rtx);
5560 }
5561 }
5562 }
5563
5564 /* Check if we are loading something from the constant pool via float
5565 extension; in this case we would undo the compress_float_constant
5566 optimization and degrade the constant load to an immediate value. */
5567 if (GET_CODE (x) == FLOAT_EXTEND
5568 && MEM_P (XEXP (x, 0))
5569 && MEM_READONLY_P (XEXP (x, 0)))
5570 {
5571 rtx tmp = avoid_constant_pool_reference (x);
5572 if (x != tmp)
5573 return x;
5574 }
5575
5576 /* Try to simplify X. If the simplification changed the code, it is likely
5577 that further simplification will help, so loop, but limit the number
5578 of repetitions that will be performed. */
5579
5580 for (i = 0; i < 4; i++)
5581 {
5582 /* If X is sufficiently simple, don't bother trying to do anything
5583 with it. */
5584 if (code != CONST_INT && code != REG && code != CLOBBER)
5585 x = combine_simplify_rtx (x, op0_mode, in_dest, in_cond);
5586
5587 if (GET_CODE (x) == code)
5588 break;
5589
5590 code = GET_CODE (x);
5591
5592 /* We no longer know the original mode of operand 0 since we
5593 have changed the form of X. */
5594 op0_mode = VOIDmode;
5595 }
5596
5597 return x;
5598 }
5599 \f
5600 /* If X is a commutative operation whose operands are not in the canonical
5601 order, use substitutions to swap them. */
5602
5603 static void
5604 maybe_swap_commutative_operands (rtx x)
5605 {
5606 if (COMMUTATIVE_ARITH_P (x)
5607 && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5608 {
5609 rtx temp = XEXP (x, 0);
5610 SUBST (XEXP (x, 0), XEXP (x, 1));
5611 SUBST (XEXP (x, 1), temp);
5612 }
5613 }
5614
5615 /* Simplify X, a piece of RTL. We just operate on the expression at the
5616 outer level; call `subst' to simplify recursively. Return the new
5617 expression.
5618
5619 OP0_MODE is the original mode of XEXP (x, 0). IN_DEST is nonzero
5620 if we are inside a SET_DEST. IN_COND is nonzero if we are at the top level
5621 of a condition. */
5622
5623 static rtx
5624 combine_simplify_rtx (rtx x, machine_mode op0_mode, int in_dest,
5625 int in_cond)
5626 {
5627 enum rtx_code code = GET_CODE (x);
5628 machine_mode mode = GET_MODE (x);
5629 scalar_int_mode int_mode;
5630 rtx temp;
5631 int i;
5632
5633 /* If this is a commutative operation, put a constant last and a complex
5634 expression first. We don't need to do this for comparisons here. */
5635 maybe_swap_commutative_operands (x);
5636
5637 /* Try to fold this expression in case we have constants that weren't
5638 present before. */
5639 temp = 0;
5640 switch (GET_RTX_CLASS (code))
5641 {
5642 case RTX_UNARY:
5643 if (op0_mode == VOIDmode)
5644 op0_mode = GET_MODE (XEXP (x, 0));
5645 temp = simplify_unary_operation (code, mode, XEXP (x, 0), op0_mode);
5646 break;
5647 case RTX_COMPARE:
5648 case RTX_COMM_COMPARE:
5649 {
5650 machine_mode cmp_mode = GET_MODE (XEXP (x, 0));
5651 if (cmp_mode == VOIDmode)
5652 {
5653 cmp_mode = GET_MODE (XEXP (x, 1));
5654 if (cmp_mode == VOIDmode)
5655 cmp_mode = op0_mode;
5656 }
5657 temp = simplify_relational_operation (code, mode, cmp_mode,
5658 XEXP (x, 0), XEXP (x, 1));
5659 }
5660 break;
5661 case RTX_COMM_ARITH:
5662 case RTX_BIN_ARITH:
5663 temp = simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5664 break;
5665 case RTX_BITFIELD_OPS:
5666 case RTX_TERNARY:
5667 temp = simplify_ternary_operation (code, mode, op0_mode, XEXP (x, 0),
5668 XEXP (x, 1), XEXP (x, 2));
5669 break;
5670 default:
5671 break;
5672 }
5673
5674 if (temp)
5675 {
5676 x = temp;
5677 code = GET_CODE (temp);
5678 op0_mode = VOIDmode;
5679 mode = GET_MODE (temp);
5680 }
5681
5682 /* If this is a simple operation applied to an IF_THEN_ELSE, try
5683 applying it to the arms of the IF_THEN_ELSE. This often simplifies
5684 things. Check for cases where both arms are testing the same
5685 condition.
5686
5687 Don't do anything if all operands are very simple. */
5688
5689 if ((BINARY_P (x)
5690 && ((!OBJECT_P (XEXP (x, 0))
5691 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5692 && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))
5693 || (!OBJECT_P (XEXP (x, 1))
5694 && ! (GET_CODE (XEXP (x, 1)) == SUBREG
5695 && OBJECT_P (SUBREG_REG (XEXP (x, 1)))))))
5696 || (UNARY_P (x)
5697 && (!OBJECT_P (XEXP (x, 0))
5698 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5699 && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))))
5700 {
5701 rtx cond, true_rtx, false_rtx;
5702
5703 cond = if_then_else_cond (x, &true_rtx, &false_rtx);
5704 if (cond != 0
5705 /* If everything is a comparison, what we have is highly unlikely
5706 to be simpler, so don't use it. */
5707 && ! (COMPARISON_P (x)
5708 && (COMPARISON_P (true_rtx) || COMPARISON_P (false_rtx))))
5709 {
5710 rtx cop1 = const0_rtx;
5711 enum rtx_code cond_code = simplify_comparison (NE, &cond, &cop1);
5712
5713 if (cond_code == NE && COMPARISON_P (cond))
5714 return x;
5715
5716 /* Simplify the alternative arms; this may collapse the true and
5717 false arms to store-flag values. Be careful to use copy_rtx
5718 here since true_rtx or false_rtx might share RTL with x as a
5719 result of the if_then_else_cond call above. */
5720 true_rtx = subst (copy_rtx (true_rtx), pc_rtx, pc_rtx, 0, 0, 0);
5721 false_rtx = subst (copy_rtx (false_rtx), pc_rtx, pc_rtx, 0, 0, 0);
5722
5723 /* If true_rtx and false_rtx are not general_operands, an if_then_else
5724 is unlikely to be simpler. */
5725 if (general_operand (true_rtx, VOIDmode)
5726 && general_operand (false_rtx, VOIDmode))
5727 {
5728 enum rtx_code reversed;
5729
5730 /* Restarting if we generate a store-flag expression will cause
5731 us to loop. Just drop through in this case. */
5732
5733 /* If the result values are STORE_FLAG_VALUE and zero, we can
5734 just make the comparison operation. */
5735 if (true_rtx == const_true_rtx && false_rtx == const0_rtx)
5736 x = simplify_gen_relational (cond_code, mode, VOIDmode,
5737 cond, cop1);
5738 else if (true_rtx == const0_rtx && false_rtx == const_true_rtx
5739 && ((reversed = reversed_comparison_code_parts
5740 (cond_code, cond, cop1, NULL))
5741 != UNKNOWN))
5742 x = simplify_gen_relational (reversed, mode, VOIDmode,
5743 cond, cop1);
5744
5745 /* Likewise, we can make the negate of a comparison operation
5746 if the result values are - STORE_FLAG_VALUE and zero. */
5747 else if (CONST_INT_P (true_rtx)
5748 && INTVAL (true_rtx) == - STORE_FLAG_VALUE
5749 && false_rtx == const0_rtx)
5750 x = simplify_gen_unary (NEG, mode,
5751 simplify_gen_relational (cond_code,
5752 mode, VOIDmode,
5753 cond, cop1),
5754 mode);
5755 else if (CONST_INT_P (false_rtx)
5756 && INTVAL (false_rtx) == - STORE_FLAG_VALUE
5757 && true_rtx == const0_rtx
5758 && ((reversed = reversed_comparison_code_parts
5759 (cond_code, cond, cop1, NULL))
5760 != UNKNOWN))
5761 x = simplify_gen_unary (NEG, mode,
5762 simplify_gen_relational (reversed,
5763 mode, VOIDmode,
5764 cond, cop1),
5765 mode);
5766 else
5767 return gen_rtx_IF_THEN_ELSE (mode,
5768 simplify_gen_relational (cond_code,
5769 mode,
5770 VOIDmode,
5771 cond,
5772 cop1),
5773 true_rtx, false_rtx);
5774
5775 code = GET_CODE (x);
5776 op0_mode = VOIDmode;
5777 }
5778 }
5779 }
5780
5781 /* First see if we can apply the inverse distributive law. */
5782 if (code == PLUS || code == MINUS
5783 || code == AND || code == IOR || code == XOR)
5784 {
5785 x = apply_distributive_law (x);
5786 code = GET_CODE (x);
5787 op0_mode = VOIDmode;
5788 }
5789
5790 /* If CODE is an associative operation not otherwise handled, see if we
5791 can associate some operands. This can win if they are constants or
5792 if they are logically related (i.e. (a & b) & a). */
5793 if ((code == PLUS || code == MINUS || code == MULT || code == DIV
5794 || code == AND || code == IOR || code == XOR
5795 || code == SMAX || code == SMIN || code == UMAX || code == UMIN)
5796 && ((INTEGRAL_MODE_P (mode) && code != DIV)
5797 || (flag_associative_math && FLOAT_MODE_P (mode))))
5798 {
5799 if (GET_CODE (XEXP (x, 0)) == code)
5800 {
5801 rtx other = XEXP (XEXP (x, 0), 0);
5802 rtx inner_op0 = XEXP (XEXP (x, 0), 1);
5803 rtx inner_op1 = XEXP (x, 1);
5804 rtx inner;
5805
5806 /* Make sure we pass the constant operand if any as the second
5807 one if this is a commutative operation. */
5808 if (CONSTANT_P (inner_op0) && COMMUTATIVE_ARITH_P (x))
5809 std::swap (inner_op0, inner_op1);
5810 inner = simplify_binary_operation (code == MINUS ? PLUS
5811 : code == DIV ? MULT
5812 : code,
5813 mode, inner_op0, inner_op1);
5814
5815 /* For commutative operations, try the other pair if that one
5816 didn't simplify. */
5817 if (inner == 0 && COMMUTATIVE_ARITH_P (x))
5818 {
5819 other = XEXP (XEXP (x, 0), 1);
5820 inner = simplify_binary_operation (code, mode,
5821 XEXP (XEXP (x, 0), 0),
5822 XEXP (x, 1));
5823 }
5824
5825 if (inner)
5826 return simplify_gen_binary (code, mode, other, inner);
5827 }
5828 }
5829
5830 /* A little bit of algebraic simplification here. */
5831 switch (code)
5832 {
5833 case MEM:
5834 /* Ensure that our address has any ASHIFTs converted to MULT in case
5835 address-recognizing predicates are called later. */
5836 temp = make_compound_operation (XEXP (x, 0), MEM);
5837 SUBST (XEXP (x, 0), temp);
5838 break;
5839
5840 case SUBREG:
5841 if (op0_mode == VOIDmode)
5842 op0_mode = GET_MODE (SUBREG_REG (x));
5843
5844 /* See if this can be moved to simplify_subreg. */
5845 if (CONSTANT_P (SUBREG_REG (x))
5846 && subreg_lowpart_offset (mode, op0_mode) == SUBREG_BYTE (x)
5847 /* Don't call gen_lowpart if the inner mode
5848 is VOIDmode and we cannot simplify it, as SUBREG without
5849 inner mode is invalid. */
5850 && (GET_MODE (SUBREG_REG (x)) != VOIDmode
5851 || gen_lowpart_common (mode, SUBREG_REG (x))))
5852 return gen_lowpart (mode, SUBREG_REG (x));
5853
5854 if (GET_MODE_CLASS (GET_MODE (SUBREG_REG (x))) == MODE_CC)
5855 break;
5856 {
5857 rtx temp;
5858 temp = simplify_subreg (mode, SUBREG_REG (x), op0_mode,
5859 SUBREG_BYTE (x));
5860 if (temp)
5861 return temp;
5862
5863 /* If op is known to have all lower bits zero, the result is zero. */
5864 scalar_int_mode int_mode, int_op0_mode;
5865 if (!in_dest
5866 && is_a <scalar_int_mode> (mode, &int_mode)
5867 && is_a <scalar_int_mode> (op0_mode, &int_op0_mode)
5868 && (GET_MODE_PRECISION (int_mode)
5869 < GET_MODE_PRECISION (int_op0_mode))
5870 && (subreg_lowpart_offset (int_mode, int_op0_mode)
5871 == SUBREG_BYTE (x))
5872 && HWI_COMPUTABLE_MODE_P (int_op0_mode)
5873 && (nonzero_bits (SUBREG_REG (x), int_op0_mode)
5874 & GET_MODE_MASK (int_mode)) == 0)
5875 return CONST0_RTX (int_mode);
5876 }
5877
5878 /* Don't change the mode of the MEM if that would change the meaning
5879 of the address. */
5880 if (MEM_P (SUBREG_REG (x))
5881 && (MEM_VOLATILE_P (SUBREG_REG (x))
5882 || mode_dependent_address_p (XEXP (SUBREG_REG (x), 0),
5883 MEM_ADDR_SPACE (SUBREG_REG (x)))))
5884 return gen_rtx_CLOBBER (mode, const0_rtx);
5885
5886 /* Note that we cannot do any narrowing for non-constants since
5887 we might have been counting on using the fact that some bits were
5888 zero. We now do this in the SET. */
5889
5890 break;
5891
5892 case NEG:
5893 temp = expand_compound_operation (XEXP (x, 0));
5894
5895 /* For C equal to the width of MODE minus 1, (neg (ashiftrt X C)) can be
5896 replaced by (lshiftrt X C). This will convert
5897 (neg (sign_extract X 1 Y)) to (zero_extract X 1 Y). */
5898
5899 if (GET_CODE (temp) == ASHIFTRT
5900 && CONST_INT_P (XEXP (temp, 1))
5901 && INTVAL (XEXP (temp, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
5902 return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (temp, 0),
5903 INTVAL (XEXP (temp, 1)));
5904
5905 /* If X has only a single bit that might be nonzero, say, bit I, convert
5906 (neg X) to (ashiftrt (ashift X C-I) C-I) where C is the bitsize of
5907 MODE minus 1. This will convert (neg (zero_extract X 1 Y)) to
5908 (sign_extract X 1 Y). But only do this if TEMP isn't a register
5909 or a SUBREG of one since we'd be making the expression more
5910 complex if it was just a register. */
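/* A hypothetical SImode example: in
       (neg:SI (and:SI (reg:SI 100) (const_int 1)))
   only bit 0 of the operand can be nonzero, so the NEG is replaced by
   an arithmetic right shift of a left shift by 31 (possibly simplified
   further by simplify_shift_const), giving 0 or -1.  */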
5911
5912 if (!REG_P (temp)
5913 && ! (GET_CODE (temp) == SUBREG
5914 && REG_P (SUBREG_REG (temp)))
5915 && is_a <scalar_int_mode> (mode, &int_mode)
5916 && (i = exact_log2 (nonzero_bits (temp, int_mode))) >= 0)
5917 {
5918 rtx temp1 = simplify_shift_const
5919 (NULL_RTX, ASHIFTRT, int_mode,
5920 simplify_shift_const (NULL_RTX, ASHIFT, int_mode, temp,
5921 GET_MODE_PRECISION (int_mode) - 1 - i),
5922 GET_MODE_PRECISION (int_mode) - 1 - i);
5923
5924 /* If all we did was surround TEMP with the two shifts, we
5925 haven't improved anything, so don't use it. Otherwise,
5926 we are better off with TEMP1. */
5927 if (GET_CODE (temp1) != ASHIFTRT
5928 || GET_CODE (XEXP (temp1, 0)) != ASHIFT
5929 || XEXP (XEXP (temp1, 0), 0) != temp)
5930 return temp1;
5931 }
5932 break;
5933
5934 case TRUNCATE:
5935 /* We can't handle truncation to a partial integer mode here
5936 because we don't know the real bitsize of the partial
5937 integer mode. */
5938 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
5939 break;
5940
5941 if (HWI_COMPUTABLE_MODE_P (mode))
5942 SUBST (XEXP (x, 0),
5943 force_to_mode (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
5944 GET_MODE_MASK (mode), 0));
5945
5946 /* We can truncate a constant value and return it. */
5947 if (CONST_INT_P (XEXP (x, 0)))
5948 return gen_int_mode (INTVAL (XEXP (x, 0)), mode);
5949
5950 /* Similarly to what we do in simplify-rtx.c, a truncate of a register
5951 whose value is a comparison can be replaced with a subreg if
5952 STORE_FLAG_VALUE permits. */
5953 if (HWI_COMPUTABLE_MODE_P (mode)
5954 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0
5955 && (temp = get_last_value (XEXP (x, 0)))
5956 && COMPARISON_P (temp))
5957 return gen_lowpart (mode, XEXP (x, 0));
5958 break;
5959
5960 case CONST:
5961 /* (const (const X)) can become (const X). Do it this way rather than
5962 returning the inner CONST since CONST can be shared with a
5963 REG_EQUAL note. */
5964 if (GET_CODE (XEXP (x, 0)) == CONST)
5965 SUBST (XEXP (x, 0), XEXP (XEXP (x, 0), 0));
5966 break;
5967
5968 case LO_SUM:
5969 /* Convert (lo_sum (high FOO) FOO) to FOO. This is necessary so we
5970 can add in an offset. find_split_point will split this address up
5971 again if it doesn't match. */
5972 if (HAVE_lo_sum && GET_CODE (XEXP (x, 0)) == HIGH
5973 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5974 return XEXP (x, 1);
5975 break;
5976
5977 case PLUS:
5978 /* (plus (xor (and <foo> (const_int pow2 - 1)) <c>) <-c>)
5979 when c is (const_int (pow2 + 1) / 2) is a sign extension of a
5980 bit-field and can be replaced by either a sign_extend or a
5981 sign_extract. The `and' may be a zero_extend and the two
5982 <c>, -<c> constants may be reversed. */
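/* A hypothetical SImode example of this rewrite:
       (plus:SI (xor:SI (and:SI (reg:SI 100) (const_int 255))
                        (const_int 128))
                (const_int -128))
   sign-extends the low byte of (reg:SI 100) and becomes
       (ashiftrt:SI (ashift:SI (reg:SI 100) (const_int 24))
                    (const_int 24))
   modulo further shift simplification.  */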
5983 if (GET_CODE (XEXP (x, 0)) == XOR
5984 && is_a <scalar_int_mode> (mode, &int_mode)
5985 && CONST_INT_P (XEXP (x, 1))
5986 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
5987 && INTVAL (XEXP (x, 1)) == -INTVAL (XEXP (XEXP (x, 0), 1))
5988 && ((i = exact_log2 (UINTVAL (XEXP (XEXP (x, 0), 1)))) >= 0
5989 || (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0)
5990 && HWI_COMPUTABLE_MODE_P (int_mode)
5991 && ((GET_CODE (XEXP (XEXP (x, 0), 0)) == AND
5992 && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
5993 && (UINTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1))
5994 == (HOST_WIDE_INT_1U << (i + 1)) - 1))
5995 || (GET_CODE (XEXP (XEXP (x, 0), 0)) == ZERO_EXTEND
5996 && (GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0)))
5997 == (unsigned int) i + 1))))
5998 return simplify_shift_const
5999 (NULL_RTX, ASHIFTRT, int_mode,
6000 simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
6001 XEXP (XEXP (XEXP (x, 0), 0), 0),
6002 GET_MODE_PRECISION (int_mode) - (i + 1)),
6003 GET_MODE_PRECISION (int_mode) - (i + 1));
6004
6005 /* If only the low-order bit of X is possibly nonzero, (plus x -1)
6006 can become (ashiftrt (ashift (xor x 1) C) C) where C is
6007 the bitsize of the mode - 1. This allows simplification of
6008 "a = (b & 8) == 0;" */
6009 if (XEXP (x, 1) == constm1_rtx
6010 && !REG_P (XEXP (x, 0))
6011 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
6012 && REG_P (SUBREG_REG (XEXP (x, 0))))
6013 && is_a <scalar_int_mode> (mode, &int_mode)
6014 && nonzero_bits (XEXP (x, 0), int_mode) == 1)
6015 return simplify_shift_const
6016 (NULL_RTX, ASHIFTRT, int_mode,
6017 simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
6018 gen_rtx_XOR (int_mode, XEXP (x, 0),
6019 const1_rtx),
6020 GET_MODE_PRECISION (int_mode) - 1),
6021 GET_MODE_PRECISION (int_mode) - 1);
6022
6023 /* If we are adding two things that have no bits in common, convert
6024 the addition into an IOR. This will often be further simplified,
6025 for example in cases like ((a & 1) + (a & 2)), which can
6026 become a & 3. */
6027
6028 if (HWI_COMPUTABLE_MODE_P (mode)
6029 && (nonzero_bits (XEXP (x, 0), mode)
6030 & nonzero_bits (XEXP (x, 1), mode)) == 0)
6031 {
6032 /* Try to simplify the expression further. */
6033 rtx tor = simplify_gen_binary (IOR, mode, XEXP (x, 0), XEXP (x, 1));
6034 temp = combine_simplify_rtx (tor, VOIDmode, in_dest, 0);
6035
6036 /* If we could, great. If not, do not go ahead with the IOR
6037 replacement, since PLUS appears in many special purpose
6038 address arithmetic instructions. */
6039 if (GET_CODE (temp) != CLOBBER
6040 && (GET_CODE (temp) != IOR
6041 || ((XEXP (temp, 0) != XEXP (x, 0)
6042 || XEXP (temp, 1) != XEXP (x, 1))
6043 && (XEXP (temp, 0) != XEXP (x, 1)
6044 || XEXP (temp, 1) != XEXP (x, 0)))))
6045 return temp;
6046 }
6047
6048 /* Canonicalize x + x into x << 1. */
6049 if (GET_MODE_CLASS (mode) == MODE_INT
6050 && rtx_equal_p (XEXP (x, 0), XEXP (x, 1))
6051 && !side_effects_p (XEXP (x, 0)))
6052 return simplify_gen_binary (ASHIFT, mode, XEXP (x, 0), const1_rtx);
6053
6054 break;
6055
6056 case MINUS:
6057 /* (minus <foo> (and <foo> (const_int -pow2))) becomes
6058 (and <foo> (const_int pow2-1)) */
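/* E.g. (a hypothetical case),
       (minus:SI (reg:SI 100) (and:SI (reg:SI 100) (const_int -8)))
   becomes (and:SI (reg:SI 100) (const_int 7)), since x - (x & -8)
   equals x & 7 for any x.  */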
6059 if (is_a <scalar_int_mode> (mode, &int_mode)
6060 && GET_CODE (XEXP (x, 1)) == AND
6061 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
6062 && pow2p_hwi (-UINTVAL (XEXP (XEXP (x, 1), 1)))
6063 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
6064 return simplify_and_const_int (NULL_RTX, int_mode, XEXP (x, 0),
6065 -INTVAL (XEXP (XEXP (x, 1), 1)) - 1);
6066 break;
6067
6068 case MULT:
6069 /* If we have (mult (plus A B) C), apply the distributive law and then
6070 the inverse distributive law to see if things simplify. This
6071 occurs mostly in addresses, often when unrolling loops. */
6072
6073 if (GET_CODE (XEXP (x, 0)) == PLUS)
6074 {
6075 rtx result = distribute_and_simplify_rtx (x, 0);
6076 if (result)
6077 return result;
6078 }
6079
6080 /* Try to simplify a*(b/c) as (a*b)/c. */
6081 if (FLOAT_MODE_P (mode) && flag_associative_math
6082 && GET_CODE (XEXP (x, 0)) == DIV)
6083 {
6084 rtx tem = simplify_binary_operation (MULT, mode,
6085 XEXP (XEXP (x, 0), 0),
6086 XEXP (x, 1));
6087 if (tem)
6088 return simplify_gen_binary (DIV, mode, tem, XEXP (XEXP (x, 0), 1));
6089 }
6090 break;
6091
6092 case UDIV:
6093 /* If this is a divide by a power of two, treat it as a shift if
6094 its first operand is a shift. */
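/* A hypothetical example: with a shift as the first operand,
       (udiv:SI (lshiftrt:SI (reg:SI 100) (const_int 2)) (const_int 4))
   is treated as a further logical right shift by 2, which
   simplify_shift_const can merge into
       (lshiftrt:SI (reg:SI 100) (const_int 4)).  */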
6095 if (is_a <scalar_int_mode> (mode, &int_mode)
6096 && CONST_INT_P (XEXP (x, 1))
6097 && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
6098 && (GET_CODE (XEXP (x, 0)) == ASHIFT
6099 || GET_CODE (XEXP (x, 0)) == LSHIFTRT
6100 || GET_CODE (XEXP (x, 0)) == ASHIFTRT
6101 || GET_CODE (XEXP (x, 0)) == ROTATE
6102 || GET_CODE (XEXP (x, 0)) == ROTATERT))
6103 return simplify_shift_const (NULL_RTX, LSHIFTRT, int_mode,
6104 XEXP (x, 0), i);
6105 break;
6106
6107 case EQ: case NE:
6108 case GT: case GTU: case GE: case GEU:
6109 case LT: case LTU: case LE: case LEU:
6110 case UNEQ: case LTGT:
6111 case UNGT: case UNGE:
6112 case UNLT: case UNLE:
6113 case UNORDERED: case ORDERED:
6114 /* If the first operand is a condition code, we can't do anything
6115 with it. */
6116 if (GET_CODE (XEXP (x, 0)) == COMPARE
6117 || (GET_MODE_CLASS (GET_MODE (XEXP (x, 0))) != MODE_CC
6118 && ! CC0_P (XEXP (x, 0))))
6119 {
6120 rtx op0 = XEXP (x, 0);
6121 rtx op1 = XEXP (x, 1);
6122 enum rtx_code new_code;
6123
6124 if (GET_CODE (op0) == COMPARE)
6125 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
6126
6127 /* Simplify our comparison, if possible. */
6128 new_code = simplify_comparison (code, &op0, &op1);
6129
6130 /* If STORE_FLAG_VALUE is 1, we can convert (ne x 0) to simply X
6131 if only the low-order bit is possibly nonzero in X (such as when
6132 X is a ZERO_EXTRACT of one bit). Similarly, we can convert EQ to
6133 (xor X 1) or (minus 1 X); we use the former. Finally, if X is
6134 known to be either 0 or -1, NE becomes a NEG and EQ becomes
6135 (plus X 1).
6136
6137 Remove any ZERO_EXTRACT we made when thinking this was a
6138 comparison. It may now be simpler to use, e.g., an AND. If a
6139 ZERO_EXTRACT is indeed appropriate, it will be placed back by
6140 the call to make_compound_operation in the SET case.
6141
6142 Don't apply these optimizations if the caller would
6143 prefer a comparison rather than a value.
6144 E.g., for the condition in an IF_THEN_ELSE most targets need
6145 an explicit comparison. */
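/* Hypothetical illustrations, assuming STORE_FLAG_VALUE is 1 and X is
   known to be 0 or 1 (e.g. a one-bit ZERO_EXTRACT):
       (ne X (const_int 0))  -->  X
       (eq X (const_int 0))  -->  (xor X (const_int 1))
   and, if X is instead known to be 0 or -1:
       (ne X (const_int 0))  -->  (neg X)
       (eq X (const_int 0))  -->  (plus X (const_int 1))  */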
6146
6147 if (in_cond)
6148 ;
6149
6150 else if (STORE_FLAG_VALUE == 1
6151 && new_code == NE
6152 && is_int_mode (mode, &int_mode)
6153 && op1 == const0_rtx
6154 && int_mode == GET_MODE (op0)
6155 && nonzero_bits (op0, int_mode) == 1)
6156 return gen_lowpart (int_mode,
6157 expand_compound_operation (op0));
6158
6159 else if (STORE_FLAG_VALUE == 1
6160 && new_code == NE
6161 && is_int_mode (mode, &int_mode)
6162 && op1 == const0_rtx
6163 && int_mode == GET_MODE (op0)
6164 && (num_sign_bit_copies (op0, int_mode)
6165 == GET_MODE_PRECISION (int_mode)))
6166 {
6167 op0 = expand_compound_operation (op0);
6168 return simplify_gen_unary (NEG, int_mode,
6169 gen_lowpart (int_mode, op0),
6170 int_mode);
6171 }
6172
6173 else if (STORE_FLAG_VALUE == 1
6174 && new_code == EQ
6175 && is_int_mode (mode, &int_mode)
6176 && op1 == const0_rtx
6177 && int_mode == GET_MODE (op0)
6178 && nonzero_bits (op0, int_mode) == 1)
6179 {
6180 op0 = expand_compound_operation (op0);
6181 return simplify_gen_binary (XOR, int_mode,
6182 gen_lowpart (int_mode, op0),
6183 const1_rtx);
6184 }
6185
6186 else if (STORE_FLAG_VALUE == 1
6187 && new_code == EQ
6188 && is_int_mode (mode, &int_mode)
6189 && op1 == const0_rtx
6190 && int_mode == GET_MODE (op0)
6191 && (num_sign_bit_copies (op0, int_mode)
6192 == GET_MODE_PRECISION (int_mode)))
6193 {
6194 op0 = expand_compound_operation (op0);
6195 return plus_constant (int_mode, gen_lowpart (int_mode, op0), 1);
6196 }
6197
6198 /* If STORE_FLAG_VALUE is -1, we have cases similar to
6199 those above. */
6200 if (in_cond)
6201 ;
6202
6203 else if (STORE_FLAG_VALUE == -1
6204 && new_code == NE
6205 && is_int_mode (mode, &int_mode)
6206 && op1 == const0_rtx
6207 && int_mode == GET_MODE (op0)
6208 && (num_sign_bit_copies (op0, int_mode)
6209 == GET_MODE_PRECISION (int_mode)))
6210 return gen_lowpart (int_mode, expand_compound_operation (op0));
6211
6212 else if (STORE_FLAG_VALUE == -1
6213 && new_code == NE
6214 && is_int_mode (mode, &int_mode)
6215 && op1 == const0_rtx
6216 && int_mode == GET_MODE (op0)
6217 && nonzero_bits (op0, int_mode) == 1)
6218 {
6219 op0 = expand_compound_operation (op0);
6220 return simplify_gen_unary (NEG, int_mode,
6221 gen_lowpart (int_mode, op0),
6222 int_mode);
6223 }
6224
6225 else if (STORE_FLAG_VALUE == -1
6226 && new_code == EQ
6227 && is_int_mode (mode, &int_mode)
6228 && op1 == const0_rtx
6229 && int_mode == GET_MODE (op0)
6230 && (num_sign_bit_copies (op0, int_mode)
6231 == GET_MODE_PRECISION (int_mode)))
6232 {
6233 op0 = expand_compound_operation (op0);
6234 return simplify_gen_unary (NOT, int_mode,
6235 gen_lowpart (int_mode, op0),
6236 int_mode);
6237 }
6238
6239 /* If X is 0/1, (eq X 0) is X-1. */
6240 else if (STORE_FLAG_VALUE == -1
6241 && new_code == EQ
6242 && is_int_mode (mode, &int_mode)
6243 && op1 == const0_rtx
6244 && int_mode == GET_MODE (op0)
6245 && nonzero_bits (op0, int_mode) == 1)
6246 {
6247 op0 = expand_compound_operation (op0);
6248 return plus_constant (int_mode, gen_lowpart (int_mode, op0), -1);
6249 }
6250
6251 /* If STORE_FLAG_VALUE says to just test the sign bit and X has just
6252 one bit that might be nonzero, we can convert (ne x 0) to
6253 (ashift x c) where C puts the bit in the sign bit. Remove any
6254 AND with STORE_FLAG_VALUE when we are done, since we are only
6255 going to test the sign bit. */
6256 if (new_code == NE
6257 && is_int_mode (mode, &int_mode)
6258 && HWI_COMPUTABLE_MODE_P (int_mode)
6259 && val_signbit_p (int_mode, STORE_FLAG_VALUE)
6260 && op1 == const0_rtx
6261 && int_mode == GET_MODE (op0)
6262 && (i = exact_log2 (nonzero_bits (op0, int_mode))) >= 0)
6263 {
6264 x = simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
6265 expand_compound_operation (op0),
6266 GET_MODE_PRECISION (int_mode) - 1 - i);
6267 if (GET_CODE (x) == AND && XEXP (x, 1) == const_true_rtx)
6268 return XEXP (x, 0);
6269 else
6270 return x;
6271 }
6272
6273 /* If the code changed, return a whole new comparison.
6274 We also need to avoid using SUBST in cases where
6275 simplify_comparison has widened a comparison with a CONST_INT,
6276 since in that case the wider CONST_INT may fail the sanity
6277 checks in do_SUBST. */
6278 if (new_code != code
6279 || (CONST_INT_P (op1)
6280 && GET_MODE (op0) != GET_MODE (XEXP (x, 0))
6281 && GET_MODE (op0) != GET_MODE (XEXP (x, 1))))
6282 return gen_rtx_fmt_ee (new_code, mode, op0, op1);
6283
6284 /* Otherwise, keep this operation, but maybe change its operands.
6285 This also converts (ne (compare FOO BAR) 0) to (ne FOO BAR). */
6286 SUBST (XEXP (x, 0), op0);
6287 SUBST (XEXP (x, 1), op1);
6288 }
6289 break;
6290
6291 case IF_THEN_ELSE:
6292 return simplify_if_then_else (x);
6293
6294 case ZERO_EXTRACT:
6295 case SIGN_EXTRACT:
6296 case ZERO_EXTEND:
6297 case SIGN_EXTEND:
6298 /* If we are processing SET_DEST, we are done. */
6299 if (in_dest)
6300 return x;
6301
6302 return expand_compound_operation (x);
6303
6304 case SET:
6305 return simplify_set (x);
6306
6307 case AND:
6308 case IOR:
6309 return simplify_logical (x);
6310
6311 case ASHIFT:
6312 case LSHIFTRT:
6313 case ASHIFTRT:
6314 case ROTATE:
6315 case ROTATERT:
6316 /* If this is a shift by a constant amount, simplify it. */
6317 if (CONST_INT_P (XEXP (x, 1)))
6318 return simplify_shift_const (x, code, mode, XEXP (x, 0),
6319 INTVAL (XEXP (x, 1)));
6320
6321 else if (SHIFT_COUNT_TRUNCATED && !REG_P (XEXP (x, 1)))
6322 SUBST (XEXP (x, 1),
6323 force_to_mode (XEXP (x, 1), GET_MODE (XEXP (x, 1)),
6324 (HOST_WIDE_INT_1U
6325 << exact_log2 (GET_MODE_UNIT_BITSIZE
6326 (GET_MODE (x))))
6327 - 1,
6328 0));
6329 break;
6330
6331 default:
6332 break;
6333 }
6334
6335 return x;
6336 }
6337 \f
6338 /* Simplify X, an IF_THEN_ELSE expression. Return the new expression. */
6339
6340 static rtx
6341 simplify_if_then_else (rtx x)
6342 {
6343 machine_mode mode = GET_MODE (x);
6344 rtx cond = XEXP (x, 0);
6345 rtx true_rtx = XEXP (x, 1);
6346 rtx false_rtx = XEXP (x, 2);
6347 enum rtx_code true_code = GET_CODE (cond);
6348 int comparison_p = COMPARISON_P (cond);
6349 rtx temp;
6350 int i;
6351 enum rtx_code false_code;
6352 rtx reversed;
6353 scalar_int_mode int_mode, inner_mode;
6354
6355 /* Simplify storing of the truth value. */
6356 if (comparison_p && true_rtx == const_true_rtx && false_rtx == const0_rtx)
6357 return simplify_gen_relational (true_code, mode, VOIDmode,
6358 XEXP (cond, 0), XEXP (cond, 1));
6359
6360 /* Also when the truth value has to be reversed. */
6361 if (comparison_p
6362 && true_rtx == const0_rtx && false_rtx == const_true_rtx
6363 && (reversed = reversed_comparison (cond, mode)))
6364 return reversed;
6365
6366 /* Sometimes we can simplify the arm of an IF_THEN_ELSE if a register used
6367 in it is being compared against certain values. Get the true and false
6368 comparisons and see if that says anything about the value of each arm. */
6369
6370 if (comparison_p
6371 && ((false_code = reversed_comparison_code (cond, NULL))
6372 != UNKNOWN)
6373 && REG_P (XEXP (cond, 0)))
6374 {
6375 HOST_WIDE_INT nzb;
6376 rtx from = XEXP (cond, 0);
6377 rtx true_val = XEXP (cond, 1);
6378 rtx false_val = true_val;
6379 int swapped = 0;
6380
6381 /* If FALSE_CODE is EQ, swap the codes and arms. */
6382
6383 if (false_code == EQ)
6384 {
6385 swapped = 1, true_code = EQ, false_code = NE;
6386 std::swap (true_rtx, false_rtx);
6387 }
6388
6389 scalar_int_mode from_mode;
6390 if (is_a <scalar_int_mode> (GET_MODE (from), &from_mode))
6391 {
6392 /* If we are comparing against zero and the expression being
6393 tested has only a single bit that might be nonzero, that is
6394 its value when it is not equal to zero. Similarly if it is
6395 known to be -1 or 0. */
6396 if (true_code == EQ
6397 && true_val == const0_rtx
6398 && pow2p_hwi (nzb = nonzero_bits (from, from_mode)))
6399 {
6400 false_code = EQ;
6401 false_val = gen_int_mode (nzb, from_mode);
6402 }
6403 else if (true_code == EQ
6404 && true_val == const0_rtx
6405 && (num_sign_bit_copies (from, from_mode)
6406 == GET_MODE_PRECISION (from_mode)))
6407 {
6408 false_code = EQ;
6409 false_val = constm1_rtx;
6410 }
6411 }
6412
6413 /* Now simplify an arm if we know the value of the register in the
6414 branch and it is used in the arm. Be careful due to the potential
6415 of locally-shared RTL. */
6416
6417 if (reg_mentioned_p (from, true_rtx))
6418 true_rtx = subst (known_cond (copy_rtx (true_rtx), true_code,
6419 from, true_val),
6420 pc_rtx, pc_rtx, 0, 0, 0);
6421 if (reg_mentioned_p (from, false_rtx))
6422 false_rtx = subst (known_cond (copy_rtx (false_rtx), false_code,
6423 from, false_val),
6424 pc_rtx, pc_rtx, 0, 0, 0);
6425
6426 SUBST (XEXP (x, 1), swapped ? false_rtx : true_rtx);
6427 SUBST (XEXP (x, 2), swapped ? true_rtx : false_rtx);
6428
6429 true_rtx = XEXP (x, 1);
6430 false_rtx = XEXP (x, 2);
6431 true_code = GET_CODE (cond);
6432 }
6433
6434 /* If we have (if_then_else FOO (pc) (label_ref BAR)) and FOO can be
6435 reversed, do so to avoid needing two sets of patterns for
6436 subtract-and-branch insns. Similarly if we have a constant in the true
6437 arm, the false arm is the same as the first operand of the comparison, or
6438 the false arm is more complicated than the true arm. */
6439
6440 if (comparison_p
6441 && reversed_comparison_code (cond, NULL) != UNKNOWN
6442 && (true_rtx == pc_rtx
6443 || (CONSTANT_P (true_rtx)
6444 && !CONST_INT_P (false_rtx) && false_rtx != pc_rtx)
6445 || true_rtx == const0_rtx
6446 || (OBJECT_P (true_rtx) && !OBJECT_P (false_rtx))
6447 || (GET_CODE (true_rtx) == SUBREG && OBJECT_P (SUBREG_REG (true_rtx))
6448 && !OBJECT_P (false_rtx))
6449 || reg_mentioned_p (true_rtx, false_rtx)
6450 || rtx_equal_p (false_rtx, XEXP (cond, 0))))
6451 {
6452 true_code = reversed_comparison_code (cond, NULL);
6453 SUBST (XEXP (x, 0), reversed_comparison (cond, GET_MODE (cond)));
6454 SUBST (XEXP (x, 1), false_rtx);
6455 SUBST (XEXP (x, 2), true_rtx);
6456
6457 std::swap (true_rtx, false_rtx);
6458 cond = XEXP (x, 0);
6459
6460 /* It is possible that the conditional has been simplified out. */
6461 true_code = GET_CODE (cond);
6462 comparison_p = COMPARISON_P (cond);
6463 }
6464
6465 /* If the two arms are identical, we don't need the comparison. */
6466
6467 if (rtx_equal_p (true_rtx, false_rtx) && ! side_effects_p (cond))
6468 return true_rtx;
6469
6470 /* Convert a == b ? b : a to "a". */
6471 if (true_code == EQ && ! side_effects_p (cond)
6472 && !HONOR_NANS (mode)
6473 && rtx_equal_p (XEXP (cond, 0), false_rtx)
6474 && rtx_equal_p (XEXP (cond, 1), true_rtx))
6475 return false_rtx;
6476 else if (true_code == NE && ! side_effects_p (cond)
6477 && !HONOR_NANS (mode)
6478 && rtx_equal_p (XEXP (cond, 0), true_rtx)
6479 && rtx_equal_p (XEXP (cond, 1), false_rtx))
6480 return true_rtx;
6481
6482 /* Look for cases where we have (abs x) or (neg (abs X)). */
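/* A hypothetical example of the ABS detection below:
       (if_then_else (ge (reg:SI 100) (const_int 0))
                     (reg:SI 100)
                     (neg:SI (reg:SI 100)))
   becomes (abs:SI (reg:SI 100)); with LT or LE it would instead become
   (neg:SI (abs:SI (reg:SI 100))).  */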
6483
6484 if (GET_MODE_CLASS (mode) == MODE_INT
6485 && comparison_p
6486 && XEXP (cond, 1) == const0_rtx
6487 && GET_CODE (false_rtx) == NEG
6488 && rtx_equal_p (true_rtx, XEXP (false_rtx, 0))
6489 && rtx_equal_p (true_rtx, XEXP (cond, 0))
6490 && ! side_effects_p (true_rtx))
6491 switch (true_code)
6492 {
6493 case GT:
6494 case GE:
6495 return simplify_gen_unary (ABS, mode, true_rtx, mode);
6496 case LT:
6497 case LE:
6498 return
6499 simplify_gen_unary (NEG, mode,
6500 simplify_gen_unary (ABS, mode, true_rtx, mode),
6501 mode);
6502 default:
6503 break;
6504 }
6505
6506 /* Look for MIN or MAX. */
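/* E.g. (hypothetically),
       (if_then_else (gt (reg:SI 100) (reg:SI 101))
                     (reg:SI 100)
                     (reg:SI 101))
   becomes (smax:SI (reg:SI 100) (reg:SI 101)); the unsigned comparisons
   yield UMAX or UMIN instead.  */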
6507
6508 if ((! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
6509 && comparison_p
6510 && rtx_equal_p (XEXP (cond, 0), true_rtx)
6511 && rtx_equal_p (XEXP (cond, 1), false_rtx)
6512 && ! side_effects_p (cond))
6513 switch (true_code)
6514 {
6515 case GE:
6516 case GT:
6517 return simplify_gen_binary (SMAX, mode, true_rtx, false_rtx);
6518 case LE:
6519 case LT:
6520 return simplify_gen_binary (SMIN, mode, true_rtx, false_rtx);
6521 case GEU:
6522 case GTU:
6523 return simplify_gen_binary (UMAX, mode, true_rtx, false_rtx);
6524 case LEU:
6525 case LTU:
6526 return simplify_gen_binary (UMIN, mode, true_rtx, false_rtx);
6527 default:
6528 break;
6529 }
6530
6531 /* If we have (if_then_else COND (OP Z C1) Z) and OP is an identity when its
6532 second operand is zero, this can be done as (OP Z (mult COND C2)) where
6533 C2 = C1 * STORE_FLAG_VALUE. Similarly if OP has an outer ZERO_EXTEND or
6534 SIGN_EXTEND as long as Z is already extended (so we don't destroy it).
6535 We can do this kind of thing in some cases when STORE_FLAG_VALUE is
6536 neither 1 nor -1, but it isn't worth checking for. */
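/* A hypothetical example, assuming STORE_FLAG_VALUE is 1:
       (if_then_else (eq (reg:SI 100) (reg:SI 101))
                     (plus:SI (reg:SI 102) (const_int 4))
                     (reg:SI 102))
   can be rewritten as
       (plus:SI (reg:SI 102)
                (mult:SI (eq:SI (reg:SI 100) (reg:SI 101))
                         (const_int 4)))
   since the store-flag value of the comparison is 1 or 0.  */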
6537
6538 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
6539 && comparison_p
6540 && is_int_mode (mode, &int_mode)
6541 && ! side_effects_p (x))
6542 {
6543 rtx t = make_compound_operation (true_rtx, SET);
6544 rtx f = make_compound_operation (false_rtx, SET);
6545 rtx cond_op0 = XEXP (cond, 0);
6546 rtx cond_op1 = XEXP (cond, 1);
6547 enum rtx_code op = UNKNOWN, extend_op = UNKNOWN;
6548 scalar_int_mode m = int_mode;
6549 rtx z = 0, c1 = NULL_RTX;
6550
6551 if ((GET_CODE (t) == PLUS || GET_CODE (t) == MINUS
6552 || GET_CODE (t) == IOR || GET_CODE (t) == XOR
6553 || GET_CODE (t) == ASHIFT
6554 || GET_CODE (t) == LSHIFTRT || GET_CODE (t) == ASHIFTRT)
6555 && rtx_equal_p (XEXP (t, 0), f))
6556 c1 = XEXP (t, 1), op = GET_CODE (t), z = f;
6557
6558 /* If an identity-zero op is commutative, check whether there
6559 would be a match if we swapped the operands. */
6560 else if ((GET_CODE (t) == PLUS || GET_CODE (t) == IOR
6561 || GET_CODE (t) == XOR)
6562 && rtx_equal_p (XEXP (t, 1), f))
6563 c1 = XEXP (t, 0), op = GET_CODE (t), z = f;
6564 else if (GET_CODE (t) == SIGN_EXTEND
6565 && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6566 && (GET_CODE (XEXP (t, 0)) == PLUS
6567 || GET_CODE (XEXP (t, 0)) == MINUS
6568 || GET_CODE (XEXP (t, 0)) == IOR
6569 || GET_CODE (XEXP (t, 0)) == XOR
6570 || GET_CODE (XEXP (t, 0)) == ASHIFT
6571 || GET_CODE (XEXP (t, 0)) == LSHIFTRT
6572 || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
6573 && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
6574 && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
6575 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
6576 && (num_sign_bit_copies (f, GET_MODE (f))
6577 > (unsigned int)
6578 (GET_MODE_PRECISION (int_mode)
6579 - GET_MODE_PRECISION (inner_mode))))
6580 {
6581 c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
6582 extend_op = SIGN_EXTEND;
6583 m = inner_mode;
6584 }
6585 else if (GET_CODE (t) == SIGN_EXTEND
6586 && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6587 && (GET_CODE (XEXP (t, 0)) == PLUS
6588 || GET_CODE (XEXP (t, 0)) == IOR
6589 || GET_CODE (XEXP (t, 0)) == XOR)
6590 && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
6591 && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
6592 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
6593 && (num_sign_bit_copies (f, GET_MODE (f))
6594 > (unsigned int)
6595 (GET_MODE_PRECISION (int_mode)
6596 - GET_MODE_PRECISION (inner_mode))))
6597 {
6598 c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
6599 extend_op = SIGN_EXTEND;
6600 m = inner_mode;
6601 }
6602 else if (GET_CODE (t) == ZERO_EXTEND
6603 && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6604 && (GET_CODE (XEXP (t, 0)) == PLUS
6605 || GET_CODE (XEXP (t, 0)) == MINUS
6606 || GET_CODE (XEXP (t, 0)) == IOR
6607 || GET_CODE (XEXP (t, 0)) == XOR
6608 || GET_CODE (XEXP (t, 0)) == ASHIFT
6609 || GET_CODE (XEXP (t, 0)) == LSHIFTRT
6610 || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
6611 && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
6612 && HWI_COMPUTABLE_MODE_P (int_mode)
6613 && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
6614 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
6615 && ((nonzero_bits (f, GET_MODE (f))
6616 & ~GET_MODE_MASK (inner_mode))
6617 == 0))
6618 {
6619 c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
6620 extend_op = ZERO_EXTEND;
6621 m = inner_mode;
6622 }
6623 else if (GET_CODE (t) == ZERO_EXTEND
6624 && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6625 && (GET_CODE (XEXP (t, 0)) == PLUS
6626 || GET_CODE (XEXP (t, 0)) == IOR
6627 || GET_CODE (XEXP (t, 0)) == XOR)
6628 && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
6629 && HWI_COMPUTABLE_MODE_P (int_mode)
6630 && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
6631 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
6632 && ((nonzero_bits (f, GET_MODE (f))
6633 & ~GET_MODE_MASK (inner_mode))
6634 == 0))
6635 {
6636 c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
6637 extend_op = ZERO_EXTEND;
6638 m = inner_mode;
6639 }
6640
6641 if (z)
6642 {
6643 machine_mode cm = m;
6644 if ((op == ASHIFT || op == LSHIFTRT || op == ASHIFTRT)
6645 && GET_MODE (c1) != VOIDmode)
6646 cm = GET_MODE (c1);
6647 temp = subst (simplify_gen_relational (true_code, cm, VOIDmode,
6648 cond_op0, cond_op1),
6649 pc_rtx, pc_rtx, 0, 0, 0);
6650 temp = simplify_gen_binary (MULT, cm, temp,
6651 simplify_gen_binary (MULT, cm, c1,
6652 const_true_rtx));
6653 temp = subst (temp, pc_rtx, pc_rtx, 0, 0, 0);
6654 temp = simplify_gen_binary (op, m, gen_lowpart (m, z), temp);
6655
6656 if (extend_op != UNKNOWN)
6657 temp = simplify_gen_unary (extend_op, int_mode, temp, m);
6658
6659 return temp;
6660 }
6661 }
6662
6663 /* If we have (if_then_else (ne A 0) C1 0) and either A is known to be 0 or
6664 1 and C1 is a single bit or A is known to be 0 or -1 and C1 is the
6665 negation of a single bit, we can convert this operation to a shift. We
6666 can actually do this more generally, but it doesn't seem worth it. */
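       /* For instance, if A is known to be either 0 or 1,
	  (if_then_else (ne A (const_int 0)) (const_int 8) (const_int 0))
	  becomes (ashift A (const_int 3)).  */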
6667
6668 if (true_code == NE
6669 && is_a <scalar_int_mode> (mode, &int_mode)
6670 && XEXP (cond, 1) == const0_rtx
6671 && false_rtx == const0_rtx
6672 && CONST_INT_P (true_rtx)
6673 && ((1 == nonzero_bits (XEXP (cond, 0), int_mode)
6674 && (i = exact_log2 (UINTVAL (true_rtx))) >= 0)
6675 || ((num_sign_bit_copies (XEXP (cond, 0), int_mode)
6676 == GET_MODE_PRECISION (int_mode))
6677 && (i = exact_log2 (-UINTVAL (true_rtx))) >= 0)))
6678 return
6679 simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
6680 gen_lowpart (int_mode, XEXP (cond, 0)), i);
6681
6682 /* (IF_THEN_ELSE (NE A 0) C1 0) is A or a zero-extend of A if the only
6683 non-zero bit in A is C1. */
6684 if (true_code == NE && XEXP (cond, 1) == const0_rtx
6685 && false_rtx == const0_rtx && CONST_INT_P (true_rtx)
6686 && is_a <scalar_int_mode> (mode, &int_mode)
6687 && is_a <scalar_int_mode> (GET_MODE (XEXP (cond, 0)), &inner_mode)
6688 && (UINTVAL (true_rtx) & GET_MODE_MASK (int_mode))
6689 == nonzero_bits (XEXP (cond, 0), inner_mode)
6690 && (i = exact_log2 (UINTVAL (true_rtx) & GET_MODE_MASK (int_mode))) >= 0)
6691 {
6692 rtx val = XEXP (cond, 0);
6693 if (inner_mode == int_mode)
6694 return val;
6695 else if (GET_MODE_PRECISION (inner_mode) < GET_MODE_PRECISION (int_mode))
6696 return simplify_gen_unary (ZERO_EXTEND, int_mode, val, inner_mode);
6697 }
6698
6699 return x;
6700 }
6701 \f
6702 /* Simplify X, a SET expression. Return the new expression. */
6703
6704 static rtx
6705 simplify_set (rtx x)
6706 {
6707 rtx src = SET_SRC (x);
6708 rtx dest = SET_DEST (x);
6709 machine_mode mode
6710 = GET_MODE (src) != VOIDmode ? GET_MODE (src) : GET_MODE (dest);
6711 rtx_insn *other_insn;
6712 rtx *cc_use;
6713 scalar_int_mode int_mode;
6714
6715 /* (set (pc) (return)) gets written as (return). */
6716 if (GET_CODE (dest) == PC && ANY_RETURN_P (src))
6717 return src;
6718
6719 /* Now that we know for sure which bits of SRC we are using, see if we can
6720 simplify the expression for the object knowing that we only need the
6721 low-order bits. */
6722
6723 if (GET_MODE_CLASS (mode) == MODE_INT && HWI_COMPUTABLE_MODE_P (mode))
6724 {
6725 src = force_to_mode (src, mode, HOST_WIDE_INT_M1U, 0);
6726 SUBST (SET_SRC (x), src);
6727 }
6728
6729 /* If we are setting CC0 or if the source is a COMPARE, look for the use of
6730 the comparison result and try to simplify it unless we already have used
6731 undobuf.other_insn. */
6732 if ((GET_MODE_CLASS (mode) == MODE_CC
6733 || GET_CODE (src) == COMPARE
6734 || CC0_P (dest))
6735 && (cc_use = find_single_use (dest, subst_insn, &other_insn)) != 0
6736 && (undobuf.other_insn == 0 || other_insn == undobuf.other_insn)
6737 && COMPARISON_P (*cc_use)
6738 && rtx_equal_p (XEXP (*cc_use, 0), dest))
6739 {
6740 enum rtx_code old_code = GET_CODE (*cc_use);
6741 enum rtx_code new_code;
6742 rtx op0, op1, tmp;
6743 int other_changed = 0;
6744 rtx inner_compare = NULL_RTX;
6745 machine_mode compare_mode = GET_MODE (dest);
6746
6747 if (GET_CODE (src) == COMPARE)
6748 {
6749 op0 = XEXP (src, 0), op1 = XEXP (src, 1);
6750 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
6751 {
6752 inner_compare = op0;
6753 op0 = XEXP (inner_compare, 0), op1 = XEXP (inner_compare, 1);
6754 }
6755 }
6756 else
6757 op0 = src, op1 = CONST0_RTX (GET_MODE (src));
6758
6759 tmp = simplify_relational_operation (old_code, compare_mode, VOIDmode,
6760 op0, op1);
6761 if (!tmp)
6762 new_code = old_code;
6763 else if (!CONSTANT_P (tmp))
6764 {
6765 new_code = GET_CODE (tmp);
6766 op0 = XEXP (tmp, 0);
6767 op1 = XEXP (tmp, 1);
6768 }
6769 else
6770 {
6771 rtx pat = PATTERN (other_insn);
6772 undobuf.other_insn = other_insn;
6773 SUBST (*cc_use, tmp);
6774
6775 /* Attempt to simplify CC user. */
6776 if (GET_CODE (pat) == SET)
6777 {
6778 rtx new_rtx = simplify_rtx (SET_SRC (pat));
6779 if (new_rtx != NULL_RTX)
6780 SUBST (SET_SRC (pat), new_rtx);
6781 }
6782
6783 /* Convert X into a no-op move. */
6784 SUBST (SET_DEST (x), pc_rtx);
6785 SUBST (SET_SRC (x), pc_rtx);
6786 return x;
6787 }
6788
6789 /* Simplify our comparison, if possible. */
6790 new_code = simplify_comparison (new_code, &op0, &op1);
6791
6792 #ifdef SELECT_CC_MODE
6793 /* If this machine has CC modes other than CCmode, check to see if we
6794 need to use a different CC mode here. */
6795 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
6796 compare_mode = GET_MODE (op0);
6797 else if (inner_compare
6798 && GET_MODE_CLASS (GET_MODE (inner_compare)) == MODE_CC
6799 && new_code == old_code
6800 && op0 == XEXP (inner_compare, 0)
6801 && op1 == XEXP (inner_compare, 1))
6802 compare_mode = GET_MODE (inner_compare);
6803 else
6804 compare_mode = SELECT_CC_MODE (new_code, op0, op1);
6805
6806 /* If the mode changed, we have to change SET_DEST, the mode in the
6807 compare, and the mode in the place SET_DEST is used. If SET_DEST is
6808 a hard register, just build new versions with the proper mode. If it
6809 is a pseudo, we lose unless it is only time we set the pseudo, in
6810	 is a pseudo, we lose unless it is the only time we set the pseudo, in
6811 if (!HAVE_cc0 && compare_mode != GET_MODE (dest))
6812 {
6813 if (can_change_dest_mode (dest, 0, compare_mode))
6814 {
6815 unsigned int regno = REGNO (dest);
6816 rtx new_dest;
6817
6818 if (regno < FIRST_PSEUDO_REGISTER)
6819 new_dest = gen_rtx_REG (compare_mode, regno);
6820 else
6821 {
6822 SUBST_MODE (regno_reg_rtx[regno], compare_mode);
6823 new_dest = regno_reg_rtx[regno];
6824 }
6825
6826 SUBST (SET_DEST (x), new_dest);
6827 SUBST (XEXP (*cc_use, 0), new_dest);
6828 other_changed = 1;
6829
6830 dest = new_dest;
6831 }
6832 }
6833 #endif /* SELECT_CC_MODE */
6834
6835 /* If the code changed, we have to build a new comparison in
6836 undobuf.other_insn. */
6837 if (new_code != old_code)
6838 {
6839 int other_changed_previously = other_changed;
6840 unsigned HOST_WIDE_INT mask;
6841 rtx old_cc_use = *cc_use;
6842
6843 SUBST (*cc_use, gen_rtx_fmt_ee (new_code, GET_MODE (*cc_use),
6844 dest, const0_rtx));
6845 other_changed = 1;
6846
6847 /* If the only change we made was to change an EQ into an NE or
6848 vice versa, OP0 has only one bit that might be nonzero, and OP1
6849 is zero, check if changing the user of the condition code will
6850 produce a valid insn. If it won't, we can keep the original code
6851 in that insn by surrounding our operation with an XOR. */
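	  /* For instance, if OP0 can only be 0 or 8, then
	     (eq OP0 (const_int 0)) is equivalent to
	     (ne (xor OP0 (const_int 8)) (const_int 0)).  */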
6852
6853 if (((old_code == NE && new_code == EQ)
6854 || (old_code == EQ && new_code == NE))
6855 && ! other_changed_previously && op1 == const0_rtx
6856 && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
6857 && pow2p_hwi (mask = nonzero_bits (op0, GET_MODE (op0))))
6858 {
6859 rtx pat = PATTERN (other_insn), note = 0;
6860
6861 if ((recog_for_combine (&pat, other_insn, &note) < 0
6862 && ! check_asm_operands (pat)))
6863 {
6864 *cc_use = old_cc_use;
6865 other_changed = 0;
6866
6867 op0 = simplify_gen_binary (XOR, GET_MODE (op0), op0,
6868 gen_int_mode (mask,
6869 GET_MODE (op0)));
6870 }
6871 }
6872 }
6873
6874 if (other_changed)
6875 undobuf.other_insn = other_insn;
6876
6877 /* Don't generate a compare of a CC with 0, just use that CC. */
6878 if (GET_MODE (op0) == compare_mode && op1 == const0_rtx)
6879 {
6880 SUBST (SET_SRC (x), op0);
6881 src = SET_SRC (x);
6882 }
6883 /* Otherwise, if we didn't previously have the same COMPARE we
6884 want, create it from scratch. */
6885 else if (GET_CODE (src) != COMPARE || GET_MODE (src) != compare_mode
6886 || XEXP (src, 0) != op0 || XEXP (src, 1) != op1)
6887 {
6888 SUBST (SET_SRC (x), gen_rtx_COMPARE (compare_mode, op0, op1));
6889 src = SET_SRC (x);
6890 }
6891 }
6892 else
6893 {
6894 /* Get SET_SRC in a form where we have placed back any
6895 compound expressions. Then do the checks below. */
6896 src = make_compound_operation (src, SET);
6897 SUBST (SET_SRC (x), src);
6898 }
6899
6900 /* If we have (set x (subreg:m1 (op:m2 ...) 0)) with OP being some operation,
6901 and X being a REG or (subreg (reg)), we may be able to convert this to
6902 (set (subreg:m2 x) (op)).
6903
6904 We can always do this if M1 is narrower than M2 because that means that
6905 we only care about the low bits of the result.
6906
6907 However, on machines without WORD_REGISTER_OPERATIONS defined, we cannot
6908 perform a narrower operation than requested since the high-order bits will
6909      be undefined.  On machines where it is defined, this transformation is safe
6910 as long as M1 and M2 have the same number of words. */
6911
6912 if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
6913 && !OBJECT_P (SUBREG_REG (src))
6914 && (((GET_MODE_SIZE (GET_MODE (src)) + (UNITS_PER_WORD - 1))
6915 / UNITS_PER_WORD)
6916 == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))
6917 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))
6918 && (WORD_REGISTER_OPERATIONS || !paradoxical_subreg_p (src))
6919 && ! (REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER
6920 && !REG_CAN_CHANGE_MODE_P (REGNO (dest),
6921 GET_MODE (SUBREG_REG (src)),
6922 GET_MODE (src)))
6923 && (REG_P (dest)
6924 || (GET_CODE (dest) == SUBREG
6925 && REG_P (SUBREG_REG (dest)))))
6926 {
6927 SUBST (SET_DEST (x),
6928 gen_lowpart (GET_MODE (SUBREG_REG (src)),
6929 dest));
6930 SUBST (SET_SRC (x), SUBREG_REG (src));
6931
6932 src = SET_SRC (x), dest = SET_DEST (x);
6933 }
6934
6935 /* If we have (set (cc0) (subreg ...)), we try to remove the subreg
6936 in SRC. */
6937 if (dest == cc0_rtx
6938 && partial_subreg_p (src)
6939 && subreg_lowpart_p (src))
6940 {
6941 rtx inner = SUBREG_REG (src);
6942 machine_mode inner_mode = GET_MODE (inner);
6943
6944 /* Here we make sure that we don't have a sign bit on. */
6945 if (val_signbit_known_clear_p (GET_MODE (src),
6946 nonzero_bits (inner, inner_mode)))
6947 {
6948 SUBST (SET_SRC (x), inner);
6949 src = SET_SRC (x);
6950 }
6951 }
6952
6953 /* If we have (set FOO (subreg:M (mem:N BAR) 0)) with M wider than N, this
6954 would require a paradoxical subreg. Replace the subreg with a
6955 zero_extend to avoid the reload that would otherwise be required. */
6956
6957 enum rtx_code extend_op;
6958 if (paradoxical_subreg_p (src)
6959 && MEM_P (SUBREG_REG (src))
6960 && (extend_op = load_extend_op (GET_MODE (SUBREG_REG (src)))) != UNKNOWN)
6961 {
6962 SUBST (SET_SRC (x),
6963 gen_rtx_fmt_e (extend_op, GET_MODE (src), SUBREG_REG (src)));
6964
6965 src = SET_SRC (x);
6966 }
6967
6968 /* If we don't have a conditional move, SET_SRC is an IF_THEN_ELSE, and we
6969 are comparing an item known to be 0 or -1 against 0, use a logical
6970 operation instead. Check for one of the arms being an IOR of the other
6971 arm with some value. We compute three terms to be IOR'ed together. In
6972 practice, at most two will be nonzero. Then we do the IOR's. */
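     /* For instance, if M is known to be 0 or -1,
	(if_then_else (ne M (const_int 0)) A B) becomes
	(ior (and M A) (and (not M) B)).  */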
6973
6974 if (GET_CODE (dest) != PC
6975 && GET_CODE (src) == IF_THEN_ELSE
6976 && is_int_mode (GET_MODE (src), &int_mode)
6977 && (GET_CODE (XEXP (src, 0)) == EQ || GET_CODE (XEXP (src, 0)) == NE)
6978 && XEXP (XEXP (src, 0), 1) == const0_rtx
6979 && int_mode == GET_MODE (XEXP (XEXP (src, 0), 0))
6980 && (!HAVE_conditional_move
6981 || ! can_conditionally_move_p (int_mode))
6982 && (num_sign_bit_copies (XEXP (XEXP (src, 0), 0), int_mode)
6983 == GET_MODE_PRECISION (int_mode))
6984 && ! side_effects_p (src))
6985 {
6986 rtx true_rtx = (GET_CODE (XEXP (src, 0)) == NE
6987 ? XEXP (src, 1) : XEXP (src, 2));
6988 rtx false_rtx = (GET_CODE (XEXP (src, 0)) == NE
6989 ? XEXP (src, 2) : XEXP (src, 1));
6990 rtx term1 = const0_rtx, term2, term3;
6991
6992 if (GET_CODE (true_rtx) == IOR
6993 && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
6994 term1 = false_rtx, true_rtx = XEXP (true_rtx, 1), false_rtx = const0_rtx;
6995 else if (GET_CODE (true_rtx) == IOR
6996 && rtx_equal_p (XEXP (true_rtx, 1), false_rtx))
6997 term1 = false_rtx, true_rtx = XEXP (true_rtx, 0), false_rtx = const0_rtx;
6998 else if (GET_CODE (false_rtx) == IOR
6999 && rtx_equal_p (XEXP (false_rtx, 0), true_rtx))
7000 term1 = true_rtx, false_rtx = XEXP (false_rtx, 1), true_rtx = const0_rtx;
7001 else if (GET_CODE (false_rtx) == IOR
7002 && rtx_equal_p (XEXP (false_rtx, 1), true_rtx))
7003 term1 = true_rtx, false_rtx = XEXP (false_rtx, 0), true_rtx = const0_rtx;
7004
7005 term2 = simplify_gen_binary (AND, int_mode,
7006 XEXP (XEXP (src, 0), 0), true_rtx);
7007 term3 = simplify_gen_binary (AND, int_mode,
7008 simplify_gen_unary (NOT, int_mode,
7009 XEXP (XEXP (src, 0), 0),
7010 int_mode),
7011 false_rtx);
7012
7013 SUBST (SET_SRC (x),
7014 simplify_gen_binary (IOR, int_mode,
7015 simplify_gen_binary (IOR, int_mode,
7016 term1, term2),
7017 term3));
7018
7019 src = SET_SRC (x);
7020 }
7021
7022 /* If either SRC or DEST is a CLOBBER of (const_int 0), make this
7023 whole thing fail. */
7024 if (GET_CODE (src) == CLOBBER && XEXP (src, 0) == const0_rtx)
7025 return src;
7026 else if (GET_CODE (dest) == CLOBBER && XEXP (dest, 0) == const0_rtx)
7027 return dest;
7028 else
7029 /* Convert this into a field assignment operation, if possible. */
7030 return make_field_assignment (x);
7031 }
7032 \f
7033 /* Simplify X, an AND, IOR, or XOR operation, and return the simplified
7034 result. */
7035
7036 static rtx
7037 simplify_logical (rtx x)
7038 {
7039 rtx op0 = XEXP (x, 0);
7040 rtx op1 = XEXP (x, 1);
7041 scalar_int_mode mode;
7042
7043 switch (GET_CODE (x))
7044 {
7045 case AND:
7046 /* We can call simplify_and_const_int only if we don't lose
7047 any (sign) bits when converting INTVAL (op1) to
7048 "unsigned HOST_WIDE_INT". */
7049 if (is_a <scalar_int_mode> (GET_MODE (x), &mode)
7050 && CONST_INT_P (op1)
7051 && (HWI_COMPUTABLE_MODE_P (mode)
7052 || INTVAL (op1) > 0))
7053 {
7054 x = simplify_and_const_int (x, mode, op0, INTVAL (op1));
7055 if (GET_CODE (x) != AND)
7056 return x;
7057
7058 op0 = XEXP (x, 0);
7059 op1 = XEXP (x, 1);
7060 }
7061
7062 /* If we have any of (and (ior A B) C) or (and (xor A B) C),
7063 apply the distributive law and then the inverse distributive
7064 law to see if things simplify. */
7065 if (GET_CODE (op0) == IOR || GET_CODE (op0) == XOR)
7066 {
7067 rtx result = distribute_and_simplify_rtx (x, 0);
7068 if (result)
7069 return result;
7070 }
7071 if (GET_CODE (op1) == IOR || GET_CODE (op1) == XOR)
7072 {
7073 rtx result = distribute_and_simplify_rtx (x, 1);
7074 if (result)
7075 return result;
7076 }
7077 break;
7078
7079 case IOR:
7080 /* If we have (ior (and A B) C), apply the distributive law and then
7081 the inverse distributive law to see if things simplify. */
7082
7083 if (GET_CODE (op0) == AND)
7084 {
7085 rtx result = distribute_and_simplify_rtx (x, 0);
7086 if (result)
7087 return result;
7088 }
7089
7090 if (GET_CODE (op1) == AND)
7091 {
7092 rtx result = distribute_and_simplify_rtx (x, 1);
7093 if (result)
7094 return result;
7095 }
7096 break;
7097
7098 default:
7099 gcc_unreachable ();
7100 }
7101
7102 return x;
7103 }
7104 \f
7105 /* We consider ZERO_EXTRACT, SIGN_EXTRACT, and SIGN_EXTEND as "compound
7106 operations" because they can be replaced with two more basic operations.
7107 ZERO_EXTEND is also considered "compound" because it can be replaced with
7108 an AND operation, which is simpler, though only one operation.
7109
7110 The function expand_compound_operation is called with an rtx expression
7111 and will convert it to the appropriate shifts and AND operations,
7112 simplifying at each stage.
7113
7114 The function make_compound_operation is called to convert an expression
7115 consisting of shifts and ANDs into the equivalent compound expression.
7116 It is the inverse of this function, loosely speaking. */
7117
7118 static rtx
7119 expand_compound_operation (rtx x)
7120 {
7121 unsigned HOST_WIDE_INT pos = 0, len;
7122 int unsignedp = 0;
7123 unsigned int modewidth;
7124 rtx tem;
7125 scalar_int_mode inner_mode;
7126
7127 switch (GET_CODE (x))
7128 {
7129 case ZERO_EXTEND:
7130 unsignedp = 1;
7131 /* FALLTHRU */
7132 case SIGN_EXTEND:
7133 /* We can't necessarily use a const_int for a multiword mode;
7134 it depends on implicitly extending the value.
7135 Since we don't know the right way to extend it,
7136 we can't tell whether the implicit way is right.
7137
7138 Even for a mode that is no wider than a const_int,
7139 we can't win, because we need to sign extend one of its bits through
7140 the rest of it, and we don't know which bit. */
7141 if (CONST_INT_P (XEXP (x, 0)))
7142 return x;
7143
7144 /* Reject modes that aren't scalar integers because turning vector
7145 or complex modes into shifts causes problems. */
7146 if (!is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
7147 return x;
7148
7149 /* Return if (subreg:MODE FROM 0) is not a safe replacement for
7150	 (zero_extend:MODE FROM) or (sign_extend:MODE FROM).  It is safe for any MEM
7151 because (SUBREG (MEM...)) is guaranteed to cause the MEM to be
7152 reloaded. If not for that, MEM's would very rarely be safe.
7153
7154 Reject modes bigger than a word, because we might not be able
7155 to reference a two-register group starting with an arbitrary register
7156 (and currently gen_lowpart might crash for a SUBREG). */
7157
7158 if (GET_MODE_SIZE (inner_mode) > UNITS_PER_WORD)
7159 return x;
7160
7161 len = GET_MODE_PRECISION (inner_mode);
7162 /* If the inner object has VOIDmode (the only way this can happen
7163 is if it is an ASM_OPERANDS), we can't do anything since we don't
7164 know how much masking to do. */
7165 if (len == 0)
7166 return x;
7167
7168 break;
7169
7170 case ZERO_EXTRACT:
7171 unsignedp = 1;
7172
7173 /* fall through */
7174
7175 case SIGN_EXTRACT:
7176 /* If the operand is a CLOBBER, just return it. */
7177 if (GET_CODE (XEXP (x, 0)) == CLOBBER)
7178 return XEXP (x, 0);
7179
7180 if (!CONST_INT_P (XEXP (x, 1))
7181 || !CONST_INT_P (XEXP (x, 2)))
7182 return x;
7183
7184 /* Reject modes that aren't scalar integers because turning vector
7185 or complex modes into shifts causes problems. */
7186 if (!is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
7187 return x;
7188
7189 len = INTVAL (XEXP (x, 1));
7190 pos = INTVAL (XEXP (x, 2));
7191
7192 /* This should stay within the object being extracted, fail otherwise. */
7193 if (len + pos > GET_MODE_PRECISION (inner_mode))
7194 return x;
7195
7196 if (BITS_BIG_ENDIAN)
7197 pos = GET_MODE_PRECISION (inner_mode) - len - pos;
7198
7199 break;
7200
7201 default:
7202 return x;
7203 }
7204
7205 /* We've rejected non-scalar operations by now. */
7206 scalar_int_mode mode = as_a <scalar_int_mode> (GET_MODE (x));
7207
7208 /* Convert sign extension to zero extension, if we know that the high
7209 bit is not set, as this is easier to optimize. It will be converted
7210 back to cheaper alternative in make_extraction. */
7211 if (GET_CODE (x) == SIGN_EXTEND
7212 && HWI_COMPUTABLE_MODE_P (mode)
7213 && ((nonzero_bits (XEXP (x, 0), inner_mode)
7214 & ~(((unsigned HOST_WIDE_INT) GET_MODE_MASK (inner_mode)) >> 1))
7215 == 0))
7216 {
7217 rtx temp = gen_rtx_ZERO_EXTEND (mode, XEXP (x, 0));
7218 rtx temp2 = expand_compound_operation (temp);
7219
7220 /* Make sure this is a profitable operation. */
7221 if (set_src_cost (x, mode, optimize_this_for_speed_p)
7222 > set_src_cost (temp2, mode, optimize_this_for_speed_p))
7223 return temp2;
7224 else if (set_src_cost (x, mode, optimize_this_for_speed_p)
7225 > set_src_cost (temp, mode, optimize_this_for_speed_p))
7226 return temp;
7227 else
7228 return x;
7229 }
7230
7231 /* We can optimize some special cases of ZERO_EXTEND. */
7232 if (GET_CODE (x) == ZERO_EXTEND)
7233 {
7234 /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI if we
7235 know that the last value didn't have any inappropriate bits
7236 set. */
7237 if (GET_CODE (XEXP (x, 0)) == TRUNCATE
7238 && GET_MODE (XEXP (XEXP (x, 0), 0)) == mode
7239 && HWI_COMPUTABLE_MODE_P (mode)
7240 && (nonzero_bits (XEXP (XEXP (x, 0), 0), mode)
7241 & ~GET_MODE_MASK (inner_mode)) == 0)
7242 return XEXP (XEXP (x, 0), 0);
7243
7244 /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)). */
7245 if (GET_CODE (XEXP (x, 0)) == SUBREG
7246 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == mode
7247 && subreg_lowpart_p (XEXP (x, 0))
7248 && HWI_COMPUTABLE_MODE_P (mode)
7249 && (nonzero_bits (SUBREG_REG (XEXP (x, 0)), mode)
7250 & ~GET_MODE_MASK (inner_mode)) == 0)
7251 return SUBREG_REG (XEXP (x, 0));
7252
7253 /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI when foo
7254 is a comparison and STORE_FLAG_VALUE permits. This is like
7255 the first case, but it works even when MODE is larger
7256 than HOST_WIDE_INT. */
7257 if (GET_CODE (XEXP (x, 0)) == TRUNCATE
7258 && GET_MODE (XEXP (XEXP (x, 0), 0)) == mode
7259 && COMPARISON_P (XEXP (XEXP (x, 0), 0))
7260 && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
7261 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (inner_mode)) == 0)
7262 return XEXP (XEXP (x, 0), 0);
7263
7264 /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)). */
7265 if (GET_CODE (XEXP (x, 0)) == SUBREG
7266 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == mode
7267 && subreg_lowpart_p (XEXP (x, 0))
7268 && COMPARISON_P (SUBREG_REG (XEXP (x, 0)))
7269 && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
7270 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (inner_mode)) == 0)
7271 return SUBREG_REG (XEXP (x, 0));
7272
7273 }
7274
7275 /* If we reach here, we want to return a pair of shifts. The inner
7276 shift is a left shift of BITSIZE - POS - LEN bits. The outer
7277 shift is a right shift of BITSIZE - LEN bits. It is arithmetic or
7278 logical depending on the value of UNSIGNEDP.
7279
7280 If this was a ZERO_EXTEND or ZERO_EXTRACT, this pair of shifts will be
7281 converted into an AND of a shift.
7282
7283 We must check for the case where the left shift would have a negative
7284 count. This can happen in a case like (x >> 31) & 255 on machines
7285 that can't shift by a constant. On those machines, we would first
7286 combine the shift with the AND to produce a variable-position
7287 extraction. Then the constant of 31 would be substituted in
7288 to produce such a position. */
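     /* For instance, with BITS_BIG_ENDIAN == 0,
	(sign_extract:SI X (const_int 8) (const_int 0)) expands to
	(ashiftrt:SI (ashift:SI X (const_int 24)) (const_int 24)).  */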
7289
7290 modewidth = GET_MODE_PRECISION (mode);
7291 if (modewidth >= pos + len)
7292 {
7293 tem = gen_lowpart (mode, XEXP (x, 0));
7294 if (!tem || GET_CODE (tem) == CLOBBER)
7295 return x;
7296 tem = simplify_shift_const (NULL_RTX, ASHIFT, mode,
7297 tem, modewidth - pos - len);
7298 tem = simplify_shift_const (NULL_RTX, unsignedp ? LSHIFTRT : ASHIFTRT,
7299 mode, tem, modewidth - len);
7300 }
7301 else if (unsignedp && len < HOST_BITS_PER_WIDE_INT)
7302 tem = simplify_and_const_int (NULL_RTX, mode,
7303 simplify_shift_const (NULL_RTX, LSHIFTRT,
7304 mode, XEXP (x, 0),
7305 pos),
7306 (HOST_WIDE_INT_1U << len) - 1);
7307 else
7308 /* Any other cases we can't handle. */
7309 return x;
7310
7311 /* If we couldn't do this for some reason, return the original
7312 expression. */
7313 if (GET_CODE (tem) == CLOBBER)
7314 return x;
7315
7316 return tem;
7317 }
7318 \f
7319 /* X is a SET which contains an assignment of one object into
7320 a part of another (such as a bit-field assignment, STRICT_LOW_PART,
7321 or certain SUBREGS). If possible, convert it into a series of
7322 logical operations.
7323
7324 We half-heartedly support variable positions, but do not at all
7325 support variable lengths. */
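     /* For a constant position and length, the result is in essence
	  (set INNER (ior (and (not (ashift MASK POS)) INNER)
			  (ashift (and SRC MASK) POS)))
	where MASK is a constant with the low LEN bits set and SRC is the
	SET_SRC brought to the computation mode.  */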
7326
7327 static const_rtx
7328 expand_field_assignment (const_rtx x)
7329 {
7330 rtx inner;
7331 rtx pos; /* Always counts from low bit. */
7332 int len;
7333 rtx mask, cleared, masked;
7334 scalar_int_mode compute_mode;
7335
7336 /* Loop until we find something we can't simplify. */
7337 while (1)
7338 {
7339 if (GET_CODE (SET_DEST (x)) == STRICT_LOW_PART
7340 && GET_CODE (XEXP (SET_DEST (x), 0)) == SUBREG)
7341 {
7342 inner = SUBREG_REG (XEXP (SET_DEST (x), 0));
7343 len = GET_MODE_PRECISION (GET_MODE (XEXP (SET_DEST (x), 0)));
7344 pos = GEN_INT (subreg_lsb (XEXP (SET_DEST (x), 0)));
7345 }
7346 else if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
7347 && CONST_INT_P (XEXP (SET_DEST (x), 1)))
7348 {
7349 inner = XEXP (SET_DEST (x), 0);
7350 len = INTVAL (XEXP (SET_DEST (x), 1));
7351 pos = XEXP (SET_DEST (x), 2);
7352
7353 /* A constant position should stay within the width of INNER. */
7354 if (CONST_INT_P (pos)
7355 && INTVAL (pos) + len > GET_MODE_PRECISION (GET_MODE (inner)))
7356 break;
7357
7358 if (BITS_BIG_ENDIAN)
7359 {
7360 if (CONST_INT_P (pos))
7361 pos = GEN_INT (GET_MODE_PRECISION (GET_MODE (inner)) - len
7362 - INTVAL (pos));
7363 else if (GET_CODE (pos) == MINUS
7364 && CONST_INT_P (XEXP (pos, 1))
7365 && (INTVAL (XEXP (pos, 1))
7366 == GET_MODE_PRECISION (GET_MODE (inner)) - len))
7367 /* If position is ADJUST - X, new position is X. */
7368 pos = XEXP (pos, 0);
7369 else
7370 {
7371 HOST_WIDE_INT prec = GET_MODE_PRECISION (GET_MODE (inner));
7372 pos = simplify_gen_binary (MINUS, GET_MODE (pos),
7373 gen_int_mode (prec - len,
7374 GET_MODE (pos)),
7375 pos);
7376 }
7377 }
7378 }
7379
7380 /* If the destination is a subreg that overwrites the whole of the inner
7381 register, we can move the subreg to the source. */
7382 else if (GET_CODE (SET_DEST (x)) == SUBREG
7383 /* We need SUBREGs to compute nonzero_bits properly. */
7384 && nonzero_sign_valid
7385 && !read_modify_subreg_p (SET_DEST (x)))
7386 {
7387 x = gen_rtx_SET (SUBREG_REG (SET_DEST (x)),
7388 gen_lowpart
7389 (GET_MODE (SUBREG_REG (SET_DEST (x))),
7390 SET_SRC (x)));
7391 continue;
7392 }
7393 else
7394 break;
7395
7396 while (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner))
7397 inner = SUBREG_REG (inner);
7398
7399 /* Don't attempt bitwise arithmetic on non scalar integer modes. */
7400 if (!is_a <scalar_int_mode> (GET_MODE (inner), &compute_mode))
7401 {
7402 /* Don't do anything for vector or complex integral types. */
7403 if (! FLOAT_MODE_P (GET_MODE (inner)))
7404 break;
7405
7406 /* Try to find an integral mode to pun with. */
7407 if (!int_mode_for_size (GET_MODE_BITSIZE (GET_MODE (inner)), 0)
7408 .exists (&compute_mode))
7409 break;
7410
7411 inner = gen_lowpart (compute_mode, inner);
7412 }
7413
7414 /* Compute a mask of LEN bits, if we can do this on the host machine. */
7415 if (len >= HOST_BITS_PER_WIDE_INT)
7416 break;
7417
7418 /* Don't try to compute in too wide unsupported modes. */
7419 if (!targetm.scalar_mode_supported_p (compute_mode))
7420 break;
7421
7422 /* Now compute the equivalent expression. Make a copy of INNER
7423 for the SET_DEST in case it is a MEM into which we will substitute;
7424 we don't want shared RTL in that case. */
7425 mask = gen_int_mode ((HOST_WIDE_INT_1U << len) - 1,
7426 compute_mode);
7427 cleared = simplify_gen_binary (AND, compute_mode,
7428 simplify_gen_unary (NOT, compute_mode,
7429 simplify_gen_binary (ASHIFT,
7430 compute_mode,
7431 mask, pos),
7432 compute_mode),
7433 inner);
7434 masked = simplify_gen_binary (ASHIFT, compute_mode,
7435 simplify_gen_binary (
7436 AND, compute_mode,
7437 gen_lowpart (compute_mode, SET_SRC (x)),
7438 mask),
7439 pos);
7440
7441 x = gen_rtx_SET (copy_rtx (inner),
7442 simplify_gen_binary (IOR, compute_mode,
7443 cleared, masked));
7444 }
7445
7446 return x;
7447 }
7448 \f
7449 /* Return an RTX for a reference to LEN bits of INNER. If POS_RTX is nonzero,
7450 it is an RTX that represents the (variable) starting position; otherwise,
7451 POS is the (constant) starting bit position. Both are counted from the LSB.
7452
7453 UNSIGNEDP is nonzero for an unsigned reference and zero for a signed one.
7454
7455 IN_DEST is nonzero if this is a reference in the destination of a SET.
7456 This is used when a ZERO_ or SIGN_EXTRACT isn't needed. If nonzero,
7457 a STRICT_LOW_PART will be used, if zero, ZERO_EXTEND or SIGN_EXTEND will
7458 be used.
7459
7460 IN_COMPARE is nonzero if we are in a COMPARE. This means that a
7461 ZERO_EXTRACT should be built even for bits starting at bit 0.
7462
7463 MODE is the desired mode of the result (if IN_DEST == 0).
7464
7465 The result is an RTX for the extraction or NULL_RTX if the target
7466 can't handle it. */
7467
7468 static rtx
7469 make_extraction (machine_mode mode, rtx inner, HOST_WIDE_INT pos,
7470 rtx pos_rtx, unsigned HOST_WIDE_INT len, int unsignedp,
7471 int in_dest, int in_compare)
7472 {
7473 /* This mode describes the size of the storage area
7474 to fetch the overall value from. Within that, we
7475 ignore the POS lowest bits, etc. */
7476 machine_mode is_mode = GET_MODE (inner);
7477 machine_mode inner_mode;
7478 scalar_int_mode wanted_inner_mode;
7479 scalar_int_mode wanted_inner_reg_mode = word_mode;
7480 scalar_int_mode pos_mode = word_mode;
7481 machine_mode extraction_mode = word_mode;
7482 rtx new_rtx = 0;
7483 rtx orig_pos_rtx = pos_rtx;
7484 HOST_WIDE_INT orig_pos;
7485
7486 if (pos_rtx && CONST_INT_P (pos_rtx))
7487 pos = INTVAL (pos_rtx), pos_rtx = 0;
7488
7489 if (GET_CODE (inner) == SUBREG
7490 && subreg_lowpart_p (inner)
7491 && (paradoxical_subreg_p (inner)
7492	     /* If trying or potentially trying to extract
7493 bits outside of is_mode, don't look through
7494 non-paradoxical SUBREGs. See PR82192. */
7495 || (pos_rtx == NULL_RTX
7496 && pos + len <= GET_MODE_PRECISION (is_mode))))
7497 {
7498 /* If going from (subreg:SI (mem:QI ...)) to (mem:QI ...),
7499 consider just the QI as the memory to extract from.
7500 The subreg adds or removes high bits; its mode is
7501 irrelevant to the meaning of this extraction,
7502 since POS and LEN count from the lsb. */
7503 if (MEM_P (SUBREG_REG (inner)))
7504 is_mode = GET_MODE (SUBREG_REG (inner));
7505 inner = SUBREG_REG (inner);
7506 }
7507 else if (GET_CODE (inner) == ASHIFT
7508 && CONST_INT_P (XEXP (inner, 1))
7509 && pos_rtx == 0 && pos == 0
7510 && len > UINTVAL (XEXP (inner, 1)))
7511 {
7512 /* We're extracting the least significant bits of an rtx
7513 (ashift X (const_int C)), where LEN > C. Extract the
7514 least significant (LEN - C) bits of X, giving an rtx
7515 whose mode is MODE, then shift it left C times. */
7516 new_rtx = make_extraction (mode, XEXP (inner, 0),
7517 0, 0, len - INTVAL (XEXP (inner, 1)),
7518 unsignedp, in_dest, in_compare);
7519 if (new_rtx != 0)
7520 return gen_rtx_ASHIFT (mode, new_rtx, XEXP (inner, 1));
7521 }
7522 else if (GET_CODE (inner) == TRUNCATE
7523	   /* If trying or potentially trying to extract
7524 bits outside of is_mode, don't look through
7525 TRUNCATE. See PR82192. */
7526 && pos_rtx == NULL_RTX
7527 && pos + len <= GET_MODE_PRECISION (is_mode))
7528 inner = XEXP (inner, 0);
7529
7530 inner_mode = GET_MODE (inner);
7531
7532 /* See if this can be done without an extraction. We never can if the
7533 width of the field is not the same as that of some integer mode. For
7534 registers, we can only avoid the extraction if the position is at the
7535 low-order bit and this is either not in the destination or we have the
7536 appropriate STRICT_LOW_PART operation available.
7537
7538 For MEM, we can avoid an extract if the field starts on an appropriate
7539 boundary and we can change the mode of the memory reference. */
7540
7541 scalar_int_mode tmode;
7542 if (int_mode_for_size (len, 1).exists (&tmode)
7543 && ((pos_rtx == 0 && (pos % BITS_PER_WORD) == 0
7544 && !MEM_P (inner)
7545 && (pos == 0 || REG_P (inner))
7546 && (inner_mode == tmode
7547 || !REG_P (inner)
7548 || TRULY_NOOP_TRUNCATION_MODES_P (tmode, inner_mode)
7549 || reg_truncated_to_mode (tmode, inner))
7550 && (! in_dest
7551 || (REG_P (inner)
7552 && have_insn_for (STRICT_LOW_PART, tmode))))
7553 || (MEM_P (inner) && pos_rtx == 0
7554 && (pos
7555 % (STRICT_ALIGNMENT ? GET_MODE_ALIGNMENT (tmode)
7556 : BITS_PER_UNIT)) == 0
7557 /* We can't do this if we are widening INNER_MODE (it
7558 may not be aligned, for one thing). */
7559 && !paradoxical_subreg_p (tmode, inner_mode)
7560 && (inner_mode == tmode
7561 || (! mode_dependent_address_p (XEXP (inner, 0),
7562 MEM_ADDR_SPACE (inner))
7563 && ! MEM_VOLATILE_P (inner))))))
7564 {
7565 /* If INNER is a MEM, make a new MEM that encompasses just the desired
7566 field. If the original and current mode are the same, we need not
7567 adjust the offset. Otherwise, we do if bytes big endian.
7568
7569 If INNER is not a MEM, get a piece consisting of just the field
7570 of interest (in this case POS % BITS_PER_WORD must be 0). */
7571
7572 if (MEM_P (inner))
7573 {
7574 HOST_WIDE_INT offset;
7575
7576 /* POS counts from lsb, but make OFFSET count in memory order. */
7577 if (BYTES_BIG_ENDIAN)
7578 offset = (GET_MODE_PRECISION (is_mode) - len - pos) / BITS_PER_UNIT;
7579 else
7580 offset = pos / BITS_PER_UNIT;
7581
7582 new_rtx = adjust_address_nv (inner, tmode, offset);
7583 }
7584 else if (REG_P (inner))
7585 {
7586 if (tmode != inner_mode)
7587 {
7588 /* We can't call gen_lowpart in a DEST since we
7589 always want a SUBREG (see below) and it would sometimes
7590 return a new hard register. */
7591 if (pos || in_dest)
7592 {
7593 unsigned int offset
7594 = subreg_offset_from_lsb (tmode, inner_mode, pos);
7595
7596 /* Avoid creating invalid subregs, for example when
7597 simplifying (x>>32)&255. */
7598 if (!validate_subreg (tmode, inner_mode, inner, offset))
7599 return NULL_RTX;
7600
7601 new_rtx = gen_rtx_SUBREG (tmode, inner, offset);
7602 }
7603 else
7604 new_rtx = gen_lowpart (tmode, inner);
7605 }
7606 else
7607 new_rtx = inner;
7608 }
7609 else
7610 new_rtx = force_to_mode (inner, tmode,
7611 len >= HOST_BITS_PER_WIDE_INT
7612 ? HOST_WIDE_INT_M1U
7613 : (HOST_WIDE_INT_1U << len) - 1, 0);
7614
7615 /* If this extraction is going into the destination of a SET,
7616 make a STRICT_LOW_PART unless we made a MEM. */
7617
7618 if (in_dest)
7619 return (MEM_P (new_rtx) ? new_rtx
7620 : (GET_CODE (new_rtx) != SUBREG
7621 ? gen_rtx_CLOBBER (tmode, const0_rtx)
7622 : gen_rtx_STRICT_LOW_PART (VOIDmode, new_rtx)));
7623
7624 if (mode == tmode)
7625 return new_rtx;
7626
7627 if (CONST_SCALAR_INT_P (new_rtx))
7628 return simplify_unary_operation (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
7629 mode, new_rtx, tmode);
7630
7631 /* If we know that no extraneous bits are set, and that the high
7632 bit is not set, convert the extraction to the cheaper of
7633 sign and zero extension, that are equivalent in these cases. */
7634 if (flag_expensive_optimizations
7635 && (HWI_COMPUTABLE_MODE_P (tmode)
7636 && ((nonzero_bits (new_rtx, tmode)
7637 & ~(((unsigned HOST_WIDE_INT)GET_MODE_MASK (tmode)) >> 1))
7638 == 0)))
7639 {
7640 rtx temp = gen_rtx_ZERO_EXTEND (mode, new_rtx);
7641 rtx temp1 = gen_rtx_SIGN_EXTEND (mode, new_rtx);
7642
7643 /* Prefer ZERO_EXTENSION, since it gives more information to
7644 backends. */
7645 if (set_src_cost (temp, mode, optimize_this_for_speed_p)
7646 <= set_src_cost (temp1, mode, optimize_this_for_speed_p))
7647 return temp;
7648 return temp1;
7649 }
7650
7651 /* Otherwise, sign- or zero-extend unless we already are in the
7652 proper mode. */
7653
7654 return (gen_rtx_fmt_e (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
7655 mode, new_rtx));
7656 }
7657
7658 /* Unless this is a COMPARE or we have a funny memory reference,
7659 don't do anything with zero-extending field extracts starting at
7660 the low-order bit since they are simple AND operations. */
7661 if (pos_rtx == 0 && pos == 0 && ! in_dest
7662 && ! in_compare && unsignedp)
7663 return 0;
7664
7665   /* If INNER is a MEM, reject this if we would be spanning bytes or
7666 if the position is not a constant and the length is not 1. In all
7667 other cases, we would only be going outside our object in cases when
7668 an original shift would have been undefined. */
7669 if (MEM_P (inner)
7670 && ((pos_rtx == 0 && pos + len > GET_MODE_PRECISION (is_mode))
7671 || (pos_rtx != 0 && len != 1)))
7672 return 0;
7673
7674 enum extraction_pattern pattern = (in_dest ? EP_insv
7675 : unsignedp ? EP_extzv : EP_extv);
7676
7677 /* If INNER is not from memory, we want it to have the mode of a register
7678 extraction pattern's structure operand, or word_mode if there is no
7679 such pattern. The same applies to extraction_mode and pos_mode
7680 and their respective operands.
7681
7682 For memory, assume that the desired extraction_mode and pos_mode
7683 are the same as for a register operation, since at present we don't
7684 have named patterns for aligned memory structures. */
7685 struct extraction_insn insn;
7686 if (get_best_reg_extraction_insn (&insn, pattern,
7687 GET_MODE_BITSIZE (inner_mode), mode))
7688 {
7689 wanted_inner_reg_mode = insn.struct_mode.require ();
7690 pos_mode = insn.pos_mode;
7691 extraction_mode = insn.field_mode;
7692 }
7693
7694 /* Never narrow an object, since that might not be safe. */
7695
7696 if (mode != VOIDmode
7697 && partial_subreg_p (extraction_mode, mode))
7698 extraction_mode = mode;
7699
7700 if (!MEM_P (inner))
7701 wanted_inner_mode = wanted_inner_reg_mode;
7702 else
7703 {
7704 /* Be careful not to go beyond the extracted object and maintain the
7705 natural alignment of the memory. */
7706 wanted_inner_mode = smallest_int_mode_for_size (len);
7707 while (pos % GET_MODE_BITSIZE (wanted_inner_mode) + len
7708 > GET_MODE_BITSIZE (wanted_inner_mode))
7709 wanted_inner_mode = GET_MODE_WIDER_MODE (wanted_inner_mode).require ();
7710 }
7711
7712 orig_pos = pos;
7713
7714 if (BITS_BIG_ENDIAN)
7715 {
7716 /* POS is passed as if BITS_BIG_ENDIAN == 0, so we need to convert it to
7717 BITS_BIG_ENDIAN style. If position is constant, compute new
7718 position. Otherwise, build subtraction.
7719 Note that POS is relative to the mode of the original argument.
7720 If it's a MEM we need to recompute POS relative to that.
7721 However, if we're extracting from (or inserting into) a register,
7722 we want to recompute POS relative to wanted_inner_mode. */
7723 int width = (MEM_P (inner)
7724 ? GET_MODE_BITSIZE (is_mode)
7725 : GET_MODE_BITSIZE (wanted_inner_mode));
7726
7727 if (pos_rtx == 0)
7728 pos = width - len - pos;
7729 else
7730 pos_rtx
7731 = gen_rtx_MINUS (GET_MODE (pos_rtx),
7732 gen_int_mode (width - len, GET_MODE (pos_rtx)),
7733 pos_rtx);
7734 /* POS may be less than 0 now, but we check for that below.
7735 Note that it can only be less than 0 if !MEM_P (inner). */
7736 }
7737
7738 /* If INNER has a wider mode, and this is a constant extraction, try to
7739 make it smaller and adjust the byte to point to the byte containing
7740 the value. */
7741 if (wanted_inner_mode != VOIDmode
7742 && inner_mode != wanted_inner_mode
7743 && ! pos_rtx
7744 && partial_subreg_p (wanted_inner_mode, is_mode)
7745 && MEM_P (inner)
7746 && ! mode_dependent_address_p (XEXP (inner, 0), MEM_ADDR_SPACE (inner))
7747 && ! MEM_VOLATILE_P (inner))
7748 {
7749 int offset = 0;
7750
7751 /* The computations below will be correct if the machine is big
7752 endian in both bits and bytes or little endian in bits and bytes.
7753 If it is mixed, we must adjust. */
7754
7755 /* If bytes are big endian and we had a paradoxical SUBREG, we must
7756 adjust OFFSET to compensate. */
7757 if (BYTES_BIG_ENDIAN
7758 && paradoxical_subreg_p (is_mode, inner_mode))
7759 offset -= GET_MODE_SIZE (is_mode) - GET_MODE_SIZE (inner_mode);
7760
7761 /* We can now move to the desired byte. */
7762 offset += (pos / GET_MODE_BITSIZE (wanted_inner_mode))
7763 * GET_MODE_SIZE (wanted_inner_mode);
7764 pos %= GET_MODE_BITSIZE (wanted_inner_mode);
7765
7766 if (BYTES_BIG_ENDIAN != BITS_BIG_ENDIAN
7767 && is_mode != wanted_inner_mode)
7768 offset = (GET_MODE_SIZE (is_mode)
7769 - GET_MODE_SIZE (wanted_inner_mode) - offset);
7770
7771 inner = adjust_address_nv (inner, wanted_inner_mode, offset);
7772 }
7773
7774 /* If INNER is not memory, get it into the proper mode. If we are changing
7775 its mode, POS must be a constant and smaller than the size of the new
7776 mode. */
7777 else if (!MEM_P (inner))
7778 {
7779       /* On the LHS, don't create paradoxical subregs implicitly truncating
7780 the register unless TARGET_TRULY_NOOP_TRUNCATION. */
7781 if (in_dest
7782 && !TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (inner),
7783 wanted_inner_mode))
7784 return NULL_RTX;
7785
7786 if (GET_MODE (inner) != wanted_inner_mode
7787 && (pos_rtx != 0
7788 || orig_pos + len > GET_MODE_BITSIZE (wanted_inner_mode)))
7789 return NULL_RTX;
7790
7791 if (orig_pos < 0)
7792 return NULL_RTX;
7793
7794 inner = force_to_mode (inner, wanted_inner_mode,
7795 pos_rtx
7796 || len + orig_pos >= HOST_BITS_PER_WIDE_INT
7797 ? HOST_WIDE_INT_M1U
7798 : (((HOST_WIDE_INT_1U << len) - 1)
7799 << orig_pos),
7800 0);
7801 }
7802
7803 /* Adjust mode of POS_RTX, if needed. If we want a wider mode, we
7804 have to zero extend. Otherwise, we can just use a SUBREG.
7805
7806 We dealt with constant rtxes earlier, so pos_rtx cannot
7807 have VOIDmode at this point. */
7808 if (pos_rtx != 0
7809 && (GET_MODE_SIZE (pos_mode)
7810 > GET_MODE_SIZE (as_a <scalar_int_mode> (GET_MODE (pos_rtx)))))
7811 {
7812 rtx temp = simplify_gen_unary (ZERO_EXTEND, pos_mode, pos_rtx,
7813 GET_MODE (pos_rtx));
7814
7815 /* If we know that no extraneous bits are set, and that the high
7816 bit is not set, convert extraction to cheaper one - either
7817 SIGN_EXTENSION or ZERO_EXTENSION, that are equivalent in these
7818 cases. */
7819 if (flag_expensive_optimizations
7820 && (HWI_COMPUTABLE_MODE_P (GET_MODE (pos_rtx))
7821 && ((nonzero_bits (pos_rtx, GET_MODE (pos_rtx))
7822 & ~(((unsigned HOST_WIDE_INT)
7823 GET_MODE_MASK (GET_MODE (pos_rtx)))
7824 >> 1))
7825 == 0)))
7826 {
7827 rtx temp1 = simplify_gen_unary (SIGN_EXTEND, pos_mode, pos_rtx,
7828 GET_MODE (pos_rtx));
7829
7830 /* Prefer ZERO_EXTENSION, since it gives more information to
7831 backends. */
7832 if (set_src_cost (temp1, pos_mode, optimize_this_for_speed_p)
7833 < set_src_cost (temp, pos_mode, optimize_this_for_speed_p))
7834 temp = temp1;
7835 }
7836 pos_rtx = temp;
7837 }
7838
7839 /* Make POS_RTX unless we already have it and it is correct. If we don't
7840 have a POS_RTX but we do have an ORIG_POS_RTX, the latter must
7841 be a CONST_INT. */
7842 if (pos_rtx == 0 && orig_pos_rtx != 0 && INTVAL (orig_pos_rtx) == pos)
7843 pos_rtx = orig_pos_rtx;
7844
7845 else if (pos_rtx == 0)
7846 pos_rtx = GEN_INT (pos);
7847
7848 /* Make the required operation. See if we can use existing rtx. */
7849 new_rtx = gen_rtx_fmt_eee (unsignedp ? ZERO_EXTRACT : SIGN_EXTRACT,
7850 extraction_mode, inner, GEN_INT (len), pos_rtx);
7851 if (! in_dest)
7852 new_rtx = gen_lowpart (mode, new_rtx);
7853
7854 return new_rtx;
7855 }
7856 \f
7857 /* See if X (of mode MODE) contains an ASHIFT of COUNT or more bits that
7858 can be commuted with any other operations in X. Return X without
7859 that shift if so. */
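     /* For instance, with COUNT == 3,
	X == (plus (ashift Y (const_int 3)) (const_int 8)) yields
	(plus Y (const_int 1)).  */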
7860
7861 static rtx
7862 extract_left_shift (scalar_int_mode mode, rtx x, int count)
7863 {
7864 enum rtx_code code = GET_CODE (x);
7865 rtx tem;
7866
7867 switch (code)
7868 {
7869 case ASHIFT:
7870 /* This is the shift itself. If it is wide enough, we will return
7871 either the value being shifted if the shift count is equal to
7872 COUNT or a shift for the difference. */
7873 if (CONST_INT_P (XEXP (x, 1))
7874 && INTVAL (XEXP (x, 1)) >= count)
7875 return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (x, 0),
7876 INTVAL (XEXP (x, 1)) - count);
7877 break;
7878
7879 case NEG: case NOT:
7880 if ((tem = extract_left_shift (mode, XEXP (x, 0), count)) != 0)
7881 return simplify_gen_unary (code, mode, tem, mode);
7882
7883 break;
7884
7885 case PLUS: case IOR: case XOR: case AND:
7886 /* If we can safely shift this constant and we find the inner shift,
7887 make a new operation. */
7888 if (CONST_INT_P (XEXP (x, 1))
7889 && (UINTVAL (XEXP (x, 1))
7890 & (((HOST_WIDE_INT_1U << count)) - 1)) == 0
7891 && (tem = extract_left_shift (mode, XEXP (x, 0), count)) != 0)
7892 {
7893 HOST_WIDE_INT val = INTVAL (XEXP (x, 1)) >> count;
7894 return simplify_gen_binary (code, mode, tem,
7895 gen_int_mode (val, mode));
7896 }
7897 break;
7898
7899 default:
7900 break;
7901 }
7902
7903 return 0;
7904 }
7905 \f
7906 /* Subroutine of make_compound_operation. *X_PTR is the rtx at the current
7907 level of the expression and MODE is its mode. IN_CODE is as for
7908 make_compound_operation. *NEXT_CODE_PTR is the value of IN_CODE
7909 that should be used when recursing on operands of *X_PTR.
7910
7911 There are two possible actions:
7912
7913 - Return null. This tells the caller to recurse on *X_PTR with IN_CODE
7914 equal to *NEXT_CODE_PTR, after which *X_PTR holds the final value.
7915
7916 - Return a new rtx, which the caller returns directly. */
7917
7918 static rtx
7919 make_compound_operation_int (scalar_int_mode mode, rtx *x_ptr,
7920 enum rtx_code in_code,
7921 enum rtx_code *next_code_ptr)
7922 {
7923 rtx x = *x_ptr;
7924 enum rtx_code next_code = *next_code_ptr;
7925 enum rtx_code code = GET_CODE (x);
7926 int mode_width = GET_MODE_PRECISION (mode);
7927 rtx rhs, lhs;
7928 rtx new_rtx = 0;
7929 int i;
7930 rtx tem;
7931 scalar_int_mode inner_mode;
7932 bool equality_comparison = false;
7933
7934 if (in_code == EQ)
7935 {
7936 equality_comparison = true;
7937 in_code = COMPARE;
7938 }
7939
7940   /* Process depending on the code of this operation.  If NEW_RTX is set
7941 nonzero, it will be returned. */
7942
7943 switch (code)
7944 {
7945 case ASHIFT:
7946 /* Convert shifts by constants into multiplications if inside
7947 an address. */
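	  /* For instance, (ashift X (const_int 2)) inside a MEM becomes
	     (mult X (const_int 4)).  */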
7948 if (in_code == MEM && CONST_INT_P (XEXP (x, 1))
7949 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
7950 && INTVAL (XEXP (x, 1)) >= 0)
7951 {
7952 HOST_WIDE_INT count = INTVAL (XEXP (x, 1));
7953 HOST_WIDE_INT multval = HOST_WIDE_INT_1 << count;
7954
7955 new_rtx = make_compound_operation (XEXP (x, 0), next_code);
7956 if (GET_CODE (new_rtx) == NEG)
7957 {
7958 new_rtx = XEXP (new_rtx, 0);
7959 multval = -multval;
7960 }
7961 multval = trunc_int_for_mode (multval, mode);
7962 new_rtx = gen_rtx_MULT (mode, new_rtx, gen_int_mode (multval, mode));
7963 }
7964 break;
7965
7966 case PLUS:
7967 lhs = XEXP (x, 0);
7968 rhs = XEXP (x, 1);
7969 lhs = make_compound_operation (lhs, next_code);
7970 rhs = make_compound_operation (rhs, next_code);
7971 if (GET_CODE (lhs) == MULT && GET_CODE (XEXP (lhs, 0)) == NEG)
7972 {
7973 tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (lhs, 0), 0),
7974 XEXP (lhs, 1));
7975 new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
7976 }
7977 else if (GET_CODE (lhs) == MULT
7978 && (CONST_INT_P (XEXP (lhs, 1)) && INTVAL (XEXP (lhs, 1)) < 0))
7979 {
7980 tem = simplify_gen_binary (MULT, mode, XEXP (lhs, 0),
7981 simplify_gen_unary (NEG, mode,
7982 XEXP (lhs, 1),
7983 mode));
7984 new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
7985 }
7986 else
7987 {
7988 SUBST (XEXP (x, 0), lhs);
7989 SUBST (XEXP (x, 1), rhs);
7990 }
7991 maybe_swap_commutative_operands (x);
7992 return x;
7993
7994 case MINUS:
7995 lhs = XEXP (x, 0);
7996 rhs = XEXP (x, 1);
7997 lhs = make_compound_operation (lhs, next_code);
7998 rhs = make_compound_operation (rhs, next_code);
7999 if (GET_CODE (rhs) == MULT && GET_CODE (XEXP (rhs, 0)) == NEG)
8000 {
8001 tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (rhs, 0), 0),
8002 XEXP (rhs, 1));
8003 return simplify_gen_binary (PLUS, mode, tem, lhs);
8004 }
8005 else if (GET_CODE (rhs) == MULT
8006 && (CONST_INT_P (XEXP (rhs, 1)) && INTVAL (XEXP (rhs, 1)) < 0))
8007 {
8008 tem = simplify_gen_binary (MULT, mode, XEXP (rhs, 0),
8009 simplify_gen_unary (NEG, mode,
8010 XEXP (rhs, 1),
8011 mode));
8012 return simplify_gen_binary (PLUS, mode, tem, lhs);
8013 }
8014 else
8015 {
8016 SUBST (XEXP (x, 0), lhs);
8017 SUBST (XEXP (x, 1), rhs);
8018 return x;
8019 }
8020
8021 case AND:
8022 /* If the second operand is not a constant, we can't do anything
8023 with it. */
8024 if (!CONST_INT_P (XEXP (x, 1)))
8025 break;
8026
8027 /* If the constant is a power of two minus one and the first operand
8028 is a logical right shift, make an extraction. */
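	  /* For instance, (and (lshiftrt X (const_int 8)) (const_int 255))
	     is turned into an extraction of the 8 bits of X starting at
	     bit 8, counting from the least significant bit (typically a
	     zero_extract or an equivalent form).  */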
8029 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8030 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
8031 {
8032 new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
8033 new_rtx = make_extraction (mode, new_rtx, 0, XEXP (XEXP (x, 0), 1),
8034 i, 1, 0, in_code == COMPARE);
8035 }
8036
8037 /* Same as previous, but for (subreg (lshiftrt ...)) in first op. */
8038 else if (GET_CODE (XEXP (x, 0)) == SUBREG
8039 && subreg_lowpart_p (XEXP (x, 0))
8040 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (XEXP (x, 0))),
8041 &inner_mode)
8042 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == LSHIFTRT
8043 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
8044 {
8045 rtx inner_x0 = SUBREG_REG (XEXP (x, 0));
8046 new_rtx = make_compound_operation (XEXP (inner_x0, 0), next_code);
8047 new_rtx = make_extraction (inner_mode, new_rtx, 0,
8048 XEXP (inner_x0, 1),
8049 i, 1, 0, in_code == COMPARE);
8050
8051 /* If we narrowed the mode when dropping the subreg, then we lose. */
8052 if (GET_MODE_SIZE (inner_mode) < GET_MODE_SIZE (mode))
8053 new_rtx = NULL;
8054
8055 /* If that didn't give anything, see if the AND simplifies on
8056 its own. */
8057 if (!new_rtx && i >= 0)
8058 {
8059 new_rtx = make_compound_operation (XEXP (x, 0), next_code);
8060 new_rtx = make_extraction (mode, new_rtx, 0, NULL_RTX, i, 1,
8061 0, in_code == COMPARE);
8062 }
8063 }
8064 /* Same as previous, but for (xor/ior (lshiftrt...) (lshiftrt...)). */
8065 else if ((GET_CODE (XEXP (x, 0)) == XOR
8066 || GET_CODE (XEXP (x, 0)) == IOR)
8067 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LSHIFTRT
8068 && GET_CODE (XEXP (XEXP (x, 0), 1)) == LSHIFTRT
8069 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
8070 {
8071 /* Apply the distributive law, and then try to make extractions. */
8072 new_rtx = gen_rtx_fmt_ee (GET_CODE (XEXP (x, 0)), mode,
8073 gen_rtx_AND (mode, XEXP (XEXP (x, 0), 0),
8074 XEXP (x, 1)),
8075 gen_rtx_AND (mode, XEXP (XEXP (x, 0), 1),
8076 XEXP (x, 1)));
8077 new_rtx = make_compound_operation (new_rtx, in_code);
8078 }
8079
8080       /* If we have (and (rotate X C) M) and C is larger than the number
8081 of bits in M, this is an extraction. */
8082
8083 else if (GET_CODE (XEXP (x, 0)) == ROTATE
8084 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8085 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0
8086 && i <= INTVAL (XEXP (XEXP (x, 0), 1)))
8087 {
8088 new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
8089 new_rtx = make_extraction (mode, new_rtx,
8090 (GET_MODE_PRECISION (mode)
8091 - INTVAL (XEXP (XEXP (x, 0), 1))),
8092 NULL_RTX, i, 1, 0, in_code == COMPARE);
8093 }
8094
8095 /* On machines without logical shifts, if the operand of the AND is
8096 a logical shift and our mask turns off all the propagated sign
8097 bits, we can replace the logical shift with an arithmetic shift. */
8098 else if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8099 && !have_insn_for (LSHIFTRT, mode)
8100 && have_insn_for (ASHIFTRT, mode)
8101 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8102 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
8103 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
8104 && mode_width <= HOST_BITS_PER_WIDE_INT)
8105 {
8106 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
8107
8108 mask >>= INTVAL (XEXP (XEXP (x, 0), 1));
8109 if ((INTVAL (XEXP (x, 1)) & ~mask) == 0)
8110 SUBST (XEXP (x, 0),
8111 gen_rtx_ASHIFTRT (mode,
8112 make_compound_operation (XEXP (XEXP (x,
8113 0),
8114 0),
8115 next_code),
8116 XEXP (XEXP (x, 0), 1)));
8117 }
8118
8119 /* If the constant is one less than a power of two, this might be
8120 representable by an extraction even if no shift is present.
8121 If it doesn't end up being a ZERO_EXTEND, we will ignore it unless
8122 we are in a COMPARE. */
8123 else if ((i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
8124 new_rtx = make_extraction (mode,
8125 make_compound_operation (XEXP (x, 0),
8126 next_code),
8127 0, NULL_RTX, i, 1, 0, in_code == COMPARE);
8128
8129 /* If we are in a comparison and this is an AND with a power of two,
8130 convert this into the appropriate bit extract. */
8131 else if (in_code == COMPARE
8132 && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
8133 && (equality_comparison || i < GET_MODE_PRECISION (mode) - 1))
8134 new_rtx = make_extraction (mode,
8135 make_compound_operation (XEXP (x, 0),
8136 next_code),
8137 i, NULL_RTX, 1, 1, 0, 1);
8138
8139       /* If one operand is a paradoxical subreg of a register or memory and
8140 the constant (limited to the smaller mode) has only zero bits where
8141 the sub expression has known zero bits, this can be expressed as
8142 a zero_extend. */
8143 else if (GET_CODE (XEXP (x, 0)) == SUBREG)
8144 {
8145 rtx sub;
8146
8147 sub = XEXP (XEXP (x, 0), 0);
8148 machine_mode sub_mode = GET_MODE (sub);
8149 if ((REG_P (sub) || MEM_P (sub))
8150 && GET_MODE_PRECISION (sub_mode) < mode_width)
8151 {
8152 unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (sub_mode);
8153 unsigned HOST_WIDE_INT mask;
8154
8155 	     /* Original AND constant with all the known zero bits set.  */
8156 mask = UINTVAL (XEXP (x, 1)) | (~nonzero_bits (sub, sub_mode));
8157 if ((mask & mode_mask) == mode_mask)
8158 {
8159 new_rtx = make_compound_operation (sub, next_code);
8160 new_rtx = make_extraction (mode, new_rtx, 0, 0,
8161 GET_MODE_PRECISION (sub_mode),
8162 1, 0, in_code == COMPARE);
8163 }
8164 }
8165 }
8166
8167 break;
8168
8169 case LSHIFTRT:
8170 /* If the sign bit is known to be zero, replace this with an
8171 arithmetic shift. */
8172 if (have_insn_for (ASHIFTRT, mode)
8173 && ! have_insn_for (LSHIFTRT, mode)
8174 && mode_width <= HOST_BITS_PER_WIDE_INT
8175 && (nonzero_bits (XEXP (x, 0), mode) & (1 << (mode_width - 1))) == 0)
8176 {
8177 new_rtx = gen_rtx_ASHIFTRT (mode,
8178 make_compound_operation (XEXP (x, 0),
8179 next_code),
8180 XEXP (x, 1));
8181 break;
8182 }
8183
8184 /* fall through */
8185
8186 case ASHIFTRT:
8187 lhs = XEXP (x, 0);
8188 rhs = XEXP (x, 1);
8189
8190 /* If we have (ashiftrt (ashift foo C1) C2) with C2 >= C1,
8191 this is a SIGN_EXTRACT. */
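	  /* For instance, (ashiftrt (ashift X (const_int 24)) (const_int 24))
	     in SImode is a signed extraction of the low 8 bits of X.  */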
8192 if (CONST_INT_P (rhs)
8193 && GET_CODE (lhs) == ASHIFT
8194 && CONST_INT_P (XEXP (lhs, 1))
8195 && INTVAL (rhs) >= INTVAL (XEXP (lhs, 1))
8196 && INTVAL (XEXP (lhs, 1)) >= 0
8197 && INTVAL (rhs) < mode_width)
8198 {
8199 new_rtx = make_compound_operation (XEXP (lhs, 0), next_code);
8200 new_rtx = make_extraction (mode, new_rtx,
8201 INTVAL (rhs) - INTVAL (XEXP (lhs, 1)),
8202 NULL_RTX, mode_width - INTVAL (rhs),
8203 code == LSHIFTRT, 0, in_code == COMPARE);
8204 break;
8205 }
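/* For instance, in SImode (ashiftrt (ashift FOO (const_int 24)) (const_int 24))
is handled here with a starting position of 0 and a length of 8, i.e. it is
rewritten as a signed extraction -- effectively a SIGN_EXTEND of the low
eight bits of FOO.  */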
8206
8207 /* See if we have operations between an ASHIFTRT and an ASHIFT.
8208 If so, try to merge the shifts into a SIGN_EXTEND. We could
8209 also do this for some cases of SIGN_EXTRACT, but it doesn't
8210 seem worth the effort; the case checked for occurs on Alpha. */
8211
8212 if (!OBJECT_P (lhs)
8213 && ! (GET_CODE (lhs) == SUBREG
8214 && (OBJECT_P (SUBREG_REG (lhs))))
8215 && CONST_INT_P (rhs)
8216 && INTVAL (rhs) >= 0
8217 && INTVAL (rhs) < HOST_BITS_PER_WIDE_INT
8218 && INTVAL (rhs) < mode_width
8219 && (new_rtx = extract_left_shift (mode, lhs, INTVAL (rhs))) != 0)
8220 new_rtx = make_extraction (mode, make_compound_operation (new_rtx,
8221 next_code),
8222 0, NULL_RTX, mode_width - INTVAL (rhs),
8223 code == LSHIFTRT, 0, in_code == COMPARE);
8224
8225 break;
8226
8227 case SUBREG:
8228 /* Call ourselves recursively on the inner expression. If we are
8229 narrowing the object and it has a different RTL code from
8230 what it originally did, do this SUBREG as a force_to_mode. */
8231 {
8232 rtx inner = SUBREG_REG (x), simplified;
8233 enum rtx_code subreg_code = in_code;
8234
8235 /* If the SUBREG is masking of a logical right shift,
8236 make an extraction. */
8237 if (GET_CODE (inner) == LSHIFTRT
8238 && is_a <scalar_int_mode> (GET_MODE (inner), &inner_mode)
8239 && GET_MODE_SIZE (mode) < GET_MODE_SIZE (inner_mode)
8240 && CONST_INT_P (XEXP (inner, 1))
8241 && UINTVAL (XEXP (inner, 1)) < GET_MODE_PRECISION (inner_mode)
8242 && subreg_lowpart_p (x))
8243 {
8244 new_rtx = make_compound_operation (XEXP (inner, 0), next_code);
8245 int width = GET_MODE_PRECISION (inner_mode)
8246 - INTVAL (XEXP (inner, 1));
8247 if (width > mode_width)
8248 width = mode_width;
8249 new_rtx = make_extraction (mode, new_rtx, 0, XEXP (inner, 1),
8250 width, 1, 0, in_code == COMPARE);
8251 break;
8252 }
8253
8254 /* If in_code is COMPARE, it isn't always safe to pass it through
8255 to the recursive make_compound_operation call. */
8256 if (subreg_code == COMPARE
8257 && (!subreg_lowpart_p (x)
8258 || GET_CODE (inner) == SUBREG
8259 /* (subreg:SI (and:DI (reg:DI) (const_int 0x800000000)) 0)
8260 is (const_int 0), rather than
8261 (subreg:SI (lshiftrt:DI (reg:DI) (const_int 35)) 0).
8262 Similarly (subreg:QI (and:SI (reg:SI) (const_int 0x80)) 0)
8263 for non-equality comparisons against 0 is not equivalent
8264 to (subreg:QI (lshiftrt:SI (reg:SI) (const_int 7)) 0). */
8265 || (GET_CODE (inner) == AND
8266 && CONST_INT_P (XEXP (inner, 1))
8267 && partial_subreg_p (x)
8268 && exact_log2 (UINTVAL (XEXP (inner, 1)))
8269 >= GET_MODE_BITSIZE (mode) - 1)))
8270 subreg_code = SET;
8271
8272 tem = make_compound_operation (inner, subreg_code);
8273
8274 simplified
8275 = simplify_subreg (mode, tem, GET_MODE (inner), SUBREG_BYTE (x));
8276 if (simplified)
8277 tem = simplified;
8278
8279 if (GET_CODE (tem) != GET_CODE (inner)
8280 && partial_subreg_p (x)
8281 && subreg_lowpart_p (x))
8282 {
8283 rtx newer
8284 = force_to_mode (tem, mode, HOST_WIDE_INT_M1U, 0);
8285
8286 /* If we have something other than a SUBREG, we might have
8287 done an expansion, so rerun ourselves. */
8288 if (GET_CODE (newer) != SUBREG)
8289 newer = make_compound_operation (newer, in_code);
8290
8291 /* force_to_mode can expand compounds. If it just re-expanded
8292 the compound, use gen_lowpart to convert to the desired
8293 mode. */
8294 if (rtx_equal_p (newer, x)
8295 /* Likewise if it re-expanded the compound only partially.
8296 This happens for SUBREG of ZERO_EXTRACT if they extract
8297 the same number of bits. */
8298 || (GET_CODE (newer) == SUBREG
8299 && (GET_CODE (SUBREG_REG (newer)) == LSHIFTRT
8300 || GET_CODE (SUBREG_REG (newer)) == ASHIFTRT)
8301 && GET_CODE (inner) == AND
8302 && rtx_equal_p (SUBREG_REG (newer), XEXP (inner, 0))))
8303 return gen_lowpart (GET_MODE (x), tem);
8304
8305 return newer;
8306 }
8307
8308 if (simplified)
8309 return tem;
8310 }
8311 break;
8312
8313 default:
8314 break;
8315 }
8316
8317 if (new_rtx)
8318 *x_ptr = gen_lowpart (mode, new_rtx);
8319 *next_code_ptr = next_code;
8320 return NULL_RTX;
8321 }
8322
8323 /* Look at the expression rooted at X. Look for expressions
8324 equivalent to ZERO_EXTRACT, SIGN_EXTRACT, ZERO_EXTEND, SIGN_EXTEND.
8325 Form these expressions.
8326
8327 Return the new rtx, usually just X.
8328
8329 Also, for machines like the VAX that don't have logical shift insns,
8330 try to convert logical to arithmetic shift operations in cases where
8331 they are equivalent. This undoes the canonicalizations to logical
8332 shifts done elsewhere.
8333
8334 We try, as much as possible, to re-use rtl expressions to save memory.
8335
8336 IN_CODE says what kind of expression we are processing. Normally, it is
8337 SET. In a memory address it is MEM. When processing the arguments of
8338 a comparison or a COMPARE against zero, it is COMPARE, or EQ if more
8339 precisely it is an equality comparison against zero. */
8340
8341 rtx
8342 make_compound_operation (rtx x, enum rtx_code in_code)
8343 {
8344 enum rtx_code code = GET_CODE (x);
8345 const char *fmt;
8346 int i, j;
8347 enum rtx_code next_code;
8348 rtx new_rtx, tem;
8349
8350 /* Select the code to be used in recursive calls. Once we are inside an
8351 address, we stay there. If we have a comparison, set to COMPARE,
8352 but once inside, go back to our default of SET. */
8353
8354 next_code = (code == MEM ? MEM
8355 : ((code == COMPARE || COMPARISON_P (x))
8356 && XEXP (x, 1) == const0_rtx) ? COMPARE
8357 : in_code == COMPARE || in_code == EQ ? SET : in_code);
8358
8359 scalar_int_mode mode;
8360 if (is_a <scalar_int_mode> (GET_MODE (x), &mode))
8361 {
8362 rtx new_rtx = make_compound_operation_int (mode, &x, in_code,
8363 &next_code);
8364 if (new_rtx)
8365 return new_rtx;
8366 code = GET_CODE (x);
8367 }
8368
8369 /* Now recursively process each operand of this operation. We need to
8370 handle ZERO_EXTEND specially so that we don't lose track of the
8371 inner mode. */
8372 if (code == ZERO_EXTEND)
8373 {
8374 new_rtx = make_compound_operation (XEXP (x, 0), next_code);
8375 tem = simplify_const_unary_operation (ZERO_EXTEND, GET_MODE (x),
8376 new_rtx, GET_MODE (XEXP (x, 0)));
8377 if (tem)
8378 return tem;
8379 SUBST (XEXP (x, 0), new_rtx);
8380 return x;
8381 }
8382
8383 fmt = GET_RTX_FORMAT (code);
8384 for (i = 0; i < GET_RTX_LENGTH (code); i++)
8385 if (fmt[i] == 'e')
8386 {
8387 new_rtx = make_compound_operation (XEXP (x, i), next_code);
8388 SUBST (XEXP (x, i), new_rtx);
8389 }
8390 else if (fmt[i] == 'E')
8391 for (j = 0; j < XVECLEN (x, i); j++)
8392 {
8393 new_rtx = make_compound_operation (XVECEXP (x, i, j), next_code);
8394 SUBST (XVECEXP (x, i, j), new_rtx);
8395 }
8396
8397 maybe_swap_commutative_operands (x);
8398 return x;
8399 }
8400 \f
8401 /* Given M see if it is a value that would select a field of bits
8402 within an item, but not the entire word. Return -1 if not.
8403 Otherwise, return the starting position of the field, where 0 is the
8404 low-order bit.
8405
8406 *PLEN is set to the length of the field. */
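/* A small worked example: a mask of 0x0ff0 has its first 1 bit at position 4,
and (0x0ff0 >> 4) + 1 == 0x100 is a power of two, so the function returns 4
with *PLEN set to 8.  A mask such as 0x0f0f does not select one contiguous
field, so it returns -1.  */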
8407
8408 static int
8409 get_pos_from_mask (unsigned HOST_WIDE_INT m, unsigned HOST_WIDE_INT *plen)
8410 {
8411 /* Get the bit number of the first 1 bit from the right, -1 if none. */
8412 int pos = m ? ctz_hwi (m) : -1;
8413 int len = 0;
8414
8415 if (pos >= 0)
8416 /* Now shift off the low-order zero bits and see if we have a
8417 power of two minus 1. */
8418 len = exact_log2 ((m >> pos) + 1);
8419
8420 if (len <= 0)
8421 pos = -1;
8422
8423 *plen = len;
8424 return pos;
8425 }
8426 \f
8427 /* If X refers to a register that equals REG in value, replace these
8428 references with REG. */
8429 static rtx
8430 canon_reg_for_combine (rtx x, rtx reg)
8431 {
8432 rtx op0, op1, op2;
8433 const char *fmt;
8434 int i;
8435 bool copied;
8436
8437 enum rtx_code code = GET_CODE (x);
8438 switch (GET_RTX_CLASS (code))
8439 {
8440 case RTX_UNARY:
8441 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8442 if (op0 != XEXP (x, 0))
8443 return simplify_gen_unary (GET_CODE (x), GET_MODE (x), op0,
8444 GET_MODE (reg));
8445 break;
8446
8447 case RTX_BIN_ARITH:
8448 case RTX_COMM_ARITH:
8449 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8450 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8451 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8452 return simplify_gen_binary (GET_CODE (x), GET_MODE (x), op0, op1);
8453 break;
8454
8455 case RTX_COMPARE:
8456 case RTX_COMM_COMPARE:
8457 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8458 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8459 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8460 return simplify_gen_relational (GET_CODE (x), GET_MODE (x),
8461 GET_MODE (op0), op0, op1);
8462 break;
8463
8464 case RTX_TERNARY:
8465 case RTX_BITFIELD_OPS:
8466 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8467 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8468 op2 = canon_reg_for_combine (XEXP (x, 2), reg);
8469 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1) || op2 != XEXP (x, 2))
8470 return simplify_gen_ternary (GET_CODE (x), GET_MODE (x),
8471 GET_MODE (op0), op0, op1, op2);
8472 /* FALLTHRU */
8473
8474 case RTX_OBJ:
8475 if (REG_P (x))
8476 {
8477 if (rtx_equal_p (get_last_value (reg), x)
8478 || rtx_equal_p (reg, get_last_value (x)))
8479 return reg;
8480 else
8481 break;
8482 }
8483
8484 /* fall through */
8485
8486 default:
8487 fmt = GET_RTX_FORMAT (code);
8488 copied = false;
8489 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
8490 if (fmt[i] == 'e')
8491 {
8492 rtx op = canon_reg_for_combine (XEXP (x, i), reg);
8493 if (op != XEXP (x, i))
8494 {
8495 if (!copied)
8496 {
8497 copied = true;
8498 x = copy_rtx (x);
8499 }
8500 XEXP (x, i) = op;
8501 }
8502 }
8503 else if (fmt[i] == 'E')
8504 {
8505 int j;
8506 for (j = 0; j < XVECLEN (x, i); j++)
8507 {
8508 rtx op = canon_reg_for_combine (XVECEXP (x, i, j), reg);
8509 if (op != XVECEXP (x, i, j))
8510 {
8511 if (!copied)
8512 {
8513 copied = true;
8514 x = copy_rtx (x);
8515 }
8516 XVECEXP (x, i, j) = op;
8517 }
8518 }
8519 }
8520
8521 break;
8522 }
8523
8524 return x;
8525 }
8526
8527 /* Return X converted to MODE. If the value is already truncated to
8528 MODE we can just return a subreg even though in the general case we
8529 would need an explicit truncation. */
8530
8531 static rtx
8532 gen_lowpart_or_truncate (machine_mode mode, rtx x)
8533 {
8534 if (!CONST_INT_P (x)
8535 && partial_subreg_p (mode, GET_MODE (x))
8536 && !TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (x))
8537 && !(REG_P (x) && reg_truncated_to_mode (mode, x)))
8538 {
8539 /* Bit-cast X into an integer mode. */
8540 if (!SCALAR_INT_MODE_P (GET_MODE (x)))
8541 x = gen_lowpart (int_mode_for_mode (GET_MODE (x)).require (), x);
8542 x = simplify_gen_unary (TRUNCATE, int_mode_for_mode (mode).require (),
8543 x, GET_MODE (x));
8544 }
8545
8546 return gen_lowpart (mode, x);
8547 }
8548
8549 /* See if X can be simplified knowing that we will only refer to it in
8550 MODE and will only refer to those bits that are nonzero in MASK.
8551 If other bits are being computed or if masking operations are done
8552 that select a superset of the bits in MASK, they can sometimes be
8553 ignored.
8554
8555 Return a possibly simplified expression, but always convert X to
8556 MODE. If X is a CONST_INT, AND the CONST_INT with MASK.
8557
8558 If JUST_SELECT is nonzero, don't optimize by noticing that bits in MASK
8559 are all off in X. This is used when X will be complemented, by either
8560 NOT, NEG, or XOR. */
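/* For example, with MODE == QImode and MASK == 0x0f, passing (const_int 0x1234)
through this function yields (const_int 4): the constant is ANDed with MASK
and converted to MODE.  */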
8561
8562 static rtx
8563 force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask,
8564 int just_select)
8565 {
8566 enum rtx_code code = GET_CODE (x);
8567 int next_select = just_select || code == XOR || code == NOT || code == NEG;
8568 machine_mode op_mode;
8569 unsigned HOST_WIDE_INT nonzero;
8570
8571 /* If this is a CALL or ASM_OPERANDS, don't do anything. Some of the
8572 code below will do the wrong thing since the mode of such an
8573 expression is VOIDmode.
8574
8575 Also do nothing if X is a CLOBBER; this can happen if X was
8576 the return value from a call to gen_lowpart. */
8577 if (code == CALL || code == ASM_OPERANDS || code == CLOBBER)
8578 return x;
8579
8580 /* We want to perform the operation in its present mode unless we know
8581 that the operation is valid in MODE, in which case we do the operation
8582 in MODE. */
8583 op_mode = ((GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (x))
8584 && have_insn_for (code, mode))
8585 ? mode : GET_MODE (x));
8586
8587 /* It is not valid to do a right-shift in a narrower mode
8588 than the one it came in with. */
8589 if ((code == LSHIFTRT || code == ASHIFTRT)
8590 && partial_subreg_p (mode, GET_MODE (x)))
8591 op_mode = GET_MODE (x);
8592
8593 /* Truncate MASK to fit OP_MODE. */
8594 if (op_mode)
8595 mask &= GET_MODE_MASK (op_mode);
8596
8597 /* Determine what bits of X are guaranteed to be (non)zero. */
8598 nonzero = nonzero_bits (x, mode);
8599
8600 /* If none of the bits in X are needed, return a zero. */
8601 if (!just_select && (nonzero & mask) == 0 && !side_effects_p (x))
8602 x = const0_rtx;
8603
8604 /* If X is a CONST_INT, return a new one. Do this here since the
8605 test below will fail. */
8606 if (CONST_INT_P (x))
8607 {
8608 if (SCALAR_INT_MODE_P (mode))
8609 return gen_int_mode (INTVAL (x) & mask, mode);
8610 else
8611 {
8612 x = GEN_INT (INTVAL (x) & mask);
8613 return gen_lowpart_common (mode, x);
8614 }
8615 }
8616
8617 /* If X is narrower than MODE and we want all the bits in X's mode, just
8618 get X in the proper mode. */
8619 if (paradoxical_subreg_p (mode, GET_MODE (x))
8620 && (GET_MODE_MASK (GET_MODE (x)) & ~mask) == 0)
8621 return gen_lowpart (mode, x);
8622
8623 /* We can ignore the effect of a SUBREG if it narrows the mode or
8624 if the constant masks to zero all the bits the mode doesn't have. */
8625 if (GET_CODE (x) == SUBREG
8626 && subreg_lowpart_p (x)
8627 && (partial_subreg_p (x)
8628 || (0 == (mask
8629 & GET_MODE_MASK (GET_MODE (x))
8630 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x)))))))
8631 return force_to_mode (SUBREG_REG (x), mode, mask, next_select);
8632
8633 scalar_int_mode int_mode, xmode;
8634 if (is_a <scalar_int_mode> (mode, &int_mode)
8635 && is_a <scalar_int_mode> (GET_MODE (x), &xmode))
8636 /* OP_MODE is either MODE or XMODE, so it must be a scalar
8637 integer too. */
8638 return force_int_to_mode (x, int_mode, xmode,
8639 as_a <scalar_int_mode> (op_mode),
8640 mask, just_select);
8641
8642 return gen_lowpart_or_truncate (mode, x);
8643 }
8644
8645 /* Subroutine of force_to_mode that handles cases in which both X and
8646 the result are scalar integers. MODE is the mode of the result,
8647 XMODE is the mode of X, and OP_MODE says which of MODE or XMODE
8648 is preferred for simplified versions of X. The other arguments
8649 are as for force_to_mode. */
8650
8651 static rtx
8652 force_int_to_mode (rtx x, scalar_int_mode mode, scalar_int_mode xmode,
8653 scalar_int_mode op_mode, unsigned HOST_WIDE_INT mask,
8654 int just_select)
8655 {
8656 enum rtx_code code = GET_CODE (x);
8657 int next_select = just_select || code == XOR || code == NOT || code == NEG;
8658 unsigned HOST_WIDE_INT fuller_mask;
8659 rtx op0, op1, temp;
8660
8661 /* When we have an arithmetic operation, or a shift whose count we
8662 do not know, we need to assume that all bits up to the highest-order
8663 bit in MASK will be needed. This is how we form such a mask. */
8664 if (mask & (HOST_WIDE_INT_1U << (HOST_BITS_PER_WIDE_INT - 1)))
8665 fuller_mask = HOST_WIDE_INT_M1U;
8666 else
8667 fuller_mask = ((HOST_WIDE_INT_1U << (floor_log2 (mask) + 1))
8668 - 1);
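/* For example, a MASK of 0x30 gives a FULLER_MASK of 0x3f: a carry out of the
low-order bits can change bits 4 and 5, so for arithmetic operations every
bit below the highest bit of MASK must be treated as needed.  */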
8669
8670 switch (code)
8671 {
8672 case CLOBBER:
8673 /* If X is a (clobber (const_int)), return it since we know we are
8674 generating something that won't match. */
8675 return x;
8676
8677 case SIGN_EXTEND:
8678 case ZERO_EXTEND:
8679 case ZERO_EXTRACT:
8680 case SIGN_EXTRACT:
8681 x = expand_compound_operation (x);
8682 if (GET_CODE (x) != code)
8683 return force_to_mode (x, mode, mask, next_select);
8684 break;
8685
8686 case TRUNCATE:
8687 /* Similarly for a truncate. */
8688 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
8689
8690 case AND:
8691 /* If this is an AND with a constant, convert it into an AND
8692 whose constant is the AND of that constant with MASK. If it
8693 remains an AND of MASK, delete it since it is redundant. */
8694
8695 if (CONST_INT_P (XEXP (x, 1)))
8696 {
8697 x = simplify_and_const_int (x, op_mode, XEXP (x, 0),
8698 mask & INTVAL (XEXP (x, 1)));
8699 xmode = op_mode;
8700
8701 /* If X is still an AND, see if it is an AND with a mask that
8702 is just some low-order bits. If so, and it is MASK, we don't
8703 need it. */
8704
8705 if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
8706 && (INTVAL (XEXP (x, 1)) & GET_MODE_MASK (xmode)) == mask)
8707 x = XEXP (x, 0);
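/* E.g. forcing (and FOO (const_int 255)) with a MASK of 0x0f typically narrows
the constant to 0x0f first; that now equals MASK, so the AND is dropped here
and FOO itself is used.  */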
8708
8709 /* If it remains an AND, try making another AND with the bits
8710 in the mode mask that aren't in MASK turned on. If the
8711 constant in the AND is wide enough, this might make a
8712 cheaper constant. */
8713
8714 if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
8715 && GET_MODE_MASK (xmode) != mask
8716 && HWI_COMPUTABLE_MODE_P (xmode))
8717 {
8718 unsigned HOST_WIDE_INT cval
8719 = UINTVAL (XEXP (x, 1)) | (GET_MODE_MASK (xmode) & ~mask);
8720 rtx y;
8721
8722 y = simplify_gen_binary (AND, xmode, XEXP (x, 0),
8723 gen_int_mode (cval, xmode));
8724 if (set_src_cost (y, xmode, optimize_this_for_speed_p)
8725 < set_src_cost (x, xmode, optimize_this_for_speed_p))
8726 x = y;
8727 }
8728
8729 break;
8730 }
8731
8732 goto binop;
8733
8734 case PLUS:
8735 /* In (and (plus FOO C1) M), if M is a mask that just turns off
8736 low-order bits (as in an alignment operation) and FOO is already
8737 aligned to that boundary, mask C1 to that boundary as well.
8738 This may eliminate that PLUS and, later, the AND. */
8739
8740 {
8741 unsigned int width = GET_MODE_PRECISION (mode);
8742 unsigned HOST_WIDE_INT smask = mask;
8743
8744 /* If MODE is narrower than HOST_WIDE_INT and mask is a negative
8745 number, sign extend it. */
8746
8747 if (width < HOST_BITS_PER_WIDE_INT
8748 && (smask & (HOST_WIDE_INT_1U << (width - 1))) != 0)
8749 smask |= HOST_WIDE_INT_M1U << width;
8750
8751 if (CONST_INT_P (XEXP (x, 1))
8752 && pow2p_hwi (- smask)
8753 && (nonzero_bits (XEXP (x, 0), mode) & ~smask) == 0
8754 && (INTVAL (XEXP (x, 1)) & ~smask) != 0)
8755 return force_to_mode (plus_constant (xmode, XEXP (x, 0),
8756 (INTVAL (XEXP (x, 1)) & smask)),
8757 mode, smask, next_select);
8758 }
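/* Concretely: in (and (plus FOO (const_int 7)) (const_int -8)), if the low
three bits of FOO are already known to be zero, the added 7 cannot reach any
bit selected by the mask, so FOO replaces the PLUS here and the enclosing
AND may later be removed as well.  */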
8759
8760 /* fall through */
8761
8762 case MULT:
8763 /* Substituting into the operands of a widening MULT is not likely to
8764 create RTL matching a machine insn. */
8765 if (code == MULT
8766 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
8767 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
8768 && (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND
8769 || GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)
8770 && REG_P (XEXP (XEXP (x, 0), 0))
8771 && REG_P (XEXP (XEXP (x, 1), 0)))
8772 return gen_lowpart_or_truncate (mode, x);
8773
8774 /* For PLUS, MINUS and MULT, we need any bits less significant than the
8775 most significant bit in MASK since carries from those bits will
8776 affect the bits we are interested in. */
8777 mask = fuller_mask;
8778 goto binop;
8779
8780 case MINUS:
8781 /* If X is (minus C Y) where C's least set bit is larger than any bit
8782 in the mask, then we may replace with (neg Y). */
8783 if (CONST_INT_P (XEXP (x, 0))
8784 && least_bit_hwi (UINTVAL (XEXP (x, 0))) > mask)
8785 {
8786 x = simplify_gen_unary (NEG, xmode, XEXP (x, 1), xmode);
8787 return force_to_mode (x, mode, mask, next_select);
8788 }
8789
8790 /* Similarly, if C contains every bit in the fuller_mask, then we may
8791 replace with (not Y). */
8792 if (CONST_INT_P (XEXP (x, 0))
8793 && ((UINTVAL (XEXP (x, 0)) | fuller_mask) == UINTVAL (XEXP (x, 0))))
8794 {
8795 x = simplify_gen_unary (NOT, xmode, XEXP (x, 1), xmode);
8796 return force_to_mode (x, mode, mask, next_select);
8797 }
8798
8799 mask = fuller_mask;
8800 goto binop;
8801
8802 case IOR:
8803 case XOR:
8804 /* If X is (ior (lshiftrt FOO C1) C2), try to commute the IOR and
8805 LSHIFTRT so we end up with an (and (lshiftrt (ior ...) ...) ...)
8806 operation which may be a bitfield extraction. Ensure that the
8807 constant we form is not wider than the mode of X. */
8808
8809 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8810 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8811 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
8812 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
8813 && CONST_INT_P (XEXP (x, 1))
8814 && ((INTVAL (XEXP (XEXP (x, 0), 1))
8815 + floor_log2 (INTVAL (XEXP (x, 1))))
8816 < GET_MODE_PRECISION (xmode))
8817 && (UINTVAL (XEXP (x, 1))
8818 & ~nonzero_bits (XEXP (x, 0), xmode)) == 0)
8819 {
8820 temp = gen_int_mode ((INTVAL (XEXP (x, 1)) & mask)
8821 << INTVAL (XEXP (XEXP (x, 0), 1)),
8822 xmode);
8823 temp = simplify_gen_binary (GET_CODE (x), xmode,
8824 XEXP (XEXP (x, 0), 0), temp);
8825 x = simplify_gen_binary (LSHIFTRT, xmode, temp,
8826 XEXP (XEXP (x, 0), 1));
8827 return force_to_mode (x, mode, mask, next_select);
8828 }
8829
8830 binop:
8831 /* For most binary operations, just propagate into the operation and
8832 change the mode if we have an operation of that mode. */
8833
8834 op0 = force_to_mode (XEXP (x, 0), mode, mask, next_select);
8835 op1 = force_to_mode (XEXP (x, 1), mode, mask, next_select);
8836
8837 /* If we ended up truncating both operands, truncate the result of the
8838 operation instead. */
8839 if (GET_CODE (op0) == TRUNCATE
8840 && GET_CODE (op1) == TRUNCATE)
8841 {
8842 op0 = XEXP (op0, 0);
8843 op1 = XEXP (op1, 0);
8844 }
8845
8846 op0 = gen_lowpart_or_truncate (op_mode, op0);
8847 op1 = gen_lowpart_or_truncate (op_mode, op1);
8848
8849 if (op_mode != xmode || op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8850 {
8851 x = simplify_gen_binary (code, op_mode, op0, op1);
8852 xmode = op_mode;
8853 }
8854 break;
8855
8856 case ASHIFT:
8857 /* For left shifts, do the same, but just for the first operand.
8858 However, we cannot do anything with shifts where we cannot
8859 guarantee that the counts are smaller than the size of the mode
8860 because such a count will have a different meaning in a
8861 wider mode. */
8862
8863 if (! (CONST_INT_P (XEXP (x, 1))
8864 && INTVAL (XEXP (x, 1)) >= 0
8865 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (mode))
8866 && ! (GET_MODE (XEXP (x, 1)) != VOIDmode
8867 && (nonzero_bits (XEXP (x, 1), GET_MODE (XEXP (x, 1)))
8868 < (unsigned HOST_WIDE_INT) GET_MODE_PRECISION (mode))))
8869 break;
8870
8871 /* If the shift count is a constant and we can do arithmetic in
8872 the mode of the shift, refine which bits we need. Otherwise, use the
8873 conservative form of the mask. */
8874 if (CONST_INT_P (XEXP (x, 1))
8875 && INTVAL (XEXP (x, 1)) >= 0
8876 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (op_mode)
8877 && HWI_COMPUTABLE_MODE_P (op_mode))
8878 mask >>= INTVAL (XEXP (x, 1));
8879 else
8880 mask = fuller_mask;
8881
8882 op0 = gen_lowpart_or_truncate (op_mode,
8883 force_to_mode (XEXP (x, 0), op_mode,
8884 mask, next_select));
8885
8886 if (op_mode != xmode || op0 != XEXP (x, 0))
8887 {
8888 x = simplify_gen_binary (code, op_mode, op0, XEXP (x, 1));
8889 xmode = op_mode;
8890 }
8891 break;
8892
8893 case LSHIFTRT:
8894 /* Here we can only do something if the shift count is a constant,
8895 this shift constant is valid for the host, and we can do arithmetic
8896 in OP_MODE. */
8897
8898 if (CONST_INT_P (XEXP (x, 1))
8899 && INTVAL (XEXP (x, 1)) >= 0
8900 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
8901 && HWI_COMPUTABLE_MODE_P (op_mode))
8902 {
8903 rtx inner = XEXP (x, 0);
8904 unsigned HOST_WIDE_INT inner_mask;
8905
8906 /* Select the mask of the bits we need for the shift operand. */
8907 inner_mask = mask << INTVAL (XEXP (x, 1));
8908
8909 /* We can only change the mode of the shift if we can do arithmetic
8910 in the mode of the shift and INNER_MASK is no wider than the
8911 width of X's mode. */
8912 if ((inner_mask & ~GET_MODE_MASK (xmode)) != 0)
8913 op_mode = xmode;
8914
8915 inner = force_to_mode (inner, op_mode, inner_mask, next_select);
8916
8917 if (xmode != op_mode || inner != XEXP (x, 0))
8918 {
8919 x = simplify_gen_binary (LSHIFTRT, op_mode, inner, XEXP (x, 1));
8920 xmode = op_mode;
8921 }
8922 }
8923
8924 /* If we have (and (lshiftrt FOO C1) C2) where the combination of the
8925 shift and AND produces only copies of the sign bit (C2 is one less
8926 than a power of two), we can do this with just a shift. */
8927
8928 if (GET_CODE (x) == LSHIFTRT
8929 && CONST_INT_P (XEXP (x, 1))
8930 /* The shift puts one of the sign bit copies in the least significant
8931 bit. */
8932 && ((INTVAL (XEXP (x, 1))
8933 + num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
8934 >= GET_MODE_PRECISION (xmode))
8935 && pow2p_hwi (mask + 1)
8936 /* Number of bits left after the shift must be more than the mask
8937 needs. */
8938 && ((INTVAL (XEXP (x, 1)) + exact_log2 (mask + 1))
8939 <= GET_MODE_PRECISION (xmode))
8940 /* Must be more sign bit copies than the mask needs. */
8941 && ((int) num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
8942 >= exact_log2 (mask + 1)))
8943 x = simplify_gen_binary (LSHIFTRT, xmode, XEXP (x, 0),
8944 GEN_INT (GET_MODE_PRECISION (xmode)
8945 - exact_log2 (mask + 1)));
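/* As an illustration, in SImode with FOO known to have at least four sign-bit
copies, (lshiftrt FOO (const_int 28)) under a MASK of 7 satisfies all of the
tests above and becomes (lshiftrt FOO (const_int 29)): the three surviving
bits are copies of the sign bit either way, so no AND is needed afterwards.  */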
8946
8947 goto shiftrt;
8948
8949 case ASHIFTRT:
8950 /* If we are just looking for the sign bit, we don't need this shift at
8951 all, even if it has a variable count. */
8952 if (val_signbit_p (xmode, mask))
8953 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
8954
8955 /* If this is a shift by a constant, get a mask that contains those bits
8956 that are not copies of the sign bit. We then have two cases: If
8957 MASK only includes those bits, this can be a logical shift, which may
8958 allow simplifications. If MASK is a single-bit field not within
8959 those bits, we are requesting a copy of the sign bit and hence can
8960 shift the sign bit to the appropriate location. */
8961
8962 if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) >= 0
8963 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
8964 {
8965 unsigned HOST_WIDE_INT nonzero;
8966 int i;
8967
8968 /* If the considered data is wider than HOST_WIDE_INT, we can't
8969 represent a mask for all its bits in a single scalar.
8970 But we only care about the lower bits, so calculate these. */
8971
8972 if (GET_MODE_PRECISION (xmode) > HOST_BITS_PER_WIDE_INT)
8973 {
8974 nonzero = HOST_WIDE_INT_M1U;
8975
8976 /* GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1))
8977 is the number of bits a full-width mask would have set.
8978 We need only shift if these are fewer than nonzero can
8979 hold. If not, we must keep all bits set in nonzero. */
8980
8981 if (GET_MODE_PRECISION (xmode) - INTVAL (XEXP (x, 1))
8982 < HOST_BITS_PER_WIDE_INT)
8983 nonzero >>= INTVAL (XEXP (x, 1))
8984 + HOST_BITS_PER_WIDE_INT
8985 - GET_MODE_PRECISION (xmode);
8986 }
8987 else
8988 {
8989 nonzero = GET_MODE_MASK (xmode);
8990 nonzero >>= INTVAL (XEXP (x, 1));
8991 }
8992
8993 if ((mask & ~nonzero) == 0)
8994 {
8995 x = simplify_shift_const (NULL_RTX, LSHIFTRT, xmode,
8996 XEXP (x, 0), INTVAL (XEXP (x, 1)));
8997 if (GET_CODE (x) != ASHIFTRT)
8998 return force_to_mode (x, mode, mask, next_select);
8999 }
9000
9001 else if ((i = exact_log2 (mask)) >= 0)
9002 {
9003 x = simplify_shift_const
9004 (NULL_RTX, LSHIFTRT, xmode, XEXP (x, 0),
9005 GET_MODE_PRECISION (xmode) - 1 - i);
9006
9007 if (GET_CODE (x) != ASHIFTRT)
9008 return force_to_mode (x, mode, mask, next_select);
9009 }
9010 }
9011
9012 /* If MASK is 1, convert this to an LSHIFTRT. This can be done
9013 even if the shift count isn't a constant. */
9014 if (mask == 1)
9015 x = simplify_gen_binary (LSHIFTRT, xmode, XEXP (x, 0), XEXP (x, 1));
9016
9017 shiftrt:
9018
9019 /* If this is a zero- or sign-extension operation that just affects bits
9020 we don't care about, remove it. Be sure the call above returned
9021 something that is still a shift. */
9022
9023 if ((GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ASHIFTRT)
9024 && CONST_INT_P (XEXP (x, 1))
9025 && INTVAL (XEXP (x, 1)) >= 0
9026 && (INTVAL (XEXP (x, 1))
9027 <= GET_MODE_PRECISION (xmode) - (floor_log2 (mask) + 1))
9028 && GET_CODE (XEXP (x, 0)) == ASHIFT
9029 && XEXP (XEXP (x, 0), 1) == XEXP (x, 1))
9030 return force_to_mode (XEXP (XEXP (x, 0), 0), mode, mask,
9031 next_select);
9032
9033 break;
9034
9035 case ROTATE:
9036 case ROTATERT:
9037 /* If the shift count is constant and we can do computations
9038 in the mode of X, compute where the bits we care about are.
9039 Otherwise, we can't do anything. Don't change the mode of
9040 the shift or propagate MODE into the shift, though. */
9041 if (CONST_INT_P (XEXP (x, 1))
9042 && INTVAL (XEXP (x, 1)) >= 0)
9043 {
9044 temp = simplify_binary_operation (code == ROTATE ? ROTATERT : ROTATE,
9045 xmode, gen_int_mode (mask, xmode),
9046 XEXP (x, 1));
9047 if (temp && CONST_INT_P (temp))
9048 x = simplify_gen_binary (code, xmode,
9049 force_to_mode (XEXP (x, 0), xmode,
9050 INTVAL (temp), next_select),
9051 XEXP (x, 1));
9052 }
9053 break;
9054
9055 case NEG:
9056 /* If we just want the low-order bit, the NEG isn't needed since it
9057 won't change the low-order bit. */
9058 if (mask == 1)
9059 return force_to_mode (XEXP (x, 0), mode, mask, just_select);
9060
9061 /* We need any bits less significant than the most significant bit in
9062 MASK since carries from those bits will affect the bits we are
9063 interested in. */
9064 mask = fuller_mask;
9065 goto unop;
9066
9067 case NOT:
9068 /* (not FOO) is (xor FOO CONST), so if FOO is an LSHIFTRT, we can do the
9069 same as the XOR case above. Ensure that the constant we form is not
9070 wider than the mode of X. */
9071
9072 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
9073 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
9074 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
9075 && (INTVAL (XEXP (XEXP (x, 0), 1)) + floor_log2 (mask)
9076 < GET_MODE_PRECISION (xmode))
9077 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT)
9078 {
9079 temp = gen_int_mode (mask << INTVAL (XEXP (XEXP (x, 0), 1)), xmode);
9080 temp = simplify_gen_binary (XOR, xmode, XEXP (XEXP (x, 0), 0), temp);
9081 x = simplify_gen_binary (LSHIFTRT, xmode,
9082 temp, XEXP (XEXP (x, 0), 1));
9083
9084 return force_to_mode (x, mode, mask, next_select);
9085 }
9086
9087 /* (and (not FOO) CONST) is (not (or FOO (not CONST))), so we must
9088 use the full mask inside the NOT. */
9089 mask = fuller_mask;
9090
9091 unop:
9092 op0 = gen_lowpart_or_truncate (op_mode,
9093 force_to_mode (XEXP (x, 0), mode, mask,
9094 next_select));
9095 if (op_mode != xmode || op0 != XEXP (x, 0))
9096 {
9097 x = simplify_gen_unary (code, op_mode, op0, op_mode);
9098 xmode = op_mode;
9099 }
9100 break;
9101
9102 case NE:
9103 /* (and (ne FOO 0) CONST) can be (and FOO CONST) if CONST is included
9104 in STORE_FLAG_VALUE and FOO has a single bit that might be nonzero,
9105 which is equal to STORE_FLAG_VALUE. */
9106 if ((mask & ~STORE_FLAG_VALUE) == 0
9107 && XEXP (x, 1) == const0_rtx
9108 && GET_MODE (XEXP (x, 0)) == mode
9109 && pow2p_hwi (nonzero_bits (XEXP (x, 0), mode))
9110 && (nonzero_bits (XEXP (x, 0), mode)
9111 == (unsigned HOST_WIDE_INT) STORE_FLAG_VALUE))
9112 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
9113
9114 break;
9115
9116 case IF_THEN_ELSE:
9117 /* We have no way of knowing if the IF_THEN_ELSE can itself be
9118 written in a narrower mode. We play it safe and do not do so. */
9119
9120 op0 = gen_lowpart_or_truncate (xmode,
9121 force_to_mode (XEXP (x, 1), mode,
9122 mask, next_select));
9123 op1 = gen_lowpart_or_truncate (xmode,
9124 force_to_mode (XEXP (x, 2), mode,
9125 mask, next_select));
9126 if (op0 != XEXP (x, 1) || op1 != XEXP (x, 2))
9127 x = simplify_gen_ternary (IF_THEN_ELSE, xmode,
9128 GET_MODE (XEXP (x, 0)), XEXP (x, 0),
9129 op0, op1);
9130 break;
9131
9132 default:
9133 break;
9134 }
9135
9136 /* Ensure we return a value of the proper mode. */
9137 return gen_lowpart_or_truncate (mode, x);
9138 }
9139 \f
9140 /* Return nonzero if X is an expression that has one of two values depending on
9141 whether some other value is zero or nonzero. In that case, we return the
9142 value that is being tested, *PTRUE is set to the value if the rtx being
9143 returned has a nonzero value, and *PFALSE is set to the other alternative.
9144
9145 If we return zero, we set *PTRUE and *PFALSE to X. */
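/* For example, given (ne (reg 100) (const_int 0)) this returns (reg 100) with
*PTRUE = const_true_rtx and *PFALSE = const0_rtx; given a value known to be
either 0 or -1, it returns the value itself with *PTRUE = constm1_rtx and
*PFALSE = const0_rtx.  */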
9146
9147 static rtx
9148 if_then_else_cond (rtx x, rtx *ptrue, rtx *pfalse)
9149 {
9150 machine_mode mode = GET_MODE (x);
9151 enum rtx_code code = GET_CODE (x);
9152 rtx cond0, cond1, true0, true1, false0, false1;
9153 unsigned HOST_WIDE_INT nz;
9154 scalar_int_mode int_mode;
9155
9156 /* If we are comparing a value against zero, we are done. */
9157 if ((code == NE || code == EQ)
9158 && XEXP (x, 1) == const0_rtx)
9159 {
9160 *ptrue = (code == NE) ? const_true_rtx : const0_rtx;
9161 *pfalse = (code == NE) ? const0_rtx : const_true_rtx;
9162 return XEXP (x, 0);
9163 }
9164
9165 /* If this is a unary operation whose operand has one of two values, apply
9166 our opcode to compute those values. */
9167 else if (UNARY_P (x)
9168 && (cond0 = if_then_else_cond (XEXP (x, 0), &true0, &false0)) != 0)
9169 {
9170 *ptrue = simplify_gen_unary (code, mode, true0, GET_MODE (XEXP (x, 0)));
9171 *pfalse = simplify_gen_unary (code, mode, false0,
9172 GET_MODE (XEXP (x, 0)));
9173 return cond0;
9174 }
9175
9176 /* If this is a COMPARE, do nothing, since the IF_THEN_ELSE we would
9177 make can't possibly match and would suppress other optimizations. */
9178 else if (code == COMPARE)
9179 ;
9180
9181 /* If this is a binary operation, see if either side has only one of two
9182 values. If either one does or if both do and they are conditional on
9183 the same value, compute the new true and false values. */
9184 else if (BINARY_P (x))
9185 {
9186 rtx op0 = XEXP (x, 0);
9187 rtx op1 = XEXP (x, 1);
9188 cond0 = if_then_else_cond (op0, &true0, &false0);
9189 cond1 = if_then_else_cond (op1, &true1, &false1);
9190
9191 if ((cond0 != 0 && cond1 != 0 && !rtx_equal_p (cond0, cond1))
9192 && (REG_P (op0) || REG_P (op1)))
9193 {
9194 /* Try to enable a simplification by undoing work done by
9195 if_then_else_cond if it converted a REG into something more
9196 complex. */
9197 if (REG_P (op0))
9198 {
9199 cond0 = 0;
9200 true0 = false0 = op0;
9201 }
9202 else
9203 {
9204 cond1 = 0;
9205 true1 = false1 = op1;
9206 }
9207 }
9208
9209 if ((cond0 != 0 || cond1 != 0)
9210 && ! (cond0 != 0 && cond1 != 0 && !rtx_equal_p (cond0, cond1)))
9211 {
9212 /* If if_then_else_cond returned zero, then true/false are the
9213 same rtl. We must copy one of them to prevent invalid rtl
9214 sharing. */
9215 if (cond0 == 0)
9216 true0 = copy_rtx (true0);
9217 else if (cond1 == 0)
9218 true1 = copy_rtx (true1);
9219
9220 if (COMPARISON_P (x))
9221 {
9222 *ptrue = simplify_gen_relational (code, mode, VOIDmode,
9223 true0, true1);
9224 *pfalse = simplify_gen_relational (code, mode, VOIDmode,
9225 false0, false1);
9226 }
9227 else
9228 {
9229 *ptrue = simplify_gen_binary (code, mode, true0, true1);
9230 *pfalse = simplify_gen_binary (code, mode, false0, false1);
9231 }
9232
9233 return cond0 ? cond0 : cond1;
9234 }
9235
9236 /* See if we have PLUS, IOR, XOR, MINUS or UMAX, where one of the
9237 operands is zero when the other is nonzero, and vice-versa,
9238 and STORE_FLAG_VALUE is 1 or -1. */
9239
9240 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
9241 && (code == PLUS || code == IOR || code == XOR || code == MINUS
9242 || code == UMAX)
9243 && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
9244 {
9245 rtx op0 = XEXP (XEXP (x, 0), 1);
9246 rtx op1 = XEXP (XEXP (x, 1), 1);
9247
9248 cond0 = XEXP (XEXP (x, 0), 0);
9249 cond1 = XEXP (XEXP (x, 1), 0);
9250
9251 if (COMPARISON_P (cond0)
9252 && COMPARISON_P (cond1)
9253 && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
9254 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
9255 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
9256 || ((swap_condition (GET_CODE (cond0))
9257 == reversed_comparison_code (cond1, NULL))
9258 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
9259 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
9260 && ! side_effects_p (x))
9261 {
9262 *ptrue = simplify_gen_binary (MULT, mode, op0, const_true_rtx);
9263 *pfalse = simplify_gen_binary (MULT, mode,
9264 (code == MINUS
9265 ? simplify_gen_unary (NEG, mode,
9266 op1, mode)
9267 : op1),
9268 const_true_rtx);
9269 return cond0;
9270 }
9271 }
9272
9273 /* Similarly for MULT, AND and UMIN, except that for these the result
9274 is always zero. */
9275 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
9276 && (code == MULT || code == AND || code == UMIN)
9277 && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
9278 {
9279 cond0 = XEXP (XEXP (x, 0), 0);
9280 cond1 = XEXP (XEXP (x, 1), 0);
9281
9282 if (COMPARISON_P (cond0)
9283 && COMPARISON_P (cond1)
9284 && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
9285 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
9286 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
9287 || ((swap_condition (GET_CODE (cond0))
9288 == reversed_comparison_code (cond1, NULL))
9289 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
9290 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
9291 && ! side_effects_p (x))
9292 {
9293 *ptrue = *pfalse = const0_rtx;
9294 return cond0;
9295 }
9296 }
9297 }
9298
9299 else if (code == IF_THEN_ELSE)
9300 {
9301 /* If we have IF_THEN_ELSE already, extract the condition and
9302 canonicalize it if it is NE or EQ. */
9303 cond0 = XEXP (x, 0);
9304 *ptrue = XEXP (x, 1), *pfalse = XEXP (x, 2);
9305 if (GET_CODE (cond0) == NE && XEXP (cond0, 1) == const0_rtx)
9306 return XEXP (cond0, 0);
9307 else if (GET_CODE (cond0) == EQ && XEXP (cond0, 1) == const0_rtx)
9308 {
9309 *ptrue = XEXP (x, 2), *pfalse = XEXP (x, 1);
9310 return XEXP (cond0, 0);
9311 }
9312 else
9313 return cond0;
9314 }
9315
9316 /* If X is a SUBREG, we can narrow both the true and false values
9317 of the inner expression, if there is a condition. */
9318 else if (code == SUBREG
9319 && 0 != (cond0 = if_then_else_cond (SUBREG_REG (x),
9320 &true0, &false0)))
9321 {
9322 true0 = simplify_gen_subreg (mode, true0,
9323 GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
9324 false0 = simplify_gen_subreg (mode, false0,
9325 GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
9326 if (true0 && false0)
9327 {
9328 *ptrue = true0;
9329 *pfalse = false0;
9330 return cond0;
9331 }
9332 }
9333
9334 /* If X is a constant, this isn't special and will cause confusion
9335 if we treat it as such. Likewise if it is equivalent to a constant. */
9336 else if (CONSTANT_P (x)
9337 || ((cond0 = get_last_value (x)) != 0 && CONSTANT_P (cond0)))
9338 ;
9339
9340 /* If we're in BImode, canonicalize on 0 and STORE_FLAG_VALUE, as that
9341 will be least confusing to the rest of the compiler. */
9342 else if (mode == BImode)
9343 {
9344 *ptrue = GEN_INT (STORE_FLAG_VALUE), *pfalse = const0_rtx;
9345 return x;
9346 }
9347
9348 /* If X is known to be either 0 or -1, those are the true and
9349 false values when testing X. */
9350 else if (x == constm1_rtx || x == const0_rtx
9351 || (is_a <scalar_int_mode> (mode, &int_mode)
9352 && (num_sign_bit_copies (x, int_mode)
9353 == GET_MODE_PRECISION (int_mode))))
9354 {
9355 *ptrue = constm1_rtx, *pfalse = const0_rtx;
9356 return x;
9357 }
9358
9359 /* Likewise for 0 or a single bit. */
9360 else if (HWI_COMPUTABLE_MODE_P (mode)
9361 && pow2p_hwi (nz = nonzero_bits (x, mode)))
9362 {
9363 *ptrue = gen_int_mode (nz, mode), *pfalse = const0_rtx;
9364 return x;
9365 }
9366
9367 /* Otherwise fail; show no condition with true and false values the same. */
9368 *ptrue = *pfalse = x;
9369 return 0;
9370 }
9371 \f
9372 /* Return the value of expression X given the fact that condition COND
9373 is known to be true when applied to REG as its first operand and VAL
9374 as its second. X is known to not be shared and so can be modified in
9375 place.
9376
9377 We only handle the simplest cases, and specifically those cases that
9378 arise with IF_THEN_ELSE expressions. */
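/* For instance, with COND == GE, REG == (reg 100) and VAL == const0_rtx,
(abs:SI (reg 100)) simplifies to (reg 100), and a comparison dominated by
COND, such as (ge (reg 100) (const_int 0)), becomes const_true_rtx.  */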
9379
9380 static rtx
9381 known_cond (rtx x, enum rtx_code cond, rtx reg, rtx val)
9382 {
9383 enum rtx_code code = GET_CODE (x);
9384 const char *fmt;
9385 int i, j;
9386
9387 if (side_effects_p (x))
9388 return x;
9389
9390 /* If either operand of the condition is a floating point value,
9391 then we have to avoid collapsing an EQ comparison. */
9392 if (cond == EQ
9393 && rtx_equal_p (x, reg)
9394 && ! FLOAT_MODE_P (GET_MODE (x))
9395 && ! FLOAT_MODE_P (GET_MODE (val)))
9396 return val;
9397
9398 if (cond == UNEQ && rtx_equal_p (x, reg))
9399 return val;
9400
9401 /* If X is (abs REG) and we know something about REG's relationship
9402 with zero, we may be able to simplify this. */
9403
9404 if (code == ABS && rtx_equal_p (XEXP (x, 0), reg) && val == const0_rtx)
9405 switch (cond)
9406 {
9407 case GE: case GT: case EQ:
9408 return XEXP (x, 0);
9409 case LT: case LE:
9410 return simplify_gen_unary (NEG, GET_MODE (XEXP (x, 0)),
9411 XEXP (x, 0),
9412 GET_MODE (XEXP (x, 0)));
9413 default:
9414 break;
9415 }
9416
9417 /* The only other cases we handle are MIN, MAX, and comparisons if the
9418 operands are the same as REG and VAL. */
9419
9420 else if (COMPARISON_P (x) || COMMUTATIVE_ARITH_P (x))
9421 {
9422 if (rtx_equal_p (XEXP (x, 0), val))
9423 {
9424 std::swap (val, reg);
9425 cond = swap_condition (cond);
9426 }
9427
9428 if (rtx_equal_p (XEXP (x, 0), reg) && rtx_equal_p (XEXP (x, 1), val))
9429 {
9430 if (COMPARISON_P (x))
9431 {
9432 if (comparison_dominates_p (cond, code))
9433 return const_true_rtx;
9434
9435 code = reversed_comparison_code (x, NULL);
9436 if (code != UNKNOWN
9437 && comparison_dominates_p (cond, code))
9438 return const0_rtx;
9439 else
9440 return x;
9441 }
9442 else if (code == SMAX || code == SMIN
9443 || code == UMIN || code == UMAX)
9444 {
9445 int unsignedp = (code == UMIN || code == UMAX);
9446
9447 /* Do not reverse the condition when it is NE or EQ.
9448 This is because we cannot conclude anything about
9449 the value of 'SMAX (x, y)' when x is not equal to y,
9450 but we can when x equals y. */
9451 if ((code == SMAX || code == UMAX)
9452 && ! (cond == EQ || cond == NE))
9453 cond = reverse_condition (cond);
9454
9455 switch (cond)
9456 {
9457 case GE: case GT:
9458 return unsignedp ? x : XEXP (x, 1);
9459 case LE: case LT:
9460 return unsignedp ? x : XEXP (x, 0);
9461 case GEU: case GTU:
9462 return unsignedp ? XEXP (x, 1) : x;
9463 case LEU: case LTU:
9464 return unsignedp ? XEXP (x, 0) : x;
9465 default:
9466 break;
9467 }
9468 }
9469 }
9470 }
9471 else if (code == SUBREG)
9472 {
9473 machine_mode inner_mode = GET_MODE (SUBREG_REG (x));
9474 rtx new_rtx, r = known_cond (SUBREG_REG (x), cond, reg, val);
9475
9476 if (SUBREG_REG (x) != r)
9477 {
9478 /* We must simplify subreg here, before we lose track of the
9479 original inner_mode. */
9480 new_rtx = simplify_subreg (GET_MODE (x), r,
9481 inner_mode, SUBREG_BYTE (x));
9482 if (new_rtx)
9483 return new_rtx;
9484 else
9485 SUBST (SUBREG_REG (x), r);
9486 }
9487
9488 return x;
9489 }
9490 /* We don't have to handle SIGN_EXTEND here, because even in the
9491 case of replacing something with a modeless CONST_INT, a
9492 CONST_INT is already (supposed to be) a valid sign extension for
9493 its narrower mode, which implies it's already properly
9494 sign-extended for the wider mode. Now, for ZERO_EXTEND, the
9495 story is different. */
9496 else if (code == ZERO_EXTEND)
9497 {
9498 machine_mode inner_mode = GET_MODE (XEXP (x, 0));
9499 rtx new_rtx, r = known_cond (XEXP (x, 0), cond, reg, val);
9500
9501 if (XEXP (x, 0) != r)
9502 {
9503 /* We must simplify the zero_extend here, before we lose
9504 track of the original inner_mode. */
9505 new_rtx = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
9506 r, inner_mode);
9507 if (new_rtx)
9508 return new_rtx;
9509 else
9510 SUBST (XEXP (x, 0), r);
9511 }
9512
9513 return x;
9514 }
9515
9516 fmt = GET_RTX_FORMAT (code);
9517 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
9518 {
9519 if (fmt[i] == 'e')
9520 SUBST (XEXP (x, i), known_cond (XEXP (x, i), cond, reg, val));
9521 else if (fmt[i] == 'E')
9522 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
9523 SUBST (XVECEXP (x, i, j), known_cond (XVECEXP (x, i, j),
9524 cond, reg, val));
9525 }
9526
9527 return x;
9528 }
9529 \f
9530 /* See if X and Y are equal for the purposes of seeing if we can rewrite an
9531 assignment as a field assignment. */
9532
9533 static int
9534 rtx_equal_for_field_assignment_p (rtx x, rtx y, bool widen_x)
9535 {
9536 if (widen_x && GET_MODE (x) != GET_MODE (y))
9537 {
9538 if (paradoxical_subreg_p (GET_MODE (x), GET_MODE (y)))
9539 return 0;
9540 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
9541 return 0;
9542 x = adjust_address_nv (x, GET_MODE (y),
9543 byte_lowpart_offset (GET_MODE (y),
9544 GET_MODE (x)));
9545 }
9546
9547 if (x == y || rtx_equal_p (x, y))
9548 return 1;
9549
9550 if (x == 0 || y == 0 || GET_MODE (x) != GET_MODE (y))
9551 return 0;
9552
9553 /* Check for a paradoxical SUBREG of a MEM compared with the MEM.
9554 Note that all SUBREGs of MEM are paradoxical; otherwise they
9555 would have been rewritten. */
9556 if (MEM_P (x) && GET_CODE (y) == SUBREG
9557 && MEM_P (SUBREG_REG (y))
9558 && rtx_equal_p (SUBREG_REG (y),
9559 gen_lowpart (GET_MODE (SUBREG_REG (y)), x)))
9560 return 1;
9561
9562 if (MEM_P (y) && GET_CODE (x) == SUBREG
9563 && MEM_P (SUBREG_REG (x))
9564 && rtx_equal_p (SUBREG_REG (x),
9565 gen_lowpart (GET_MODE (SUBREG_REG (x)), y)))
9566 return 1;
9567
9568 /* We used to see if get_last_value of X and Y were the same but that's
9569 not correct. In one direction, we'll cause the assignment to have
9570 the wrong destination and in the other case, we'll import a register
9571 into this insn that might already have been dead. So fail if none of
9572 the above cases are true. */
9573 return 0;
9574 }
9575 \f
9576 /* See if X, a SET operation, can be rewritten as a bit-field assignment.
9577 Return that assignment if so.
9578
9579 We only handle the most common cases. */
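/* A typical case, sketched: (set DEST (ior (and DEST (const_int -256)) OTHER))
where OTHER is known to have nonzero bits only in its low eight bits can be
rewritten as an assignment of OTHER into
(zero_extract DEST (const_int 8) (const_int 0)).  */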
9580
9581 static rtx
9582 make_field_assignment (rtx x)
9583 {
9584 rtx dest = SET_DEST (x);
9585 rtx src = SET_SRC (x);
9586 rtx assign;
9587 rtx rhs, lhs;
9588 HOST_WIDE_INT c1;
9589 HOST_WIDE_INT pos;
9590 unsigned HOST_WIDE_INT len;
9591 rtx other;
9592
9593 /* All the rules in this function are specific to scalar integers. */
9594 scalar_int_mode mode;
9595 if (!is_a <scalar_int_mode> (GET_MODE (dest), &mode))
9596 return x;
9597
9598 /* If SRC was (and (not (ashift (const_int 1) POS)) DEST), this is
9599 a clear of a one-bit field. We will have changed it to
9600 (and (rotate (const_int -2) POS) DEST), so check for that. Also check
9601 for a SUBREG. */
9602
9603 if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == ROTATE
9604 && CONST_INT_P (XEXP (XEXP (src, 0), 0))
9605 && INTVAL (XEXP (XEXP (src, 0), 0)) == -2
9606 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9607 {
9608 assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
9609 1, 1, 1, 0);
9610 if (assign != 0)
9611 return gen_rtx_SET (assign, const0_rtx);
9612 return x;
9613 }
9614
9615 if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == SUBREG
9616 && subreg_lowpart_p (XEXP (src, 0))
9617 && partial_subreg_p (XEXP (src, 0))
9618 && GET_CODE (SUBREG_REG (XEXP (src, 0))) == ROTATE
9619 && CONST_INT_P (XEXP (SUBREG_REG (XEXP (src, 0)), 0))
9620 && INTVAL (XEXP (SUBREG_REG (XEXP (src, 0)), 0)) == -2
9621 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9622 {
9623 assign = make_extraction (VOIDmode, dest, 0,
9624 XEXP (SUBREG_REG (XEXP (src, 0)), 1),
9625 1, 1, 1, 0);
9626 if (assign != 0)
9627 return gen_rtx_SET (assign, const0_rtx);
9628 return x;
9629 }
9630
9631 /* If SRC is (ior (ashift (const_int 1) POS) DEST), this is a set of a
9632 one-bit field. */
9633 if (GET_CODE (src) == IOR && GET_CODE (XEXP (src, 0)) == ASHIFT
9634 && XEXP (XEXP (src, 0), 0) == const1_rtx
9635 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9636 {
9637 assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
9638 1, 1, 1, 0);
9639 if (assign != 0)
9640 return gen_rtx_SET (assign, const1_rtx);
9641 return x;
9642 }
9643
9644 /* If DEST is already a field assignment, i.e. ZERO_EXTRACT, and the
9645 SRC is an AND with all bits of that field set, then we can discard
9646 the AND. */
9647 if (GET_CODE (dest) == ZERO_EXTRACT
9648 && CONST_INT_P (XEXP (dest, 1))
9649 && GET_CODE (src) == AND
9650 && CONST_INT_P (XEXP (src, 1)))
9651 {
9652 HOST_WIDE_INT width = INTVAL (XEXP (dest, 1));
9653 unsigned HOST_WIDE_INT and_mask = INTVAL (XEXP (src, 1));
9654 unsigned HOST_WIDE_INT ze_mask;
9655
9656 if (width >= HOST_BITS_PER_WIDE_INT)
9657 ze_mask = -1;
9658 else
9659 ze_mask = ((unsigned HOST_WIDE_INT)1 << width) - 1;
9660
9661 /* Complete overlap. We can remove the source AND. */
9662 if ((and_mask & ze_mask) == ze_mask)
9663 return gen_rtx_SET (dest, XEXP (src, 0));
9664
9665 /* Partial overlap. We can reduce the source AND. */
9666 if ((and_mask & ze_mask) != and_mask)
9667 {
9668 src = gen_rtx_AND (mode, XEXP (src, 0),
9669 gen_int_mode (and_mask & ze_mask, mode));
9670 return gen_rtx_SET (dest, src);
9671 }
9672 }
9673
9674 /* The other case we handle is assignments into a constant-position
9675 field. They look like (ior/xor (and DEST C1) OTHER). If C1 represents
9676 a mask that has all one bits except for a group of zero bits and
9677 OTHER is known to have zeros where C1 has ones, this is such an
9678 assignment. Compute the position and length from C1. Shift OTHER
9679 to the appropriate position, force it to the required mode, and
9680 make the extraction. Check for the AND in both operands. */
9681
9682 /* One or more SUBREGs might obscure the constant-position field
9683 assignment. The first one we are likely to encounter is an outer
9684 narrowing SUBREG, which we can just strip for the purposes of
9685 identifying the constant-field assignment. */
9686 scalar_int_mode src_mode = mode;
9687 if (GET_CODE (src) == SUBREG
9688 && subreg_lowpart_p (src)
9689 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (src)), &src_mode))
9690 src = SUBREG_REG (src);
9691
9692 if (GET_CODE (src) != IOR && GET_CODE (src) != XOR)
9693 return x;
9694
9695 rhs = expand_compound_operation (XEXP (src, 0));
9696 lhs = expand_compound_operation (XEXP (src, 1));
9697
9698 if (GET_CODE (rhs) == AND
9699 && CONST_INT_P (XEXP (rhs, 1))
9700 && rtx_equal_for_field_assignment_p (XEXP (rhs, 0), dest))
9701 c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
9702 /* The second SUBREG that might get in the way is a paradoxical
9703 SUBREG around the first operand of the AND. We want to
9704 pretend the operand is as wide as the destination here. We
9705 do this by adjusting the MEM to wider mode for the sole
9706 purpose of the call to rtx_equal_for_field_assignment_p. Also
9707 note this trick only works for MEMs. */
9708 else if (GET_CODE (rhs) == AND
9709 && paradoxical_subreg_p (XEXP (rhs, 0))
9710 && MEM_P (SUBREG_REG (XEXP (rhs, 0)))
9711 && CONST_INT_P (XEXP (rhs, 1))
9712 && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (rhs, 0)),
9713 dest, true))
9714 c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
9715 else if (GET_CODE (lhs) == AND
9716 && CONST_INT_P (XEXP (lhs, 1))
9717 && rtx_equal_for_field_assignment_p (XEXP (lhs, 0), dest))
9718 c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
9719 /* The second SUBREG that might get in the way is a paradoxical
9720 SUBREG around the first operand of the AND. We want to
9721 pretend the operand is as wide as the destination here. We
9722 do this by adjusting the MEM to wider mode for the sole
9723 purpose of the call to rtx_equal_for_field_assignment_p. Also
9724 note this trick only works for MEMs. */
9725 else if (GET_CODE (lhs) == AND
9726 && paradoxical_subreg_p (XEXP (lhs, 0))
9727 && MEM_P (SUBREG_REG (XEXP (lhs, 0)))
9728 && CONST_INT_P (XEXP (lhs, 1))
9729 && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (lhs, 0)),
9730 dest, true))
9731 c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
9732 else
9733 return x;
9734
9735 pos = get_pos_from_mask ((~c1) & GET_MODE_MASK (mode), &len);
9736 if (pos < 0
9737 || pos + len > GET_MODE_PRECISION (mode)
9738 || GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT
9739 || (c1 & nonzero_bits (other, mode)) != 0)
9740 return x;
9741
9742 assign = make_extraction (VOIDmode, dest, pos, NULL_RTX, len, 1, 1, 0);
9743 if (assign == 0)
9744 return x;
9745
9746 /* The mode to use for the source is the mode of the assignment, or of
9747 what is inside a possible STRICT_LOW_PART. */
9748 machine_mode new_mode = (GET_CODE (assign) == STRICT_LOW_PART
9749 ? GET_MODE (XEXP (assign, 0)) : GET_MODE (assign));
9750
9751 /* Shift OTHER right POS places and make it the source, restricting it
9752 to the proper length and mode. */
9753
9754 src = canon_reg_for_combine (simplify_shift_const (NULL_RTX, LSHIFTRT,
9755 src_mode, other, pos),
9756 dest);
9757 src = force_to_mode (src, new_mode,
9758 len >= HOST_BITS_PER_WIDE_INT
9759 ? HOST_WIDE_INT_M1U
9760 : (HOST_WIDE_INT_1U << len) - 1,
9761 0);
9762
9763 /* If SRC is masked by an AND that does not make a difference in
9764 the value being stored, strip it. */
9765 if (GET_CODE (assign) == ZERO_EXTRACT
9766 && CONST_INT_P (XEXP (assign, 1))
9767 && INTVAL (XEXP (assign, 1)) < HOST_BITS_PER_WIDE_INT
9768 && GET_CODE (src) == AND
9769 && CONST_INT_P (XEXP (src, 1))
9770 && UINTVAL (XEXP (src, 1))
9771 == (HOST_WIDE_INT_1U << INTVAL (XEXP (assign, 1))) - 1)
9772 src = XEXP (src, 0);
9773
9774 return gen_rtx_SET (assign, src);
9775 }
9776 \f
9777 /* See if X is of the form (+ (* a c) (* b c)) and convert to (* (+ a b) c)
9778 if so. */
9779
9780 static rtx
9781 apply_distributive_law (rtx x)
9782 {
9783 enum rtx_code code = GET_CODE (x);
9784 enum rtx_code inner_code;
9785 rtx lhs, rhs, other;
9786 rtx tem;
9787
9788 /* Distributivity is not true for floating point as it can change the
9789 value. So we don't do it unless -funsafe-math-optimizations. */
9790 if (FLOAT_MODE_P (GET_MODE (x))
9791 && ! flag_unsafe_math_optimizations)
9792 return x;
9793
9794 /* The outer operation can only be one of the following: */
9795 if (code != IOR && code != AND && code != XOR
9796 && code != PLUS && code != MINUS)
9797 return x;
9798
9799 lhs = XEXP (x, 0);
9800 rhs = XEXP (x, 1);
9801
9802 /* If either operand is a primitive we can't do anything, so get out
9803 fast. */
9804 if (OBJECT_P (lhs) || OBJECT_P (rhs))
9805 return x;
9806
9807 lhs = expand_compound_operation (lhs);
9808 rhs = expand_compound_operation (rhs);
9809 inner_code = GET_CODE (lhs);
9810 if (inner_code != GET_CODE (rhs))
9811 return x;
9812
9813 /* See if the inner and outer operations distribute. */
9814 switch (inner_code)
9815 {
9816 case LSHIFTRT:
9817 case ASHIFTRT:
9818 case AND:
9819 case IOR:
9820 /* These all distribute except over PLUS. */
9821 if (code == PLUS || code == MINUS)
9822 return x;
9823 break;
9824
9825 case MULT:
9826 if (code != PLUS && code != MINUS)
9827 return x;
9828 break;
9829
9830 case ASHIFT:
9831 /* This is also a multiply, so it distributes over everything. */
9832 break;
9833
9834 /* This used to handle SUBREG, but this turned out to be counter-
9835 productive, since (subreg (op ...)) usually is not handled by
9836 insn patterns, and this "optimization" therefore transformed
9837 recognizable patterns into unrecognizable ones. Therefore the
9838 SUBREG case was removed from here.
9839
9840 It is possible that distributing SUBREG over arithmetic operations
8841 leads to an intermediate result that can then be optimized further,
9842 e.g. by moving the outer SUBREG to the other side of a SET as done
9843 in simplify_set. This seems to have been the original intent of
9844 handling SUBREGs here.
9845
9846 However, with current GCC this does not appear to actually happen,
9847 at least on major platforms. If some case is found where removing
9848 the SUBREG case here prevents follow-on optimizations, distributing
9849 SUBREGs ought to be re-added at that place, e.g. in simplify_set. */
9850
9851 default:
9852 return x;
9853 }
9854
9855 /* Set LHS and RHS to the inner operands (A and B in the example
9856 above) and set OTHER to the common operand (C in the example).
9857 There is only one way to do this unless the inner operation is
9858 commutative. */
9859 if (COMMUTATIVE_ARITH_P (lhs)
9860 && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 0)))
9861 other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 1);
9862 else if (COMMUTATIVE_ARITH_P (lhs)
9863 && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 1)))
9864 other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 0);
9865 else if (COMMUTATIVE_ARITH_P (lhs)
9866 && rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 0)))
9867 other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 1);
9868 else if (rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 1)))
9869 other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 0);
9870 else
9871 return x;
9872
9873 /* Form the new inner operation, seeing if it simplifies first. */
9874 tem = simplify_gen_binary (code, GET_MODE (x), lhs, rhs);
9875
9876 /* There is one exception to the general way of distributing:
9877 (a | c) ^ (b | c) -> (a ^ b) & ~c */
9878 if (code == XOR && inner_code == IOR)
9879 {
9880 inner_code = AND;
9881 other = simplify_gen_unary (NOT, GET_MODE (x), other, GET_MODE (x));
9882 }
9883
9884   /* We may be able to continue distributing the result, so call
9885 ourselves recursively on the inner operation before forming the
9886 outer operation, which we return. */
9887 return simplify_gen_binary (inner_code, GET_MODE (x),
9888 apply_distributive_law (tem), other);
9889 }
9890
9891 /* See if X is of the form (* (+ A B) C), and if so convert to
9892 (+ (* A C) (* B C)) and try to simplify.
9893
9894 Most of the time, this results in no change. However, if some of
9895 the operands are the same or inverses of each other, simplifications
9896 will result.
9897
9898 For example, (and (ior A B) (not B)) can occur as the result of
9899 expanding a bit field assignment. When we apply the distributive
9900      law to this, we get (ior (and A (not B)) (and B (not B))),
9901      which then simplifies to (and A (not B)).
9902
9903      Note that no check is made here on the validity of applying the inverse
9904      distributive law; such a check would be pointless, since it can be done
9905      at the few places where this routine is called.
9906
9907 N is the index of the term that is decomposed (the arithmetic operation,
9908 i.e. (+ A B) in the first example above). !N is the index of the term that
9909 is distributed, i.e. of C in the first example above. */
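/* As an illustrative sketch: for X = (ior (and A B) C) and N == 0, the
   decomposed term is (and A B) and C is distributed over it, giving
   (and (ior A C) (ior B C)); with N == 1, C would be the decomposed term
   instead (and must then itself be an arithmetic operation).  The
   distributed form is returned only if it is no longer an OUTER_CODE
   operation and is cheaper than X.  */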
9910 static rtx
9911 distribute_and_simplify_rtx (rtx x, int n)
9912 {
9913 machine_mode mode;
9914 enum rtx_code outer_code, inner_code;
9915 rtx decomposed, distributed, inner_op0, inner_op1, new_op0, new_op1, tmp;
9916
9917 /* Distributivity is not true for floating point as it can change the
9918      value.  So we don't do it unless -funsafe-math-optimizations is in effect. */
9919 if (FLOAT_MODE_P (GET_MODE (x))
9920 && ! flag_unsafe_math_optimizations)
9921 return NULL_RTX;
9922
9923 decomposed = XEXP (x, n);
9924 if (!ARITHMETIC_P (decomposed))
9925 return NULL_RTX;
9926
9927 mode = GET_MODE (x);
9928 outer_code = GET_CODE (x);
9929 distributed = XEXP (x, !n);
9930
9931 inner_code = GET_CODE (decomposed);
9932 inner_op0 = XEXP (decomposed, 0);
9933 inner_op1 = XEXP (decomposed, 1);
9934
9935 /* Special case (and (xor B C) (not A)), which is equivalent to
9936 (xor (ior A B) (ior A C)) */
9937 if (outer_code == AND && inner_code == XOR && GET_CODE (distributed) == NOT)
9938 {
9939 distributed = XEXP (distributed, 0);
9940 outer_code = IOR;
9941 }
9942
9943 if (n == 0)
9944 {
9945 /* Distribute the second term. */
9946 new_op0 = simplify_gen_binary (outer_code, mode, inner_op0, distributed);
9947 new_op1 = simplify_gen_binary (outer_code, mode, inner_op1, distributed);
9948 }
9949 else
9950 {
9951 /* Distribute the first term. */
9952 new_op0 = simplify_gen_binary (outer_code, mode, distributed, inner_op0);
9953 new_op1 = simplify_gen_binary (outer_code, mode, distributed, inner_op1);
9954 }
9955
9956 tmp = apply_distributive_law (simplify_gen_binary (inner_code, mode,
9957 new_op0, new_op1));
9958 if (GET_CODE (tmp) != outer_code
9959 && (set_src_cost (tmp, mode, optimize_this_for_speed_p)
9960 < set_src_cost (x, mode, optimize_this_for_speed_p)))
9961 return tmp;
9962
9963 return NULL_RTX;
9964 }
9965 \f
9966 /* Simplify a logical `and' of VAROP with the constant CONSTOP, to be done
9967 in MODE. Return an equivalent form, if different from (and VAROP
9968 (const_int CONSTOP)). Otherwise, return NULL_RTX. */
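/* For example (illustrative only): if CONSTOP covers every bit that
   nonzero_bits says can be set in VAROP, the AND is redundant and VAROP
   itself (converted to MODE with gen_lowpart if necessary) is returned;
   if CONSTOP masks out every possibly-nonzero bit, const0_rtx is returned.  */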
9969
9970 static rtx
9971 simplify_and_const_int_1 (scalar_int_mode mode, rtx varop,
9972 unsigned HOST_WIDE_INT constop)
9973 {
9974 unsigned HOST_WIDE_INT nonzero;
9975 unsigned HOST_WIDE_INT orig_constop;
9976 rtx orig_varop;
9977 int i;
9978
9979 orig_varop = varop;
9980 orig_constop = constop;
9981 if (GET_CODE (varop) == CLOBBER)
9982 return NULL_RTX;
9983
9984 /* Simplify VAROP knowing that we will be only looking at some of the
9985 bits in it.
9986
9987 Note by passing in CONSTOP, we guarantee that the bits not set in
9988 CONSTOP are not significant and will never be examined. We must
9989 ensure that is the case by explicitly masking out those bits
9990 before returning. */
9991 varop = force_to_mode (varop, mode, constop, 0);
9992
9993 /* If VAROP is a CLOBBER, we will fail so return it. */
9994 if (GET_CODE (varop) == CLOBBER)
9995 return varop;
9996
9997 /* If VAROP is a CONST_INT, then we need to apply the mask in CONSTOP
9998 to VAROP and return the new constant. */
9999 if (CONST_INT_P (varop))
10000 return gen_int_mode (INTVAL (varop) & constop, mode);
10001
10002 /* See what bits may be nonzero in VAROP. Unlike the general case of
10003 a call to nonzero_bits, here we don't care about bits outside
10004 MODE. */
10005
10006 nonzero = nonzero_bits (varop, mode) & GET_MODE_MASK (mode);
10007
10008 /* Turn off all bits in the constant that are known to already be zero.
10009 Thus, if the AND isn't needed at all, we will have CONSTOP == NONZERO_BITS
10010 which is tested below. */
10011
10012 constop &= nonzero;
10013
10014 /* If we don't have any bits left, return zero. */
10015 if (constop == 0)
10016 return const0_rtx;
10017
10018 /* If VAROP is a NEG of something known to be zero or 1 and CONSTOP is
10019 a power of two, we can replace this with an ASHIFT. */
10020 if (GET_CODE (varop) == NEG && nonzero_bits (XEXP (varop, 0), mode) == 1
10021 && (i = exact_log2 (constop)) >= 0)
10022 return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (varop, 0), i);
10023
10024 /* If VAROP is an IOR or XOR, apply the AND to both branches of the IOR
10025 or XOR, then try to apply the distributive law. This may eliminate
10026 operations if either branch can be simplified because of the AND.
10027 It may also make some cases more complex, but those cases probably
10028 won't match a pattern either with or without this. */
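  /* E.g. (illustrative, with a hypothetical pseudo X):
     (and (ior X (const_int 0xff00)) (const_int 0xff)) distributes into
     (ior (and X (const_int 0xff)) (const_int 0)), which collapses to
     (and X (const_int 0xff)).  */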
10029
10030 if (GET_CODE (varop) == IOR || GET_CODE (varop) == XOR)
10031 {
10032 scalar_int_mode varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
10033 return
10034 gen_lowpart
10035 (mode,
10036 apply_distributive_law
10037 (simplify_gen_binary (GET_CODE (varop), varop_mode,
10038 simplify_and_const_int (NULL_RTX, varop_mode,
10039 XEXP (varop, 0),
10040 constop),
10041 simplify_and_const_int (NULL_RTX, varop_mode,
10042 XEXP (varop, 1),
10043 constop))));
10044 }
10045
10046 /* If VAROP is PLUS, and the constant is a mask of low bits, distribute
10047 the AND and see if one of the operands simplifies to zero. If so, we
10048 may eliminate it. */
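  /* E.g. (illustrative): for (and (plus X (const_int 16)) (const_int 15))
     the constant 15 is a low-bit mask and 16 has no bits within it, so the
     PLUS operand simplifies to zero and the result is just
     (and X (const_int 15)).  */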
10049
10050 if (GET_CODE (varop) == PLUS
10051 && pow2p_hwi (constop + 1))
10052 {
10053 rtx o0, o1;
10054
10055 o0 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 0), constop);
10056 o1 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 1), constop);
10057 if (o0 == const0_rtx)
10058 return o1;
10059 if (o1 == const0_rtx)
10060 return o0;
10061 }
10062
10063 /* Make a SUBREG if necessary. If we can't make it, fail. */
10064 varop = gen_lowpart (mode, varop);
10065 if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
10066 return NULL_RTX;
10067
10068 /* If we are only masking insignificant bits, return VAROP. */
10069 if (constop == nonzero)
10070 return varop;
10071
10072 if (varop == orig_varop && constop == orig_constop)
10073 return NULL_RTX;
10074
10075 /* Otherwise, return an AND. */
10076 return simplify_gen_binary (AND, mode, varop, gen_int_mode (constop, mode));
10077 }
10078
10079
10080 /* We have X, a logical `and' of VAROP with the constant CONSTOP, to be done
10081 in MODE.
10082
10083 Return an equivalent form, if different from X. Otherwise, return X. If
10084 X is zero, we are to always construct the equivalent form. */
10085
10086 static rtx
10087 simplify_and_const_int (rtx x, scalar_int_mode mode, rtx varop,
10088 unsigned HOST_WIDE_INT constop)
10089 {
10090 rtx tem = simplify_and_const_int_1 (mode, varop, constop);
10091 if (tem)
10092 return tem;
10093
10094 if (!x)
10095 x = simplify_gen_binary (AND, GET_MODE (varop), varop,
10096 gen_int_mode (constop, mode));
10097 if (GET_MODE (x) != mode)
10098 x = gen_lowpart (mode, x);
10099 return x;
10100 }
10101 \f
10102 /* Given a REG X of mode XMODE, compute which bits in X can be nonzero.
10103 We don't care about bits outside of those defined in MODE.
10104
10105    For most X this is simply GET_MODE_MASK (XMODE), but when we have recorded
10106    information about the register we can often do better. */
10107
10108 static rtx
10109 reg_nonzero_bits_for_combine (const_rtx x, scalar_int_mode xmode,
10110 scalar_int_mode mode,
10111 unsigned HOST_WIDE_INT *nonzero)
10112 {
10113 rtx tem;
10114 reg_stat_type *rsp;
10115
10116 /* If X is a register whose nonzero bits value is current, use it.
10117 Otherwise, if X is a register whose value we can find, use that
10118 value. Otherwise, use the previously-computed global nonzero bits
10119 for this register. */
10120
10121 rsp = &reg_stat[REGNO (x)];
10122 if (rsp->last_set_value != 0
10123 && (rsp->last_set_mode == mode
10124 || (GET_MODE_CLASS (rsp->last_set_mode) == MODE_INT
10125 && GET_MODE_CLASS (mode) == MODE_INT))
10126 && ((rsp->last_set_label >= label_tick_ebb_start
10127 && rsp->last_set_label < label_tick)
10128 || (rsp->last_set_label == label_tick
10129 && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
10130 || (REGNO (x) >= FIRST_PSEUDO_REGISTER
10131 && REGNO (x) < reg_n_sets_max
10132 && REG_N_SETS (REGNO (x)) == 1
10133 && !REGNO_REG_SET_P
10134 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
10135 REGNO (x)))))
10136 {
10137 /* Note that, even if the precision of last_set_mode is lower than that
10138 of mode, record_value_for_reg invoked nonzero_bits on the register
10139 with nonzero_bits_mode (because last_set_mode is necessarily integral
10140 and HWI_COMPUTABLE_MODE_P in this case) so bits in nonzero_bits_mode
10141 are all valid, hence in mode too since nonzero_bits_mode is defined
10142 to the largest HWI_COMPUTABLE_MODE_P mode. */
10143 *nonzero &= rsp->last_set_nonzero_bits;
10144 return NULL;
10145 }
10146
10147 tem = get_last_value (x);
10148 if (tem)
10149 {
10150 if (SHORT_IMMEDIATES_SIGN_EXTEND)
10151 tem = sign_extend_short_imm (tem, xmode, GET_MODE_PRECISION (mode));
10152
10153 return tem;
10154 }
10155
10156 if (nonzero_sign_valid && rsp->nonzero_bits)
10157 {
10158 unsigned HOST_WIDE_INT mask = rsp->nonzero_bits;
10159
10160 if (GET_MODE_PRECISION (xmode) < GET_MODE_PRECISION (mode))
10161 /* We don't know anything about the upper bits. */
10162 mask |= GET_MODE_MASK (mode) ^ GET_MODE_MASK (xmode);
10163
10164 *nonzero &= mask;
10165 }
10166
10167 return NULL;
10168 }
10169
10170 /* Given a reg X of mode XMODE, used in mode MODE, set *RESULT to the number
10171    of bits at the high-order end of X that are known to equal the sign bit,
10172    or return an equivalent expression for the caller to analyze instead.
10173    The value stored is always between 1 and the number of bits in MODE. */
10174
10175 static rtx
10176 reg_num_sign_bit_copies_for_combine (const_rtx x, scalar_int_mode xmode,
10177 scalar_int_mode mode,
10178 unsigned int *result)
10179 {
10180 rtx tem;
10181 reg_stat_type *rsp;
10182
10183 rsp = &reg_stat[REGNO (x)];
10184 if (rsp->last_set_value != 0
10185 && rsp->last_set_mode == mode
10186 && ((rsp->last_set_label >= label_tick_ebb_start
10187 && rsp->last_set_label < label_tick)
10188 || (rsp->last_set_label == label_tick
10189 && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
10190 || (REGNO (x) >= FIRST_PSEUDO_REGISTER
10191 && REGNO (x) < reg_n_sets_max
10192 && REG_N_SETS (REGNO (x)) == 1
10193 && !REGNO_REG_SET_P
10194 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
10195 REGNO (x)))))
10196 {
10197 *result = rsp->last_set_sign_bit_copies;
10198 return NULL;
10199 }
10200
10201 tem = get_last_value (x);
10202 if (tem != 0)
10203 return tem;
10204
10205 if (nonzero_sign_valid && rsp->sign_bit_copies != 0
10206 && GET_MODE_PRECISION (xmode) == GET_MODE_PRECISION (mode))
10207 *result = rsp->sign_bit_copies;
10208
10209 return NULL;
10210 }
10211 \f
10212 /* Return the number of "extended" bits there are in X, when interpreted
10213 as a quantity in MODE whose signedness is indicated by UNSIGNEDP. For
10214 unsigned quantities, this is the number of high-order zero bits.
10215 For signed quantities, this is the number of copies of the sign bit
10216    minus 1.  In both cases, this function returns the number of "spare"
10217 bits. For example, if two quantities for which this function returns
10218 at least 1 are added, the addition is known not to overflow.
10219
10220 This function will always return 0 unless called during combine, which
10221 implies that it must be called from a define_split. */
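/* For instance (illustrative, assuming a 32-bit SImode): if X is known to
   have at most its low eight bits nonzero, the unsigned count is
   32 - 1 - floor_log2 (0xff) = 24, i.e. 24 "spare" high-order zero bits.  */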
10222
10223 unsigned int
10224 extended_count (const_rtx x, machine_mode mode, int unsignedp)
10225 {
10226 if (nonzero_sign_valid == 0)
10227 return 0;
10228
10229 scalar_int_mode int_mode;
10230 return (unsignedp
10231 ? (is_a <scalar_int_mode> (mode, &int_mode)
10232 && HWI_COMPUTABLE_MODE_P (int_mode)
10233 ? (unsigned int) (GET_MODE_PRECISION (int_mode) - 1
10234 - floor_log2 (nonzero_bits (x, int_mode)))
10235 : 0)
10236 : num_sign_bit_copies (x, mode) - 1);
10237 }
10238
10239 /* This function is called from `simplify_shift_const' to merge two
10240 outer operations. Specifically, we have already found that we need
10241 to perform operation *POP0 with constant *PCONST0 at the outermost
10242 position. We would now like to also perform OP1 with constant CONST1
10243 (with *POP0 being done last).
10244
10245 Return 1 if we can do the operation and update *POP0 and *PCONST0 with
10246 the resulting operation. *PCOMP_P is set to 1 if we would need to
10247 complement the innermost operand, otherwise it is unchanged.
10248
10249 MODE is the mode in which the operation will be done. No bits outside
10250 the width of this mode matter. It is assumed that the width of this mode
10251 is smaller than or equal to HOST_BITS_PER_WIDE_INT.
10252
10253    If *POP0 or OP1 is UNKNOWN, it means no operation is required.  Only NEG,
10254    PLUS, IOR, XOR, and AND are supported.  We may set *POP0 to SET if the
10255    proper result is simply *PCONST0.
10256
10257 If the resulting operation cannot be expressed as one operation, we
10258 return 0 and do not change *POP0, *PCONST0, and *PCOMP_P. */
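/* Two illustrative cases (constants are hypothetical): if *POP0 and OP1 are
   both AND, with *PCONST0 == 0xff00 and CONST1 == 0x0ff0, the masks are
   simply intersected and *PCONST0 becomes 0x0f00.  If *POP0 is AND and OP1
   is IOR with the same constant, then (x | c) & c is just c, so *POP0
   becomes SET.  */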
10259
10260 static int
10261 merge_outer_ops (enum rtx_code *pop0, HOST_WIDE_INT *pconst0, enum rtx_code op1,
		 HOST_WIDE_INT const1, machine_mode mode, int *pcomp_p)
10262 {
10263 enum rtx_code op0 = *pop0;
10264 HOST_WIDE_INT const0 = *pconst0;
10265
10266 const0 &= GET_MODE_MASK (mode);
10267 const1 &= GET_MODE_MASK (mode);
10268
10269 /* If OP0 is an AND, clear unimportant bits in CONST1. */
10270 if (op0 == AND)
10271 const1 &= const0;
10272
10273 /* If OP0 or OP1 is UNKNOWN, this is easy. Similarly if they are the same or
10274 if OP0 is SET. */
10275
10276 if (op1 == UNKNOWN || op0 == SET)
10277 return 1;
10278
10279 else if (op0 == UNKNOWN)
10280 op0 = op1, const0 = const1;
10281
10282 else if (op0 == op1)
10283 {
10284 switch (op0)
10285 {
10286 case AND:
10287 const0 &= const1;
10288 break;
10289 case IOR:
10290 const0 |= const1;
10291 break;
10292 case XOR:
10293 const0 ^= const1;
10294 break;
10295 case PLUS:
10296 const0 += const1;
10297 break;
10298 case NEG:
10299 op0 = UNKNOWN;
10300 break;
10301 default:
10302 break;
10303 }
10304 }
10305
10306 /* Otherwise, if either is a PLUS or NEG, we can't do anything. */
10307 else if (op0 == PLUS || op1 == PLUS || op0 == NEG || op1 == NEG)
10308 return 0;
10309
10310 /* If the two constants aren't the same, we can't do anything. The
10311 remaining six cases can all be done. */
10312 else if (const0 != const1)
10313 return 0;
10314
10315 else
10316 switch (op0)
10317 {
10318 case IOR:
10319 if (op1 == AND)
10320 /* (a & b) | b == b */
10321 op0 = SET;
10322 else /* op1 == XOR */
10323 /* (a ^ b) | b == a | b */
10324 {;}
10325 break;
10326
10327 case XOR:
10328 if (op1 == AND)
10329 /* (a & b) ^ b == (~a) & b */
10330 op0 = AND, *pcomp_p = 1;
10331 else /* op1 == IOR */
10332 /* (a | b) ^ b == a & ~b */
10333 op0 = AND, const0 = ~const0;
10334 break;
10335
10336 case AND:
10337 if (op1 == IOR)
10338 /* (a | b) & b == b */
10339 op0 = SET;
10340 else /* op1 == XOR */
10341 	    /* (a ^ b) & b == (~a) & b */
10342 *pcomp_p = 1;
10343 break;
10344 default:
10345 break;
10346 }
10347
10348 /* Check for NO-OP cases. */
10349 const0 &= GET_MODE_MASK (mode);
10350 if (const0 == 0
10351 && (op0 == IOR || op0 == XOR || op0 == PLUS))
10352 op0 = UNKNOWN;
10353 else if (const0 == 0 && op0 == AND)
10354 op0 = SET;
10355 else if ((unsigned HOST_WIDE_INT) const0 == GET_MODE_MASK (mode)
10356 && op0 == AND)
10357 op0 = UNKNOWN;
10358
10359 *pop0 = op0;
10360
10361 /* ??? Slightly redundant with the above mask, but not entirely.
10362 Moving this above means we'd have to sign-extend the mode mask
10363 for the final test. */
10364 if (op0 != UNKNOWN && op0 != NEG)
10365 *pconst0 = trunc_int_for_mode (const0, mode);
10366
10367 return 1;
10368 }
10369 \f
10370 /* A helper to simplify_shift_const_1 to determine the mode we can perform
10371 the shift in. The original shift operation CODE is performed on OP in
10372 ORIG_MODE. Return the wider mode MODE if we can perform the operation
10373 in that mode. Return ORIG_MODE otherwise. We can also assume that the
10374 result of the shift is subject to operation OUTER_CODE with operand
10375 OUTER_CONST. */
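/* For example (illustrative): an LSHIFTRT requested in QImode can be done
   in SImode instead when nonzero_bits shows that OP has no bits set above
   the low eight, since the zeros shifted in from the left are then the
   same in either mode.  */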
10376
10377 static scalar_int_mode
10378 try_widen_shift_mode (enum rtx_code code, rtx op, int count,
10379 scalar_int_mode orig_mode, scalar_int_mode mode,
10380 enum rtx_code outer_code, HOST_WIDE_INT outer_const)
10381 {
10382 gcc_assert (GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (orig_mode));
10383
10384   /* In general we can't perform the shift in a wider mode for right shifts
           and rotates. */
10385 switch (code)
10386 {
10387 case ASHIFTRT:
10388 /* We can still widen if the bits brought in from the left are identical
10389 to the sign bit of ORIG_MODE. */
10390 if (num_sign_bit_copies (op, mode)
10391 > (unsigned) (GET_MODE_PRECISION (mode)
10392 - GET_MODE_PRECISION (orig_mode)))
10393 return mode;
10394 return orig_mode;
10395
10396 case LSHIFTRT:
10397 /* Similarly here but with zero bits. */
10398 if (HWI_COMPUTABLE_MODE_P (mode)
10399 && (nonzero_bits (op, mode) & ~GET_MODE_MASK (orig_mode)) == 0)
10400 return mode;
10401
10402 /* We can also widen if the bits brought in will be masked off. This
10403 operation is performed in ORIG_MODE. */
10404 if (outer_code == AND)
10405 {
10406 int care_bits = low_bitmask_len (orig_mode, outer_const);
10407
10408 if (care_bits >= 0
10409 && GET_MODE_PRECISION (orig_mode) - care_bits >= count)
10410 return mode;
10411 }
10412 /* fall through */
10413
10414 case ROTATE:
10415 return orig_mode;
10416
10417 case ROTATERT:
10418 gcc_unreachable ();
10419
10420 default:
10421 return mode;
10422 }
10423 }
10424
10425 /* Simplify a shift of VAROP by ORIG_COUNT bits. CODE says what kind
10426 of shift. The result of the shift is RESULT_MODE. Return NULL_RTX
10427 if we cannot simplify it. Otherwise, return a simplified value.
10428
10429 The shift is normally computed in the widest mode we find in VAROP, as
10430 long as it isn't a different number of words than RESULT_MODE. Exceptions
10431 are ASHIFTRT and ROTATE, which are always done in their original mode. */
10432
10433 static rtx
10434 simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
10435 rtx varop, int orig_count)
10436 {
10437 enum rtx_code orig_code = code;
10438 rtx orig_varop = varop;
10439 int count;
10440 machine_mode mode = result_mode;
10441 machine_mode shift_mode;
10442 scalar_int_mode tmode, inner_mode, int_mode, int_varop_mode, int_result_mode;
10443 unsigned int mode_words
10444 = (GET_MODE_SIZE (mode) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD;
10445 /* We form (outer_op (code varop count) (outer_const)). */
10446 enum rtx_code outer_op = UNKNOWN;
10447 HOST_WIDE_INT outer_const = 0;
10448 int complement_p = 0;
10449 rtx new_rtx, x;
10450
10451   /* Make sure to truncate the "natural" shift on the way in.  We don't
10452 want to do this inside the loop as it makes it more difficult to
10453 combine shifts. */
10454 if (SHIFT_COUNT_TRUNCATED)
10455 orig_count &= GET_MODE_UNIT_BITSIZE (mode) - 1;
10456
10457 /* If we were given an invalid count, don't do anything except exactly
10458 what was requested. */
10459
10460 if (orig_count < 0 || orig_count >= (int) GET_MODE_UNIT_PRECISION (mode))
10461 return NULL_RTX;
10462
10463 count = orig_count;
10464
10465 /* Unless one of the branches of the `if' in this loop does a `continue',
10466 we will `break' the loop after the `if'. */
10467
10468 while (count != 0)
10469 {
10470 /* If we have an operand of (clobber (const_int 0)), fail. */
10471 if (GET_CODE (varop) == CLOBBER)
10472 return NULL_RTX;
10473
10474 /* Convert ROTATERT to ROTATE. */
10475 if (code == ROTATERT)
10476 {
10477 unsigned int bitsize = GET_MODE_UNIT_PRECISION (result_mode);
10478 code = ROTATE;
10479 count = bitsize - count;
10480 }
10481
10482 shift_mode = result_mode;
10483 if (shift_mode != mode)
10484 {
10485 /* We only change the modes of scalar shifts. */
10486 int_mode = as_a <scalar_int_mode> (mode);
10487 int_result_mode = as_a <scalar_int_mode> (result_mode);
10488 shift_mode = try_widen_shift_mode (code, varop, count,
10489 int_result_mode, int_mode,
10490 outer_op, outer_const);
10491 }
10492
10493 scalar_int_mode shift_unit_mode
10494 = as_a <scalar_int_mode> (GET_MODE_INNER (shift_mode));
10495
10496 /* Handle cases where the count is greater than the size of the mode
10497      minus 1.  For ASHIFTRT, use the size minus one as the count (this can
10498 occur when simplifying (lshiftrt (ashiftrt ..))). For rotates,
10499 take the count modulo the size. For other shifts, the result is
10500 zero.
10501
10502 Since these shifts are being produced by the compiler by combining
10503 multiple operations, each of which are defined, we know what the
10504 result is supposed to be. */
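      /* For instance (illustrative, assuming a 32-bit SImode):
	 (lshiftrt:SI X (const_int 40)) shifts out every bit, so VAROP
	 becomes zero, while (ashiftrt:SI X (const_int 40)) is clamped
	 to a shift by 31.  */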
10505
10506 if (count > (GET_MODE_PRECISION (shift_unit_mode) - 1))
10507 {
10508 if (code == ASHIFTRT)
10509 count = GET_MODE_PRECISION (shift_unit_mode) - 1;
10510 else if (code == ROTATE || code == ROTATERT)
10511 count %= GET_MODE_PRECISION (shift_unit_mode);
10512 else
10513 {
10514 /* We can't simply return zero because there may be an
10515 outer op. */
10516 varop = const0_rtx;
10517 count = 0;
10518 break;
10519 }
10520 }
10521
10522 /* If we discovered we had to complement VAROP, leave. Making a NOT
10523 here would cause an infinite loop. */
10524 if (complement_p)
10525 break;
10526
10527 if (shift_mode == shift_unit_mode)
10528 {
10529 /* An arithmetic right shift of a quantity known to be -1 or 0
10530 is a no-op. */
10531 if (code == ASHIFTRT
10532 && (num_sign_bit_copies (varop, shift_unit_mode)
10533 == GET_MODE_PRECISION (shift_unit_mode)))
10534 {
10535 count = 0;
10536 break;
10537 }
10538
10539 /* If we are doing an arithmetic right shift and discarding all but
10540 the sign bit copies, this is equivalent to doing a shift by the
10541 bitsize minus one. Convert it into that shift because it will
10542 often allow other simplifications. */
10543
10544 if (code == ASHIFTRT
10545 && (count + num_sign_bit_copies (varop, shift_unit_mode)
10546 >= GET_MODE_PRECISION (shift_unit_mode)))
10547 count = GET_MODE_PRECISION (shift_unit_mode) - 1;
10548
10549 /* We simplify the tests below and elsewhere by converting
10550 ASHIFTRT to LSHIFTRT if we know the sign bit is clear.
10551 `make_compound_operation' will convert it to an ASHIFTRT for
10552 those machines (such as VAX) that don't have an LSHIFTRT. */
10553 if (code == ASHIFTRT
10554 && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
10555 && val_signbit_known_clear_p (shift_unit_mode,
10556 nonzero_bits (varop,
10557 shift_unit_mode)))
10558 code = LSHIFTRT;
10559
10560 if (((code == LSHIFTRT
10561 && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
10562 && !(nonzero_bits (varop, shift_unit_mode) >> count))
10563 || (code == ASHIFT
10564 && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
10565 && !((nonzero_bits (varop, shift_unit_mode) << count)
10566 & GET_MODE_MASK (shift_unit_mode))))
10567 && !side_effects_p (varop))
10568 varop = const0_rtx;
10569 }
10570
10571 switch (GET_CODE (varop))
10572 {
10573 case SIGN_EXTEND:
10574 case ZERO_EXTEND:
10575 case SIGN_EXTRACT:
10576 case ZERO_EXTRACT:
10577 new_rtx = expand_compound_operation (varop);
10578 if (new_rtx != varop)
10579 {
10580 varop = new_rtx;
10581 continue;
10582 }
10583 break;
10584
10585 case MEM:
10586 /* The following rules apply only to scalars. */
10587 if (shift_mode != shift_unit_mode)
10588 break;
10589 int_mode = as_a <scalar_int_mode> (mode);
10590
10591 /* If we have (xshiftrt (mem ...) C) and C is MODE_WIDTH
10592 minus the width of a smaller mode, we can do this with a
10593 SIGN_EXTEND or ZERO_EXTEND from the narrower memory location. */
10594 if ((code == ASHIFTRT || code == LSHIFTRT)
10595 && ! mode_dependent_address_p (XEXP (varop, 0),
10596 MEM_ADDR_SPACE (varop))
10597 && ! MEM_VOLATILE_P (varop)
10598 && (int_mode_for_size (GET_MODE_BITSIZE (int_mode) - count, 1)
10599 .exists (&tmode)))
10600 {
10601 new_rtx = adjust_address_nv (varop, tmode,
10602 BYTES_BIG_ENDIAN ? 0
10603 : count / BITS_PER_UNIT);
10604
10605 varop = gen_rtx_fmt_e (code == ASHIFTRT ? SIGN_EXTEND
10606 : ZERO_EXTEND, int_mode, new_rtx);
10607 count = 0;
10608 continue;
10609 }
10610 break;
10611
10612 case SUBREG:
10613 /* The following rules apply only to scalars. */
10614 if (shift_mode != shift_unit_mode)
10615 break;
10616 int_mode = as_a <scalar_int_mode> (mode);
10617 int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
10618
10619 /* If VAROP is a SUBREG, strip it as long as the inner operand has
10620 the same number of words as what we've seen so far. Then store
10621 the widest mode in MODE. */
10622 if (subreg_lowpart_p (varop)
10623 && is_int_mode (GET_MODE (SUBREG_REG (varop)), &inner_mode)
10624 && GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (int_varop_mode)
10625 && (unsigned int) ((GET_MODE_SIZE (inner_mode)
10626 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
10627 == mode_words
10628 && GET_MODE_CLASS (int_varop_mode) == MODE_INT)
10629 {
10630 varop = SUBREG_REG (varop);
10631 if (GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (int_mode))
10632 mode = inner_mode;
10633 continue;
10634 }
10635 break;
10636
10637 case MULT:
10638 /* Some machines use MULT instead of ASHIFT because MULT
10639 is cheaper. But it is still better on those machines to
10640 merge two shifts into one. */
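	  /* E.g. (illustrative) (mult:SI X (const_int 8)) is rewritten here
	     as (ashift:SI X (const_int 3)) before the shifts are merged.  */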
10641 if (CONST_INT_P (XEXP (varop, 1))
10642 && exact_log2 (UINTVAL (XEXP (varop, 1))) >= 0)
10643 {
10644 varop
10645 = simplify_gen_binary (ASHIFT, GET_MODE (varop),
10646 XEXP (varop, 0),
10647 GEN_INT (exact_log2 (
10648 UINTVAL (XEXP (varop, 1)))));
10649 continue;
10650 }
10651 break;
10652
10653 case UDIV:
10654 /* Similar, for when divides are cheaper. */
10655 if (CONST_INT_P (XEXP (varop, 1))
10656 && exact_log2 (UINTVAL (XEXP (varop, 1))) >= 0)
10657 {
10658 varop
10659 = simplify_gen_binary (LSHIFTRT, GET_MODE (varop),
10660 XEXP (varop, 0),
10661 GEN_INT (exact_log2 (
10662 UINTVAL (XEXP (varop, 1)))));
10663 continue;
10664 }
10665 break;
10666
10667 case ASHIFTRT:
10668 /* If we are extracting just the sign bit of an arithmetic
10669 right shift, that shift is not needed. However, the sign
10670 bit of a wider mode may be different from what would be
10671 interpreted as the sign bit in a narrower mode, so, if
10672 the result is narrower, don't discard the shift. */
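	  /* E.g. (illustrative, 32-bit SImode):
	     (lshiftrt:SI (ashiftrt:SI X (const_int 7)) (const_int 31))
	     extracts only the sign bit, which the inner shift does not
	     change, so it becomes (lshiftrt:SI X (const_int 31)).  */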
10673 if (code == LSHIFTRT
10674 && count == (GET_MODE_UNIT_BITSIZE (result_mode) - 1)
10675 && (GET_MODE_UNIT_BITSIZE (result_mode)
10676 >= GET_MODE_UNIT_BITSIZE (GET_MODE (varop))))
10677 {
10678 varop = XEXP (varop, 0);
10679 continue;
10680 }
10681
10682 /* fall through */
10683
10684 case LSHIFTRT:
10685 case ASHIFT:
10686 case ROTATE:
10687 /* The following rules apply only to scalars. */
10688 if (shift_mode != shift_unit_mode)
10689 break;
10690 int_mode = as_a <scalar_int_mode> (mode);
10691 int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
10692 int_result_mode = as_a <scalar_int_mode> (result_mode);
10693
10694 /* Here we have two nested shifts. The result is usually the
10695 AND of a new shift with a mask. We compute the result below. */
10696 if (CONST_INT_P (XEXP (varop, 1))
10697 && INTVAL (XEXP (varop, 1)) >= 0
10698 && INTVAL (XEXP (varop, 1)) < GET_MODE_PRECISION (int_varop_mode)
10699 && HWI_COMPUTABLE_MODE_P (int_result_mode)
10700 && HWI_COMPUTABLE_MODE_P (int_mode))
10701 {
10702 enum rtx_code first_code = GET_CODE (varop);
10703 unsigned int first_count = INTVAL (XEXP (varop, 1));
10704 unsigned HOST_WIDE_INT mask;
10705 rtx mask_rtx;
10706
10707 /* We have one common special case. We can't do any merging if
10708 the inner code is an ASHIFTRT of a smaller mode. However, if
10709 we have (ashift:M1 (subreg:M1 (ashiftrt:M2 FOO C1) 0) C2)
10710 with C2 == GET_MODE_BITSIZE (M1) - GET_MODE_BITSIZE (M2),
10711 we can convert it to
10712 (ashiftrt:M1 (ashift:M1 (and:M1 (subreg:M1 FOO 0) C3) C2) C1).
10713 This simplifies certain SIGN_EXTEND operations. */
10714 if (code == ASHIFT && first_code == ASHIFTRT
10715 && count == (GET_MODE_PRECISION (int_result_mode)
10716 - GET_MODE_PRECISION (int_varop_mode)))
10717 {
10718 /* C3 has the low-order C1 bits zero. */
10719
10720 mask = GET_MODE_MASK (int_mode)
10721 & ~((HOST_WIDE_INT_1U << first_count) - 1);
10722
10723 varop = simplify_and_const_int (NULL_RTX, int_result_mode,
10724 XEXP (varop, 0), mask);
10725 varop = simplify_shift_const (NULL_RTX, ASHIFT,
10726 int_result_mode, varop, count);
10727 count = first_count;
10728 code = ASHIFTRT;
10729 continue;
10730 }
10731
10732 /* If this was (ashiftrt (ashift foo C1) C2) and FOO has more
10733 than C1 high-order bits equal to the sign bit, we can convert
10734 this to either an ASHIFT or an ASHIFTRT depending on the
10735 two counts.
10736
10737 We cannot do this if VAROP's mode is not SHIFT_UNIT_MODE. */
10738
10739 if (code == ASHIFTRT && first_code == ASHIFT
10740 && int_varop_mode == shift_unit_mode
10741 && (num_sign_bit_copies (XEXP (varop, 0), shift_unit_mode)
10742 > first_count))
10743 {
10744 varop = XEXP (varop, 0);
10745 count -= first_count;
10746 if (count < 0)
10747 {
10748 count = -count;
10749 code = ASHIFT;
10750 }
10751
10752 continue;
10753 }
10754
10755 /* There are some cases we can't do. If CODE is ASHIFTRT,
10756 we can only do this if FIRST_CODE is also ASHIFTRT.
10757
10758 We can't do the case when CODE is ROTATE and FIRST_CODE is
10759 ASHIFTRT.
10760
10761 If the mode of this shift is not the mode of the outer shift,
10762 we can't do this if either shift is a right shift or ROTATE.
10763
10764 Finally, we can't do any of these if the mode is too wide
10765 unless the codes are the same.
10766
10767 Handle the case where the shift codes are the same
10768 first. */
10769
10770 if (code == first_code)
10771 {
10772 if (int_varop_mode != int_result_mode
10773 && (code == ASHIFTRT || code == LSHIFTRT
10774 || code == ROTATE))
10775 break;
10776
10777 count += first_count;
10778 varop = XEXP (varop, 0);
10779 continue;
10780 }
10781
10782 if (code == ASHIFTRT
10783 || (code == ROTATE && first_code == ASHIFTRT)
10784 || GET_MODE_PRECISION (int_mode) > HOST_BITS_PER_WIDE_INT
10785 || (int_varop_mode != int_result_mode
10786 && (first_code == ASHIFTRT || first_code == LSHIFTRT
10787 || first_code == ROTATE
10788 || code == ROTATE)))
10789 break;
10790
10791 /* To compute the mask to apply after the shift, shift the
10792 nonzero bits of the inner shift the same way the
10793 outer shift will. */
10794
10795 mask_rtx = gen_int_mode (nonzero_bits (varop, int_varop_mode),
10796 int_result_mode);
10797
10798 mask_rtx
10799 = simplify_const_binary_operation (code, int_result_mode,
10800 mask_rtx, GEN_INT (count));
10801
10802 /* Give up if we can't compute an outer operation to use. */
10803 if (mask_rtx == 0
10804 || !CONST_INT_P (mask_rtx)
10805 || ! merge_outer_ops (&outer_op, &outer_const, AND,
10806 INTVAL (mask_rtx),
10807 int_result_mode, &complement_p))
10808 break;
10809
10810 /* If the shifts are in the same direction, we add the
10811 counts. Otherwise, we subtract them. */
10812 if ((code == ASHIFTRT || code == LSHIFTRT)
10813 == (first_code == ASHIFTRT || first_code == LSHIFTRT))
10814 count += first_count;
10815 else
10816 count -= first_count;
10817
10818 /* If COUNT is positive, the new shift is usually CODE,
10819 except for the two exceptions below, in which case it is
10820 FIRST_CODE. If the count is negative, FIRST_CODE should
10821 	     always be used.  */
10822 if (count > 0
10823 && ((first_code == ROTATE && code == ASHIFT)
10824 || (first_code == ASHIFTRT && code == LSHIFTRT)))
10825 code = first_code;
10826 else if (count < 0)
10827 code = first_code, count = -count;
10828
10829 varop = XEXP (varop, 0);
10830 continue;
10831 }
10832
10833 /* If we have (A << B << C) for any shift, we can convert this to
10834 (A << C << B). This wins if A is a constant. Only try this if
10835 B is not a constant. */
10836
10837 else if (GET_CODE (varop) == code
10838 && CONST_INT_P (XEXP (varop, 0))
10839 && !CONST_INT_P (XEXP (varop, 1)))
10840 {
10841 /* For ((unsigned) (cstULL >> count)) >> cst2 we have to make
10842 sure the result will be masked. See PR70222. */
10843 if (code == LSHIFTRT
10844 && int_mode != int_result_mode
10845 && !merge_outer_ops (&outer_op, &outer_const, AND,
10846 GET_MODE_MASK (int_result_mode)
10847 >> orig_count, int_result_mode,
10848 &complement_p))
10849 break;
10850 /* For ((int) (cstLL >> count)) >> cst2 just give up. Queuing
10851 up outer sign extension (often left and right shift) is
10852 hardly more efficient than the original. See PR70429. */
10853 if (code == ASHIFTRT && int_mode != int_result_mode)
10854 break;
10855
10856 rtx new_rtx = simplify_const_binary_operation (code, int_mode,
10857 XEXP (varop, 0),
10858 GEN_INT (count));
10859 varop = gen_rtx_fmt_ee (code, int_mode, new_rtx, XEXP (varop, 1));
10860 count = 0;
10861 continue;
10862 }
10863 break;
10864
10865 case NOT:
10866 /* The following rules apply only to scalars. */
10867 if (shift_mode != shift_unit_mode)
10868 break;
10869
10870 /* Make this fit the case below. */
10871 varop = gen_rtx_XOR (mode, XEXP (varop, 0), constm1_rtx);
10872 continue;
10873
10874 case IOR:
10875 case AND:
10876 case XOR:
10877 /* The following rules apply only to scalars. */
10878 if (shift_mode != shift_unit_mode)
10879 break;
10880 int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
10881 int_result_mode = as_a <scalar_int_mode> (result_mode);
10882
10883 /* If we have (xshiftrt (ior (plus X (const_int -1)) X) C)
10884 with C the size of VAROP - 1 and the shift is logical if
10885 STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
10886 we have an (le X 0) operation. If we have an arithmetic shift
10887 and STORE_FLAG_VALUE is 1 or we have a logical shift with
10888 STORE_FLAG_VALUE of -1, we have a (neg (le X 0)) operation. */
10889
10890 if (GET_CODE (varop) == IOR && GET_CODE (XEXP (varop, 0)) == PLUS
10891 && XEXP (XEXP (varop, 0), 1) == constm1_rtx
10892 && (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
10893 && (code == LSHIFTRT || code == ASHIFTRT)
10894 && count == (GET_MODE_PRECISION (int_varop_mode) - 1)
10895 && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
10896 {
10897 count = 0;
10898 varop = gen_rtx_LE (int_varop_mode, XEXP (varop, 1),
10899 const0_rtx);
10900
10901 if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
10902 varop = gen_rtx_NEG (int_varop_mode, varop);
10903
10904 continue;
10905 }
10906
10907 /* If we have (shift (logical)), move the logical to the outside
10908 to allow it to possibly combine with another logical and the
10909 shift to combine with another shift. This also canonicalizes to
10910 what a ZERO_EXTRACT looks like. Also, some machines have
10911 (and (shift)) insns. */
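	  /* E.g. (illustrative):
	     (lshiftrt:SI (and:SI X (const_int 0xff0)) (const_int 4))
	     becomes a shift of X with an outer AND of 0xff queued via
	     merge_outer_ops, i.e. effectively
	     (and:SI (lshiftrt:SI X (const_int 4)) (const_int 0xff)).  */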
10912
10913 if (CONST_INT_P (XEXP (varop, 1))
10914 /* We can't do this if we have (ashiftrt (xor)) and the
10915 constant has its sign bit set in shift_unit_mode with
10916 shift_unit_mode wider than result_mode. */
10917 && !(code == ASHIFTRT && GET_CODE (varop) == XOR
10918 && int_result_mode != shift_unit_mode
10919 && 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
10920 shift_unit_mode))
10921 && (new_rtx = simplify_const_binary_operation
10922 (code, int_result_mode,
10923 gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
10924 GEN_INT (count))) != 0
10925 && CONST_INT_P (new_rtx)
10926 && merge_outer_ops (&outer_op, &outer_const, GET_CODE (varop),
10927 INTVAL (new_rtx), int_result_mode,
10928 &complement_p))
10929 {
10930 varop = XEXP (varop, 0);
10931 continue;
10932 }
10933
10934 /* If we can't do that, try to simplify the shift in each arm of the
10935 logical expression, make a new logical expression, and apply
10936 the inverse distributive law. This also can't be done for
10937 (ashiftrt (xor)) where we've widened the shift and the constant
10938 changes the sign bit. */
10939 if (CONST_INT_P (XEXP (varop, 1))
10940 && !(code == ASHIFTRT && GET_CODE (varop) == XOR
10941 && int_result_mode != shift_unit_mode
10942 && 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
10943 shift_unit_mode)))
10944 {
10945 rtx lhs = simplify_shift_const (NULL_RTX, code, shift_unit_mode,
10946 XEXP (varop, 0), count);
10947 rtx rhs = simplify_shift_const (NULL_RTX, code, shift_unit_mode,
10948 XEXP (varop, 1), count);
10949
10950 varop = simplify_gen_binary (GET_CODE (varop), shift_unit_mode,
10951 lhs, rhs);
10952 varop = apply_distributive_law (varop);
10953
10954 count = 0;
10955 continue;
10956 }
10957 break;
10958
10959 case EQ:
10960 /* The following rules apply only to scalars. */
10961 if (shift_mode != shift_unit_mode)
10962 break;
10963 int_result_mode = as_a <scalar_int_mode> (result_mode);
10964
10965 /* Convert (lshiftrt (eq FOO 0) C) to (xor FOO 1) if STORE_FLAG_VALUE
10966 says that the sign bit can be tested, FOO has mode MODE, C is
10967      GET_MODE_PRECISION (MODE) - 1, and only the low-order bit of FOO may
10968      be nonzero. */
10969 if (code == LSHIFTRT
10970 && XEXP (varop, 1) == const0_rtx
10971 && GET_MODE (XEXP (varop, 0)) == int_result_mode
10972 && count == (GET_MODE_PRECISION (int_result_mode) - 1)
10973 && HWI_COMPUTABLE_MODE_P (int_result_mode)
10974 && STORE_FLAG_VALUE == -1
10975 && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1
10976 && merge_outer_ops (&outer_op, &outer_const, XOR, 1,
10977 int_result_mode, &complement_p))
10978 {
10979 varop = XEXP (varop, 0);
10980 count = 0;
10981 continue;
10982 }
10983 break;
10984
10985 case NEG:
10986 /* The following rules apply only to scalars. */
10987 if (shift_mode != shift_unit_mode)
10988 break;
10989 int_result_mode = as_a <scalar_int_mode> (result_mode);
10990
10991 /* (lshiftrt (neg A) C) where A is either 0 or 1 and C is one less
10992 than the number of bits in the mode is equivalent to A. */
10993 if (code == LSHIFTRT
10994 && count == (GET_MODE_PRECISION (int_result_mode) - 1)
10995 && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1)
10996 {
10997 varop = XEXP (varop, 0);
10998 count = 0;
10999 continue;
11000 }
11001
11002 /* NEG commutes with ASHIFT since it is multiplication. Move the
11003 NEG outside to allow shifts to combine. */
11004 if (code == ASHIFT
11005 && merge_outer_ops (&outer_op, &outer_const, NEG, 0,
11006 int_result_mode, &complement_p))
11007 {
11008 varop = XEXP (varop, 0);
11009 continue;
11010 }
11011 break;
11012
11013 case PLUS:
11014 /* The following rules apply only to scalars. */
11015 if (shift_mode != shift_unit_mode)
11016 break;
11017 int_result_mode = as_a <scalar_int_mode> (result_mode);
11018
11019 /* (lshiftrt (plus A -1) C) where A is either 0 or 1 and C
11020 is one less than the number of bits in the mode is
11021 equivalent to (xor A 1). */
11022 if (code == LSHIFTRT
11023 && count == (GET_MODE_PRECISION (int_result_mode) - 1)
11024 && XEXP (varop, 1) == constm1_rtx
11025 && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1
11026 && merge_outer_ops (&outer_op, &outer_const, XOR, 1,
11027 int_result_mode, &complement_p))
11028 {
11029 count = 0;
11030 varop = XEXP (varop, 0);
11031 continue;
11032 }
11033
11034 /* If we have (xshiftrt (plus FOO BAR) C), and the only bits
11035 that might be nonzero in BAR are those being shifted out and those
11036 bits are known zero in FOO, we can replace the PLUS with FOO.
11037 Similarly in the other operand order. This code occurs when
11038 we are computing the size of a variable-size array. */
11039
11040 if ((code == ASHIFTRT || code == LSHIFTRT)
11041 && count < HOST_BITS_PER_WIDE_INT
11042 && nonzero_bits (XEXP (varop, 1), int_result_mode) >> count == 0
11043 && (nonzero_bits (XEXP (varop, 1), int_result_mode)
11044 & nonzero_bits (XEXP (varop, 0), int_result_mode)) == 0)
11045 {
11046 varop = XEXP (varop, 0);
11047 continue;
11048 }
11049 else if ((code == ASHIFTRT || code == LSHIFTRT)
11050 && count < HOST_BITS_PER_WIDE_INT
11051 && HWI_COMPUTABLE_MODE_P (int_result_mode)
11052 && 0 == (nonzero_bits (XEXP (varop, 0), int_result_mode)
11053 >> count)
11054 && 0 == (nonzero_bits (XEXP (varop, 0), int_result_mode)
11055 & nonzero_bits (XEXP (varop, 1), int_result_mode)))
11056 {
11057 varop = XEXP (varop, 1);
11058 continue;
11059 }
11060
11061 /* (ashift (plus foo C) N) is (plus (ashift foo N) C'). */
11062 if (code == ASHIFT
11063 && CONST_INT_P (XEXP (varop, 1))
11064 && (new_rtx = simplify_const_binary_operation
11065 (ASHIFT, int_result_mode,
11066 gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
11067 GEN_INT (count))) != 0
11068 && CONST_INT_P (new_rtx)
11069 && merge_outer_ops (&outer_op, &outer_const, PLUS,
11070 INTVAL (new_rtx), int_result_mode,
11071 &complement_p))
11072 {
11073 varop = XEXP (varop, 0);
11074 continue;
11075 }
11076
11077 	  /* Check for 'PLUS signbit', which is the canonical form of 'XOR
11078 	     signbit', and attempt to change the PLUS to an XOR and move it to
11079 	     the outer operation, as is done above for (shift (logical)) in the
11080 	     AND/IOR/XOR case.  See the logical handling above for the
11081 	     reasoning behind doing so. */
11082 if (code == LSHIFTRT
11083 && CONST_INT_P (XEXP (varop, 1))
11084 && mode_signbit_p (int_result_mode, XEXP (varop, 1))
11085 && (new_rtx = simplify_const_binary_operation
11086 (code, int_result_mode,
11087 gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
11088 GEN_INT (count))) != 0
11089 && CONST_INT_P (new_rtx)
11090 && merge_outer_ops (&outer_op, &outer_const, XOR,
11091 INTVAL (new_rtx), int_result_mode,
11092 &complement_p))
11093 {
11094 varop = XEXP (varop, 0);
11095 continue;
11096 }
11097
11098 break;
11099
11100 case MINUS:
11101 /* The following rules apply only to scalars. */
11102 if (shift_mode != shift_unit_mode)
11103 break;
11104 int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
11105
11106 	  /* If we have (xshiftrt (minus (ashiftrt X C) X) C)
11107 with C the size of VAROP - 1 and the shift is logical if
11108 STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
11109 we have a (gt X 0) operation. If the shift is arithmetic with
11110 STORE_FLAG_VALUE of 1 or logical with STORE_FLAG_VALUE == -1,
11111 we have a (neg (gt X 0)) operation. */
11112
11113 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
11114 && GET_CODE (XEXP (varop, 0)) == ASHIFTRT
11115 && count == (GET_MODE_PRECISION (int_varop_mode) - 1)
11116 && (code == LSHIFTRT || code == ASHIFTRT)
11117 && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
11118 && INTVAL (XEXP (XEXP (varop, 0), 1)) == count
11119 && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
11120 {
11121 count = 0;
11122 varop = gen_rtx_GT (int_varop_mode, XEXP (varop, 1),
11123 const0_rtx);
11124
11125 if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
11126 varop = gen_rtx_NEG (int_varop_mode, varop);
11127
11128 continue;
11129 }
11130 break;
11131
11132 case TRUNCATE:
11133 /* Change (lshiftrt (truncate (lshiftrt))) to (truncate (lshiftrt))
11134 if the truncate does not affect the value. */
11135 if (code == LSHIFTRT
11136 && GET_CODE (XEXP (varop, 0)) == LSHIFTRT
11137 && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
11138 && (INTVAL (XEXP (XEXP (varop, 0), 1))
11139 >= (GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (varop, 0)))
11140 - GET_MODE_UNIT_PRECISION (GET_MODE (varop)))))
11141 {
11142 rtx varop_inner = XEXP (varop, 0);
11143
11144 varop_inner
11145 = gen_rtx_LSHIFTRT (GET_MODE (varop_inner),
11146 XEXP (varop_inner, 0),
11147 GEN_INT
11148 (count + INTVAL (XEXP (varop_inner, 1))));
11149 varop = gen_rtx_TRUNCATE (GET_MODE (varop), varop_inner);
11150 count = 0;
11151 continue;
11152 }
11153 break;
11154
11155 default:
11156 break;
11157 }
11158
11159 break;
11160 }
11161
11162 shift_mode = result_mode;
11163 if (shift_mode != mode)
11164 {
11165 /* We only change the modes of scalar shifts. */
11166 int_mode = as_a <scalar_int_mode> (mode);
11167 int_result_mode = as_a <scalar_int_mode> (result_mode);
11168 shift_mode = try_widen_shift_mode (code, varop, count, int_result_mode,
11169 int_mode, outer_op, outer_const);
11170 }
11171
11172 /* We have now finished analyzing the shift. The result should be
11173 a shift of type CODE with SHIFT_MODE shifting VAROP COUNT places. If
11174 OUTER_OP is non-UNKNOWN, it is an operation that needs to be applied
11175 to the result of the shift. OUTER_CONST is the relevant constant,
11176 but we must turn off all bits turned off in the shift. */
11177
11178 if (outer_op == UNKNOWN
11179 && orig_code == code && orig_count == count
11180 && varop == orig_varop
11181 && shift_mode == GET_MODE (varop))
11182 return NULL_RTX;
11183
11184 /* Make a SUBREG if necessary. If we can't make it, fail. */
11185 varop = gen_lowpart (shift_mode, varop);
11186 if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
11187 return NULL_RTX;
11188
11189 /* If we have an outer operation and we just made a shift, it is
11190 possible that we could have simplified the shift were it not
11191 for the outer operation. So try to do the simplification
11192 recursively. */
11193
11194 if (outer_op != UNKNOWN)
11195 x = simplify_shift_const_1 (code, shift_mode, varop, count);
11196 else
11197 x = NULL_RTX;
11198
11199 if (x == NULL_RTX)
11200 x = simplify_gen_binary (code, shift_mode, varop, GEN_INT (count));
11201
11202 /* If we were doing an LSHIFTRT in a wider mode than it was originally,
11203 turn off all the bits that the shift would have turned off. */
11204 if (orig_code == LSHIFTRT && result_mode != shift_mode)
11205 /* We only change the modes of scalar shifts. */
11206 x = simplify_and_const_int (NULL_RTX, as_a <scalar_int_mode> (shift_mode),
11207 x, GET_MODE_MASK (result_mode) >> orig_count);
11208
11209 /* Do the remainder of the processing in RESULT_MODE. */
11210 x = gen_lowpart_or_truncate (result_mode, x);
11211
11212 /* If COMPLEMENT_P is set, we have to complement X before doing the outer
11213 operation. */
11214 if (complement_p)
11215 x = simplify_gen_unary (NOT, result_mode, x, result_mode);
11216
11217 if (outer_op != UNKNOWN)
11218 {
11219 int_result_mode = as_a <scalar_int_mode> (result_mode);
11220
11221 if (GET_RTX_CLASS (outer_op) != RTX_UNARY
11222 && GET_MODE_PRECISION (int_result_mode) < HOST_BITS_PER_WIDE_INT)
11223 outer_const = trunc_int_for_mode (outer_const, int_result_mode);
11224
11225 if (outer_op == AND)
11226 x = simplify_and_const_int (NULL_RTX, int_result_mode, x, outer_const);
11227 else if (outer_op == SET)
11228 {
11229 /* This means that we have determined that the result is
11230 equivalent to a constant. This should be rare. */
11231 if (!side_effects_p (x))
11232 x = GEN_INT (outer_const);
11233 }
11234 else if (GET_RTX_CLASS (outer_op) == RTX_UNARY)
11235 x = simplify_gen_unary (outer_op, int_result_mode, x, int_result_mode);
11236 else
11237 x = simplify_gen_binary (outer_op, int_result_mode, x,
11238 GEN_INT (outer_const));
11239 }
11240
11241 return x;
11242 }
11243
11244 /* Simplify a shift of VAROP by COUNT bits. CODE says what kind of shift.
11245 The result of the shift is RESULT_MODE. If we cannot simplify it,
11246 return X or, if it is NULL, synthesize the expression with
11247 simplify_gen_binary. Otherwise, return a simplified value.
11248
11249 The shift is normally computed in the widest mode we find in VAROP, as
11250 long as it isn't a different number of words than RESULT_MODE. Exceptions
11251 are ASHIFTRT and ROTATE, which are always done in their original mode. */
11252
11253 static rtx
11254 simplify_shift_const (rtx x, enum rtx_code code, machine_mode result_mode,
11255 rtx varop, int count)
11256 {
11257 rtx tem = simplify_shift_const_1 (code, result_mode, varop, count);
11258 if (tem)
11259 return tem;
11260
11261 if (!x)
11262 x = simplify_gen_binary (code, GET_MODE (varop), varop, GEN_INT (count));
11263 if (GET_MODE (x) != result_mode)
11264 x = gen_lowpart (result_mode, x);
11265 return x;
11266 }
11267
11268 \f
11269 /* A subroutine of recog_for_combine. See there for arguments and
11270 return value. */
11271
11272 static int
11273 recog_for_combine_1 (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
11274 {
11275 rtx pat = *pnewpat;
11276 rtx pat_without_clobbers;
11277 int insn_code_number;
11278 int num_clobbers_to_add = 0;
11279 int i;
11280 rtx notes = NULL_RTX;
11281 rtx old_notes, old_pat;
11282 int old_icode;
11283
11284 /* If PAT is a PARALLEL, check to see if it contains the CLOBBER
11285 we use to indicate that something didn't match. If we find such a
11286 thing, force rejection. */
11287 if (GET_CODE (pat) == PARALLEL)
11288 for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
11289 if (GET_CODE (XVECEXP (pat, 0, i)) == CLOBBER
11290 && XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
11291 return -1;
11292
11293 old_pat = PATTERN (insn);
11294 old_notes = REG_NOTES (insn);
11295 PATTERN (insn) = pat;
11296 REG_NOTES (insn) = NULL_RTX;
11297
11298 insn_code_number = recog (pat, insn, &num_clobbers_to_add);
11299 if (dump_file && (dump_flags & TDF_DETAILS))
11300 {
11301 if (insn_code_number < 0)
11302 fputs ("Failed to match this instruction:\n", dump_file);
11303 else
11304 fputs ("Successfully matched this instruction:\n", dump_file);
11305 print_rtl_single (dump_file, pat);
11306 }
11307
11308   /* If it isn't recognized, there is the possibility that we previously had an insn
11309 that clobbered some register as a side effect, but the combined
11310 insn doesn't need to do that. So try once more without the clobbers
11311 unless this represents an ASM insn. */
11312
11313 if (insn_code_number < 0 && ! check_asm_operands (pat)
11314 && GET_CODE (pat) == PARALLEL)
11315 {
11316 int pos;
11317
11318 for (pos = 0, i = 0; i < XVECLEN (pat, 0); i++)
11319 if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER)
11320 {
11321 if (i != pos)
11322 SUBST (XVECEXP (pat, 0, pos), XVECEXP (pat, 0, i));
11323 pos++;
11324 }
11325
11326 SUBST_INT (XVECLEN (pat, 0), pos);
11327
11328 if (pos == 1)
11329 pat = XVECEXP (pat, 0, 0);
11330
11331 PATTERN (insn) = pat;
11332 insn_code_number = recog (pat, insn, &num_clobbers_to_add);
11333 if (dump_file && (dump_flags & TDF_DETAILS))
11334 {
11335 if (insn_code_number < 0)
11336 fputs ("Failed to match this instruction:\n", dump_file);
11337 else
11338 fputs ("Successfully matched this instruction:\n", dump_file);
11339 print_rtl_single (dump_file, pat);
11340 }
11341 }
11342
11343 pat_without_clobbers = pat;
11344
11345 PATTERN (insn) = old_pat;
11346 REG_NOTES (insn) = old_notes;
11347
11348   /* Recognize all noop sets; these will be killed by a follow-up pass. */
11349 if (insn_code_number < 0 && GET_CODE (pat) == SET && set_noop_p (pat))
11350 insn_code_number = NOOP_MOVE_INSN_CODE, num_clobbers_to_add = 0;
11351
11352   /* If we had any clobbers to add, make a new pattern that contains
11353 them. Then check to make sure that all of them are dead. */
11354 if (num_clobbers_to_add)
11355 {
11356 rtx newpat = gen_rtx_PARALLEL (VOIDmode,
11357 rtvec_alloc (GET_CODE (pat) == PARALLEL
11358 ? (XVECLEN (pat, 0)
11359 + num_clobbers_to_add)
11360 : num_clobbers_to_add + 1));
11361
11362 if (GET_CODE (pat) == PARALLEL)
11363 for (i = 0; i < XVECLEN (pat, 0); i++)
11364 XVECEXP (newpat, 0, i) = XVECEXP (pat, 0, i);
11365 else
11366 XVECEXP (newpat, 0, 0) = pat;
11367
11368 add_clobbers (newpat, insn_code_number);
11369
11370 for (i = XVECLEN (newpat, 0) - num_clobbers_to_add;
11371 i < XVECLEN (newpat, 0); i++)
11372 {
11373 if (REG_P (XEXP (XVECEXP (newpat, 0, i), 0))
11374 && ! reg_dead_at_p (XEXP (XVECEXP (newpat, 0, i), 0), insn))
11375 return -1;
11376 if (GET_CODE (XEXP (XVECEXP (newpat, 0, i), 0)) != SCRATCH)
11377 {
11378 gcc_assert (REG_P (XEXP (XVECEXP (newpat, 0, i), 0)));
11379 notes = alloc_reg_note (REG_UNUSED,
11380 XEXP (XVECEXP (newpat, 0, i), 0), notes);
11381 }
11382 }
11383 pat = newpat;
11384 }
11385
11386 if (insn_code_number >= 0
11387 && insn_code_number != NOOP_MOVE_INSN_CODE)
11388 {
11389 old_pat = PATTERN (insn);
11390 old_notes = REG_NOTES (insn);
11391 old_icode = INSN_CODE (insn);
11392 PATTERN (insn) = pat;
11393 REG_NOTES (insn) = notes;
11394 INSN_CODE (insn) = insn_code_number;
11395
11396 /* Allow targets to reject combined insn. */
11397 if (!targetm.legitimate_combined_insn (insn))
11398 {
11399 if (dump_file && (dump_flags & TDF_DETAILS))
11400 	    fputs ("Instruction not appropriate for target.\n",
11401 dump_file);
11402
11403 /* Callers expect recog_for_combine to strip
11404 clobbers from the pattern on failure. */
11405 pat = pat_without_clobbers;
11406 notes = NULL_RTX;
11407
11408 insn_code_number = -1;
11409 }
11410
11411 PATTERN (insn) = old_pat;
11412 REG_NOTES (insn) = old_notes;
11413 INSN_CODE (insn) = old_icode;
11414 }
11415
11416 *pnewpat = pat;
11417 *pnotes = notes;
11418
11419 return insn_code_number;
11420 }
11421
11422 /* Change every ZERO_EXTRACT and ZERO_EXTEND of a SUBREG that can be
11423 expressed as an AND and maybe an LSHIFTRT, to that formulation.
11424 Return whether anything was so changed. */
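/* For instance (illustrative; assumes !BITS_BIG_ENDIAN and a 32-bit SImode
   register R), (zero_extract:SI (reg:SI R) (const_int 8) (const_int 4))
   becomes (and:SI (lshiftrt:SI (reg:SI R) (const_int 4)) (const_int 255)).  */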
11425
11426 static bool
11427 change_zero_ext (rtx pat)
11428 {
11429 bool changed = false;
11430 rtx *src = &SET_SRC (pat);
11431
11432 subrtx_ptr_iterator::array_type array;
11433 FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST)
11434 {
11435 rtx x = **iter;
11436 scalar_int_mode mode, inner_mode;
11437 if (!is_a <scalar_int_mode> (GET_MODE (x), &mode))
11438 continue;
11439 int size;
11440
11441 if (GET_CODE (x) == ZERO_EXTRACT
11442 && CONST_INT_P (XEXP (x, 1))
11443 && CONST_INT_P (XEXP (x, 2))
11444 && is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode)
11445 && GET_MODE_PRECISION (inner_mode) <= GET_MODE_PRECISION (mode))
11446 {
11447 size = INTVAL (XEXP (x, 1));
11448
11449 int start = INTVAL (XEXP (x, 2));
11450 if (BITS_BIG_ENDIAN)
11451 start = GET_MODE_PRECISION (inner_mode) - size - start;
11452
11453 if (start)
11454 x = gen_rtx_LSHIFTRT (inner_mode, XEXP (x, 0), GEN_INT (start));
11455 else
11456 x = XEXP (x, 0);
11457 if (mode != inner_mode)
11458 x = gen_lowpart_SUBREG (mode, x);
11459 }
11460 else if (GET_CODE (x) == ZERO_EXTEND
11461 && GET_CODE (XEXP (x, 0)) == SUBREG
11462 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (XEXP (x, 0))))
11463 && !paradoxical_subreg_p (XEXP (x, 0))
11464 && subreg_lowpart_p (XEXP (x, 0)))
11465 {
11466 inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
11467 size = GET_MODE_PRECISION (inner_mode);
11468 x = SUBREG_REG (XEXP (x, 0));
11469 if (GET_MODE (x) != mode)
11470 x = gen_lowpart_SUBREG (mode, x);
11471 }
11472 else if (GET_CODE (x) == ZERO_EXTEND
11473 && REG_P (XEXP (x, 0))
11474 && HARD_REGISTER_P (XEXP (x, 0))
11475 && can_change_dest_mode (XEXP (x, 0), 0, mode))
11476 {
11477 inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
11478 size = GET_MODE_PRECISION (inner_mode);
11479 x = gen_rtx_REG (mode, REGNO (XEXP (x, 0)));
11480 }
11481 else
11482 continue;
11483
11484 if (!(GET_CODE (x) == LSHIFTRT
11485 && CONST_INT_P (XEXP (x, 1))
11486 && size + INTVAL (XEXP (x, 1)) == GET_MODE_PRECISION (mode)))
11487 {
11488 wide_int mask = wi::mask (size, false, GET_MODE_PRECISION (mode));
11489 x = gen_rtx_AND (mode, x, immed_wide_int_const (mask, mode));
11490 }
11491
11492 SUBST (**iter, x);
11493 changed = true;
11494 }
11495
11496 if (changed)
11497 FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST)
11498 maybe_swap_commutative_operands (**iter);
11499
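/* Also handle a ZERO_EXTRACT destination: a bit-field store
   (set (zero_extract REG WIDTH OFFSET) SRC) is rewritten as a set of the
   whole register, masking the field out of REG and IOR-ing in the
   suitably shifted and masked SRC. */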
11500 rtx *dst = &SET_DEST (pat);
11501 scalar_int_mode mode;
11502 if (GET_CODE (*dst) == ZERO_EXTRACT
11503 && REG_P (XEXP (*dst, 0))
11504 && is_a <scalar_int_mode> (GET_MODE (XEXP (*dst, 0)), &mode)
11505 && CONST_INT_P (XEXP (*dst, 1))
11506 && CONST_INT_P (XEXP (*dst, 2)))
11507 {
11508 rtx reg = XEXP (*dst, 0);
11509 int width = INTVAL (XEXP (*dst, 1));
11510 int offset = INTVAL (XEXP (*dst, 2));
11511 int reg_width = GET_MODE_PRECISION (mode);
11512 if (BITS_BIG_ENDIAN)
11513 offset = reg_width - width - offset;
11514
11515 rtx x, y, z, w;
11516 wide_int mask = wi::shifted_mask (offset, width, true, reg_width);
11517 wide_int mask2 = wi::shifted_mask (offset, width, false, reg_width);
11518 x = gen_rtx_AND (mode, reg, immed_wide_int_const (mask, mode));
11519 if (offset)
11520 y = gen_rtx_ASHIFT (mode, SET_SRC (pat), GEN_INT (offset));
11521 else
11522 y = SET_SRC (pat);
11523 z = gen_rtx_AND (mode, y, immed_wide_int_const (mask2, mode));
11524 w = gen_rtx_IOR (mode, x, z);
11525 SUBST (SET_DEST (pat), reg);
11526 SUBST (SET_SRC (pat), w);
11527
11528 changed = true;
11529 }
11530
11531 return changed;
11532 }
11533
11534 /* Like recog, but we receive the address of a pointer to a new pattern.
11535 We try to match the rtx that the pointer points to.
11536 If that fails, we may try to modify or replace the pattern,
11537 storing the replacement into the same pointer object.
11538
11539 Modifications include deletion or addition of CLOBBERs. If the
11540 instruction will still not match, we change ZERO_EXTEND and ZERO_EXTRACT
11541 to the equivalent AND and perhaps LSHIFTRT patterns, and try with that
11542 (and undo if that fails).
11543
11544 PNOTES is a pointer to a location where any REG_UNUSED notes added for
11545 the CLOBBERs are placed.
11546
11547 The value is the final insn code from the pattern ultimately matched,
11548 or -1. */
11549
11550 static int
11551 recog_for_combine (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
11552 {
11553 rtx pat = *pnewpat;
11554 int insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes);
11555 if (insn_code_number >= 0 || check_asm_operands (pat))
11556 return insn_code_number;
11557
11558 void *marker = get_undo_marker ();
11559 bool changed = false;
11560
11561 if (GET_CODE (pat) == SET)
11562 changed = change_zero_ext (pat);
11563 else if (GET_CODE (pat) == PARALLEL)
11564 {
11565 int i;
11566 for (i = 0; i < XVECLEN (pat, 0); i++)
11567 {
11568 rtx set = XVECEXP (pat, 0, i);
11569 if (GET_CODE (set) == SET)
11570 changed |= change_zero_ext (set);
11571 }
11572 }
11573
11574 if (changed)
11575 {
11576 insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes);
11577
11578 if (insn_code_number < 0)
11579 undo_to_marker (marker);
11580 }
11581
11582 return insn_code_number;
11583 }
11584 \f
11585 /* Like gen_lowpart_general but for use by combine. In combine it
11586 is not possible to create any new pseudoregs. However, it is
11587 safe to create invalid memory addresses, because combine will
11588 try to recognize them and all they will do is make the combine
11589 attempt fail.
11590
11591 If for some reason this cannot do its job, an rtx
11592 (clobber (const_int 0)) is returned.
11593 An insn containing that will not be recognized. */
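/* E.g. gen_lowpart_for_combine (QImode, (reg:SI 100)) normally yields
   (subreg:QI (reg:SI 100) 0) (the byte offset depends on endianness),
   whereas an unsupported request yields (clobber (const_int 0)). */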
11594
11595 static rtx
11596 gen_lowpart_for_combine (machine_mode omode, rtx x)
11597 {
11598 machine_mode imode = GET_MODE (x);
11599 unsigned int osize = GET_MODE_SIZE (omode);
11600 unsigned int isize = GET_MODE_SIZE (imode);
11601 rtx result;
11602
11603 if (omode == imode)
11604 return x;
11605
11606 /* We can only support MODE being wider than a word if X is a
11607 constant integer or has a mode the same size. */
11608 if (GET_MODE_SIZE (omode) > UNITS_PER_WORD
11609 && ! (CONST_SCALAR_INT_P (x) || isize == osize))
11610 goto fail;
11611
11612 /* X might be a paradoxical (subreg (mem)). In that case, gen_lowpart
11613 won't know what to do. So we will strip off the SUBREG here and
11614 process normally. */
11615 if (GET_CODE (x) == SUBREG && MEM_P (SUBREG_REG (x)))
11616 {
11617 x = SUBREG_REG (x);
11618
11619 /* In case we fall through to the address adjustments further
11620 below, update the known mode and size of x (imode and isize),
11621 since we just stripped the SUBREG from x. */
11622 imode = GET_MODE (x);
11623
11624 if (imode == omode)
11625 return x;
11626
11627 isize = GET_MODE_SIZE (imode);
11628 }
11629
11630 result = gen_lowpart_common (omode, x);
11631
11632 if (result)
11633 return result;
11634
11635 if (MEM_P (x))
11636 {
11637 /* Refuse to work on a volatile memory ref or one with a mode-dependent
11638 address. */
11639 if (MEM_VOLATILE_P (x)
11640 || mode_dependent_address_p (XEXP (x, 0), MEM_ADDR_SPACE (x)))
11641 goto fail;
11642
11643 /* If we want to refer to something bigger than the original memref,
11644 generate a paradoxical subreg instead. That will force a reload
11645 of the original memref X. */
11646 if (paradoxical_subreg_p (omode, imode))
11647 return gen_rtx_SUBREG (omode, x, 0);
11648
11649 HOST_WIDE_INT offset = byte_lowpart_offset (omode, imode);
11650 return adjust_address_nv (x, omode, offset);
11651 }
11652
11653 /* If X is a comparison operator, rewrite it in a new mode. This
11654 probably won't match, but may allow further simplifications. */
11655 else if (COMPARISON_P (x))
11656 return gen_rtx_fmt_ee (GET_CODE (x), omode, XEXP (x, 0), XEXP (x, 1));
11657
11658 /* If we couldn't simplify X any other way, just enclose it in a
11659 SUBREG. Normally, this SUBREG won't match, but some patterns may
11660 include an explicit SUBREG or we may simplify it further in combine. */
11661 else
11662 {
11663 rtx res;
11664
11665 if (imode == VOIDmode)
11666 {
11667 imode = int_mode_for_mode (omode).require ();
11668 x = gen_lowpart_common (imode, x);
11669 if (x == NULL)
11670 goto fail;
11671 }
11672 res = lowpart_subreg (omode, x, imode);
11673 if (res)
11674 return res;
11675 }
11676
11677 fail:
11678 return gen_rtx_CLOBBER (omode, const0_rtx);
11679 }
11680 \f
11681 /* Try to simplify a comparison between OP0 and a constant OP1,
11682 where CODE is the comparison code that will be tested, into a
11683 (CODE OP0 const0_rtx) form.
11684
11685 The result is a possibly different comparison code to use.
11686 *POP1 may be updated. */
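/* For instance, (ltu OP0 (const_int 1)) comes back as EQ with *POP1 set
   to const0_rtx, via the LTU and LEU cases below. */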
11687
11688 static enum rtx_code
11689 simplify_compare_const (enum rtx_code code, machine_mode mode,
11690 rtx op0, rtx *pop1)
11691 {
11692 scalar_int_mode int_mode;
11693 HOST_WIDE_INT const_op = INTVAL (*pop1);
11694
11695 /* Get the constant we are comparing against and turn off all bits
11696 not on in our mode. */
11697 if (mode != VOIDmode)
11698 const_op = trunc_int_for_mode (const_op, mode);
11699
11700 /* If we are comparing against a constant power of two and the value
11701 being compared can only have that single bit nonzero (e.g., it was
11702 `and'ed with that bit), we can replace this with a comparison
11703 with zero. */
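/* E.g. if OP0 is (and X (const_int 8)), whose only possibly nonzero bit
   is bit 3, then (eq OP0 (const_int 8)) becomes (ne OP0 (const_int 0))
   and (ne OP0 (const_int 8)) becomes (eq OP0 (const_int 0)). */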
11704 if (const_op
11705 && (code == EQ || code == NE || code == GE || code == GEU
11706 || code == LT || code == LTU)
11707 && is_a <scalar_int_mode> (mode, &int_mode)
11708 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11709 && pow2p_hwi (const_op & GET_MODE_MASK (int_mode))
11710 && (nonzero_bits (op0, int_mode)
11711 == (unsigned HOST_WIDE_INT) (const_op & GET_MODE_MASK (int_mode))))
11712 {
11713 code = (code == EQ || code == GE || code == GEU ? NE : EQ);
11714 const_op = 0;
11715 }
11716
11717 /* Similarly, if we are comparing a value known to be either -1 or
11718 0 with -1, change it to the opposite comparison against zero. */
11719 if (const_op == -1
11720 && (code == EQ || code == NE || code == GT || code == LE
11721 || code == GEU || code == LTU)
11722 && is_a <scalar_int_mode> (mode, &int_mode)
11723 && num_sign_bit_copies (op0, int_mode) == GET_MODE_PRECISION (int_mode))
11724 {
11725 code = (code == EQ || code == LE || code == GEU ? NE : EQ);
11726 const_op = 0;
11727 }
11728
11729 /* Do some canonicalizations based on the comparison code. We prefer
11730 comparisons against zero and then prefer equality comparisons.
11731 If we can reduce the size of a constant, we will do that too. */
11732 switch (code)
11733 {
11734 case LT:
11735 /* < C is equivalent to <= (C - 1) */
11736 if (const_op > 0)
11737 {
11738 const_op -= 1;
11739 code = LE;
11740 /* ... fall through to LE case below. */
11741 gcc_fallthrough ();
11742 }
11743 else
11744 break;
11745
11746 case LE:
11747 /* <= C is equivalent to < (C + 1); we do this for C < 0 */
11748 if (const_op < 0)
11749 {
11750 const_op += 1;
11751 code = LT;
11752 }
11753
11754 /* If we are doing a <= 0 comparison on a value known to have
11755 a zero sign bit, we can replace this with == 0. */
11756 else if (const_op == 0
11757 && is_a <scalar_int_mode> (mode, &int_mode)
11758 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11759 && (nonzero_bits (op0, int_mode)
11760 & (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
11761 == 0)
11762 code = EQ;
11763 break;
11764
11765 case GE:
11766 /* >= C is equivalent to > (C - 1). */
11767 if (const_op > 0)
11768 {
11769 const_op -= 1;
11770 code = GT;
11771 /* ... fall through to GT below. */
11772 gcc_fallthrough ();
11773 }
11774 else
11775 break;
11776
11777 case GT:
11778 /* > C is equivalent to >= (C + 1); we do this for C < 0. */
11779 if (const_op < 0)
11780 {
11781 const_op += 1;
11782 code = GE;
11783 }
11784
11785 /* If we are doing a > 0 comparison on a value known to have
11786 a zero sign bit, we can replace this with != 0. */
11787 else if (const_op == 0
11788 && is_a <scalar_int_mode> (mode, &int_mode)
11789 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11790 && (nonzero_bits (op0, int_mode)
11791 & (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
11792 == 0)
11793 code = NE;
11794 break;
11795
11796 case LTU:
11797 /* < C is equivalent to <= (C - 1). */
11798 if (const_op > 0)
11799 {
11800 const_op -= 1;
11801 code = LEU;
11802 /* ... fall through ... */
11803 gcc_fallthrough ();
11804 }
11805 /* (unsigned) < 0x80000000 is equivalent to >= 0. */
11806 else if (is_a <scalar_int_mode> (mode, &int_mode)
11807 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11808 && ((unsigned HOST_WIDE_INT) const_op
11809 == HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
11810 {
11811 const_op = 0;
11812 code = GE;
11813 break;
11814 }
11815 else
11816 break;
11817
11818 case LEU:
11819 /* unsigned <= 0 is equivalent to == 0 */
11820 if (const_op == 0)
11821 code = EQ;
11822 /* (unsigned) <= 0x7fffffff is equivalent to >= 0. */
11823 else if (is_a <scalar_int_mode> (mode, &int_mode)
11824 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11825 && ((unsigned HOST_WIDE_INT) const_op
11826 == ((HOST_WIDE_INT_1U
11827 << (GET_MODE_PRECISION (int_mode) - 1)) - 1)))
11828 {
11829 const_op = 0;
11830 code = GE;
11831 }
11832 break;
11833
11834 case GEU:
11835 /* >= C is equivalent to > (C - 1). */
11836 if (const_op > 1)
11837 {
11838 const_op -= 1;
11839 code = GTU;
11840 /* ... fall through ... */
11841 gcc_fallthrough ();
11842 }
11843
11844 /* (unsigned) >= 0x80000000 is equivalent to < 0. */
11845 else if (is_a <scalar_int_mode> (mode, &int_mode)
11846 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11847 && ((unsigned HOST_WIDE_INT) const_op
11848 == HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
11849 {
11850 const_op = 0;
11851 code = LT;
11852 break;
11853 }
11854 else
11855 break;
11856
11857 case GTU:
11858 /* unsigned > 0 is equivalent to != 0 */
11859 if (const_op == 0)
11860 code = NE;
11861 /* (unsigned) > 0x7fffffff is equivalent to < 0. */
11862 else if (is_a <scalar_int_mode> (mode, &int_mode)
11863 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11864 && ((unsigned HOST_WIDE_INT) const_op
11865 == (HOST_WIDE_INT_1U
11866 << (GET_MODE_PRECISION (int_mode) - 1)) - 1))
11867 {
11868 const_op = 0;
11869 code = LT;
11870 }
11871 break;
11872
11873 default:
11874 break;
11875 }
11876
11877 *pop1 = GEN_INT (const_op);
11878 return code;
11879 }
11880 \f
11881 /* Simplify a comparison between *POP0 and *POP1 where CODE is the
11882 comparison code that will be tested.
11883
11884 The result is a possibly different comparison code to use. *POP0 and
11885 *POP1 may be updated.
11886
11887 It is possible that we might detect that a comparison is either always
11888 true or always false. However, we do not perform general constant
11889 folding in combine, so this knowledge isn't useful. Such tautologies
11890 should have been detected earlier. Hence we ignore all such cases. */
11891
11892 static enum rtx_code
11893 simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
11894 {
11895 rtx op0 = *pop0;
11896 rtx op1 = *pop1;
11897 rtx tem, tem1;
11898 int i;
11899 scalar_int_mode mode, inner_mode, tmode;
11900 opt_scalar_int_mode tmode_iter;
11901
11902 /* Try a few ways of applying the same transformation to both operands. */
11903 while (1)
11904 {
11905 /* The test below this one won't handle SIGN_EXTENDs on machines without
11906 WORD_REGISTER_OPERATIONS, so check specially. */
11907 if (!WORD_REGISTER_OPERATIONS
11908 && code != GTU && code != GEU && code != LTU && code != LEU
11909 && GET_CODE (op0) == ASHIFTRT && GET_CODE (op1) == ASHIFTRT
11910 && GET_CODE (XEXP (op0, 0)) == ASHIFT
11911 && GET_CODE (XEXP (op1, 0)) == ASHIFT
11912 && GET_CODE (XEXP (XEXP (op0, 0), 0)) == SUBREG
11913 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SUBREG
11914 && is_a <scalar_int_mode> (GET_MODE (op0), &mode)
11915 && (is_a <scalar_int_mode>
11916 (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0))), &inner_mode))
11917 && inner_mode == GET_MODE (SUBREG_REG (XEXP (XEXP (op1, 0), 0)))
11918 && CONST_INT_P (XEXP (op0, 1))
11919 && XEXP (op0, 1) == XEXP (op1, 1)
11920 && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
11921 && XEXP (op0, 1) == XEXP (XEXP (op1, 0), 1)
11922 && (INTVAL (XEXP (op0, 1))
11923 == (GET_MODE_PRECISION (mode)
11924 - GET_MODE_PRECISION (inner_mode))))
11925 {
11926 op0 = SUBREG_REG (XEXP (XEXP (op0, 0), 0));
11927 op1 = SUBREG_REG (XEXP (XEXP (op1, 0), 0));
11928 }
11929
11930 /* If both operands are the same constant shift, see if we can ignore the
11931 shift. We can if the shift is a rotate or if the bits shifted out of
11932 this shift are known to be zero for both inputs and if the type of
11933 comparison is compatible with the shift. */
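/* For instance, (eq (lshiftrt A (const_int 2)) (lshiftrt B (const_int 2)))
   can be treated as (eq A B) when the low two bits of both A and B are
   known to be zero. */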
11934 if (GET_CODE (op0) == GET_CODE (op1)
11935 && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
11936 && ((GET_CODE (op0) == ROTATE && (code == NE || code == EQ))
11937 || ((GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFT)
11938 && (code != GT && code != LT && code != GE && code != LE))
11939 || (GET_CODE (op0) == ASHIFTRT
11940 && (code != GTU && code != LTU
11941 && code != GEU && code != LEU)))
11942 && CONST_INT_P (XEXP (op0, 1))
11943 && INTVAL (XEXP (op0, 1)) >= 0
11944 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
11945 && XEXP (op0, 1) == XEXP (op1, 1))
11946 {
11947 machine_mode mode = GET_MODE (op0);
11948 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
11949 int shift_count = INTVAL (XEXP (op0, 1));
11950
11951 if (GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFTRT)
11952 mask &= (mask >> shift_count) << shift_count;
11953 else if (GET_CODE (op0) == ASHIFT)
11954 mask = (mask & (mask << shift_count)) >> shift_count;
11955
11956 if ((nonzero_bits (XEXP (op0, 0), mode) & ~mask) == 0
11957 && (nonzero_bits (XEXP (op1, 0), mode) & ~mask) == 0)
11958 op0 = XEXP (op0, 0), op1 = XEXP (op1, 0);
11959 else
11960 break;
11961 }
11962
11963 /* If both operands are AND's of a paradoxical SUBREG by constant, the
11964 SUBREGs are of the same mode, and, in both cases, the AND would
11965 be redundant if the comparison was done in the narrower mode,
11966 do the comparison in the narrower mode (e.g., we are AND'ing with 1
11967 and the operand's possibly nonzero bits are 0xffffff01; in that case
11968 if we only care about QImode, we don't need the AND). This case
11969 occurs if the output mode of an scc insn is not SImode and
11970 STORE_FLAG_VALUE == 1 (e.g., the 386).
11971
11972 Similarly, check for a case where the AND's are ZERO_EXTEND
11973 operations from some narrower mode even though a SUBREG is not
11974 present. */
11975
11976 else if (GET_CODE (op0) == AND && GET_CODE (op1) == AND
11977 && CONST_INT_P (XEXP (op0, 1))
11978 && CONST_INT_P (XEXP (op1, 1)))
11979 {
11980 rtx inner_op0 = XEXP (op0, 0);
11981 rtx inner_op1 = XEXP (op1, 0);
11982 HOST_WIDE_INT c0 = INTVAL (XEXP (op0, 1));
11983 HOST_WIDE_INT c1 = INTVAL (XEXP (op1, 1));
11984 int changed = 0;
11985
11986 if (paradoxical_subreg_p (inner_op0)
11987 && GET_CODE (inner_op1) == SUBREG
11988 && HWI_COMPUTABLE_MODE_P (GET_MODE (SUBREG_REG (inner_op0)))
11989 && (GET_MODE (SUBREG_REG (inner_op0))
11990 == GET_MODE (SUBREG_REG (inner_op1)))
11991 && (0 == ((~c0) & nonzero_bits (SUBREG_REG (inner_op0),
11992 GET_MODE (SUBREG_REG (inner_op0)))))
11993 && (0 == ((~c1) & nonzero_bits (SUBREG_REG (inner_op1),
11994 GET_MODE (SUBREG_REG (inner_op1))))))
11995 {
11996 op0 = SUBREG_REG (inner_op0);
11997 op1 = SUBREG_REG (inner_op1);
11998
11999 /* The resulting comparison is always unsigned since we masked
12000 off the original sign bit. */
12001 code = unsigned_condition (code);
12002
12003 changed = 1;
12004 }
12005
12006 else if (c0 == c1)
12007 FOR_EACH_MODE_UNTIL (tmode,
12008 as_a <scalar_int_mode> (GET_MODE (op0)))
12009 if ((unsigned HOST_WIDE_INT) c0 == GET_MODE_MASK (tmode))
12010 {
12011 op0 = gen_lowpart_or_truncate (tmode, inner_op0);
12012 op1 = gen_lowpart_or_truncate (tmode, inner_op1);
12013 code = unsigned_condition (code);
12014 changed = 1;
12015 break;
12016 }
12017
12018 if (! changed)
12019 break;
12020 }
12021
12022 /* If both operands are NOT, we can strip off the outer operation
12023 and adjust the comparison code for swapped operands; similarly for
12024 NEG, except that this must be an equality comparison. */
12025 else if ((GET_CODE (op0) == NOT && GET_CODE (op1) == NOT)
12026 || (GET_CODE (op0) == NEG && GET_CODE (op1) == NEG
12027 && (code == EQ || code == NE)))
12028 op0 = XEXP (op0, 0), op1 = XEXP (op1, 0), code = swap_condition (code);
12029
12030 else
12031 break;
12032 }
12033
12034 /* If the first operand is a constant, swap the operands and adjust the
12035 comparison code appropriately, but don't do this if the second operand
12036 is already a constant integer. */
12037 if (swap_commutative_operands_p (op0, op1))
12038 {
12039 std::swap (op0, op1);
12040 code = swap_condition (code);
12041 }
12042
12043 /* We now enter a loop during which we will try to simplify the comparison.
12044 For the most part, we are only concerned with comparisons with zero,
12045 but some things may really be comparisons with zero but not start
12046 out looking that way. */
12047
12048 while (CONST_INT_P (op1))
12049 {
12050 machine_mode raw_mode = GET_MODE (op0);
12051 scalar_int_mode int_mode;
12052 int equality_comparison_p;
12053 int sign_bit_comparison_p;
12054 int unsigned_comparison_p;
12055 HOST_WIDE_INT const_op;
12056
12057 /* We only want to handle integral modes. This catches VOIDmode,
12058 CCmode, and the floating-point modes. An exception is that we
12059 can handle VOIDmode if OP0 is a COMPARE or a comparison
12060 operation. */
12061
12062 if (GET_MODE_CLASS (raw_mode) != MODE_INT
12063 && ! (raw_mode == VOIDmode
12064 && (GET_CODE (op0) == COMPARE || COMPARISON_P (op0))))
12065 break;
12066
12067 /* Try to simplify the compare to constant, possibly changing the
12068 comparison op, and/or changing op1 to zero. */
12069 code = simplify_compare_const (code, raw_mode, op0, &op1);
12070 const_op = INTVAL (op1);
12071
12072 /* Compute some predicates to simplify code below. */
12073
12074 equality_comparison_p = (code == EQ || code == NE);
12075 sign_bit_comparison_p = ((code == LT || code == GE) && const_op == 0);
12076 unsigned_comparison_p = (code == LTU || code == LEU || code == GTU
12077 || code == GEU);
12078
12079 /* If this is a sign bit comparison and we can do arithmetic in
12080 MODE, say that we will only be needing the sign bit of OP0. */
12081 if (sign_bit_comparison_p
12082 && is_a <scalar_int_mode> (raw_mode, &int_mode)
12083 && HWI_COMPUTABLE_MODE_P (int_mode))
12084 op0 = force_to_mode (op0, int_mode,
12085 HOST_WIDE_INT_1U
12086 << (GET_MODE_PRECISION (int_mode) - 1),
12087 0);
12088
12089 if (COMPARISON_P (op0))
12090 {
12091 /* We can't do anything if OP0 is a condition code value, rather
12092 than an actual data value. */
12093 if (const_op != 0
12094 || CC0_P (XEXP (op0, 0))
12095 || GET_MODE_CLASS (GET_MODE (XEXP (op0, 0))) == MODE_CC)
12096 break;
12097
12098 /* Get the two operands being compared. */
12099 if (GET_CODE (XEXP (op0, 0)) == COMPARE)
12100 tem = XEXP (XEXP (op0, 0), 0), tem1 = XEXP (XEXP (op0, 0), 1);
12101 else
12102 tem = XEXP (op0, 0), tem1 = XEXP (op0, 1);
12103
12104 /* Check for the cases where we simply want the result of the
12105 earlier test or the opposite of that result. */
12106 if (code == NE || code == EQ
12107 || (val_signbit_known_set_p (raw_mode, STORE_FLAG_VALUE)
12108 && (code == LT || code == GE)))
12109 {
12110 enum rtx_code new_code;
12111 if (code == LT || code == NE)
12112 new_code = GET_CODE (op0);
12113 else
12114 new_code = reversed_comparison_code (op0, NULL);
12115
12116 if (new_code != UNKNOWN)
12117 {
12118 code = new_code;
12119 op0 = tem;
12120 op1 = tem1;
12121 continue;
12122 }
12123 }
12124 break;
12125 }
12126
12127 if (raw_mode == VOIDmode)
12128 break;
12129 scalar_int_mode mode = as_a <scalar_int_mode> (raw_mode);
12130
12131 /* Now try cases based on the opcode of OP0. If none of the cases
12132 does a "continue", we exit this loop immediately after the
12133 switch. */
12134
12135 unsigned int mode_width = GET_MODE_PRECISION (mode);
12136 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
12137 switch (GET_CODE (op0))
12138 {
12139 case ZERO_EXTRACT:
12140 /* If we are extracting a single bit from a variable position in
12141 a constant that has only a single bit set and are comparing it
12142 with zero, we can convert this into an equality comparison
12143 between the position and the location of the single bit. */
12144 /* Except we can't if SHIFT_COUNT_TRUNCATED is set, since we might
12145 have already reduced the shift count modulo the word size. */
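/* E.g. (eq (zero_extract (const_int 8) (const_int 1) POS) (const_int 0))
   becomes (ne POS (const_int 3)), assuming BITS_BIG_ENDIAN is zero. */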
12146 if (!SHIFT_COUNT_TRUNCATED
12147 && CONST_INT_P (XEXP (op0, 0))
12148 && XEXP (op0, 1) == const1_rtx
12149 && equality_comparison_p && const_op == 0
12150 && (i = exact_log2 (UINTVAL (XEXP (op0, 0)))) >= 0)
12151 {
12152 if (BITS_BIG_ENDIAN)
12153 i = BITS_PER_WORD - 1 - i;
12154
12155 op0 = XEXP (op0, 2);
12156 op1 = GEN_INT (i);
12157 const_op = i;
12158
12159 /* Result is nonzero iff shift count is equal to I. */
12160 code = reverse_condition (code);
12161 continue;
12162 }
12163
12164 /* fall through */
12165
12166 case SIGN_EXTRACT:
12167 tem = expand_compound_operation (op0);
12168 if (tem != op0)
12169 {
12170 op0 = tem;
12171 continue;
12172 }
12173 break;
12174
12175 case NOT:
12176 /* If testing for equality, we can take the NOT of the constant. */
12177 if (equality_comparison_p
12178 && (tem = simplify_unary_operation (NOT, mode, op1, mode)) != 0)
12179 {
12180 op0 = XEXP (op0, 0);
12181 op1 = tem;
12182 continue;
12183 }
12184
12185 /* If just looking at the sign bit, reverse the sense of the
12186 comparison. */
12187 if (sign_bit_comparison_p)
12188 {
12189 op0 = XEXP (op0, 0);
12190 code = (code == GE ? LT : GE);
12191 continue;
12192 }
12193 break;
12194
12195 case NEG:
12196 /* If testing for equality, we can take the NEG of the constant. */
12197 if (equality_comparison_p
12198 && (tem = simplify_unary_operation (NEG, mode, op1, mode)) != 0)
12199 {
12200 op0 = XEXP (op0, 0);
12201 op1 = tem;
12202 continue;
12203 }
12204
12205 /* The remaining cases only apply to comparisons with zero. */
12206 if (const_op != 0)
12207 break;
12208
12209 /* When X is ABS or is known positive,
12210 (neg X) is < 0 if and only if X != 0. */
12211
12212 if (sign_bit_comparison_p
12213 && (GET_CODE (XEXP (op0, 0)) == ABS
12214 || (mode_width <= HOST_BITS_PER_WIDE_INT
12215 && (nonzero_bits (XEXP (op0, 0), mode)
12216 & (HOST_WIDE_INT_1U << (mode_width - 1)))
12217 == 0)))
12218 {
12219 op0 = XEXP (op0, 0);
12220 code = (code == LT ? NE : EQ);
12221 continue;
12222 }
12223
12224 /* If we have NEG of something whose two high-order bits are the
12225 same, we know that "(-a) < 0" is equivalent to "a > 0". */
12226 if (num_sign_bit_copies (op0, mode) >= 2)
12227 {
12228 op0 = XEXP (op0, 0);
12229 code = swap_condition (code);
12230 continue;
12231 }
12232 break;
12233
12234 case ROTATE:
12235 /* If we are testing equality and our count is a constant, we
12236 can perform the inverse operation on our RHS. */
12237 if (equality_comparison_p && CONST_INT_P (XEXP (op0, 1))
12238 && (tem = simplify_binary_operation (ROTATERT, mode,
12239 op1, XEXP (op0, 1))) != 0)
12240 {
12241 op0 = XEXP (op0, 0);
12242 op1 = tem;
12243 continue;
12244 }
12245
12246 /* If we are doing a < 0 or >= 0 comparison, it means we are testing
12247 a particular bit. Convert it to an AND of a constant of that
12248 bit. This will be converted into a ZERO_EXTRACT. */
12249 if (const_op == 0 && sign_bit_comparison_p
12250 && CONST_INT_P (XEXP (op0, 1))
12251 && mode_width <= HOST_BITS_PER_WIDE_INT)
12252 {
12253 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
12254 (HOST_WIDE_INT_1U
12255 << (mode_width - 1
12256 - INTVAL (XEXP (op0, 1)))));
12257 code = (code == LT ? NE : EQ);
12258 continue;
12259 }
12260
12261 /* Fall through. */
12262
12263 case ABS:
12264 /* ABS is ignorable inside an equality comparison with zero. */
12265 if (const_op == 0 && equality_comparison_p)
12266 {
12267 op0 = XEXP (op0, 0);
12268 continue;
12269 }
12270 break;
12271
12272 case SIGN_EXTEND:
12273 /* Can simplify (compare (zero/sign_extend FOO) CONST) to
12274 (compare FOO CONST) if CONST fits in FOO's mode and we
12275 are either testing inequality or have an unsigned
12276 comparison with ZERO_EXTEND or a signed comparison with
12277 SIGN_EXTEND. But don't do it if we don't have a compare
12278 insn of the given mode, since we'd have to revert it
12279 later on, and then we wouldn't know whether to sign- or
12280 zero-extend. */
12281 if (is_int_mode (GET_MODE (XEXP (op0, 0)), &mode)
12282 && ! unsigned_comparison_p
12283 && HWI_COMPUTABLE_MODE_P (mode)
12284 && trunc_int_for_mode (const_op, mode) == const_op
12285 && have_insn_for (COMPARE, mode))
12286 {
12287 op0 = XEXP (op0, 0);
12288 continue;
12289 }
12290 break;
12291
12292 case SUBREG:
12293 /* Check for the case where we are comparing A - C1 with C2, that is
12294
12295 (subreg:MODE (plus (A) (-C1))) op (C2)
12296
12297 with C1 a constant, and try to lift the SUBREG, i.e. to do the
12298 comparison in the wider mode. One of the following two conditions
12299 must be true in order for this to be valid:
12300
12301 1. The mode extension results in the same bit pattern being added
12302 on both sides and the comparison is equality or unsigned. As
12303 C2 has been truncated to fit in MODE, the pattern can only be
12304 all 0s or all 1s.
12305
12306 2. The mode extension results in the sign bit being copied on
12307 each side.
12308
12309 The difficulty here is that we have predicates for A but not for
12310 (A - C1) so we need to check that C1 is within proper bounds so
12311 as to perturb A as little as possible. */
12312
12313 if (mode_width <= HOST_BITS_PER_WIDE_INT
12314 && subreg_lowpart_p (op0)
12315 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op0)),
12316 &inner_mode)
12317 && GET_MODE_PRECISION (inner_mode) > mode_width
12318 && GET_CODE (SUBREG_REG (op0)) == PLUS
12319 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1)))
12320 {
12321 rtx a = XEXP (SUBREG_REG (op0), 0);
12322 HOST_WIDE_INT c1 = -INTVAL (XEXP (SUBREG_REG (op0), 1));
12323
12324 if ((c1 > 0
12325 && (unsigned HOST_WIDE_INT) c1
12326 < HOST_WIDE_INT_1U << (mode_width - 1)
12327 && (equality_comparison_p || unsigned_comparison_p)
12328 /* (A - C1) zero-extends if it is positive and sign-extends
12329 if it is negative, C2 both zero- and sign-extends. */
12330 && ((0 == (nonzero_bits (a, inner_mode)
12331 & ~GET_MODE_MASK (mode))
12332 && const_op >= 0)
12333 /* (A - C1) sign-extends if it is positive and 1-extends
12334 if it is negative, C2 both sign- and 1-extends. */
12335 || (num_sign_bit_copies (a, inner_mode)
12336 > (unsigned int) (GET_MODE_PRECISION (inner_mode)
12337 - mode_width)
12338 && const_op < 0)))
12339 || ((unsigned HOST_WIDE_INT) c1
12340 < HOST_WIDE_INT_1U << (mode_width - 2)
12341 /* (A - C1) always sign-extends, like C2. */
12342 && num_sign_bit_copies (a, inner_mode)
12343 > (unsigned int) (GET_MODE_PRECISION (inner_mode)
12344 - (mode_width - 1))))
12345 {
12346 op0 = SUBREG_REG (op0);
12347 continue;
12348 }
12349 }
12350
12351 /* If the inner mode is narrower and we are extracting the low part,
12352 we can treat the SUBREG as if it were a ZERO_EXTEND. */
12353 if (paradoxical_subreg_p (op0))
12354 ;
12355 else if (subreg_lowpart_p (op0)
12356 && GET_MODE_CLASS (mode) == MODE_INT
12357 && is_int_mode (GET_MODE (SUBREG_REG (op0)), &inner_mode)
12358 && (code == NE || code == EQ)
12359 && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
12360 && !paradoxical_subreg_p (op0)
12361 && (nonzero_bits (SUBREG_REG (op0), inner_mode)
12362 & ~GET_MODE_MASK (mode)) == 0)
12363 {
12364 /* Remove outer subregs that don't do anything. */
12365 tem = gen_lowpart (inner_mode, op1);
12366
12367 if ((nonzero_bits (tem, inner_mode)
12368 & ~GET_MODE_MASK (mode)) == 0)
12369 {
12370 op0 = SUBREG_REG (op0);
12371 op1 = tem;
12372 continue;
12373 }
12374 break;
12375 }
12376 else
12377 break;
12378
12379 /* FALLTHROUGH */
12380
12381 case ZERO_EXTEND:
12382 if (is_int_mode (GET_MODE (XEXP (op0, 0)), &mode)
12383 && (unsigned_comparison_p || equality_comparison_p)
12384 && HWI_COMPUTABLE_MODE_P (mode)
12385 && (unsigned HOST_WIDE_INT) const_op <= GET_MODE_MASK (mode)
12386 && const_op >= 0
12387 && have_insn_for (COMPARE, mode))
12388 {
12389 op0 = XEXP (op0, 0);
12390 continue;
12391 }
12392 break;
12393
12394 case PLUS:
12395 /* (eq (plus X A) B) -> (eq X (minus B A)). We can only do
12396 this for equality comparisons due to pathological cases involving
12397 overflows. */
12398 if (equality_comparison_p
12399 && 0 != (tem = simplify_binary_operation (MINUS, mode,
12400 op1, XEXP (op0, 1))))
12401 {
12402 op0 = XEXP (op0, 0);
12403 op1 = tem;
12404 continue;
12405 }
12406
12407 /* (plus (abs X) (const_int -1)) is < 0 if and only if X == 0. */
12408 if (const_op == 0 && XEXP (op0, 1) == constm1_rtx
12409 && GET_CODE (XEXP (op0, 0)) == ABS && sign_bit_comparison_p)
12410 {
12411 op0 = XEXP (XEXP (op0, 0), 0);
12412 code = (code == LT ? EQ : NE);
12413 continue;
12414 }
12415 break;
12416
12417 case MINUS:
12418 /* We used to optimize signed comparisons against zero, but that
12419 was incorrect. Unsigned comparisons against zero (GTU, LEU)
12420 arrive here as equality comparisons, or (GEU, LTU) are
12421 optimized away. No need to special-case them. */
12422
12423 /* (eq (minus A B) C) -> (eq A (plus B C)) or
12424 (eq B (minus A C)), whichever simplifies. We can only do
12425 this for equality comparisons due to pathological cases involving
12426 overflows. */
12427 if (equality_comparison_p
12428 && 0 != (tem = simplify_binary_operation (PLUS, mode,
12429 XEXP (op0, 1), op1)))
12430 {
12431 op0 = XEXP (op0, 0);
12432 op1 = tem;
12433 continue;
12434 }
12435
12436 if (equality_comparison_p
12437 && 0 != (tem = simplify_binary_operation (MINUS, mode,
12438 XEXP (op0, 0), op1)))
12439 {
12440 op0 = XEXP (op0, 1);
12441 op1 = tem;
12442 continue;
12443 }
12444
12445 /* The sign bit of (minus (ashiftrt X C) X), where C is the number
12446 of bits in X minus 1, is one iff X > 0. */
12447 if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == ASHIFTRT
12448 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
12449 && UINTVAL (XEXP (XEXP (op0, 0), 1)) == mode_width - 1
12450 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
12451 {
12452 op0 = XEXP (op0, 1);
12453 code = (code == GE ? LE : GT);
12454 continue;
12455 }
12456 break;
12457
12458 case XOR:
12459 /* (eq (xor A B) C) -> (eq A (xor B C)). This is a simplification
12460 if C is zero or B is a constant. */
12461 if (equality_comparison_p
12462 && 0 != (tem = simplify_binary_operation (XOR, mode,
12463 XEXP (op0, 1), op1)))
12464 {
12465 op0 = XEXP (op0, 0);
12466 op1 = tem;
12467 continue;
12468 }
12469 break;
12470
12471
12472 case IOR:
12473 /* The sign bit of (ior (plus X (const_int -1)) X) is nonzero
12474 iff X <= 0. */
12475 if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == PLUS
12476 && XEXP (XEXP (op0, 0), 1) == constm1_rtx
12477 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
12478 {
12479 op0 = XEXP (op0, 1);
12480 code = (code == GE ? GT : LE);
12481 continue;
12482 }
12483 break;
12484
12485 case AND:
12486 /* Convert (and (ashift 1 X) Y) to (and (lshiftrt Y X) 1). This
12487 will be converted to a ZERO_EXTRACT later. */
12488 if (const_op == 0 && equality_comparison_p
12489 && GET_CODE (XEXP (op0, 0)) == ASHIFT
12490 && XEXP (XEXP (op0, 0), 0) == const1_rtx)
12491 {
12492 op0 = gen_rtx_LSHIFTRT (mode, XEXP (op0, 1),
12493 XEXP (XEXP (op0, 0), 1));
12494 op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
12495 continue;
12496 }
12497
12498 /* If we are comparing (and (lshiftrt X C1) C2) for equality with
12499 zero and X is a comparison and C1 and C2 describe only bits set
12500 in STORE_FLAG_VALUE, we can compare with X. */
12501 if (const_op == 0 && equality_comparison_p
12502 && mode_width <= HOST_BITS_PER_WIDE_INT
12503 && CONST_INT_P (XEXP (op0, 1))
12504 && GET_CODE (XEXP (op0, 0)) == LSHIFTRT
12505 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
12506 && INTVAL (XEXP (XEXP (op0, 0), 1)) >= 0
12507 && INTVAL (XEXP (XEXP (op0, 0), 1)) < HOST_BITS_PER_WIDE_INT)
12508 {
12509 mask = ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
12510 << INTVAL (XEXP (XEXP (op0, 0), 1)));
12511 if ((~STORE_FLAG_VALUE & mask) == 0
12512 && (COMPARISON_P (XEXP (XEXP (op0, 0), 0))
12513 || ((tem = get_last_value (XEXP (XEXP (op0, 0), 0))) != 0
12514 && COMPARISON_P (tem))))
12515 {
12516 op0 = XEXP (XEXP (op0, 0), 0);
12517 continue;
12518 }
12519 }
12520
12521 /* If we are doing an equality comparison of an AND of a bit equal
12522 to the sign bit, replace this with a LT or GE comparison of
12523 the underlying value. */
12524 if (equality_comparison_p
12525 && const_op == 0
12526 && CONST_INT_P (XEXP (op0, 1))
12527 && mode_width <= HOST_BITS_PER_WIDE_INT
12528 && ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
12529 == HOST_WIDE_INT_1U << (mode_width - 1)))
12530 {
12531 op0 = XEXP (op0, 0);
12532 code = (code == EQ ? GE : LT);
12533 continue;
12534 }
12535
12536 /* If this AND operation is really a ZERO_EXTEND from a narrower
12537 mode, the constant fits within that mode, and this is either an
12538 equality or unsigned comparison, try to do this comparison in
12539 the narrower mode.
12540
12541 Note that in:
12542
12543 (ne:DI (and:DI (reg:DI 4) (const_int 0xffffffff)) (const_int 0))
12544 -> (ne:DI (reg:SI 4) (const_int 0))
12545
12546 unless TARGET_TRULY_NOOP_TRUNCATION allows it or the register is
12547 known to hold a value of the required mode, the
12548 transformation is invalid. */
12549 if ((equality_comparison_p || unsigned_comparison_p)
12550 && CONST_INT_P (XEXP (op0, 1))
12551 && (i = exact_log2 ((UINTVAL (XEXP (op0, 1))
12552 & GET_MODE_MASK (mode))
12553 + 1)) >= 0
12554 && const_op >> i == 0
12555 && int_mode_for_size (i, 1).exists (&tmode))
12556 {
12557 op0 = gen_lowpart_or_truncate (tmode, XEXP (op0, 0));
12558 continue;
12559 }
12560
12561 /* If this is (and:M1 (subreg:M1 X:M2 0) (const_int C1)) where C1
12562 fits in both M1 and M2 and the SUBREG is either paradoxical
12563 or represents the low part, permute the SUBREG and the AND
12564 and try again. */
12565 if (GET_CODE (XEXP (op0, 0)) == SUBREG
12566 && CONST_INT_P (XEXP (op0, 1)))
12567 {
12568 unsigned HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
12569 /* Require an integral mode, to avoid creating something like
12570 (AND:SF ...). */
12571 if ((is_a <scalar_int_mode>
12572 (GET_MODE (SUBREG_REG (XEXP (op0, 0))), &tmode))
12573 /* It is unsafe to commute the AND into the SUBREG if the
12574 SUBREG is paradoxical and WORD_REGISTER_OPERATIONS is
12575 not defined. As originally written the upper bits
12576 have a defined value due to the AND operation.
12577 However, if we commute the AND inside the SUBREG then
12578 they no longer have defined values and the meaning of
12579 the code has been changed.
12580 Also C1 should not change value in the smaller mode,
12581 see PR67028 (a positive C1 can become negative in the
12582 smaller mode, so that the AND does no longer mask the
12583 upper bits). */
12584 && ((WORD_REGISTER_OPERATIONS
12585 && mode_width > GET_MODE_PRECISION (tmode)
12586 && mode_width <= BITS_PER_WORD
12587 && trunc_int_for_mode (c1, tmode) == (HOST_WIDE_INT) c1)
12588 || (mode_width <= GET_MODE_PRECISION (tmode)
12589 && subreg_lowpart_p (XEXP (op0, 0))))
12590 && mode_width <= HOST_BITS_PER_WIDE_INT
12591 && HWI_COMPUTABLE_MODE_P (tmode)
12592 && (c1 & ~mask) == 0
12593 && (c1 & ~GET_MODE_MASK (tmode)) == 0
12594 && c1 != mask
12595 && c1 != GET_MODE_MASK (tmode))
12596 {
12597 op0 = simplify_gen_binary (AND, tmode,
12598 SUBREG_REG (XEXP (op0, 0)),
12599 gen_int_mode (c1, tmode));
12600 op0 = gen_lowpart (mode, op0);
12601 continue;
12602 }
12603 }
12604
12605 /* Convert (ne (and (not X) 1) 0) to (eq (and X 1) 0). */
12606 if (const_op == 0 && equality_comparison_p
12607 && XEXP (op0, 1) == const1_rtx
12608 && GET_CODE (XEXP (op0, 0)) == NOT)
12609 {
12610 op0 = simplify_and_const_int (NULL_RTX, mode,
12611 XEXP (XEXP (op0, 0), 0), 1);
12612 code = (code == NE ? EQ : NE);
12613 continue;
12614 }
12615
12616 /* Convert (ne (and (lshiftrt (not X)) 1) 0) to
12617 (eq (and (lshiftrt X) 1) 0).
12618 Also handle the case where (not X) is expressed using xor. */
12619 if (const_op == 0 && equality_comparison_p
12620 && XEXP (op0, 1) == const1_rtx
12621 && GET_CODE (XEXP (op0, 0)) == LSHIFTRT)
12622 {
12623 rtx shift_op = XEXP (XEXP (op0, 0), 0);
12624 rtx shift_count = XEXP (XEXP (op0, 0), 1);
12625
12626 if (GET_CODE (shift_op) == NOT
12627 || (GET_CODE (shift_op) == XOR
12628 && CONST_INT_P (XEXP (shift_op, 1))
12629 && CONST_INT_P (shift_count)
12630 && HWI_COMPUTABLE_MODE_P (mode)
12631 && (UINTVAL (XEXP (shift_op, 1))
12632 == HOST_WIDE_INT_1U
12633 << INTVAL (shift_count))))
12634 {
12635 op0
12636 = gen_rtx_LSHIFTRT (mode, XEXP (shift_op, 0), shift_count);
12637 op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
12638 code = (code == NE ? EQ : NE);
12639 continue;
12640 }
12641 }
12642 break;
12643
12644 case ASHIFT:
12645 /* If we have (compare (ashift FOO N) (const_int C)) and
12646 the high order N bits of FOO (N+1 if an inequality comparison)
12647 are known to be zero, we can do this by comparing FOO with C
12648 shifted right N bits so long as the low-order N bits of C are
12649 zero. */
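/* E.g. (compare (ashift X (const_int 2)) (const_int 20)) can become
   (compare X (const_int 5)) under these conditions. */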
12650 if (CONST_INT_P (XEXP (op0, 1))
12651 && INTVAL (XEXP (op0, 1)) >= 0
12652 && ((INTVAL (XEXP (op0, 1)) + ! equality_comparison_p)
12653 < HOST_BITS_PER_WIDE_INT)
12654 && (((unsigned HOST_WIDE_INT) const_op
12655 & ((HOST_WIDE_INT_1U << INTVAL (XEXP (op0, 1)))
12656 - 1)) == 0)
12657 && mode_width <= HOST_BITS_PER_WIDE_INT
12658 && (nonzero_bits (XEXP (op0, 0), mode)
12659 & ~(mask >> (INTVAL (XEXP (op0, 1))
12660 + ! equality_comparison_p))) == 0)
12661 {
12662 /* We must perform a logical shift, not an arithmetic one,
12663 as we want the top N bits of C to be zero. */
12664 unsigned HOST_WIDE_INT temp = const_op & GET_MODE_MASK (mode);
12665
12666 temp >>= INTVAL (XEXP (op0, 1));
12667 op1 = gen_int_mode (temp, mode);
12668 op0 = XEXP (op0, 0);
12669 continue;
12670 }
12671
12672 /* If we are doing a sign bit comparison, it means we are testing
12673 a particular bit. Convert it to the appropriate AND. */
12674 if (sign_bit_comparison_p && CONST_INT_P (XEXP (op0, 1))
12675 && mode_width <= HOST_BITS_PER_WIDE_INT)
12676 {
12677 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
12678 (HOST_WIDE_INT_1U
12679 << (mode_width - 1
12680 - INTVAL (XEXP (op0, 1)))));
12681 code = (code == LT ? NE : EQ);
12682 continue;
12683 }
12684
12685 /* If this is an equality comparison with zero and we are shifting
12686 the low bit to the sign bit, we can convert this to an AND of the
12687 low-order bit. */
12688 if (const_op == 0 && equality_comparison_p
12689 && CONST_INT_P (XEXP (op0, 1))
12690 && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
12691 {
12692 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0), 1);
12693 continue;
12694 }
12695 break;
12696
12697 case ASHIFTRT:
12698 /* If this is an equality comparison with zero, we can do this
12699 as a logical shift, which might be much simpler. */
12700 if (equality_comparison_p && const_op == 0
12701 && CONST_INT_P (XEXP (op0, 1)))
12702 {
12703 op0 = simplify_shift_const (NULL_RTX, LSHIFTRT, mode,
12704 XEXP (op0, 0),
12705 INTVAL (XEXP (op0, 1)));
12706 continue;
12707 }
12708
12709 /* If OP0 is a sign extension and CODE is not an unsigned comparison,
12710 do the comparison in a narrower mode. */
12711 if (! unsigned_comparison_p
12712 && CONST_INT_P (XEXP (op0, 1))
12713 && GET_CODE (XEXP (op0, 0)) == ASHIFT
12714 && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
12715 && (int_mode_for_size (mode_width - INTVAL (XEXP (op0, 1)), 1)
12716 .exists (&tmode))
12717 && (((unsigned HOST_WIDE_INT) const_op
12718 + (GET_MODE_MASK (tmode) >> 1) + 1)
12719 <= GET_MODE_MASK (tmode)))
12720 {
12721 op0 = gen_lowpart (tmode, XEXP (XEXP (op0, 0), 0));
12722 continue;
12723 }
12724
12725 /* Likewise if OP0 is a PLUS of a sign extension with a
12726 constant, which is usually represented with the PLUS
12727 between the shifts. */
12728 if (! unsigned_comparison_p
12729 && CONST_INT_P (XEXP (op0, 1))
12730 && GET_CODE (XEXP (op0, 0)) == PLUS
12731 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
12732 && GET_CODE (XEXP (XEXP (op0, 0), 0)) == ASHIFT
12733 && XEXP (op0, 1) == XEXP (XEXP (XEXP (op0, 0), 0), 1)
12734 && (int_mode_for_size (mode_width - INTVAL (XEXP (op0, 1)), 1)
12735 .exists (&tmode))
12736 && (((unsigned HOST_WIDE_INT) const_op
12737 + (GET_MODE_MASK (tmode) >> 1) + 1)
12738 <= GET_MODE_MASK (tmode)))
12739 {
12740 rtx inner = XEXP (XEXP (XEXP (op0, 0), 0), 0);
12741 rtx add_const = XEXP (XEXP (op0, 0), 1);
12742 rtx new_const = simplify_gen_binary (ASHIFTRT, mode,
12743 add_const, XEXP (op0, 1));
12744
12745 op0 = simplify_gen_binary (PLUS, tmode,
12746 gen_lowpart (tmode, inner),
12747 new_const);
12748 continue;
12749 }
12750
12751 /* FALLTHROUGH */
12752 case LSHIFTRT:
12753 /* If we have (compare (xshiftrt FOO N) (const_int C)) and
12754 the low order N bits of FOO are known to be zero, we can do this
12755 by comparing FOO with C shifted left N bits so long as no
12756 overflow occurs. Even if the low order N bits of FOO aren't known
12757 to be zero, we can still use this optimization: for >= or < we can
12758 use it directly, and for > or <= we can use it after also setting
12759 all the low order N bits in the comparison constant. */
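/* E.g. (geu (lshiftrt X (const_int 2)) (const_int 5)) becomes
   (geu X (const_int 20)). */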
12760 if (CONST_INT_P (XEXP (op0, 1))
12761 && INTVAL (XEXP (op0, 1)) > 0
12762 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
12763 && mode_width <= HOST_BITS_PER_WIDE_INT
12764 && (((unsigned HOST_WIDE_INT) const_op
12765 + (GET_CODE (op0) != LSHIFTRT
12766 ? ((GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1)) >> 1)
12767 + 1)
12768 : 0))
12769 <= GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1))))
12770 {
12771 unsigned HOST_WIDE_INT low_bits
12772 = (nonzero_bits (XEXP (op0, 0), mode)
12773 & ((HOST_WIDE_INT_1U
12774 << INTVAL (XEXP (op0, 1))) - 1));
12775 if (low_bits == 0 || !equality_comparison_p)
12776 {
12777 /* If the shift was logical, then we must make the condition
12778 unsigned. */
12779 if (GET_CODE (op0) == LSHIFTRT)
12780 code = unsigned_condition (code);
12781
12782 const_op = (unsigned HOST_WIDE_INT) const_op
12783 << INTVAL (XEXP (op0, 1));
12784 if (low_bits != 0
12785 && (code == GT || code == GTU
12786 || code == LE || code == LEU))
12787 const_op
12788 |= ((HOST_WIDE_INT_1 << INTVAL (XEXP (op0, 1))) - 1);
12789 op1 = GEN_INT (const_op);
12790 op0 = XEXP (op0, 0);
12791 continue;
12792 }
12793 }
12794
12795 /* If we are using this shift to extract just the sign bit, we
12796 can replace this with an LT or GE comparison. */
12797 if (const_op == 0
12798 && (equality_comparison_p || sign_bit_comparison_p)
12799 && CONST_INT_P (XEXP (op0, 1))
12800 && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
12801 {
12802 op0 = XEXP (op0, 0);
12803 code = (code == NE || code == GT ? LT : GE);
12804 continue;
12805 }
12806 break;
12807
12808 default:
12809 break;
12810 }
12811
12812 break;
12813 }
12814
12815 /* Now make any compound operations involved in this comparison. Then,
12816 check for an outermost SUBREG on OP0 that is not doing anything or is
12817 paradoxical. The latter transformation must only be performed when
12818 it is known that the "extra" bits will be the same in op0 and op1 or
12819 that they don't matter. There are three cases to consider:
12820
12821 1. SUBREG_REG (op0) is a register. In this case the bits are don't
12822 care bits and we can assume they have any convenient value. So
12823 making the transformation is safe.
12824
12825 2. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is UNKNOWN.
12826 In this case the upper bits of op0 are undefined. We should not make
12827 the simplification in that case as we do not know the contents of
12828 those bits.
12829
12830 3. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is not UNKNOWN.
12831 In that case we know those bits are zeros or ones. We must also be
12832 sure that they are the same as the upper bits of op1.
12833
12834 We can never remove a SUBREG for a non-equality comparison because
12835 the sign bit is in a different place in the underlying object. */
12836
12837 rtx_code op0_mco_code = SET;
12838 if (op1 == const0_rtx)
12839 op0_mco_code = code == NE || code == EQ ? EQ : COMPARE;
12840
12841 op0 = make_compound_operation (op0, op0_mco_code);
12842 op1 = make_compound_operation (op1, SET);
12843
12844 if (GET_CODE (op0) == SUBREG && subreg_lowpart_p (op0)
12845 && is_int_mode (GET_MODE (op0), &mode)
12846 && is_int_mode (GET_MODE (SUBREG_REG (op0)), &inner_mode)
12847 && (code == NE || code == EQ))
12848 {
12849 if (paradoxical_subreg_p (op0))
12850 {
12851 /* For paradoxical subregs, allow case 1 as above. Case 3 isn't
12852 implemented. */
12853 if (REG_P (SUBREG_REG (op0)))
12854 {
12855 op0 = SUBREG_REG (op0);
12856 op1 = gen_lowpart (inner_mode, op1);
12857 }
12858 }
12859 else if (GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
12860 && (nonzero_bits (SUBREG_REG (op0), inner_mode)
12861 & ~GET_MODE_MASK (mode)) == 0)
12862 {
12863 tem = gen_lowpart (inner_mode, op1);
12864
12865 if ((nonzero_bits (tem, inner_mode) & ~GET_MODE_MASK (mode)) == 0)
12866 op0 = SUBREG_REG (op0), op1 = tem;
12867 }
12868 }
12869
12870 /* We now do the opposite procedure: Some machines don't have compare
12871 insns in all modes. If OP0's mode is an integer mode smaller than a
12872 word and we can't do a compare in that mode, see if there is a larger
12873 mode for which we can do the compare. There are a number of cases in
12874 which we can use the wider mode. */
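/* For instance, a QImode equality test can be done in SImode when neither
   operand has bits set outside QImode, by zero-extending both operands. */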
12875
12876 if (is_int_mode (GET_MODE (op0), &mode)
12877 && GET_MODE_SIZE (mode) < UNITS_PER_WORD
12878 && ! have_insn_for (COMPARE, mode))
12879 FOR_EACH_WIDER_MODE (tmode_iter, mode)
12880 {
12881 tmode = tmode_iter.require ();
12882 if (!HWI_COMPUTABLE_MODE_P (tmode))
12883 break;
12884 if (have_insn_for (COMPARE, tmode))
12885 {
12886 int zero_extended;
12887
12888 /* If this is a test for negative, we can make an explicit
12889 test of the sign bit. Test this first so we can use
12890 a paradoxical subreg to extend OP0. */
12891
12892 if (op1 == const0_rtx && (code == LT || code == GE)
12893 && HWI_COMPUTABLE_MODE_P (mode))
12894 {
12895 unsigned HOST_WIDE_INT sign
12896 = HOST_WIDE_INT_1U << (GET_MODE_BITSIZE (mode) - 1);
12897 op0 = simplify_gen_binary (AND, tmode,
12898 gen_lowpart (tmode, op0),
12899 gen_int_mode (sign, tmode));
12900 code = (code == LT) ? NE : EQ;
12901 break;
12902 }
12903
12904 /* If the only nonzero bits in OP0 and OP1 are those in the
12905 narrower mode and this is an equality or unsigned comparison,
12906 we can use the wider mode. Similarly for sign-extended
12907 values, in which case it is true for all comparisons. */
12908 zero_extended = ((code == EQ || code == NE
12909 || code == GEU || code == GTU
12910 || code == LEU || code == LTU)
12911 && (nonzero_bits (op0, tmode)
12912 & ~GET_MODE_MASK (mode)) == 0
12913 && ((CONST_INT_P (op1)
12914 || (nonzero_bits (op1, tmode)
12915 & ~GET_MODE_MASK (mode)) == 0)));
12916
12917 if (zero_extended
12918 || ((num_sign_bit_copies (op0, tmode)
12919 > (unsigned int) (GET_MODE_PRECISION (tmode)
12920 - GET_MODE_PRECISION (mode)))
12921 && (num_sign_bit_copies (op1, tmode)
12922 > (unsigned int) (GET_MODE_PRECISION (tmode)
12923 - GET_MODE_PRECISION (mode)))))
12924 {
12925 /* If OP0 is an AND and we don't have an AND in MODE either,
12926 make a new AND in the proper mode. */
12927 if (GET_CODE (op0) == AND
12928 && !have_insn_for (AND, mode))
12929 op0 = simplify_gen_binary (AND, tmode,
12930 gen_lowpart (tmode,
12931 XEXP (op0, 0)),
12932 gen_lowpart (tmode,
12933 XEXP (op0, 1)));
12934 else
12935 {
12936 if (zero_extended)
12937 {
12938 op0 = simplify_gen_unary (ZERO_EXTEND, tmode,
12939 op0, mode);
12940 op1 = simplify_gen_unary (ZERO_EXTEND, tmode,
12941 op1, mode);
12942 }
12943 else
12944 {
12945 op0 = simplify_gen_unary (SIGN_EXTEND, tmode,
12946 op0, mode);
12947 op1 = simplify_gen_unary (SIGN_EXTEND, tmode,
12948 op1, mode);
12949 }
12950 break;
12951 }
12952 }
12953 }
12954 }
12955
12956 /* We may have changed the comparison operands. Re-canonicalize. */
12957 if (swap_commutative_operands_p (op0, op1))
12958 {
12959 std::swap (op0, op1);
12960 code = swap_condition (code);
12961 }
12962
12963 /* If this machine only supports a subset of valid comparisons, see if we
12964 can convert an unsupported one into a supported one. */
12965 target_canonicalize_comparison (&code, &op0, &op1, 0);
12966
12967 *pop0 = op0;
12968 *pop1 = op1;
12969
12970 return code;
12971 }
12972 \f
12973 /* Utility function for record_value_for_reg. Count number of
12974 rtxs in X. */
12975 static int
12976 count_rtxs (rtx x)
12977 {
12978 enum rtx_code code = GET_CODE (x);
12979 const char *fmt;
12980 int i, j, ret = 1;
12981
12982 if (GET_RTX_CLASS (code) == RTX_BIN_ARITH
12983 || GET_RTX_CLASS (code) == RTX_COMM_ARITH)
12984 {
12985 rtx x0 = XEXP (x, 0);
12986 rtx x1 = XEXP (x, 1);
12987
12988 if (x0 == x1)
12989 return 1 + 2 * count_rtxs (x0);
12990
12991 if ((GET_RTX_CLASS (GET_CODE (x1)) == RTX_BIN_ARITH
12992 || GET_RTX_CLASS (GET_CODE (x1)) == RTX_COMM_ARITH)
12993 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
12994 return 2 + 2 * count_rtxs (x0)
12995 + count_rtxs (x == XEXP (x1, 0)
12996 ? XEXP (x1, 1) : XEXP (x1, 0));
12997
12998 if ((GET_RTX_CLASS (GET_CODE (x0)) == RTX_BIN_ARITH
12999 || GET_RTX_CLASS (GET_CODE (x0)) == RTX_COMM_ARITH)
13000 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
13001 return 2 + 2 * count_rtxs (x1)
13002 + count_rtxs (x == XEXP (x0, 0)
13003 ? XEXP (x0, 1) : XEXP (x0, 0));
13004 }
13005
13006 fmt = GET_RTX_FORMAT (code);
13007 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
13008 if (fmt[i] == 'e')
13009 ret += count_rtxs (XEXP (x, i));
13010 else if (fmt[i] == 'E')
13011 for (j = 0; j < XVECLEN (x, i); j++)
13012 ret += count_rtxs (XVECEXP (x, i, j));
13013
13014 return ret;
13015 }
13016 \f
13017 /* Utility function for the following routine. Called when X is part of a
13018 value being stored into last_set_value. Sets last_set_table_tick
13019 for each register mentioned. Similar to mention_regs in cse.c. */
13020
13021 static void
13022 update_table_tick (rtx x)
13023 {
13024 enum rtx_code code = GET_CODE (x);
13025 const char *fmt = GET_RTX_FORMAT (code);
13026 int i, j;
13027
13028 if (code == REG)
13029 {
13030 unsigned int regno = REGNO (x);
13031 unsigned int endregno = END_REGNO (x);
13032 unsigned int r;
13033
13034 for (r = regno; r < endregno; r++)
13035 {
13036 reg_stat_type *rsp = &reg_stat[r];
13037 rsp->last_set_table_tick = label_tick;
13038 }
13039
13040 return;
13041 }
13042
13043 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
13044 if (fmt[i] == 'e')
13045 {
13046 /* Check for identical subexpressions. If x contains
13047 identical subexpressions we only have to traverse one of
13048 them. */
13049 if (i == 0 && ARITHMETIC_P (x))
13050 {
13051 /* Note that at this point x1 has already been
13052 processed. */
13053 rtx x0 = XEXP (x, 0);
13054 rtx x1 = XEXP (x, 1);
13055
13056 /* If x0 and x1 are identical then there is no need to
13057 process x0. */
13058 if (x0 == x1)
13059 break;
13060
13061 /* If x0 is identical to a subexpression of x1 then while
13062 processing x1, x0 has already been processed. Thus we
13063 are done with x. */
13064 if (ARITHMETIC_P (x1)
13065 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
13066 break;
13067
13068 /* If x1 is identical to a subexpression of x0 then we
13069 still have to process the rest of x0. */
13070 if (ARITHMETIC_P (x0)
13071 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
13072 {
13073 update_table_tick (XEXP (x0, x1 == XEXP (x0, 0) ? 1 : 0));
13074 break;
13075 }
13076 }
13077
13078 update_table_tick (XEXP (x, i));
13079 }
13080 else if (fmt[i] == 'E')
13081 for (j = 0; j < XVECLEN (x, i); j++)
13082 update_table_tick (XVECEXP (x, i, j));
13083 }
13084
13085 /* Record that REG is set to VALUE in insn INSN. If VALUE is zero, we
13086 are saying that the register is clobbered and we no longer know its
13087 value. If INSN is zero, don't update reg_stat[].last_set; this is
13088 only permitted with VALUE also zero and is used to invalidate the
13089 register. */
13090
13091 static void
13092 record_value_for_reg (rtx reg, rtx_insn *insn, rtx value)
13093 {
13094 unsigned int regno = REGNO (reg);
13095 unsigned int endregno = END_REGNO (reg);
13096 unsigned int i;
13097 reg_stat_type *rsp;
13098
13099 /* If VALUE contains REG and we have a previous value for REG, substitute
13100 the previous value. */
13101 if (value && insn && reg_overlap_mentioned_p (reg, value))
13102 {
13103 rtx tem;
13104
13105 /* Set things up so get_last_value is allowed to see anything set up to
13106 our insn. */
13107 subst_low_luid = DF_INSN_LUID (insn);
13108 tem = get_last_value (reg);
13109
13110 /* If TEM is simply a binary operation with two CLOBBERs as operands,
13111 it isn't going to be useful and will take a lot of time to process,
13112 so just use the CLOBBER. */
13113
13114 if (tem)
13115 {
13116 if (ARITHMETIC_P (tem)
13117 && GET_CODE (XEXP (tem, 0)) == CLOBBER
13118 && GET_CODE (XEXP (tem, 1)) == CLOBBER)
13119 tem = XEXP (tem, 0);
13120 else if (count_occurrences (value, reg, 1) >= 2)
13121 {
13122 /* If there are two or more occurrences of REG in VALUE,
13123 prevent the value from growing too much. */
13124 if (count_rtxs (tem) > MAX_LAST_VALUE_RTL)
13125 tem = gen_rtx_CLOBBER (GET_MODE (tem), const0_rtx);
13126 }
13127
13128 value = replace_rtx (copy_rtx (value), reg, tem);
13129 }
13130 }
13131
13132 /* For each register modified, show we don't know its value, that
13133 we don't know about its bitwise content, that its value has been
13134 updated, and that we don't know the location of the death of the
13135 register. */
13136 for (i = regno; i < endregno; i++)
13137 {
13138 rsp = &reg_stat[i];
13139
13140 if (insn)
13141 rsp->last_set = insn;
13142
13143 rsp->last_set_value = 0;
13144 rsp->last_set_mode = VOIDmode;
13145 rsp->last_set_nonzero_bits = 0;
13146 rsp->last_set_sign_bit_copies = 0;
13147 rsp->last_death = 0;
13148 rsp->truncated_to_mode = VOIDmode;
13149 }
13150
13151 /* Mark registers that are being referenced in this value. */
13152 if (value)
13153 update_table_tick (value);
13154
13155 /* Now update the status of each register being set.
13156 If someone is using this register in this block, set this register
13157 to invalid since we will get confused between the two lives in this
13158 basic block. This makes using this register always invalid. In cse, we
13159 scan the table to invalidate all entries using this register, but this
13160 is too much work for us. */
13161
13162 for (i = regno; i < endregno; i++)
13163 {
13164 rsp = &reg_stat[i];
13165 rsp->last_set_label = label_tick;
13166 if (!insn
13167 || (value && rsp->last_set_table_tick >= label_tick_ebb_start))
13168 rsp->last_set_invalid = 1;
13169 else
13170 rsp->last_set_invalid = 0;
13171 }
13172
13173 /* The value being assigned might refer to X (like in "x++;"). In that
13174 case, we must replace it with (clobber (const_int 0)) to prevent
13175 infinite loops. */
13176 rsp = &reg_stat[regno];
13177 if (value && !get_last_value_validate (&value, insn, label_tick, 0))
13178 {
13179 value = copy_rtx (value);
13180 if (!get_last_value_validate (&value, insn, label_tick, 1))
13181 value = 0;
13182 }
13183
13184 /* For the main register being modified, update the value, the mode, the
13185 nonzero bits, and the number of sign bit copies. */
13186
13187 rsp->last_set_value = value;
13188
13189 if (value)
13190 {
13191 machine_mode mode = GET_MODE (reg);
13192 subst_low_luid = DF_INSN_LUID (insn);
13193 rsp->last_set_mode = mode;
13194 if (GET_MODE_CLASS (mode) == MODE_INT
13195 && HWI_COMPUTABLE_MODE_P (mode))
13196 mode = nonzero_bits_mode;
13197 rsp->last_set_nonzero_bits = nonzero_bits (value, mode);
13198 rsp->last_set_sign_bit_copies
13199 = num_sign_bit_copies (value, GET_MODE (reg));
13200 }
13201 }
13202
13203 /* Called via note_stores from record_dead_and_set_regs to handle one
13204 SET or CLOBBER in an insn. DATA is the instruction in which the
13205 set is occurring. */
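/* Illustrative sketch: a full-width
     (set (reg:SI 100) (reg:SI 99))
   records (reg:SI 99) as the value of reg 100; a store to a lowpart
   SUBREG of reg 100 is folded through gen_lowpart first; any other
   partial store, and any CLOBBER, simply invalidates the recorded
   value.  */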
13206
13207 static void
13208 record_dead_and_set_regs_1 (rtx dest, const_rtx setter, void *data)
13209 {
13210 rtx_insn *record_dead_insn = (rtx_insn *) data;
13211
13212 if (GET_CODE (dest) == SUBREG)
13213 dest = SUBREG_REG (dest);
13214
13215 if (!record_dead_insn)
13216 {
13217 if (REG_P (dest))
13218 record_value_for_reg (dest, NULL, NULL_RTX);
13219 return;
13220 }
13221
13222 if (REG_P (dest))
13223 {
13224 /* If we are setting the whole register, we know its value. Otherwise
13225 show that we don't know the value. We can handle SUBREG in
13226 some cases. */
13227 if (GET_CODE (setter) == SET && dest == SET_DEST (setter))
13228 record_value_for_reg (dest, record_dead_insn, SET_SRC (setter));
13229 else if (GET_CODE (setter) == SET
13230 && GET_CODE (SET_DEST (setter)) == SUBREG
13231 && SUBREG_REG (SET_DEST (setter)) == dest
13232 && GET_MODE_PRECISION (GET_MODE (dest)) <= BITS_PER_WORD
13233 && subreg_lowpart_p (SET_DEST (setter)))
13234 record_value_for_reg (dest, record_dead_insn,
13235 gen_lowpart (GET_MODE (dest),
13236 SET_SRC (setter)));
13237 else
13238 record_value_for_reg (dest, record_dead_insn, NULL_RTX);
13239 }
13240 else if (MEM_P (dest)
13241 /* Ignore pushes, they clobber nothing. */
13242 && ! push_operand (dest, GET_MODE (dest)))
13243 mem_last_set = DF_INSN_LUID (record_dead_insn);
13244 }
13245
13246 /* Update the records of when each REG was most recently set or killed
13247 for the things done by INSN. This is the last thing done in processing
13248 INSN in the combiner loop.
13249
13250 We update reg_stat[], in particular fields last_set, last_set_value,
13251 last_set_mode, last_set_nonzero_bits, last_set_sign_bit_copies,
13252 last_death, and also the similar information mem_last_set (which insn
13253 most recently modified memory) and last_call_luid (which insn was the
13254 most recent subroutine call). */
13255
13256 static void
13257 record_dead_and_set_regs (rtx_insn *insn)
13258 {
13259 rtx link;
13260 unsigned int i;
13261
13262 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
13263 {
13264 if (REG_NOTE_KIND (link) == REG_DEAD
13265 && REG_P (XEXP (link, 0)))
13266 {
13267 unsigned int regno = REGNO (XEXP (link, 0));
13268 unsigned int endregno = END_REGNO (XEXP (link, 0));
13269
13270 for (i = regno; i < endregno; i++)
13271 {
13272 reg_stat_type *rsp;
13273
13274 rsp = &reg_stat[i];
13275 rsp->last_death = insn;
13276 }
13277 }
13278 else if (REG_NOTE_KIND (link) == REG_INC)
13279 record_value_for_reg (XEXP (link, 0), insn, NULL_RTX);
13280 }
13281
13282 if (CALL_P (insn))
13283 {
13284 hard_reg_set_iterator hrsi;
13285 EXECUTE_IF_SET_IN_HARD_REG_SET (regs_invalidated_by_call, 0, i, hrsi)
13286 {
13287 reg_stat_type *rsp;
13288
13289 rsp = &reg_stat[i];
13290 rsp->last_set_invalid = 1;
13291 rsp->last_set = insn;
13292 rsp->last_set_value = 0;
13293 rsp->last_set_mode = VOIDmode;
13294 rsp->last_set_nonzero_bits = 0;
13295 rsp->last_set_sign_bit_copies = 0;
13296 rsp->last_death = 0;
13297 rsp->truncated_to_mode = VOIDmode;
13298 }
13299
13300 last_call_luid = mem_last_set = DF_INSN_LUID (insn);
13301
13302 /* We can't combine into a call pattern. Remember, though, that
13303 the return value register is set at this LUID. We could
13304 still replace a register with the return value from the
13305 wrong subroutine call! */
13306 note_stores (PATTERN (insn), record_dead_and_set_regs_1, NULL_RTX);
13307 }
13308 else
13309 note_stores (PATTERN (insn), record_dead_and_set_regs_1, insn);
13310 }
13311
13312 /* If a SUBREG has the promoted bit set, it is in fact a property of the
13313 register present in the SUBREG, so for each such SUBREG go back and
13314 adjust nonzero and sign bit information of the registers that are
13315 known to have some zero/sign bits set.
13316
13317 This is needed because when combine blows the SUBREGs away, the
13318 information on zero/sign bits is lost and further combines can be
13319 missed because of that. */
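/* Illustrative sketch: a promoted access such as
     (subreg/s/u:QI (reg:SI 100) 0)
   says reg 100 holds a zero-extended QImode value, so the code below
   can mask reg 100's last_set_nonzero_bits down to 0xff, following
   LOG_LINKS through plain register copies.  */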
13320
13321 static void
13322 record_promoted_value (rtx_insn *insn, rtx subreg)
13323 {
13324 struct insn_link *links;
13325 rtx set;
13326 unsigned int regno = REGNO (SUBREG_REG (subreg));
13327 machine_mode mode = GET_MODE (subreg);
13328
13329 if (!HWI_COMPUTABLE_MODE_P (mode))
13330 return;
13331
13332 for (links = LOG_LINKS (insn); links;)
13333 {
13334 reg_stat_type *rsp;
13335
13336 insn = links->insn;
13337 set = single_set (insn);
13338
13339 if (! set || !REG_P (SET_DEST (set))
13340 || REGNO (SET_DEST (set)) != regno
13341 || GET_MODE (SET_DEST (set)) != GET_MODE (SUBREG_REG (subreg)))
13342 {
13343 links = links->next;
13344 continue;
13345 }
13346
13347 rsp = &reg_stat[regno];
13348 if (rsp->last_set == insn)
13349 {
13350 if (SUBREG_PROMOTED_UNSIGNED_P (subreg))
13351 rsp->last_set_nonzero_bits &= GET_MODE_MASK (mode);
13352 }
13353
13354 if (REG_P (SET_SRC (set)))
13355 {
13356 regno = REGNO (SET_SRC (set));
13357 links = LOG_LINKS (insn);
13358 }
13359 else
13360 break;
13361 }
13362 }
13363
13364 /* Check if X, a register, is known to contain a value already
13365 truncated to MODE. In this case we can use a subreg to refer to
13366 the truncated value even though in the generic case we would need
13367 an explicit truncation. */
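/* Illustrative sketch: if reg 100 was recorded as already truncated to
   HImode within this EBB, then reg_truncated_to_mode (HImode, x) is
   true for it, so an explicit (truncate:HI ...) of the register can be
   expressed as a plain lowpart SUBREG.  */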
13368
13369 static bool
13370 reg_truncated_to_mode (machine_mode mode, const_rtx x)
13371 {
13372 reg_stat_type *rsp = &reg_stat[REGNO (x)];
13373 machine_mode truncated = rsp->truncated_to_mode;
13374
13375 if (truncated == 0
13376 || rsp->truncation_label < label_tick_ebb_start)
13377 return false;
13378 if (!partial_subreg_p (mode, truncated))
13379 return true;
13380 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, truncated))
13381 return true;
13382 return false;
13383 }
13384
13385 /* If X is a hard reg or a subreg record the mode that the register is
13386 accessed in. For non-TARGET_TRULY_NOOP_TRUNCATION targets we might be
13387 able to turn a truncate into a subreg using this information. Return true
13388 if traversing X is complete. */
13389
13390 static bool
13391 record_truncated_value (rtx x)
13392 {
13393 machine_mode truncated_mode;
13394 reg_stat_type *rsp;
13395
13396 if (GET_CODE (x) == SUBREG && REG_P (SUBREG_REG (x)))
13397 {
13398 machine_mode original_mode = GET_MODE (SUBREG_REG (x));
13399 truncated_mode = GET_MODE (x);
13400
13401 if (!partial_subreg_p (truncated_mode, original_mode))
13402 return true;
13403
13404 truncated_mode = GET_MODE (x);
13405 if (TRULY_NOOP_TRUNCATION_MODES_P (truncated_mode, original_mode))
13406 return true;
13407
13408 x = SUBREG_REG (x);
13409 }
13410 /* ??? For hard-regs we now record everything. We might be able to
13411 optimize this using last_set_mode. */
13412 else if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
13413 truncated_mode = GET_MODE (x);
13414 else
13415 return false;
13416
13417 rsp = &reg_stat[REGNO (x)];
13418 if (rsp->truncated_to_mode == 0
13419 || rsp->truncation_label < label_tick_ebb_start
13420 || partial_subreg_p (truncated_mode, rsp->truncated_to_mode))
13421 {
13422 rsp->truncated_to_mode = truncated_mode;
13423 rsp->truncation_label = label_tick;
13424 }
13425
13426 return true;
13427 }
13428
13429 /* Callback for note_uses. Find hardregs and subregs of pseudos and
13430 the modes they are used in. This can help turn TRUNCATEs into
13431 SUBREGs. */
13432
13433 static void
13434 record_truncated_values (rtx *loc, void *data ATTRIBUTE_UNUSED)
13435 {
13436 subrtx_var_iterator::array_type array;
13437 FOR_EACH_SUBRTX_VAR (iter, array, *loc, NONCONST)
13438 if (record_truncated_value (*iter))
13439 iter.skip_subrtxes ();
13440 }
13441
13442 /* Scan X for promoted SUBREGs. For each one found,
13443 note what it implies to the registers used in it. */
13444
13445 static void
13446 check_promoted_subreg (rtx_insn *insn, rtx x)
13447 {
13448 if (GET_CODE (x) == SUBREG
13449 && SUBREG_PROMOTED_VAR_P (x)
13450 && REG_P (SUBREG_REG (x)))
13451 record_promoted_value (insn, x);
13452 else
13453 {
13454 const char *format = GET_RTX_FORMAT (GET_CODE (x));
13455 int i, j;
13456
13457 for (i = 0; i < GET_RTX_LENGTH (GET_CODE (x)); i++)
13458 switch (format[i])
13459 {
13460 case 'e':
13461 check_promoted_subreg (insn, XEXP (x, i));
13462 break;
13463 case 'V':
13464 case 'E':
13465 if (XVEC (x, i) != 0)
13466 for (j = 0; j < XVECLEN (x, i); j++)
13467 check_promoted_subreg (insn, XVECEXP (x, i, j));
13468 break;
13469 }
13470 }
13471 }
13472 \f
13473 /* Verify that all the registers and memory references mentioned in *LOC are
13474 still valid. *LOC was part of a value set in INSN when label_tick was
13475 equal to TICK. Return 0 if some are not. If REPLACE is nonzero, replace
13476 the invalid references with (clobber (const_int 0)) and return 1. This
13477 replacement is useful because we often can get useful information about
13478 the form of a value (e.g., if it was produced by a shift that always
13479 produces -1 or 0) even though we don't know exactly what registers it
13480 was produced from. */
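/* Illustrative sketch: if the recorded value of reg 100 was
     (plus:SI (reg:SI 99) (const_int 4))
   and reg 99 has since been set again, a call with REPLACE nonzero
   rewrites the value as (plus:SI (clobber:SI (const_int 0)) (const_int 4)),
   which still reveals something about the form of the result.  */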
13481
13482 static int
13483 get_last_value_validate (rtx *loc, rtx_insn *insn, int tick, int replace)
13484 {
13485 rtx x = *loc;
13486 const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
13487 int len = GET_RTX_LENGTH (GET_CODE (x));
13488 int i, j;
13489
13490 if (REG_P (x))
13491 {
13492 unsigned int regno = REGNO (x);
13493 unsigned int endregno = END_REGNO (x);
13494 unsigned int j;
13495
13496 for (j = regno; j < endregno; j++)
13497 {
13498 reg_stat_type *rsp = &reg_stat[j];
13499 if (rsp->last_set_invalid
13500 /* If this is a pseudo-register that was only set once and not
13501 live at the beginning of the function, it is always valid. */
13502 || (! (regno >= FIRST_PSEUDO_REGISTER
13503 && regno < reg_n_sets_max
13504 && REG_N_SETS (regno) == 1
13505 && (!REGNO_REG_SET_P
13506 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
13507 regno)))
13508 && rsp->last_set_label > tick))
13509 {
13510 if (replace)
13511 *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
13512 return replace;
13513 }
13514 }
13515
13516 return 1;
13517 }
13518 /* If this is a memory reference, make sure that there were no stores after
13519 it that might have clobbered the value. We don't have alias info, so we
13520 assume any store invalidates it. Moreover, we only have local UIDs, so
13521 we also assume that there were stores in the intervening basic blocks. */
13522 else if (MEM_P (x) && !MEM_READONLY_P (x)
13523 && (tick != label_tick || DF_INSN_LUID (insn) <= mem_last_set))
13524 {
13525 if (replace)
13526 *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
13527 return replace;
13528 }
13529
13530 for (i = 0; i < len; i++)
13531 {
13532 if (fmt[i] == 'e')
13533 {
13534 /* Check for identical subexpressions. If x contains
13535 identical subexpressions we only have to traverse one of
13536 them. */
13537 if (i == 1 && ARITHMETIC_P (x))
13538 {
13539 /* Note that at this point x0 has already been checked
13540 and found valid. */
13541 rtx x0 = XEXP (x, 0);
13542 rtx x1 = XEXP (x, 1);
13543
13544 /* If x0 and x1 are identical then x is also valid. */
13545 if (x0 == x1)
13546 return 1;
13547
13548 /* If x1 is identical to a subexpression of x0 then
13549 while checking x0, x1 has already been checked. Thus
13550 it is valid and so is x. */
13551 if (ARITHMETIC_P (x0)
13552 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
13553 return 1;
13554
13555 /* If x0 is identical to a subexpression of x1 then x is
13556 valid iff the rest of x1 is valid. */
13557 if (ARITHMETIC_P (x1)
13558 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
13559 return
13560 get_last_value_validate (&XEXP (x1,
13561 x0 == XEXP (x1, 0) ? 1 : 0),
13562 insn, tick, replace);
13563 }
13564
13565 if (get_last_value_validate (&XEXP (x, i), insn, tick,
13566 replace) == 0)
13567 return 0;
13568 }
13569 else if (fmt[i] == 'E')
13570 for (j = 0; j < XVECLEN (x, i); j++)
13571 if (get_last_value_validate (&XVECEXP (x, i, j),
13572 insn, tick, replace) == 0)
13573 return 0;
13574 }
13575
13576 /* If we haven't found a reason for it to be invalid, it is valid. */
13577 return 1;
13578 }
13579
13580 /* Get the last value assigned to X, if known. Some registers
13581 in the value may be replaced with (clobber (const_int 0)) if their value
13582 is no longer known reliably. */
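/* Illustrative sketch: after
     (set (reg:SI 100) (const_int 5))
   get_last_value of reg 100 returns (const_int 5) while the recorded
   value is still valid; for a lowpart SUBREG of reg 100 the value is
   first converted to the narrower mode.  */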
13583
13584 static rtx
13585 get_last_value (const_rtx x)
13586 {
13587 unsigned int regno;
13588 rtx value;
13589 reg_stat_type *rsp;
13590
13591 /* If this is a non-paradoxical SUBREG, get the value of its operand and
13592 then convert it to the desired mode. If this is a paradoxical SUBREG,
13593 we cannot predict what values the "extra" bits might have. */
13594 if (GET_CODE (x) == SUBREG
13595 && subreg_lowpart_p (x)
13596 && !paradoxical_subreg_p (x)
13597 && (value = get_last_value (SUBREG_REG (x))) != 0)
13598 return gen_lowpart (GET_MODE (x), value);
13599
13600 if (!REG_P (x))
13601 return 0;
13602
13603 regno = REGNO (x);
13604 rsp = &reg_stat[regno];
13605 value = rsp->last_set_value;
13606
13607 /* If we don't have a value, or if it isn't for this basic block and
13608 it's either a hard register, set more than once, or it's live
13609 at the beginning of the function, return 0.
13610
13611 Because if it's not live at the beginning of the function then the reg
13612 is always set before being used (is never used without being set).
13613 And, if it's set only once, and it's always set before use, then all
13614 uses must have the same last value, even if it's not from this basic
13615 block. */
13616
13617 if (value == 0
13618 || (rsp->last_set_label < label_tick_ebb_start
13619 && (regno < FIRST_PSEUDO_REGISTER
13620 || regno >= reg_n_sets_max
13621 || REG_N_SETS (regno) != 1
13622 || REGNO_REG_SET_P
13623 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), regno))))
13624 return 0;
13625
13626 /* If the value was set in a later insn than the ones we are processing,
13627 we can't use it even if the register was only set once. */
13628 if (rsp->last_set_label == label_tick
13629 && DF_INSN_LUID (rsp->last_set) >= subst_low_luid)
13630 return 0;
13631
13632 /* If fewer bits were set than what we are asked for now, we cannot use
13633 the value. */
13634 if (GET_MODE_PRECISION (rsp->last_set_mode)
13635 < GET_MODE_PRECISION (GET_MODE (x)))
13636 return 0;
13637
13638 /* If the value has all its registers valid, return it. */
13639 if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 0))
13640 return value;
13641
13642 /* Otherwise, make a copy and replace any invalid register with
13643 (clobber (const_int 0)). If that fails for some reason, return 0. */
13644
13645 value = copy_rtx (value);
13646 if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 1))
13647 return value;
13648
13649 return 0;
13650 }
13651 \f
13652 /* Return nonzero if expression X refers to a REG or to memory
13653 that is set in an instruction more recent than FROM_LUID. */
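/* Illustrative sketch: if X mentions reg 99 and reg 99 was set in this
   block by an insn with a luid greater than FROM_LUID, the answer is
   nonzero, and the caller must not move the computation of X back to
   FROM_LUID, where it would see a stale value of reg 99.  */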
13654
13655 static int
13656 use_crosses_set_p (const_rtx x, int from_luid)
13657 {
13658 const char *fmt;
13659 int i;
13660 enum rtx_code code = GET_CODE (x);
13661
13662 if (code == REG)
13663 {
13664 unsigned int regno = REGNO (x);
13665 unsigned endreg = END_REGNO (x);
13666
13667 #ifdef PUSH_ROUNDING
13668 /* Don't allow uses of the stack pointer to be moved,
13669 because we don't know whether the move crosses a push insn. */
13670 if (regno == STACK_POINTER_REGNUM && PUSH_ARGS)
13671 return 1;
13672 #endif
13673 for (; regno < endreg; regno++)
13674 {
13675 reg_stat_type *rsp = &reg_stat[regno];
13676 if (rsp->last_set
13677 && rsp->last_set_label == label_tick
13678 && DF_INSN_LUID (rsp->last_set) > from_luid)
13679 return 1;
13680 }
13681 return 0;
13682 }
13683
13684 if (code == MEM && mem_last_set > from_luid)
13685 return 1;
13686
13687 fmt = GET_RTX_FORMAT (code);
13688
13689 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
13690 {
13691 if (fmt[i] == 'E')
13692 {
13693 int j;
13694 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
13695 if (use_crosses_set_p (XVECEXP (x, i, j), from_luid))
13696 return 1;
13697 }
13698 else if (fmt[i] == 'e'
13699 && use_crosses_set_p (XEXP (x, i), from_luid))
13700 return 1;
13701 }
13702 return 0;
13703 }
13704 \f
13705 /* Define three variables used for communication between the following
13706 routines. */
13707
13708 static unsigned int reg_dead_regno, reg_dead_endregno;
13709 static int reg_dead_flag;
13710
13711 /* Function called via note_stores from reg_dead_at_p.
13712
13713 If DEST is within [reg_dead_regno, reg_dead_endregno), set
13714 reg_dead_flag to 1 if X is a CLOBBER and to -1 if it is a SET. */
13715
13716 static void
13717 reg_dead_at_p_1 (rtx dest, const_rtx x, void *data ATTRIBUTE_UNUSED)
13718 {
13719 unsigned int regno, endregno;
13720
13721 if (!REG_P (dest))
13722 return;
13723
13724 regno = REGNO (dest);
13725 endregno = END_REGNO (dest);
13726 if (reg_dead_endregno > regno && reg_dead_regno < endregno)
13727 reg_dead_flag = (GET_CODE (x) == CLOBBER) ? 1 : -1;
13728 }
13729
13730 /* Return nonzero if REG is known to be dead at INSN.
13731
13732 We scan backwards from INSN. If we hit a REG_DEAD note or a CLOBBER
13733 referencing REG, it is dead. If we hit a SET referencing REG, it is
13734 live. Otherwise, see if it is live or dead at the start of the basic
13735 block we are in. Hard regs marked as being live in NEWPAT_USED_REGS
13736 must be assumed to be always live. */
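/* Illustrative sketch: scanning backwards from INSN, hitting
   (clobber (reg:SI 1)) or a REG_DEAD/REG_UNUSED note for reg 1 answers
   "dead", hitting (set (reg:SI 1) ...) answers "live", and reaching the
   head of the block defers to the block's live-in set.  */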
13737
13738 static int
13739 reg_dead_at_p (rtx reg, rtx_insn *insn)
13740 {
13741 basic_block block;
13742 unsigned int i;
13743
13744 /* Set variables for reg_dead_at_p_1. */
13745 reg_dead_regno = REGNO (reg);
13746 reg_dead_endregno = END_REGNO (reg);
13747
13748 reg_dead_flag = 0;
13749
13750 /* Check that reg isn't mentioned in NEWPAT_USED_REGS. For fixed registers
13751 we allow the machine description to decide whether use-and-clobber
13752 patterns are OK. */
13753 if (reg_dead_regno < FIRST_PSEUDO_REGISTER)
13754 {
13755 for (i = reg_dead_regno; i < reg_dead_endregno; i++)
13756 if (!fixed_regs[i] && TEST_HARD_REG_BIT (newpat_used_regs, i))
13757 return 0;
13758 }
13759
13760 /* Scan backwards until we find a REG_DEAD note, SET, CLOBBER, or
13761 beginning of basic block. */
13762 block = BLOCK_FOR_INSN (insn);
13763 for (;;)
13764 {
13765 if (INSN_P (insn))
13766 {
13767 if (find_regno_note (insn, REG_UNUSED, reg_dead_regno))
13768 return 1;
13769
13770 note_stores (PATTERN (insn), reg_dead_at_p_1, NULL);
13771 if (reg_dead_flag)
13772 return reg_dead_flag == 1 ? 1 : 0;
13773
13774 if (find_regno_note (insn, REG_DEAD, reg_dead_regno))
13775 return 1;
13776 }
13777
13778 if (insn == BB_HEAD (block))
13779 break;
13780
13781 insn = PREV_INSN (insn);
13782 }
13783
13784 /* Look at live-in sets for the basic block that we were in. */
13785 for (i = reg_dead_regno; i < reg_dead_endregno; i++)
13786 if (REGNO_REG_SET_P (df_get_live_in (block), i))
13787 return 0;
13788
13789 return 1;
13790 }
13791 \f
13792 /* Note hard registers in X that are used. */
13793
13794 static void
13795 mark_used_regs_combine (rtx x)
13796 {
13797 RTX_CODE code = GET_CODE (x);
13798 unsigned int regno;
13799 int i;
13800
13801 switch (code)
13802 {
13803 case LABEL_REF:
13804 case SYMBOL_REF:
13805 case CONST:
13806 CASE_CONST_ANY:
13807 case PC:
13808 case ADDR_VEC:
13809 case ADDR_DIFF_VEC:
13810 case ASM_INPUT:
13811 /* CC0 must die in the insn after it is set, so we don't need to take
13812 special note of it here. */
13813 case CC0:
13814 return;
13815
13816 case CLOBBER:
13817 /* If we are clobbering a MEM, mark any hard registers inside the
13818 address as used. */
13819 if (MEM_P (XEXP (x, 0)))
13820 mark_used_regs_combine (XEXP (XEXP (x, 0), 0));
13821 return;
13822
13823 case REG:
13824 regno = REGNO (x);
13825 /* A hard reg in a wide mode may really be multiple registers.
13826 If so, mark all of them just like the first. */
13827 if (regno < FIRST_PSEUDO_REGISTER)
13828 {
13829 /* None of this applies to the stack, frame or arg pointers. */
13830 if (regno == STACK_POINTER_REGNUM
13831 || (!HARD_FRAME_POINTER_IS_FRAME_POINTER
13832 && regno == HARD_FRAME_POINTER_REGNUM)
13833 || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
13834 && regno == ARG_POINTER_REGNUM && fixed_regs[regno])
13835 || regno == FRAME_POINTER_REGNUM)
13836 return;
13837
13838 add_to_hard_reg_set (&newpat_used_regs, GET_MODE (x), regno);
13839 }
13840 return;
13841
13842 case SET:
13843 {
13844 /* If setting a MEM, or a SUBREG of a MEM, then note any hard regs in
13845 the address. */
13846 rtx testreg = SET_DEST (x);
13847
13848 while (GET_CODE (testreg) == SUBREG
13849 || GET_CODE (testreg) == ZERO_EXTRACT
13850 || GET_CODE (testreg) == STRICT_LOW_PART)
13851 testreg = XEXP (testreg, 0);
13852
13853 if (MEM_P (testreg))
13854 mark_used_regs_combine (XEXP (testreg, 0));
13855
13856 mark_used_regs_combine (SET_SRC (x));
13857 }
13858 return;
13859
13860 default:
13861 break;
13862 }
13863
13864 /* Recursively scan the operands of this expression. */
13865
13866 {
13867 const char *fmt = GET_RTX_FORMAT (code);
13868
13869 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
13870 {
13871 if (fmt[i] == 'e')
13872 mark_used_regs_combine (XEXP (x, i));
13873 else if (fmt[i] == 'E')
13874 {
13875 int j;
13876
13877 for (j = 0; j < XVECLEN (x, i); j++)
13878 mark_used_regs_combine (XVECEXP (x, i, j));
13879 }
13880 }
13881 }
13882 }
13883 \f
13884 /* Remove register number REGNO from the dead registers list of INSN.
13885
13886 Return the note used to record the death, if there was one. */
13887
13888 rtx
13889 remove_death (unsigned int regno, rtx_insn *insn)
13890 {
13891 rtx note = find_regno_note (insn, REG_DEAD, regno);
13892
13893 if (note)
13894 remove_note (insn, note);
13895
13896 return note;
13897 }
13898
13899 /* For each register (hardware or pseudo) used within expression X, if its
13900 death is in an instruction with luid between FROM_LUID (inclusive) and
13901 TO_INSN (exclusive), put a REG_DEAD note for that register in the
13902 list headed by PNOTES.
13903
13904 That said, don't move registers killed by maybe_kill_insn.
13905
13906 This is done when X is being merged by combination into TO_INSN. These
13907 notes will then be distributed as needed. */
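/* Illustrative sketch: if reg 99 is used in X and last died in an insn
   whose luid lies in [FROM_LUID, luid of TO_INSN), its REG_DEAD note is
   removed from that insn and queued on *PNOTES, so distribute_notes can
   later attach it to whichever resulting insn holds the last use.  */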
13908
13909 static void
13910 move_deaths (rtx x, rtx maybe_kill_insn, int from_luid, rtx_insn *to_insn,
13911 rtx *pnotes)
13912 {
13913 const char *fmt;
13914 int len, i;
13915 enum rtx_code code = GET_CODE (x);
13916
13917 if (code == REG)
13918 {
13919 unsigned int regno = REGNO (x);
13920 rtx_insn *where_dead = reg_stat[regno].last_death;
13921
13922 /* Don't move the register if it gets killed in between from and to. */
13923 if (maybe_kill_insn && reg_set_p (x, maybe_kill_insn)
13924 && ! reg_referenced_p (x, maybe_kill_insn))
13925 return;
13926
13927 if (where_dead
13928 && BLOCK_FOR_INSN (where_dead) == BLOCK_FOR_INSN (to_insn)
13929 && DF_INSN_LUID (where_dead) >= from_luid
13930 && DF_INSN_LUID (where_dead) < DF_INSN_LUID (to_insn))
13931 {
13932 rtx note = remove_death (regno, where_dead);
13933
13934 /* It is possible for the call above to return 0. This can occur
13935 when last_death points to I2 or I1 that we combined with.
13936 In that case make a new note.
13937
13938 We must also check for the case where X is a hard register
13939 and NOTE is a death note for a range of hard registers
13940 including X. In that case, we must put REG_DEAD notes for
13941 the remaining registers in place of NOTE. */
13942
13943 if (note != 0 && regno < FIRST_PSEUDO_REGISTER
13944 && partial_subreg_p (GET_MODE (x), GET_MODE (XEXP (note, 0))))
13945 {
13946 unsigned int deadregno = REGNO (XEXP (note, 0));
13947 unsigned int deadend = END_REGNO (XEXP (note, 0));
13948 unsigned int ourend = END_REGNO (x);
13949 unsigned int i;
13950
13951 for (i = deadregno; i < deadend; i++)
13952 if (i < regno || i >= ourend)
13953 add_reg_note (where_dead, REG_DEAD, regno_reg_rtx[i]);
13954 }
13955
13956 /* If we didn't find any note, or if we found a REG_DEAD note that
13957 covers only part of the given reg, and we have a multi-reg hard
13958 register, then to be safe we must check for REG_DEAD notes
13959 for each register other than the first. They could have
13960 their own REG_DEAD notes lying around. */
13961 else if ((note == 0
13962 || (note != 0
13963 && partial_subreg_p (GET_MODE (XEXP (note, 0)),
13964 GET_MODE (x))))
13965 && regno < FIRST_PSEUDO_REGISTER
13966 && REG_NREGS (x) > 1)
13967 {
13968 unsigned int ourend = END_REGNO (x);
13969 unsigned int i, offset;
13970 rtx oldnotes = 0;
13971
13972 if (note)
13973 offset = hard_regno_nregs (regno, GET_MODE (XEXP (note, 0)));
13974 else
13975 offset = 1;
13976
13977 for (i = regno + offset; i < ourend; i++)
13978 move_deaths (regno_reg_rtx[i],
13979 maybe_kill_insn, from_luid, to_insn, &oldnotes);
13980 }
13981
13982 if (note != 0 && GET_MODE (XEXP (note, 0)) == GET_MODE (x))
13983 {
13984 XEXP (note, 1) = *pnotes;
13985 *pnotes = note;
13986 }
13987 else
13988 *pnotes = alloc_reg_note (REG_DEAD, x, *pnotes);
13989 }
13990
13991 return;
13992 }
13993
13994 else if (GET_CODE (x) == SET)
13995 {
13996 rtx dest = SET_DEST (x);
13997
13998 move_deaths (SET_SRC (x), maybe_kill_insn, from_luid, to_insn, pnotes);
13999
14000 /* In the case of a ZERO_EXTRACT, a STRICT_LOW_PART, or a SUBREG
14001 that accesses one word of a multi-word item, some
14002 piece of every register in the expression is used by
14003 this insn, so remove any old death. */
14004 /* ??? So why do we test for equality of the sizes? */
14005
14006 if (GET_CODE (dest) == ZERO_EXTRACT
14007 || GET_CODE (dest) == STRICT_LOW_PART
14008 || (GET_CODE (dest) == SUBREG
14009 && !read_modify_subreg_p (dest)))
14010 {
14011 move_deaths (dest, maybe_kill_insn, from_luid, to_insn, pnotes);
14012 return;
14013 }
14014
14015 /* If this is some other SUBREG, we know it replaces the entire
14016 value, so use that as the destination. */
14017 if (GET_CODE (dest) == SUBREG)
14018 dest = SUBREG_REG (dest);
14019
14020 /* If this is a MEM, adjust deaths of anything used in the address.
14021 For a REG (the only other possibility), the entire value is
14022 being replaced so the old value is not used in this insn. */
14023
14024 if (MEM_P (dest))
14025 move_deaths (XEXP (dest, 0), maybe_kill_insn, from_luid,
14026 to_insn, pnotes);
14027 return;
14028 }
14029
14030 else if (GET_CODE (x) == CLOBBER)
14031 return;
14032
14033 len = GET_RTX_LENGTH (code);
14034 fmt = GET_RTX_FORMAT (code);
14035
14036 for (i = 0; i < len; i++)
14037 {
14038 if (fmt[i] == 'E')
14039 {
14040 int j;
14041 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
14042 move_deaths (XVECEXP (x, i, j), maybe_kill_insn, from_luid,
14043 to_insn, pnotes);
14044 }
14045 else if (fmt[i] == 'e')
14046 move_deaths (XEXP (x, i), maybe_kill_insn, from_luid, to_insn, pnotes);
14047 }
14048 }
14049 \f
14050 /* Return 1 if X is the target of a bit-field assignment in BODY, the
14051 pattern of an insn. X must be a REG. */
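/* Illustrative sketch: for BODY
     (set (zero_extract:SI (reg:SI 100) (const_int 8) (const_int 0)) ...)
   the result for (reg:SI 100) is 1, since only part of the register is
   written by the assignment.  */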
14052
14053 static int
14054 reg_bitfield_target_p (rtx x, rtx body)
14055 {
14056 int i;
14057
14058 if (GET_CODE (body) == SET)
14059 {
14060 rtx dest = SET_DEST (body);
14061 rtx target;
14062 unsigned int regno, tregno, endregno, endtregno;
14063
14064 if (GET_CODE (dest) == ZERO_EXTRACT)
14065 target = XEXP (dest, 0);
14066 else if (GET_CODE (dest) == STRICT_LOW_PART)
14067 target = SUBREG_REG (XEXP (dest, 0));
14068 else
14069 return 0;
14070
14071 if (GET_CODE (target) == SUBREG)
14072 target = SUBREG_REG (target);
14073
14074 if (!REG_P (target))
14075 return 0;
14076
14077 tregno = REGNO (target), regno = REGNO (x);
14078 if (tregno >= FIRST_PSEUDO_REGISTER || regno >= FIRST_PSEUDO_REGISTER)
14079 return target == x;
14080
14081 endtregno = end_hard_regno (GET_MODE (target), tregno);
14082 endregno = end_hard_regno (GET_MODE (x), regno);
14083
14084 return endregno > tregno && regno < endtregno;
14085 }
14086
14087 else if (GET_CODE (body) == PARALLEL)
14088 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
14089 if (reg_bitfield_target_p (x, XVECEXP (body, 0, i)))
14090 return 1;
14091
14092 return 0;
14093 }
14094 \f
14095 /* Given a chain of REG_NOTES originally from FROM_INSN, try to place them
14096 as appropriate. I3 and I2 are the insns resulting from the combination
14097 insns including FROM (I2 may be zero).
14098
14099 ELIM_I2 and ELIM_I1 are either zero or registers that we know will
14100 not need REG_DEAD notes because they are being substituted for. This
14101 saves searching in the most common cases.
14102
14103 Each note in the list is either ignored or placed on some insns, depending
14104 on the type of note. */
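/* Rough illustration (not exhaustive): a REG_DEAD note usually ends up
   on I3 if I3 still uses the register, on I2 if only the new I2 uses
   it, and is dropped when the register is one of ELIM_I2/ELIM_I1/
   ELIM_I0; call-related notes such as REG_NORETURN stay with whichever
   of I3 or I2 is the CALL_P insn.  */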
14105
14106 static void
14107 distribute_notes (rtx notes, rtx_insn *from_insn, rtx_insn *i3, rtx_insn *i2,
14108 rtx elim_i2, rtx elim_i1, rtx elim_i0)
14109 {
14110 rtx note, next_note;
14111 rtx tem_note;
14112 rtx_insn *tem_insn;
14113
14114 for (note = notes; note; note = next_note)
14115 {
14116 rtx_insn *place = 0, *place2 = 0;
14117
14118 next_note = XEXP (note, 1);
14119 switch (REG_NOTE_KIND (note))
14120 {
14121 case REG_BR_PROB:
14122 case REG_BR_PRED:
14123 /* Doesn't matter much where we put this, as long as it's somewhere.
14124 It is preferable to keep these notes on branches, which is most
14125 likely to be i3. */
14126 place = i3;
14127 break;
14128
14129 case REG_NON_LOCAL_GOTO:
14130 if (JUMP_P (i3))
14131 place = i3;
14132 else
14133 {
14134 gcc_assert (i2 && JUMP_P (i2));
14135 place = i2;
14136 }
14137 break;
14138
14139 case REG_EH_REGION:
14140 /* These notes must remain with the call or trapping instruction. */
14141 if (CALL_P (i3))
14142 place = i3;
14143 else if (i2 && CALL_P (i2))
14144 place = i2;
14145 else
14146 {
14147 gcc_assert (cfun->can_throw_non_call_exceptions);
14148 if (may_trap_p (i3))
14149 place = i3;
14150 else if (i2 && may_trap_p (i2))
14151 place = i2;
14152 /* ??? Otherwise assume we've combined things such that we
14153 can now prove that the instructions can't trap. Drop the
14154 note in this case. */
14155 }
14156 break;
14157
14158 case REG_ARGS_SIZE:
14159 /* ??? How to distribute between i3-i1. Assume i3 contains the
14160 entire adjustment. Assert i3 contains at least some adjust. */
14161 if (!noop_move_p (i3))
14162 {
14163 int old_size, args_size = INTVAL (XEXP (note, 0));
14164 /* fixup_args_size_notes looks at REG_NORETURN note,
14165 so ensure the note is placed there first. */
14166 if (CALL_P (i3))
14167 {
14168 rtx *np;
14169 for (np = &next_note; *np; np = &XEXP (*np, 1))
14170 if (REG_NOTE_KIND (*np) == REG_NORETURN)
14171 {
14172 rtx n = *np;
14173 *np = XEXP (n, 1);
14174 XEXP (n, 1) = REG_NOTES (i3);
14175 REG_NOTES (i3) = n;
14176 break;
14177 }
14178 }
14179 old_size = fixup_args_size_notes (PREV_INSN (i3), i3, args_size);
14180 /* For !ACCUMULATE_OUTGOING_ARGS, emit_call_1 adds a REG_ARGS_SIZE
14181 note to all noreturn calls; allow that here. */
14182 gcc_assert (old_size != args_size
14183 || (CALL_P (i3)
14184 && !ACCUMULATE_OUTGOING_ARGS
14185 && find_reg_note (i3, REG_NORETURN, NULL_RTX)));
14186 }
14187 break;
14188
14189 case REG_NORETURN:
14190 case REG_SETJMP:
14191 case REG_TM:
14192 case REG_CALL_DECL:
14193 case REG_CALL_NOCF_CHECK:
14194 /* These notes must remain with the call. It should not be
14195 possible for both I2 and I3 to be a call. */
14196 if (CALL_P (i3))
14197 place = i3;
14198 else
14199 {
14200 gcc_assert (i2 && CALL_P (i2));
14201 place = i2;
14202 }
14203 break;
14204
14205 case REG_UNUSED:
14206 /* Any clobbers for i3 may still exist, and so we must process
14207 REG_UNUSED notes from that insn.
14208
14209 Any clobbers from i2 or i1 can only exist if they were added by
14210 recog_for_combine. In that case, recog_for_combine created the
14211 necessary REG_UNUSED notes. Trying to keep any original
14212 REG_UNUSED notes from these insns can cause incorrect output
14213 if it is for the same register as the original i3 dest.
14214 In that case, we will notice that the register is set in i3,
14215 and then add a REG_UNUSED note for the destination of i3, which
14216 is wrong. However, it is possible to have REG_UNUSED notes from
14217 i2 or i1 for registers which were both used and clobbered, so
14218 we keep notes from i2 or i1 if they will turn into REG_DEAD
14219 notes. */
14220
14221 /* If this register is set or clobbered in I3, put the note there
14222 unless there is one already. */
14223 if (reg_set_p (XEXP (note, 0), PATTERN (i3)))
14224 {
14225 if (from_insn != i3)
14226 break;
14227
14228 if (! (REG_P (XEXP (note, 0))
14229 ? find_regno_note (i3, REG_UNUSED, REGNO (XEXP (note, 0)))
14230 : find_reg_note (i3, REG_UNUSED, XEXP (note, 0))))
14231 place = i3;
14232 }
14233 /* Otherwise, if this register is used by I3, then this register
14234 now dies here, so we must put a REG_DEAD note here unless there
14235 is one already. */
14236 else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3))
14237 && ! (REG_P (XEXP (note, 0))
14238 ? find_regno_note (i3, REG_DEAD,
14239 REGNO (XEXP (note, 0)))
14240 : find_reg_note (i3, REG_DEAD, XEXP (note, 0))))
14241 {
14242 PUT_REG_NOTE_KIND (note, REG_DEAD);
14243 place = i3;
14244 }
14245 break;
14246
14247 case REG_EQUAL:
14248 case REG_EQUIV:
14249 case REG_NOALIAS:
14250 /* These notes say something about results of an insn. We can
14251 only support them if they used to be on I3 in which case they
14252 remain on I3. Otherwise they are ignored.
14253
14254 If the note refers to an expression that is not a constant, we
14255 must also ignore the note since we cannot tell whether the
14256 equivalence is still true. It might be possible to do
14257 slightly better than this (we only have a problem if I2DEST
14258 or I1DEST is present in the expression), but it doesn't
14259 seem worth the trouble. */
14260
14261 if (from_insn == i3
14262 && (XEXP (note, 0) == 0 || CONSTANT_P (XEXP (note, 0))))
14263 place = i3;
14264 break;
14265
14266 case REG_INC:
14267 /* These notes say something about how a register is used. They must
14268 be present on any use of the register in I2 or I3. */
14269 if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3)))
14270 place = i3;
14271
14272 if (i2 && reg_mentioned_p (XEXP (note, 0), PATTERN (i2)))
14273 {
14274 if (place)
14275 place2 = i2;
14276 else
14277 place = i2;
14278 }
14279 break;
14280
14281 case REG_LABEL_TARGET:
14282 case REG_LABEL_OPERAND:
14283 /* This can show up in several ways -- either directly in the
14284 pattern, or hidden off in the constant pool with (or without?)
14285 a REG_EQUAL note. */
14286 /* ??? Ignore the without-reg_equal-note problem for now. */
14287 if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3))
14288 || ((tem_note = find_reg_note (i3, REG_EQUAL, NULL_RTX))
14289 && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
14290 && label_ref_label (XEXP (tem_note, 0)) == XEXP (note, 0)))
14291 place = i3;
14292
14293 if (i2
14294 && (reg_mentioned_p (XEXP (note, 0), PATTERN (i2))
14295 || ((tem_note = find_reg_note (i2, REG_EQUAL, NULL_RTX))
14296 && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
14297 && label_ref_label (XEXP (tem_note, 0)) == XEXP (note, 0))))
14298 {
14299 if (place)
14300 place2 = i2;
14301 else
14302 place = i2;
14303 }
14304
14305 /* For REG_LABEL_TARGET on a JUMP_P, we prefer to put the note
14306 as a JUMP_LABEL or decrement LABEL_NUSES if it's already
14307 there. */
14308 if (place && JUMP_P (place)
14309 && REG_NOTE_KIND (note) == REG_LABEL_TARGET
14310 && (JUMP_LABEL (place) == NULL
14311 || JUMP_LABEL (place) == XEXP (note, 0)))
14312 {
14313 rtx label = JUMP_LABEL (place);
14314
14315 if (!label)
14316 JUMP_LABEL (place) = XEXP (note, 0);
14317 else if (LABEL_P (label))
14318 LABEL_NUSES (label)--;
14319 }
14320
14321 if (place2 && JUMP_P (place2)
14322 && REG_NOTE_KIND (note) == REG_LABEL_TARGET
14323 && (JUMP_LABEL (place2) == NULL
14324 || JUMP_LABEL (place2) == XEXP (note, 0)))
14325 {
14326 rtx label = JUMP_LABEL (place2);
14327
14328 if (!label)
14329 JUMP_LABEL (place2) = XEXP (note, 0);
14330 else if (LABEL_P (label))
14331 LABEL_NUSES (label)--;
14332 place2 = 0;
14333 }
14334 break;
14335
14336 case REG_NONNEG:
14337 /* This note says something about the value of a register prior
14338 to the execution of an insn. It is too much trouble to see
14339 if the note is still correct in all situations. It is better
14340 to simply delete it. */
14341 break;
14342
14343 case REG_DEAD:
14344 /* If we replaced the right hand side of FROM_INSN with a
14345 REG_EQUAL note, the original use of the dying register
14346 will not have been combined into I3 and I2. In such cases,
14347 FROM_INSN is guaranteed to be the first of the combined
14348 instructions, so we simply need to search back before
14349 FROM_INSN for the previous use or set of this register,
14350 then alter the notes there appropriately.
14351
14352 If the register is used as an input in I3, it dies there.
14353 Similarly for I2, if it is nonzero and adjacent to I3.
14354
14355 If the register is not used as an input in either I3 or I2
14356 and it is not one of the registers we were supposed to eliminate,
14357 there are two possibilities. We might have a non-adjacent I2
14358 or we might have somehow eliminated an additional register
14359 from a computation. For example, we might have had A & B where
14360 we discover that B will always be zero. In this case we will
14361 eliminate the reference to A.
14362
14363 In both cases, we must search to see if we can find a previous
14364 use of A and put the death note there. */
14365
14366 if (from_insn
14367 && from_insn == i2mod
14368 && !reg_overlap_mentioned_p (XEXP (note, 0), i2mod_new_rhs))
14369 tem_insn = from_insn;
14370 else
14371 {
14372 if (from_insn
14373 && CALL_P (from_insn)
14374 && find_reg_fusage (from_insn, USE, XEXP (note, 0)))
14375 place = from_insn;
14376 else if (i2 && reg_set_p (XEXP (note, 0), PATTERN (i2)))
14377 {
14378 /* If the new I2 sets the same register that is marked
14379 dead in the note, we do not in general know where to
14380 put the note. One important case we _can_ handle is
14381 when the note comes from I3. */
14382 if (from_insn == i3)
14383 place = i3;
14384 else
14385 break;
14386 }
14387 else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3)))
14388 place = i3;
14389 else if (i2 != 0 && next_nonnote_nondebug_insn (i2) == i3
14390 && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
14391 place = i2;
14392 else if ((rtx_equal_p (XEXP (note, 0), elim_i2)
14393 && !(i2mod
14394 && reg_overlap_mentioned_p (XEXP (note, 0),
14395 i2mod_old_rhs)))
14396 || rtx_equal_p (XEXP (note, 0), elim_i1)
14397 || rtx_equal_p (XEXP (note, 0), elim_i0))
14398 break;
14399 tem_insn = i3;
14400 }
14401
14402 if (place == 0)
14403 {
14404 basic_block bb = this_basic_block;
14405
14406 for (tem_insn = PREV_INSN (tem_insn); place == 0; tem_insn = PREV_INSN (tem_insn))
14407 {
14408 if (!NONDEBUG_INSN_P (tem_insn))
14409 {
14410 if (tem_insn == BB_HEAD (bb))
14411 break;
14412 continue;
14413 }
14414
14415 /* If the register is being set at TEM_INSN, see if that is all
14416 TEM_INSN is doing. If so, delete TEM_INSN. Otherwise, make this
14417 into a REG_UNUSED note instead. Don't delete sets to
14418 global register vars. */
14419 if ((REGNO (XEXP (note, 0)) >= FIRST_PSEUDO_REGISTER
14420 || !global_regs[REGNO (XEXP (note, 0))])
14421 && reg_set_p (XEXP (note, 0), PATTERN (tem_insn)))
14422 {
14423 rtx set = single_set (tem_insn);
14424 rtx inner_dest = 0;
14425 rtx_insn *cc0_setter = NULL;
14426
14427 if (set != 0)
14428 for (inner_dest = SET_DEST (set);
14429 (GET_CODE (inner_dest) == STRICT_LOW_PART
14430 || GET_CODE (inner_dest) == SUBREG
14431 || GET_CODE (inner_dest) == ZERO_EXTRACT);
14432 inner_dest = XEXP (inner_dest, 0))
14433 ;
14434
14435 /* Verify that it was the set, and not a clobber that
14436 modified the register.
14437
14438 CC0 targets must be careful to maintain setter/user
14439 pairs. If we cannot delete the setter due to side
14440 effects, mark the user with an UNUSED note instead
14441 of deleting it. */
14442
14443 if (set != 0 && ! side_effects_p (SET_SRC (set))
14444 && rtx_equal_p (XEXP (note, 0), inner_dest)
14445 && (!HAVE_cc0
14446 || (! reg_mentioned_p (cc0_rtx, SET_SRC (set))
14447 || ((cc0_setter = prev_cc0_setter (tem_insn)) != NULL
14448 && sets_cc0_p (PATTERN (cc0_setter)) > 0))))
14449 {
14450 /* Move the notes and links of TEM_INSN elsewhere.
14451 This might delete other dead insns recursively.
14452 First set the pattern to something that won't use
14453 any register. */
14454 rtx old_notes = REG_NOTES (tem_insn);
14455
14456 PATTERN (tem_insn) = pc_rtx;
14457 REG_NOTES (tem_insn) = NULL;
14458
14459 distribute_notes (old_notes, tem_insn, tem_insn, NULL,
14460 NULL_RTX, NULL_RTX, NULL_RTX);
14461 distribute_links (LOG_LINKS (tem_insn));
14462
14463 unsigned int regno = REGNO (XEXP (note, 0));
14464 reg_stat_type *rsp = &reg_stat[regno];
14465 if (rsp->last_set == tem_insn)
14466 record_value_for_reg (XEXP (note, 0), NULL, NULL_RTX);
14467
14468 SET_INSN_DELETED (tem_insn);
14469 if (tem_insn == i2)
14470 i2 = NULL;
14471
14472 /* Delete the setter too. */
14473 if (cc0_setter)
14474 {
14475 PATTERN (cc0_setter) = pc_rtx;
14476 old_notes = REG_NOTES (cc0_setter);
14477 REG_NOTES (cc0_setter) = NULL;
14478
14479 distribute_notes (old_notes, cc0_setter,
14480 cc0_setter, NULL,
14481 NULL_RTX, NULL_RTX, NULL_RTX);
14482 distribute_links (LOG_LINKS (cc0_setter));
14483
14484 SET_INSN_DELETED (cc0_setter);
14485 if (cc0_setter == i2)
14486 i2 = NULL;
14487 }
14488 }
14489 else
14490 {
14491 PUT_REG_NOTE_KIND (note, REG_UNUSED);
14492
14493 /* If there isn't already a REG_UNUSED note, put one
14494 here. Do not place a REG_DEAD note, even if
14495 the register is also used here; that would not
14496 match the algorithm used in lifetime analysis
14497 and can cause the consistency check in the
14498 scheduler to fail. */
14499 if (! find_regno_note (tem_insn, REG_UNUSED,
14500 REGNO (XEXP (note, 0))))
14501 place = tem_insn;
14502 break;
14503 }
14504 }
14505 else if (reg_referenced_p (XEXP (note, 0), PATTERN (tem_insn))
14506 || (CALL_P (tem_insn)
14507 && find_reg_fusage (tem_insn, USE, XEXP (note, 0))))
14508 {
14509 place = tem_insn;
14510
14511 /* If we are doing a 3->2 combination, and we have a
14512 register which formerly died in i3 and was not used
14513 by i2, which now no longer dies in i3 and is used in
14514 i2 but does not die in i2, and place is between i2
14515 and i3, then we may need to move a link from place to
14516 i2. */
14517 if (i2 && DF_INSN_LUID (place) > DF_INSN_LUID (i2)
14518 && from_insn
14519 && DF_INSN_LUID (from_insn) > DF_INSN_LUID (i2)
14520 && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
14521 {
14522 struct insn_link *links = LOG_LINKS (place);
14523 LOG_LINKS (place) = NULL;
14524 distribute_links (links);
14525 }
14526 break;
14527 }
14528
14529 if (tem_insn == BB_HEAD (bb))
14530 break;
14531 }
14532
14533 }
14534
14535 /* If the register is set or already dead at PLACE, we needn't do
14536 anything with this note if it is still a REG_DEAD note.
14537 We check here if it is set at all, not if it is totally replaced,
14538 which is what `dead_or_set_p' checks, so also check for it being
14539 set partially. */
14540
14541 if (place && REG_NOTE_KIND (note) == REG_DEAD)
14542 {
14543 unsigned int regno = REGNO (XEXP (note, 0));
14544 reg_stat_type *rsp = &reg_stat[regno];
14545
14546 if (dead_or_set_p (place, XEXP (note, 0))
14547 || reg_bitfield_target_p (XEXP (note, 0), PATTERN (place)))
14548 {
14549 /* Unless the register previously died in PLACE, clear
14550 last_death. [I no longer understand why this is
14551 being done.] */
14552 if (rsp->last_death != place)
14553 rsp->last_death = 0;
14554 place = 0;
14555 }
14556 else
14557 rsp->last_death = place;
14558
14559 /* If this is a death note for a hard reg that is occupying
14560 multiple registers, ensure that we are still using all
14561 parts of the object. If we find a piece of the object
14562 that is unused, we must arrange for an appropriate REG_DEAD
14563 note to be added for it. However, we can't just emit a USE
14564 and tag the note to it, since the register might actually
14565 be dead; so we recurse, and the recursive call then finds
14566 the previous insn that used this register. */
14567
14568 if (place && REG_NREGS (XEXP (note, 0)) > 1)
14569 {
14570 unsigned int endregno = END_REGNO (XEXP (note, 0));
14571 bool all_used = true;
14572 unsigned int i;
14573
14574 for (i = regno; i < endregno; i++)
14575 if ((! refers_to_regno_p (i, PATTERN (place))
14576 && ! find_regno_fusage (place, USE, i))
14577 || dead_or_set_regno_p (place, i))
14578 {
14579 all_used = false;
14580 break;
14581 }
14582
14583 if (! all_used)
14584 {
14585 /* Put only REG_DEAD notes for pieces that are
14586 not already dead or set. */
14587
14588 for (i = regno; i < endregno;
14589 i += hard_regno_nregs (i, reg_raw_mode[i]))
14590 {
14591 rtx piece = regno_reg_rtx[i];
14592 basic_block bb = this_basic_block;
14593
14594 if (! dead_or_set_p (place, piece)
14595 && ! reg_bitfield_target_p (piece,
14596 PATTERN (place)))
14597 {
14598 rtx new_note = alloc_reg_note (REG_DEAD, piece,
14599 NULL_RTX);
14600
14601 distribute_notes (new_note, place, place,
14602 NULL, NULL_RTX, NULL_RTX,
14603 NULL_RTX);
14604 }
14605 else if (! refers_to_regno_p (i, PATTERN (place))
14606 && ! find_regno_fusage (place, USE, i))
14607 for (tem_insn = PREV_INSN (place); ;
14608 tem_insn = PREV_INSN (tem_insn))
14609 {
14610 if (!NONDEBUG_INSN_P (tem_insn))
14611 {
14612 if (tem_insn == BB_HEAD (bb))
14613 break;
14614 continue;
14615 }
14616 if (dead_or_set_p (tem_insn, piece)
14617 || reg_bitfield_target_p (piece,
14618 PATTERN (tem_insn)))
14619 {
14620 add_reg_note (tem_insn, REG_UNUSED, piece);
14621 break;
14622 }
14623 }
14624 }
14625
14626 place = 0;
14627 }
14628 }
14629 }
14630 break;
14631
14632 default:
14633 /* Any other notes should not be present at this point in the
14634 compilation. */
14635 gcc_unreachable ();
14636 }
14637
14638 if (place)
14639 {
14640 XEXP (note, 1) = REG_NOTES (place);
14641 REG_NOTES (place) = note;
14642
14643 /* Set added_notes_insn to the earliest insn we added a note to. */
14644 if (added_notes_insn == 0
14645 || DF_INSN_LUID (added_notes_insn) > DF_INSN_LUID (place))
14646 added_notes_insn = place;
14647 }
14648
14649 if (place2)
14650 {
14651 add_shallow_copy_of_reg_note (place2, note);
14652
14653 /* Set added_notes_insn to the earliest insn we added a note to. */
14654 if (added_notes_insn == 0
14655 || DF_INSN_LUID (added_notes_insn) > DF_INSN_LUID (place2))
14656 added_notes_insn = place2;
14657 }
14658 }
14659 }
14660 \f
14661 /* Similarly to above, distribute the LOG_LINKS that used to be present on
14662 I3, I2, and I1 to new locations. This is also called to add a link
14663 pointing at I3 when I3's destination is changed. */
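/* Illustrative sketch: a link recording that reg 100 was set by insn S
   is re-attached to the first insn after S in this block that still
   references reg 100, and is dropped if reg 100 is set again, or the
   block ends, before such a use is found.  */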
14664
14665 static void
14666 distribute_links (struct insn_link *links)
14667 {
14668 struct insn_link *link, *next_link;
14669
14670 for (link = links; link; link = next_link)
14671 {
14672 rtx_insn *place = 0;
14673 rtx_insn *insn;
14674 rtx set, reg;
14675
14676 next_link = link->next;
14677
14678 /* If the insn that this link points to is a NOTE, ignore it. */
14679 if (NOTE_P (link->insn))
14680 continue;
14681
14682 set = 0;
14683 rtx pat = PATTERN (link->insn);
14684 if (GET_CODE (pat) == SET)
14685 set = pat;
14686 else if (GET_CODE (pat) == PARALLEL)
14687 {
14688 int i;
14689 for (i = 0; i < XVECLEN (pat, 0); i++)
14690 {
14691 set = XVECEXP (pat, 0, i);
14692 if (GET_CODE (set) != SET)
14693 continue;
14694
14695 reg = SET_DEST (set);
14696 while (GET_CODE (reg) == ZERO_EXTRACT
14697 || GET_CODE (reg) == STRICT_LOW_PART
14698 || GET_CODE (reg) == SUBREG)
14699 reg = XEXP (reg, 0);
14700
14701 if (!REG_P (reg))
14702 continue;
14703
14704 if (REGNO (reg) == link->regno)
14705 break;
14706 }
14707 if (i == XVECLEN (pat, 0))
14708 continue;
14709 }
14710 else
14711 continue;
14712
14713 reg = SET_DEST (set);
14714
14715 while (GET_CODE (reg) == ZERO_EXTRACT
14716 || GET_CODE (reg) == STRICT_LOW_PART
14717 || GET_CODE (reg) == SUBREG)
14718 reg = XEXP (reg, 0);
14719
14720 /* A LOG_LINK is defined as being placed on the first insn that uses
14721 a register and points to the insn that sets the register. Start
14722 searching at the next insn after the target of the link and stop
14723 when we reach a set of the register or the end of the basic block.
14724
14725 Note that this correctly handles the link that used to point from
14726 I3 to I2. Also note that not much searching is typically done here
14727 since most links don't point very far away. */
14728
14729 for (insn = NEXT_INSN (link->insn);
14730 (insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
14731 || BB_HEAD (this_basic_block->next_bb) != insn));
14732 insn = NEXT_INSN (insn))
14733 if (DEBUG_INSN_P (insn))
14734 continue;
14735 else if (INSN_P (insn) && reg_overlap_mentioned_p (reg, PATTERN (insn)))
14736 {
14737 if (reg_referenced_p (reg, PATTERN (insn)))
14738 place = insn;
14739 break;
14740 }
14741 else if (CALL_P (insn)
14742 && find_reg_fusage (insn, USE, reg))
14743 {
14744 place = insn;
14745 break;
14746 }
14747 else if (INSN_P (insn) && reg_set_p (reg, insn))
14748 break;
14749
14750 /* If we found a place to put the link, place it there unless there
14751 is already a link to the same insn as LINK at that point. */
14752
14753 if (place)
14754 {
14755 struct insn_link *link2;
14756
14757 FOR_EACH_LOG_LINK (link2, place)
14758 if (link2->insn == link->insn && link2->regno == link->regno)
14759 break;
14760
14761 if (link2 == NULL)
14762 {
14763 link->next = LOG_LINKS (place);
14764 LOG_LINKS (place) = link;
14765
14766 /* Set added_links_insn to the earliest insn we added a
14767 link to. */
14768 if (added_links_insn == 0
14769 || DF_INSN_LUID (added_links_insn) > DF_INSN_LUID (place))
14770 added_links_insn = place;
14771 }
14772 }
14773 }
14774 }
14775 \f
14776 /* Check for any register or memory mentioned in EQUIV that is not
14777 mentioned in EXPR. This is used to restrict EQUIV to "specializations"
14778 of EXPR where some registers may have been replaced by constants. */
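/* Illustrative sketch: with EXPR (plus:SI (reg:SI 99) (reg:SI 100)) and
   EQUIV (plus:SI (reg:SI 99) (const_int 8)), every REG or MEM in EQUIV
   also appears in EXPR, so the result is false and EQUIV is a valid
   specialization; an EQUIV mentioning a new (reg:SI 101) would yield
   true.  */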
14779
14780 static bool
14781 unmentioned_reg_p (rtx equiv, rtx expr)
14782 {
14783 subrtx_iterator::array_type array;
14784 FOR_EACH_SUBRTX (iter, array, equiv, NONCONST)
14785 {
14786 const_rtx x = *iter;
14787 if ((REG_P (x) || MEM_P (x))
14788 && !reg_mentioned_p (x, expr))
14789 return true;
14790 }
14791 return false;
14792 }
14793 \f
14794 DEBUG_FUNCTION void
14795 dump_combine_stats (FILE *file)
14796 {
14797 fprintf
14798 (file,
14799 ";; Combiner statistics: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n\n",
14800 combine_attempts, combine_merges, combine_extras, combine_successes);
14801 }
14802
14803 void
14804 dump_combine_total_stats (FILE *file)
14805 {
14806 fprintf
14807 (file,
14808 "\n;; Combiner totals: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n",
14809 total_attempts, total_merges, total_extras, total_successes);
14810 }
14811 \f
14812 /* Try combining insns through substitution. */
14813 static unsigned int
14814 rest_of_handle_combine (void)
14815 {
14816 int rebuild_jump_labels_after_combine;
14817
14818 df_set_flags (DF_LR_RUN_DCE + DF_DEFER_INSN_RESCAN);
14819 df_note_add_problem ();
14820 df_analyze ();
14821
14822 regstat_init_n_sets_and_refs ();
14823 reg_n_sets_max = max_reg_num ();
14824
14825 rebuild_jump_labels_after_combine
14826 = combine_instructions (get_insns (), max_reg_num ());
14827
14828 /* Combining insns may have turned an indirect jump into a
14829 direct jump. Rebuild the JUMP_LABEL fields of jumping
14830 instructions. */
14831 if (rebuild_jump_labels_after_combine)
14832 {
14833 if (dom_info_available_p (CDI_DOMINATORS))
14834 free_dominance_info (CDI_DOMINATORS);
14835 timevar_push (TV_JUMP);
14836 rebuild_jump_labels (get_insns ());
14837 cleanup_cfg (0);
14838 timevar_pop (TV_JUMP);
14839 }
14840
14841 regstat_free_n_sets_and_refs ();
14842 return 0;
14843 }
14844
14845 namespace {
14846
14847 const pass_data pass_data_combine =
14848 {
14849 RTL_PASS, /* type */
14850 "combine", /* name */
14851 OPTGROUP_NONE, /* optinfo_flags */
14852 TV_COMBINE, /* tv_id */
14853 PROP_cfglayout, /* properties_required */
14854 0, /* properties_provided */
14855 0, /* properties_destroyed */
14856 0, /* todo_flags_start */
14857 TODO_df_finish, /* todo_flags_finish */
14858 };
14859
14860 class pass_combine : public rtl_opt_pass
14861 {
14862 public:
14863 pass_combine (gcc::context *ctxt)
14864 : rtl_opt_pass (pass_data_combine, ctxt)
14865 {}
14866
14867 /* opt_pass methods: */
14868 virtual bool gate (function *) { return (optimize > 0); }
14869 virtual unsigned int execute (function *)
14870 {
14871 return rest_of_handle_combine ();
14872 }
14873
14874 }; // class pass_combine
14875
14876 } // anon namespace
14877
14878 rtl_opt_pass *
14879 make_pass_combine (gcc::context *ctxt)
14880 {
14881 return new pass_combine (ctxt);
14882 }