1 /* Optimize by combining instructions for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 /* This module is essentially the "combiner" phase of the U. of Arizona
23 Portable Optimizer, but redone to work on our list-structured
24 representation for RTL instead of their string representation.
25
26 The LOG_LINKS of each insn identify the most recent assignment
27 to each REG used in the insn. It is a list of previous insns,
28 each of which contains a SET for a REG that is used in this insn
29 and not used or set in between. LOG_LINKs never cross basic blocks.
30 They were set up by the preceding pass (lifetime analysis).
31
32 We try to combine each pair of insns joined by a logical link.
33 We also try to combine triples of insns A, B and C when
34 C has a link back to B and B has a link back to A.
35
 36 	   LOG_LINKS does not have links for uses of CC0.  It doesn't
37 need to, because the insn that sets the CC0 is always immediately
38 before the insn that tests it. So we always regard a branch
39 insn as having a logical link to the preceding insn. The same is true
40 for an insn explicitly using CC0.
41
42 We check (with use_crosses_set_p) to avoid combining in such a way
43 as to move a computation to a place where its value would be different.
44
45 Combination is done by mathematically substituting the previous
46 insn(s) values for the regs they set into the expressions in
47 the later insns that refer to these regs. If the result is a valid insn
48 for our target machine, according to the machine description,
49 we install it, delete the earlier insns, and update the data flow
50 information (LOG_LINKS and REG_NOTES) for what we did.
51
52 There are a few exceptions where the dataflow information isn't
53 completely updated (however this is only a local issue since it is
54 regenerated before the next pass that uses it):
55
56 - reg_live_length is not updated
57 - reg_n_refs is not adjusted in the rare case when a register is
58 no longer required in a computation
59 - there are extremely rare cases (see distribute_notes) when a
60 REG_DEAD note is lost
61 - a LOG_LINKS entry that refers to an insn with multiple SETs may be
62 removed because there is no way to know which register it was
63 linking
64
65 To simplify substitution, we combine only when the earlier insn(s)
66 consist of only a single assignment. To simplify updating afterward,
67 we never combine when a subroutine call appears in the middle.
68
69 Since we do not represent assignments to CC0 explicitly except when that
70 is all an insn does, there is no LOG_LINKS entry in an insn that uses
71 the condition code for the insn that set the condition code.
72 Fortunately, these two insns must be consecutive.
73 Therefore, every JUMP_INSN is taken to have an implicit logical link
74 to the preceding insn. This is not quite right, since non-jumps can
75 also use the condition code; but in practice such insns would not
76 combine anyway. */
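/* An illustrative sketch of a two-insn combination (register numbers and
   the multiply-add target are invented for this example, not taken from
   any port).  Given a LOG_LINK from the second insn back to the first,

	(set (reg 100) (plus (reg 101) (const_int 4)))
	(set (reg 102) (mult (reg 100) (reg 103)))

   combine substitutes the source of the first SET into the second,
   producing

	(set (reg 102) (mult (plus (reg 101) (const_int 4)) (reg 103)))

   If the new pattern is recognized by the machine description (say, as a
   multiply-add), the earlier insn is deleted and LOG_LINKS and REG_NOTES
   are updated; otherwise all tentative changes are reverted through the
   undo buffer.  */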
77
78 #include "config.h"
79 #include "system.h"
80 #include "coretypes.h"
81 #include "tm.h"
82 #include "rtl.h"
83 #include "tree.h"
84 #include "tm_p.h"
85 #include "flags.h"
86 #include "regs.h"
87 #include "hard-reg-set.h"
88 #include "basic-block.h"
89 #include "insn-config.h"
90 #include "function.h"
91 /* Include expr.h after insn-config.h so we get HAVE_conditional_move. */
92 #include "expr.h"
93 #include "insn-attr.h"
94 #include "recog.h"
95 #include "diagnostic-core.h"
96 #include "toplev.h"
97 #include "target.h"
98 #include "optabs.h"
99 #include "insn-codes.h"
100 #include "rtlhooks-def.h"
101 /* Include output.h for dump_file. */
102 #include "output.h"
103 #include "params.h"
104 #include "timevar.h"
105 #include "tree-pass.h"
106 #include "df.h"
107 #include "cgraph.h"
108
109 /* Number of attempts to combine instructions in this function. */
110
111 static int combine_attempts;
112
113 /* Number of attempts that got as far as substitution in this function. */
114
115 static int combine_merges;
116
117 /* Number of instructions combined with added SETs in this function. */
118
119 static int combine_extras;
120
121 /* Number of instructions combined in this function. */
122
123 static int combine_successes;
124
125 /* Totals over entire compilation. */
126
127 static int total_attempts, total_merges, total_extras, total_successes;
128
129 /* combine_instructions may try to replace the right hand side of the
130 second instruction with the value of an associated REG_EQUAL note
131 before throwing it at try_combine. That is problematic when there
132 is a REG_DEAD note for a register used in the old right hand side
133 and can cause distribute_notes to do wrong things. This is the
134 second instruction if it has been so modified, null otherwise. */
135
136 static rtx i2mod;
137
138 /* When I2MOD is nonnull, this is a copy of the old right hand side. */
139
140 static rtx i2mod_old_rhs;
141
142 /* When I2MOD is nonnull, this is a copy of the new right hand side. */
143
144 static rtx i2mod_new_rhs;
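/* Illustrative example of the I2MOD mechanism (register numbers are made
   up).  If the second instruction is

	(set (reg 100) (reg 101))

   and carries a REG_EQUAL note for (plus (reg 102) (const_int 1)),
   combine_instructions may temporarily replace the SET_SRC with the note
   contents before calling try_combine.  In that case i2mod points at the
   modified insn, i2mod_old_rhs holds a copy of (reg 101) and
   i2mod_new_rhs a copy of the note value, so that note distribution can
   allow for registers that appeared only in the old right hand side.  */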
145 \f
146 typedef struct reg_stat_struct {
147 /* Record last point of death of (hard or pseudo) register n. */
148 rtx last_death;
149
150 /* Record last point of modification of (hard or pseudo) register n. */
151 rtx last_set;
152
153 /* The next group of fields allows the recording of the last value assigned
154 to (hard or pseudo) register n. We use this information to see if an
155 operation being processed is redundant given a prior operation performed
156 on the register. For example, an `and' with a constant is redundant if
157 all the zero bits are already known to be turned off.
158
159 We use an approach similar to that used by cse, but change it in the
160 following ways:
161
162 (1) We do not want to reinitialize at each label.
163 (2) It is useful, but not critical, to know the actual value assigned
164 to a register. Often just its form is helpful.
165
166 Therefore, we maintain the following fields:
167
168 last_set_value the last value assigned
169 last_set_label records the value of label_tick when the
170 register was assigned
171 last_set_table_tick records the value of label_tick when a
172 value using the register is assigned
173 last_set_invalid set to nonzero when it is not valid
174 to use the value of this register in some
175 register's value
176
177 To understand the usage of these tables, it is important to understand
178 the distinction between the value in last_set_value being valid and
179 the register being validly contained in some other expression in the
180 table.
181
182 (The next two parameters are out of date).
183
184 reg_stat[i].last_set_value is valid if it is nonzero, and either
185 reg_n_sets[i] is 1 or reg_stat[i].last_set_label == label_tick.
186
187 Register I may validly appear in any expression returned for the value
188 of another register if reg_n_sets[i] is 1. It may also appear in the
189 value for register J if reg_stat[j].last_set_invalid is zero, or
190 reg_stat[i].last_set_label < reg_stat[j].last_set_label.
191
192 If an expression is found in the table containing a register which may
193 not validly appear in an expression, the register is replaced by
194 something that won't match, (clobber (const_int 0)). */
195
196 /* Record last value assigned to (hard or pseudo) register n. */
197
198 rtx last_set_value;
199
200 /* Record the value of label_tick when an expression involving register n
201 is placed in last_set_value. */
202
203 int last_set_table_tick;
204
205 /* Record the value of label_tick when the value for register n is placed in
206 last_set_value. */
207
208 int last_set_label;
209
210 /* These fields are maintained in parallel with last_set_value and are
211 used to store the mode in which the register was last set, the bits
212 that were known to be zero when it was last set, and the number of
 213 	     sign bit copies it was known to have when it was last set.  */
214
215 unsigned HOST_WIDE_INT last_set_nonzero_bits;
216 char last_set_sign_bit_copies;
217 ENUM_BITFIELD(machine_mode) last_set_mode : 8;
218
219 /* Set nonzero if references to register n in expressions should not be
220 used. last_set_invalid is set nonzero when this register is being
221 assigned to and last_set_table_tick == label_tick. */
222
223 char last_set_invalid;
224
225 /* Some registers that are set more than once and used in more than one
226 basic block are nevertheless always set in similar ways. For example,
227 a QImode register may be loaded from memory in two places on a machine
228 where byte loads zero extend.
229
230 We record in the following fields if a register has some leading bits
231 that are always equal to the sign bit, and what we know about the
232 nonzero bits of a register, specifically which bits are known to be
233 zero.
234
235 If an entry is zero, it means that we don't know anything special. */
236
237 unsigned char sign_bit_copies;
238
239 unsigned HOST_WIDE_INT nonzero_bits;
240
241 /* Record the value of the label_tick when the last truncation
242 happened. The field truncated_to_mode is only valid if
243 truncation_label == label_tick. */
244
245 int truncation_label;
246
247 /* Record the last truncation seen for this register. If truncation
248 is not a nop to this mode we might be able to save an explicit
249 truncation if we know that value already contains a truncated
250 value. */
251
252 ENUM_BITFIELD(machine_mode) truncated_to_mode : 8;
253 } reg_stat_type;
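/* A small worked example of how this record is used (the values are
   illustrative, not taken from any particular target).  If an SImode
   register is only ever set by zero extensions from QImode, its
   nonzero_bits field is 0xff.  A later

	(and:SI (reg:SI 100) (const_int 255))

   is then redundant, because every bit the constant would clear is
   already known to be zero, and combine can replace the AND by the
   register itself.  */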
254
255 DEF_VEC_O(reg_stat_type);
256 DEF_VEC_ALLOC_O(reg_stat_type,heap);
257
258 static VEC(reg_stat_type,heap) *reg_stat;
259
260 /* Record the luid of the last insn that invalidated memory
261 (anything that writes memory, and subroutine calls, but not pushes). */
262
263 static int mem_last_set;
264
265 /* Record the luid of the last CALL_INSN
266 so we can tell whether a potential combination crosses any calls. */
267
268 static int last_call_luid;
269
270 /* When `subst' is called, this is the insn that is being modified
271 (by combining in a previous insn). The PATTERN of this insn
272 is still the old pattern partially modified and it should not be
273 looked at, but this may be used to examine the successors of the insn
274 to judge whether a simplification is valid. */
275
276 static rtx subst_insn;
277
278 /* This is the lowest LUID that `subst' is currently dealing with.
279 get_last_value will not return a value if the register was set at or
280 after this LUID. If not for this mechanism, we could get confused if
281 I2 or I1 in try_combine were an insn that used the old value of a register
282 to obtain a new value. In that case, we might erroneously get the
283 new value of the register when we wanted the old one. */
284
285 static int subst_low_luid;
286
287 /* This contains any hard registers that are used in newpat; reg_dead_at_p
288 must consider all these registers to be always live. */
289
290 static HARD_REG_SET newpat_used_regs;
291
292 /* This is an insn to which a LOG_LINKS entry has been added. If this
 293 	   insn is earlier than I2 or I3, combine should rescan starting at
294 that location. */
295
296 static rtx added_links_insn;
297
298 /* Basic block in which we are performing combines. */
299 static basic_block this_basic_block;
300 static bool optimize_this_for_speed_p;
301
302 \f
303 /* Length of the currently allocated uid_insn_cost array. */
304
305 static int max_uid_known;
306
307 /* The following array records the insn_rtx_cost for every insn
308 in the instruction stream. */
309
310 static int *uid_insn_cost;
311
312 /* The following array records the LOG_LINKS for every insn in the
313 instruction stream as an INSN_LIST rtx. */
314
315 static rtx *uid_log_links;
316
317 #define INSN_COST(INSN) (uid_insn_cost[INSN_UID (INSN)])
318 #define LOG_LINKS(INSN) (uid_log_links[INSN_UID (INSN)])
319
320 /* Incremented for each basic block. */
321
322 static int label_tick;
323
324 /* Reset to label_tick for each extended basic block in scanning order. */
325
326 static int label_tick_ebb_start;
327
328 /* Mode used to compute significance in reg_stat[].nonzero_bits. It is the
329 largest integer mode that can fit in HOST_BITS_PER_WIDE_INT. */
330
331 static enum machine_mode nonzero_bits_mode;
332
333 /* Nonzero when reg_stat[].nonzero_bits and reg_stat[].sign_bit_copies can
334 be safely used. It is zero while computing them and after combine has
 335 	   completed.  Keeping it zero while they are computed prevents propagating
 336 	   values based on previously set values, which can be incorrect if a
 337 	   variable is modified in a loop.  */
338
339 static int nonzero_sign_valid;
340
341 \f
342 /* Record one modification to rtl structure
343 to be undone by storing old_contents into *where. */
344
345 enum undo_kind { UNDO_RTX, UNDO_INT, UNDO_MODE };
346
347 struct undo
348 {
349 struct undo *next;
350 enum undo_kind kind;
351 union { rtx r; int i; enum machine_mode m; } old_contents;
352 union { rtx *r; int *i; } where;
353 };
354
355 /* Record a bunch of changes to be undone, up to MAX_UNDO of them.
356 num_undo says how many are currently recorded.
357
358 other_insn is nonzero if we have modified some other insn in the process
359 of working on subst_insn. It must be verified too. */
360
361 struct undobuf
362 {
363 struct undo *undos;
364 struct undo *frees;
365 rtx other_insn;
366 };
367
368 static struct undobuf undobuf;
369
370 /* Number of times the pseudo being substituted for
371 was found and replaced. */
372
373 static int n_occurrences;
374
375 static rtx reg_nonzero_bits_for_combine (const_rtx, enum machine_mode, const_rtx,
376 enum machine_mode,
377 unsigned HOST_WIDE_INT,
378 unsigned HOST_WIDE_INT *);
379 static rtx reg_num_sign_bit_copies_for_combine (const_rtx, enum machine_mode, const_rtx,
380 enum machine_mode,
381 unsigned int, unsigned int *);
382 static void do_SUBST (rtx *, rtx);
383 static void do_SUBST_INT (int *, int);
384 static void init_reg_last (void);
385 static void setup_incoming_promotions (rtx);
386 static void set_nonzero_bits_and_sign_copies (rtx, const_rtx, void *);
387 static int cant_combine_insn_p (rtx);
388 static int can_combine_p (rtx, rtx, rtx, rtx, rtx, rtx, rtx *, rtx *);
389 static int combinable_i3pat (rtx, rtx *, rtx, rtx, rtx, int, int, rtx *);
390 static int contains_muldiv (rtx);
391 static rtx try_combine (rtx, rtx, rtx, rtx, int *);
392 static void undo_all (void);
393 static void undo_commit (void);
394 static rtx *find_split_point (rtx *, rtx, bool);
395 static rtx subst (rtx, rtx, rtx, int, int);
396 static rtx combine_simplify_rtx (rtx, enum machine_mode, int);
397 static rtx simplify_if_then_else (rtx);
398 static rtx simplify_set (rtx);
399 static rtx simplify_logical (rtx);
400 static rtx expand_compound_operation (rtx);
401 static const_rtx expand_field_assignment (const_rtx);
402 static rtx make_extraction (enum machine_mode, rtx, HOST_WIDE_INT,
403 rtx, unsigned HOST_WIDE_INT, int, int, int);
404 static rtx extract_left_shift (rtx, int);
405 static rtx make_compound_operation (rtx, enum rtx_code);
406 static int get_pos_from_mask (unsigned HOST_WIDE_INT,
407 unsigned HOST_WIDE_INT *);
408 static rtx canon_reg_for_combine (rtx, rtx);
409 static rtx force_to_mode (rtx, enum machine_mode,
410 unsigned HOST_WIDE_INT, int);
411 static rtx if_then_else_cond (rtx, rtx *, rtx *);
412 static rtx known_cond (rtx, enum rtx_code, rtx, rtx);
413 static int rtx_equal_for_field_assignment_p (rtx, rtx);
414 static rtx make_field_assignment (rtx);
415 static rtx apply_distributive_law (rtx);
416 static rtx distribute_and_simplify_rtx (rtx, int);
417 static rtx simplify_and_const_int_1 (enum machine_mode, rtx,
418 unsigned HOST_WIDE_INT);
419 static rtx simplify_and_const_int (rtx, enum machine_mode, rtx,
420 unsigned HOST_WIDE_INT);
421 static int merge_outer_ops (enum rtx_code *, HOST_WIDE_INT *, enum rtx_code,
422 HOST_WIDE_INT, enum machine_mode, int *);
423 static rtx simplify_shift_const_1 (enum rtx_code, enum machine_mode, rtx, int);
424 static rtx simplify_shift_const (rtx, enum rtx_code, enum machine_mode, rtx,
425 int);
426 static int recog_for_combine (rtx *, rtx, rtx *);
427 static rtx gen_lowpart_for_combine (enum machine_mode, rtx);
428 static enum rtx_code simplify_comparison (enum rtx_code, rtx *, rtx *);
429 static void update_table_tick (rtx);
430 static void record_value_for_reg (rtx, rtx, rtx);
431 static void check_promoted_subreg (rtx, rtx);
432 static void record_dead_and_set_regs_1 (rtx, const_rtx, void *);
433 static void record_dead_and_set_regs (rtx);
434 static int get_last_value_validate (rtx *, rtx, int, int);
435 static rtx get_last_value (const_rtx);
436 static int use_crosses_set_p (const_rtx, int);
437 static void reg_dead_at_p_1 (rtx, const_rtx, void *);
438 static int reg_dead_at_p (rtx, rtx);
439 static void move_deaths (rtx, rtx, int, rtx, rtx *);
440 static int reg_bitfield_target_p (rtx, rtx);
441 static void distribute_notes (rtx, rtx, rtx, rtx, rtx, rtx, rtx);
442 static void distribute_links (rtx);
443 static void mark_used_regs_combine (rtx);
444 static void record_promoted_value (rtx, rtx);
445 static int unmentioned_reg_p_1 (rtx *, void *);
446 static bool unmentioned_reg_p (rtx, rtx);
447 static int record_truncated_value (rtx *, void *);
448 static void record_truncated_values (rtx *, void *);
449 static bool reg_truncated_to_mode (enum machine_mode, const_rtx);
450 static rtx gen_lowpart_or_truncate (enum machine_mode, rtx);
451 \f
452
453 /* It is not safe to use ordinary gen_lowpart in combine.
454 See comments in gen_lowpart_for_combine. */
455 #undef RTL_HOOKS_GEN_LOWPART
456 #define RTL_HOOKS_GEN_LOWPART gen_lowpart_for_combine
457
458 /* Our implementation of gen_lowpart never emits a new pseudo. */
459 #undef RTL_HOOKS_GEN_LOWPART_NO_EMIT
460 #define RTL_HOOKS_GEN_LOWPART_NO_EMIT gen_lowpart_for_combine
461
462 #undef RTL_HOOKS_REG_NONZERO_REG_BITS
463 #define RTL_HOOKS_REG_NONZERO_REG_BITS reg_nonzero_bits_for_combine
464
465 #undef RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES
466 #define RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES reg_num_sign_bit_copies_for_combine
467
468 #undef RTL_HOOKS_REG_TRUNCATED_TO_MODE
469 #define RTL_HOOKS_REG_TRUNCATED_TO_MODE reg_truncated_to_mode
470
471 static const struct rtl_hooks combine_rtl_hooks = RTL_HOOKS_INITIALIZER;
472
473 \f
474 /* Try to split PATTERN found in INSN. This returns NULL_RTX if
475 PATTERN can not be split. Otherwise, it returns an insn sequence.
476 This is a wrapper around split_insns which ensures that the
477 reg_stat vector is made larger if the splitter creates a new
478 register. */
479
480 static rtx
481 combine_split_insns (rtx pattern, rtx insn)
482 {
483 rtx ret;
484 unsigned int nregs;
485
486 ret = split_insns (pattern, insn);
487 nregs = max_reg_num ();
488 if (nregs > VEC_length (reg_stat_type, reg_stat))
489 VEC_safe_grow_cleared (reg_stat_type, heap, reg_stat, nregs);
490 return ret;
491 }
492
493 /* This is used by find_single_use to locate an rtx in LOC that
494 contains exactly one use of DEST, which is typically either a REG
495 or CC0. It returns a pointer to the innermost rtx expression
496 containing DEST. Appearances of DEST that are being used to
497 totally replace it are not counted. */
498
499 static rtx *
500 find_single_use_1 (rtx dest, rtx *loc)
501 {
502 rtx x = *loc;
503 enum rtx_code code = GET_CODE (x);
504 rtx *result = NULL;
505 rtx *this_result;
506 int i;
507 const char *fmt;
508
509 switch (code)
510 {
511 case CONST_INT:
512 case CONST:
513 case LABEL_REF:
514 case SYMBOL_REF:
515 case CONST_DOUBLE:
516 case CONST_VECTOR:
517 case CLOBBER:
518 return 0;
519
520 case SET:
521 /* If the destination is anything other than CC0, PC, a REG or a SUBREG
522 of a REG that occupies all of the REG, the insn uses DEST if
523 it is mentioned in the destination or the source. Otherwise, we
 524 	 just need to check the source.  */
525 if (GET_CODE (SET_DEST (x)) != CC0
526 && GET_CODE (SET_DEST (x)) != PC
527 && !REG_P (SET_DEST (x))
528 && ! (GET_CODE (SET_DEST (x)) == SUBREG
529 && REG_P (SUBREG_REG (SET_DEST (x)))
530 && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (x))))
531 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
532 == ((GET_MODE_SIZE (GET_MODE (SET_DEST (x)))
533 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))))
534 break;
535
536 return find_single_use_1 (dest, &SET_SRC (x));
537
538 case MEM:
539 case SUBREG:
540 return find_single_use_1 (dest, &XEXP (x, 0));
541
542 default:
543 break;
544 }
545
546 /* If it wasn't one of the common cases above, check each expression and
547 vector of this code. Look for a unique usage of DEST. */
548
549 fmt = GET_RTX_FORMAT (code);
550 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
551 {
552 if (fmt[i] == 'e')
553 {
554 if (dest == XEXP (x, i)
555 || (REG_P (dest) && REG_P (XEXP (x, i))
556 && REGNO (dest) == REGNO (XEXP (x, i))))
557 this_result = loc;
558 else
559 this_result = find_single_use_1 (dest, &XEXP (x, i));
560
561 if (result == NULL)
562 result = this_result;
563 else if (this_result)
564 /* Duplicate usage. */
565 return NULL;
566 }
567 else if (fmt[i] == 'E')
568 {
569 int j;
570
571 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
572 {
573 if (XVECEXP (x, i, j) == dest
574 || (REG_P (dest)
575 && REG_P (XVECEXP (x, i, j))
576 && REGNO (XVECEXP (x, i, j)) == REGNO (dest)))
577 this_result = loc;
578 else
579 this_result = find_single_use_1 (dest, &XVECEXP (x, i, j));
580
581 if (result == NULL)
582 result = this_result;
583 else if (this_result)
584 return NULL;
585 }
586 }
587 }
588
589 return result;
590 }
591
592
593 /* See if DEST, produced in INSN, is used only a single time in the
594 sequel. If so, return a pointer to the innermost rtx expression in which
595 it is used.
596
597 If PLOC is nonzero, *PLOC is set to the insn containing the single use.
598
599 If DEST is cc0_rtx, we look only at the next insn. In that case, we don't
600 care about REG_DEAD notes or LOG_LINKS.
601
602 Otherwise, we find the single use by finding an insn that has a
603 LOG_LINKS pointing at INSN and has a REG_DEAD note for DEST. If DEST is
604 only referenced once in that insn, we know that it must be the first
605 and last insn referencing DEST. */
606
607 static rtx *
608 find_single_use (rtx dest, rtx insn, rtx *ploc)
609 {
610 basic_block bb;
611 rtx next;
612 rtx *result;
613 rtx link;
614
615 #ifdef HAVE_cc0
616 if (dest == cc0_rtx)
617 {
618 next = NEXT_INSN (insn);
619 if (next == 0
620 || (!NONJUMP_INSN_P (next) && !JUMP_P (next)))
621 return 0;
622
623 result = find_single_use_1 (dest, &PATTERN (next));
624 if (result && ploc)
625 *ploc = next;
626 return result;
627 }
628 #endif
629
630 if (!REG_P (dest))
631 return 0;
632
633 bb = BLOCK_FOR_INSN (insn);
634 for (next = NEXT_INSN (insn);
635 next && BLOCK_FOR_INSN (next) == bb;
636 next = NEXT_INSN (next))
637 if (INSN_P (next) && dead_or_set_p (next, dest))
638 {
639 for (link = LOG_LINKS (next); link; link = XEXP (link, 1))
640 if (XEXP (link, 0) == insn)
641 break;
642
643 if (link)
644 {
645 result = find_single_use_1 (dest, &PATTERN (next));
646 if (ploc)
647 *ploc = next;
648 return result;
649 }
650 }
651
652 return 0;
653 }
654 \f
655 /* Substitute NEWVAL, an rtx expression, into INTO, a place in some
656 insn. The substitution can be undone by undo_all. If INTO is already
657 set to NEWVAL, do not record this change. Because computing NEWVAL might
658 also call SUBST, we have to compute it before we put anything into
659 the undo table. */
660
661 static void
662 do_SUBST (rtx *into, rtx newval)
663 {
664 struct undo *buf;
665 rtx oldval = *into;
666
667 if (oldval == newval)
668 return;
669
670 /* We'd like to catch as many invalid transformations here as
671 possible. Unfortunately, there are way too many mode changes
672 that are perfectly valid, so we'd waste too much effort for
673 little gain doing the checks here. Focus on catching invalid
674 transformations involving integer constants. */
675 if (GET_MODE_CLASS (GET_MODE (oldval)) == MODE_INT
676 && CONST_INT_P (newval))
677 {
678 /* Sanity check that we're replacing oldval with a CONST_INT
679 that is a valid sign-extension for the original mode. */
680 gcc_assert (INTVAL (newval)
681 == trunc_int_for_mode (INTVAL (newval), GET_MODE (oldval)));
682
683 /* Replacing the operand of a SUBREG or a ZERO_EXTEND with a
684 CONST_INT is not valid, because after the replacement, the
685 original mode would be gone. Unfortunately, we can't tell
686 when do_SUBST is called to replace the operand thereof, so we
687 perform this test on oldval instead, checking whether an
688 invalid replacement took place before we got here. */
689 gcc_assert (!(GET_CODE (oldval) == SUBREG
690 && CONST_INT_P (SUBREG_REG (oldval))));
691 gcc_assert (!(GET_CODE (oldval) == ZERO_EXTEND
692 && CONST_INT_P (XEXP (oldval, 0))));
693 }
694
695 if (undobuf.frees)
696 buf = undobuf.frees, undobuf.frees = buf->next;
697 else
698 buf = XNEW (struct undo);
699
700 buf->kind = UNDO_RTX;
701 buf->where.r = into;
702 buf->old_contents.r = oldval;
703 *into = newval;
704
705 buf->next = undobuf.undos, undobuf.undos = buf;
706 }
707
708 #define SUBST(INTO, NEWVAL) do_SUBST(&(INTO), (NEWVAL))
709
710 /* Similar to SUBST, but NEWVAL is an int expression. Note that substitution
711 for the value of a HOST_WIDE_INT value (including CONST_INT) is
712 not safe. */
713
714 static void
715 do_SUBST_INT (int *into, int newval)
716 {
717 struct undo *buf;
718 int oldval = *into;
719
720 if (oldval == newval)
721 return;
722
723 if (undobuf.frees)
724 buf = undobuf.frees, undobuf.frees = buf->next;
725 else
726 buf = XNEW (struct undo);
727
728 buf->kind = UNDO_INT;
729 buf->where.i = into;
730 buf->old_contents.i = oldval;
731 *into = newval;
732
733 buf->next = undobuf.undos, undobuf.undos = buf;
734 }
735
736 #define SUBST_INT(INTO, NEWVAL) do_SUBST_INT(&(INTO), (NEWVAL))
737
738 /* Similar to SUBST, but just substitute the mode. This is used when
739 changing the mode of a pseudo-register, so that any other
740 references to the entry in the regno_reg_rtx array will change as
741 well. */
742
743 static void
744 do_SUBST_MODE (rtx *into, enum machine_mode newval)
745 {
746 struct undo *buf;
747 enum machine_mode oldval = GET_MODE (*into);
748
749 if (oldval == newval)
750 return;
751
752 if (undobuf.frees)
753 buf = undobuf.frees, undobuf.frees = buf->next;
754 else
755 buf = XNEW (struct undo);
756
757 buf->kind = UNDO_MODE;
758 buf->where.r = into;
759 buf->old_contents.m = oldval;
760 adjust_reg_mode (*into, newval);
761
762 buf->next = undobuf.undos, undobuf.undos = buf;
763 }
764
765 #define SUBST_MODE(INTO, NEWVAL) do_SUBST_MODE(&(INTO), (NEWVAL))
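/* Minimal usage sketch (the expression X and the replacement value are
   hypothetical).  A simplification routine typically rewrites an operand
   with

	SUBST (XEXP (x, 0), new_operand);

   Each such call records the old contents in undobuf.undos, so that if
   the final pattern is not accepted by recog_for_combine the whole
   attempt can be reverted with undo_all (), or made permanent with
   undo_commit ().  */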
766 \f
767 /* Subroutine of try_combine. Determine whether the combine replacement
768 patterns NEWPAT, NEWI2PAT and NEWOTHERPAT are cheaper according to
 769    insn_rtx_cost than the original instruction sequence I0, I1, I2, I3 and
770 undobuf.other_insn. Note that I1 and/or NEWI2PAT may be NULL_RTX.
771 NEWOTHERPAT and undobuf.other_insn may also both be NULL_RTX. This
 772    function returns false if the costs of all instructions can be
 773    estimated and the replacements are more expensive than the original
774 sequence. */
775
776 static bool
777 combine_validate_cost (rtx i0, rtx i1, rtx i2, rtx i3, rtx newpat,
778 rtx newi2pat, rtx newotherpat)
779 {
780 int i0_cost, i1_cost, i2_cost, i3_cost;
781 int new_i2_cost, new_i3_cost;
782 int old_cost, new_cost;
783
784 /* Lookup the original insn_rtx_costs. */
785 i2_cost = INSN_COST (i2);
786 i3_cost = INSN_COST (i3);
787
788 if (i1)
789 {
790 i1_cost = INSN_COST (i1);
791 if (i0)
792 {
793 i0_cost = INSN_COST (i0);
794 old_cost = (i0_cost > 0 && i1_cost > 0 && i2_cost > 0 && i3_cost > 0
795 ? i0_cost + i1_cost + i2_cost + i3_cost : 0);
796 }
797 else
798 {
799 old_cost = (i1_cost > 0 && i2_cost > 0 && i3_cost > 0
800 ? i1_cost + i2_cost + i3_cost : 0);
801 i0_cost = 0;
802 }
803 }
804 else
805 {
806 old_cost = (i2_cost > 0 && i3_cost > 0) ? i2_cost + i3_cost : 0;
807 i1_cost = i0_cost = 0;
808 }
809
810 /* Calculate the replacement insn_rtx_costs. */
811 new_i3_cost = insn_rtx_cost (newpat, optimize_this_for_speed_p);
812 if (newi2pat)
813 {
814 new_i2_cost = insn_rtx_cost (newi2pat, optimize_this_for_speed_p);
815 new_cost = (new_i2_cost > 0 && new_i3_cost > 0)
816 ? new_i2_cost + new_i3_cost : 0;
817 }
818 else
819 {
820 new_cost = new_i3_cost;
821 new_i2_cost = 0;
822 }
823
824 if (undobuf.other_insn)
825 {
826 int old_other_cost, new_other_cost;
827
828 old_other_cost = INSN_COST (undobuf.other_insn);
829 new_other_cost = insn_rtx_cost (newotherpat, optimize_this_for_speed_p);
830 if (old_other_cost > 0 && new_other_cost > 0)
831 {
832 old_cost += old_other_cost;
833 new_cost += new_other_cost;
834 }
835 else
836 old_cost = 0;
837 }
838
839 /* Disallow this recombination if both new_cost and old_cost are
 840      greater than zero, and new_cost is greater than old_cost.  */
841 if (old_cost > 0
842 && new_cost > old_cost)
843 {
844 if (dump_file)
845 {
846 if (i0)
847 {
848 fprintf (dump_file,
849 "rejecting combination of insns %d, %d, %d and %d\n",
850 INSN_UID (i0), INSN_UID (i1), INSN_UID (i2),
851 INSN_UID (i3));
852 fprintf (dump_file, "original costs %d + %d + %d + %d = %d\n",
853 i0_cost, i1_cost, i2_cost, i3_cost, old_cost);
854 }
855 else if (i1)
856 {
857 fprintf (dump_file,
858 "rejecting combination of insns %d, %d and %d\n",
859 INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
860 fprintf (dump_file, "original costs %d + %d + %d = %d\n",
861 i1_cost, i2_cost, i3_cost, old_cost);
862 }
863 else
864 {
865 fprintf (dump_file,
866 "rejecting combination of insns %d and %d\n",
867 INSN_UID (i2), INSN_UID (i3));
868 fprintf (dump_file, "original costs %d + %d = %d\n",
869 i2_cost, i3_cost, old_cost);
870 }
871
872 if (newi2pat)
873 {
874 fprintf (dump_file, "replacement costs %d + %d = %d\n",
875 new_i2_cost, new_i3_cost, new_cost);
876 }
877 else
878 fprintf (dump_file, "replacement cost %d\n", new_cost);
879 }
880
881 return false;
882 }
883
884 /* Update the uid_insn_cost array with the replacement costs. */
885 INSN_COST (i2) = new_i2_cost;
886 INSN_COST (i3) = new_i3_cost;
887 if (i1)
888 INSN_COST (i1) = 0;
889
890 return true;
891 }
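/* Worked example with made-up costs: if I2 and I3 cost 4 and 8 and the
   single replacement pattern NEWPAT costs 16, then old_cost is 12,
   new_cost is 16 and the combination is rejected.  If any original cost
   is unknown (zero), old_cost stays 0 and the replacement is accepted
   regardless of new_cost.  */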
892
893
894 /* Delete any insns that copy a register to itself. */
895
896 static void
897 delete_noop_moves (void)
898 {
899 rtx insn, next;
900 basic_block bb;
901
902 FOR_EACH_BB (bb)
903 {
904 for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = next)
905 {
906 next = NEXT_INSN (insn);
907 if (INSN_P (insn) && noop_move_p (insn))
908 {
909 if (dump_file)
910 fprintf (dump_file, "deleting noop move %d\n", INSN_UID (insn));
911
912 delete_insn_and_edges (insn);
913 }
914 }
915 }
916 }
917
918 \f
919 /* Fill in log links field for all insns. */
920
921 static void
922 create_log_links (void)
923 {
924 basic_block bb;
925 rtx *next_use, insn;
926 df_ref *def_vec, *use_vec;
927
928 next_use = XCNEWVEC (rtx, max_reg_num ());
929
930 /* Pass through each block from the end, recording the uses of each
931 register and establishing log links when def is encountered.
 932      Note that we do not clear the next_use array in order to save time,
933 so we have to test whether the use is in the same basic block as def.
934
935 There are a few cases below when we do not consider the definition or
936 usage -- these are taken from original flow.c did. Don't ask me why it is
937 done this way; I don't know and if it works, I don't want to know. */
938
939 FOR_EACH_BB (bb)
940 {
941 FOR_BB_INSNS_REVERSE (bb, insn)
942 {
943 if (!NONDEBUG_INSN_P (insn))
944 continue;
945
946 /* Log links are created only once. */
947 gcc_assert (!LOG_LINKS (insn));
948
949 for (def_vec = DF_INSN_DEFS (insn); *def_vec; def_vec++)
950 {
951 df_ref def = *def_vec;
952 int regno = DF_REF_REGNO (def);
953 rtx use_insn;
954
955 if (!next_use[regno])
956 continue;
957
958 /* Do not consider if it is pre/post modification in MEM. */
959 if (DF_REF_FLAGS (def) & DF_REF_PRE_POST_MODIFY)
960 continue;
961
 962 	      /* Do not make the log link for the frame pointer.  */
963 if ((regno == FRAME_POINTER_REGNUM
964 && (! reload_completed || frame_pointer_needed))
965 #if !HARD_FRAME_POINTER_IS_FRAME_POINTER
966 || (regno == HARD_FRAME_POINTER_REGNUM
967 && (! reload_completed || frame_pointer_needed))
968 #endif
969 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
970 || (regno == ARG_POINTER_REGNUM && fixed_regs[regno])
971 #endif
972 )
973 continue;
974
975 use_insn = next_use[regno];
976 if (BLOCK_FOR_INSN (use_insn) == bb)
977 {
978 /* flow.c claimed:
979
980 We don't build a LOG_LINK for hard registers contained
981 in ASM_OPERANDs. If these registers get replaced,
982 we might wind up changing the semantics of the insn,
983 even if reload can make what appear to be valid
984 assignments later. */
985 if (regno >= FIRST_PSEUDO_REGISTER
986 || asm_noperands (PATTERN (use_insn)) < 0)
987 {
988 /* Don't add duplicate links between instructions. */
989 rtx links;
990 for (links = LOG_LINKS (use_insn); links;
991 links = XEXP (links, 1))
992 if (insn == XEXP (links, 0))
993 break;
994
995 if (!links)
996 LOG_LINKS (use_insn) =
997 alloc_INSN_LIST (insn, LOG_LINKS (use_insn));
998 }
999 }
1000 next_use[regno] = NULL_RTX;
1001 }
1002
1003 for (use_vec = DF_INSN_USES (insn); *use_vec; use_vec++)
1004 {
1005 df_ref use = *use_vec;
1006 int regno = DF_REF_REGNO (use);
1007
1008 /* Do not consider the usage of the stack pointer
1009 by function call. */
1010 if (DF_REF_FLAGS (use) & DF_REF_CALL_STACK_USAGE)
1011 continue;
1012
1013 next_use[regno] = insn;
1014 }
1015 }
1016 }
1017
1018 free (next_use);
1019 }
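/* Illustrative example (insn UIDs invented): if insn 10 sets (reg 100)
   and insn 14 is the nearest following insn in the same basic block that
   uses (reg 100), the reverse scan above records insn 10 in
   LOG_LINKS (insn 14).  Uses in a different basic block, frame pointer
   definitions and pre/post-modify definitions produce no link.  */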
1020
1021 /* Clear LOG_LINKS fields of insns. */
1022
1023 static void
1024 clear_log_links (void)
1025 {
1026 rtx insn;
1027
1028 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
1029 if (INSN_P (insn))
1030 free_INSN_LIST_list (&LOG_LINKS (insn));
1031 }
1032
1033 /* Walk the LOG_LINKS of insn B to see if we find a reference to A. Return
1034 true if we found a LOG_LINK that proves that A feeds B. This only works
1035 if there are no instructions between A and B which could have a link
1036 depending on A, since in that case we would not record a link for B. */
1037
1038 static bool
1039 insn_a_feeds_b (rtx a, rtx b)
1040 {
1041 rtx links;
1042 for (links = LOG_LINKS (b); links; links = XEXP (links, 1))
1043 if (XEXP (links, 0) == a)
1044 return true;
1045 return false;
1046 }
1047 \f
1048 /* Main entry point for combiner. F is the first insn of the function.
1049 NREGS is the first unused pseudo-reg number.
1050
1051 Return nonzero if the combiner has turned an indirect jump
1052 instruction into a direct jump. */
1053 static int
1054 combine_instructions (rtx f, unsigned int nregs)
1055 {
1056 rtx insn, next;
1057 #ifdef HAVE_cc0
1058 rtx prev;
1059 #endif
1060 rtx links, nextlinks;
1061 rtx first;
1062 basic_block last_bb;
1063
1064 int new_direct_jump_p = 0;
1065
1066 for (first = f; first && !INSN_P (first); )
1067 first = NEXT_INSN (first);
1068 if (!first)
1069 return 0;
1070
1071 combine_attempts = 0;
1072 combine_merges = 0;
1073 combine_extras = 0;
1074 combine_successes = 0;
1075
1076 rtl_hooks = combine_rtl_hooks;
1077
1078 VEC_safe_grow_cleared (reg_stat_type, heap, reg_stat, nregs);
1079
1080 init_recog_no_volatile ();
1081
1082 /* Allocate array for insn info. */
1083 max_uid_known = get_max_uid ();
1084 uid_log_links = XCNEWVEC (rtx, max_uid_known + 1);
1085 uid_insn_cost = XCNEWVEC (int, max_uid_known + 1);
1086
1087 nonzero_bits_mode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);
1088
1089 /* Don't use reg_stat[].nonzero_bits when computing it. This can cause
1090 problems when, for example, we have j <<= 1 in a loop. */
1091
1092 nonzero_sign_valid = 0;
1093 label_tick = label_tick_ebb_start = 1;
1094
1095 /* Scan all SETs and see if we can deduce anything about what
1096 bits are known to be zero for some registers and how many copies
1097 of the sign bit are known to exist for those registers.
1098
1099 Also set any known values so that we can use it while searching
1100 for what bits are known to be set. */
1101
1102 setup_incoming_promotions (first);
1103 /* Allow the entry block and the first block to fall into the same EBB.
1104 Conceptually the incoming promotions are assigned to the entry block. */
1105 last_bb = ENTRY_BLOCK_PTR;
1106
1107 create_log_links ();
1108 FOR_EACH_BB (this_basic_block)
1109 {
1110 optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
1111 last_call_luid = 0;
1112 mem_last_set = -1;
1113
1114 label_tick++;
1115 if (!single_pred_p (this_basic_block)
1116 || single_pred (this_basic_block) != last_bb)
1117 label_tick_ebb_start = label_tick;
1118 last_bb = this_basic_block;
1119
1120 FOR_BB_INSNS (this_basic_block, insn)
1121 if (INSN_P (insn) && BLOCK_FOR_INSN (insn))
1122 {
1123 subst_low_luid = DF_INSN_LUID (insn);
1124 subst_insn = insn;
1125
1126 note_stores (PATTERN (insn), set_nonzero_bits_and_sign_copies,
1127 insn);
1128 record_dead_and_set_regs (insn);
1129
1130 #ifdef AUTO_INC_DEC
1131 for (links = REG_NOTES (insn); links; links = XEXP (links, 1))
1132 if (REG_NOTE_KIND (links) == REG_INC)
1133 set_nonzero_bits_and_sign_copies (XEXP (links, 0), NULL_RTX,
1134 insn);
1135 #endif
1136
1137 /* Record the current insn_rtx_cost of this instruction. */
1138 if (NONJUMP_INSN_P (insn))
1139 INSN_COST (insn) = insn_rtx_cost (PATTERN (insn),
1140 optimize_this_for_speed_p);
1141 if (dump_file)
1142 fprintf(dump_file, "insn_cost %d: %d\n",
1143 INSN_UID (insn), INSN_COST (insn));
1144 }
1145 }
1146
1147 nonzero_sign_valid = 1;
1148
1149 /* Now scan all the insns in forward order. */
1150 label_tick = label_tick_ebb_start = 1;
1151 init_reg_last ();
1152 setup_incoming_promotions (first);
1153 last_bb = ENTRY_BLOCK_PTR;
1154
1155 FOR_EACH_BB (this_basic_block)
1156 {
1157 optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
1158 last_call_luid = 0;
1159 mem_last_set = -1;
1160
1161 label_tick++;
1162 if (!single_pred_p (this_basic_block)
1163 || single_pred (this_basic_block) != last_bb)
1164 label_tick_ebb_start = label_tick;
1165 last_bb = this_basic_block;
1166
1167 rtl_profile_for_bb (this_basic_block);
1168 for (insn = BB_HEAD (this_basic_block);
1169 insn != NEXT_INSN (BB_END (this_basic_block));
1170 insn = next ? next : NEXT_INSN (insn))
1171 {
1172 next = 0;
1173 if (NONDEBUG_INSN_P (insn))
1174 {
1175 /* See if we know about function return values before this
1176 insn based upon SUBREG flags. */
1177 check_promoted_subreg (insn, PATTERN (insn));
1178
 1179 	  /* See if we can find hardregs and subregs of pseudos in
 1180 	     narrower modes.  This could help turn TRUNCATEs
1181 into SUBREGs. */
1182 note_uses (&PATTERN (insn), record_truncated_values, NULL);
1183
1184 /* Try this insn with each insn it links back to. */
1185
1186 for (links = LOG_LINKS (insn); links; links = XEXP (links, 1))
1187 if ((next = try_combine (insn, XEXP (links, 0), NULL_RTX,
1188 NULL_RTX, &new_direct_jump_p)) != 0)
1189 goto retry;
1190
1191 /* Try each sequence of three linked insns ending with this one. */
1192
1193 for (links = LOG_LINKS (insn); links; links = XEXP (links, 1))
1194 {
1195 rtx link = XEXP (links, 0);
1196
1197 /* If the linked insn has been replaced by a note, then there
1198 is no point in pursuing this chain any further. */
1199 if (NOTE_P (link))
1200 continue;
1201
1202 for (nextlinks = LOG_LINKS (link);
1203 nextlinks;
1204 nextlinks = XEXP (nextlinks, 1))
1205 if ((next = try_combine (insn, link, XEXP (nextlinks, 0),
1206 NULL_RTX,
1207 &new_direct_jump_p)) != 0)
1208 goto retry;
1209 }
1210
1211 #ifdef HAVE_cc0
1212 /* Try to combine a jump insn that uses CC0
1213 with a preceding insn that sets CC0, and maybe with its
1214 logical predecessor as well.
1215 This is how we make decrement-and-branch insns.
1216 We need this special code because data flow connections
1217 via CC0 do not get entered in LOG_LINKS. */
1218
1219 if (JUMP_P (insn)
1220 && (prev = prev_nonnote_insn (insn)) != 0
1221 && NONJUMP_INSN_P (prev)
1222 && sets_cc0_p (PATTERN (prev)))
1223 {
1224 if ((next = try_combine (insn, prev, NULL_RTX, NULL_RTX,
1225 &new_direct_jump_p)) != 0)
1226 goto retry;
1227
1228 for (nextlinks = LOG_LINKS (prev); nextlinks;
1229 nextlinks = XEXP (nextlinks, 1))
1230 if ((next = try_combine (insn, prev, XEXP (nextlinks, 0),
1231 NULL_RTX,
1232 &new_direct_jump_p)) != 0)
1233 goto retry;
1234 }
1235
1236 /* Do the same for an insn that explicitly references CC0. */
1237 if (NONJUMP_INSN_P (insn)
1238 && (prev = prev_nonnote_insn (insn)) != 0
1239 && NONJUMP_INSN_P (prev)
1240 && sets_cc0_p (PATTERN (prev))
1241 && GET_CODE (PATTERN (insn)) == SET
1242 && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (insn))))
1243 {
1244 if ((next = try_combine (insn, prev, NULL_RTX, NULL_RTX,
1245 &new_direct_jump_p)) != 0)
1246 goto retry;
1247
1248 for (nextlinks = LOG_LINKS (prev); nextlinks;
1249 nextlinks = XEXP (nextlinks, 1))
1250 if ((next = try_combine (insn, prev, XEXP (nextlinks, 0),
1251 NULL_RTX,
1252 &new_direct_jump_p)) != 0)
1253 goto retry;
1254 }
1255
1256 /* Finally, see if any of the insns that this insn links to
1257 explicitly references CC0. If so, try this insn, that insn,
1258 and its predecessor if it sets CC0. */
1259 for (links = LOG_LINKS (insn); links; links = XEXP (links, 1))
1260 if (NONJUMP_INSN_P (XEXP (links, 0))
1261 && GET_CODE (PATTERN (XEXP (links, 0))) == SET
1262 && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (XEXP (links, 0))))
1263 && (prev = prev_nonnote_insn (XEXP (links, 0))) != 0
1264 && NONJUMP_INSN_P (prev)
1265 && sets_cc0_p (PATTERN (prev))
1266 && (next = try_combine (insn, XEXP (links, 0),
1267 prev, NULL_RTX,
1268 &new_direct_jump_p)) != 0)
1269 goto retry;
1270 #endif
1271
1272 /* Try combining an insn with two different insns whose results it
1273 uses. */
1274 for (links = LOG_LINKS (insn); links; links = XEXP (links, 1))
1275 for (nextlinks = XEXP (links, 1); nextlinks;
1276 nextlinks = XEXP (nextlinks, 1))
1277 if ((next = try_combine (insn, XEXP (links, 0),
1278 XEXP (nextlinks, 0), NULL_RTX,
1279 &new_direct_jump_p)) != 0)
1280 goto retry;
1281
1282 /* Try four-instruction combinations. */
1283 for (links = LOG_LINKS (insn); links; links = XEXP (links, 1))
1284 {
1285 rtx next1;
1286 rtx link = XEXP (links, 0);
1287
1288 /* If the linked insn has been replaced by a note, then there
1289 is no point in pursuing this chain any further. */
1290 if (NOTE_P (link))
1291 continue;
1292
1293 for (next1 = LOG_LINKS (link); next1; next1 = XEXP (next1, 1))
1294 {
1295 rtx link1 = XEXP (next1, 0);
1296 if (NOTE_P (link1))
1297 continue;
1298 /* I0 -> I1 -> I2 -> I3. */
1299 for (nextlinks = LOG_LINKS (link1); nextlinks;
1300 nextlinks = XEXP (nextlinks, 1))
1301 if ((next = try_combine (insn, link, link1,
1302 XEXP (nextlinks, 0),
1303 &new_direct_jump_p)) != 0)
1304 goto retry;
1305 /* I0, I1 -> I2, I2 -> I3. */
1306 for (nextlinks = XEXP (next1, 1); nextlinks;
1307 nextlinks = XEXP (nextlinks, 1))
1308 if ((next = try_combine (insn, link, link1,
1309 XEXP (nextlinks, 0),
1310 &new_direct_jump_p)) != 0)
1311 goto retry;
1312 }
1313
1314 for (next1 = XEXP (links, 1); next1; next1 = XEXP (next1, 1))
1315 {
1316 rtx link1 = XEXP (next1, 0);
1317 if (NOTE_P (link1))
1318 continue;
1319 /* I0 -> I2; I1, I2 -> I3. */
1320 for (nextlinks = LOG_LINKS (link); nextlinks;
1321 nextlinks = XEXP (nextlinks, 1))
1322 if ((next = try_combine (insn, link, link1,
1323 XEXP (nextlinks, 0),
1324 &new_direct_jump_p)) != 0)
1325 goto retry;
1326 /* I0 -> I1; I1, I2 -> I3. */
1327 for (nextlinks = LOG_LINKS (link1); nextlinks;
1328 nextlinks = XEXP (nextlinks, 1))
1329 if ((next = try_combine (insn, link, link1,
1330 XEXP (nextlinks, 0),
1331 &new_direct_jump_p)) != 0)
1332 goto retry;
1333 }
1334 }
1335
1336 /* Try this insn with each REG_EQUAL note it links back to. */
1337 for (links = LOG_LINKS (insn); links; links = XEXP (links, 1))
1338 {
1339 rtx set, note;
1340 rtx temp = XEXP (links, 0);
1341 if ((set = single_set (temp)) != 0
1342 && (note = find_reg_equal_equiv_note (temp)) != 0
1343 && (note = XEXP (note, 0), GET_CODE (note)) != EXPR_LIST
 1344 	      /* Avoid using a register that may already have been marked
1345 dead by an earlier instruction. */
1346 && ! unmentioned_reg_p (note, SET_SRC (set))
1347 && (GET_MODE (note) == VOIDmode
1348 ? SCALAR_INT_MODE_P (GET_MODE (SET_DEST (set)))
1349 : GET_MODE (SET_DEST (set)) == GET_MODE (note)))
1350 {
1351 /* Temporarily replace the set's source with the
1352 contents of the REG_EQUAL note. The insn will
1353 be deleted or recognized by try_combine. */
1354 rtx orig = SET_SRC (set);
1355 SET_SRC (set) = note;
1356 i2mod = temp;
1357 i2mod_old_rhs = copy_rtx (orig);
1358 i2mod_new_rhs = copy_rtx (note);
1359 next = try_combine (insn, i2mod, NULL_RTX, NULL_RTX,
1360 &new_direct_jump_p);
1361 i2mod = NULL_RTX;
1362 if (next)
1363 goto retry;
1364 SET_SRC (set) = orig;
1365 }
1366 }
1367
1368 if (!NOTE_P (insn))
1369 record_dead_and_set_regs (insn);
1370
1371 retry:
1372 ;
1373 }
1374 }
1375 }
1376
1377 default_rtl_profile ();
1378 clear_log_links ();
1379 clear_bb_flags ();
1380 new_direct_jump_p |= purge_all_dead_edges ();
1381 delete_noop_moves ();
1382
1383 /* Clean up. */
1384 free (uid_log_links);
1385 free (uid_insn_cost);
1386 VEC_free (reg_stat_type, heap, reg_stat);
1387
1388 {
1389 struct undo *undo, *next;
1390 for (undo = undobuf.frees; undo; undo = next)
1391 {
1392 next = undo->next;
1393 free (undo);
1394 }
1395 undobuf.frees = 0;
1396 }
1397
1398 total_attempts += combine_attempts;
1399 total_merges += combine_merges;
1400 total_extras += combine_extras;
1401 total_successes += combine_successes;
1402
1403 nonzero_sign_valid = 0;
1404 rtl_hooks = general_rtl_hooks;
1405
1406 /* Make recognizer allow volatile MEMs again. */
1407 init_recog ();
1408
1409 return new_direct_jump_p;
1410 }
1411
1412 /* Wipe the last_xxx fields of reg_stat in preparation for another pass. */
1413
1414 static void
1415 init_reg_last (void)
1416 {
1417 unsigned int i;
1418 reg_stat_type *p;
1419
1420 FOR_EACH_VEC_ELT (reg_stat_type, reg_stat, i, p)
1421 memset (p, 0, offsetof (reg_stat_type, sign_bit_copies));
1422 }
1423 \f
1424 /* Set up any promoted values for incoming argument registers. */
1425
1426 static void
1427 setup_incoming_promotions (rtx first)
1428 {
1429 tree arg;
1430 bool strictly_local = false;
1431
1432 for (arg = DECL_ARGUMENTS (current_function_decl); arg;
1433 arg = DECL_CHAIN (arg))
1434 {
1435 rtx x, reg = DECL_INCOMING_RTL (arg);
1436 int uns1, uns3;
1437 enum machine_mode mode1, mode2, mode3, mode4;
1438
1439 /* Only continue if the incoming argument is in a register. */
1440 if (!REG_P (reg))
1441 continue;
1442
1443 /* Determine, if possible, whether all call sites of the current
1444 function lie within the current compilation unit. (This does
1445 take into account the exporting of a function via taking its
1446 address, and so forth.) */
1447 strictly_local = cgraph_local_info (current_function_decl)->local;
1448
1449 /* The mode and signedness of the argument before any promotions happen
1450 (equal to the mode of the pseudo holding it at that stage). */
1451 mode1 = TYPE_MODE (TREE_TYPE (arg));
1452 uns1 = TYPE_UNSIGNED (TREE_TYPE (arg));
1453
1454 /* The mode and signedness of the argument after any source language and
1455 TARGET_PROMOTE_PROTOTYPES-driven promotions. */
1456 mode2 = TYPE_MODE (DECL_ARG_TYPE (arg));
1457 uns3 = TYPE_UNSIGNED (DECL_ARG_TYPE (arg));
1458
1459 /* The mode and signedness of the argument as it is actually passed,
1460 after any TARGET_PROMOTE_FUNCTION_ARGS-driven ABI promotions. */
1461 mode3 = promote_function_mode (DECL_ARG_TYPE (arg), mode2, &uns3,
1462 TREE_TYPE (cfun->decl), 0);
1463
1464 /* The mode of the register in which the argument is being passed. */
1465 mode4 = GET_MODE (reg);
1466
1467 /* Eliminate sign extensions in the callee when:
1468 (a) A mode promotion has occurred; */
1469 if (mode1 == mode3)
1470 continue;
1471 /* (b) The mode of the register is the same as the mode of
1472 the argument as it is passed; */
1473 if (mode3 != mode4)
1474 continue;
1475 /* (c) There's no language level extension; */
1476 if (mode1 == mode2)
1477 ;
1478 /* (c.1) All callers are from the current compilation unit. If that's
1479 the case we don't have to rely on an ABI, we only have to know
1480 what we're generating right now, and we know that we will do the
1481 mode1 to mode2 promotion with the given sign. */
1482 else if (!strictly_local)
1483 continue;
1484 /* (c.2) The combination of the two promotions is useful. This is
1485 true when the signs match, or if the first promotion is unsigned.
 1486 	 In the latter case, (sign_extend (zero_extend x)) is the same as
1487 (zero_extend (zero_extend x)), so make sure to force UNS3 true. */
1488 else if (uns1)
1489 uns3 = true;
1490 else if (uns3)
1491 continue;
1492
1493 /* Record that the value was promoted from mode1 to mode3,
1494 so that any sign extension at the head of the current
1495 function may be eliminated. */
1496 x = gen_rtx_CLOBBER (mode1, const0_rtx);
1497 x = gen_rtx_fmt_e ((uns3 ? ZERO_EXTEND : SIGN_EXTEND), mode3, x);
1498 record_value_for_reg (reg, first, x);
1499 }
1500 }
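/* Illustrative example, assuming a target that promotes a QImode
   argument to an SImode register with zero extension: mode1 is QImode,
   mode3 and mode4 are SImode, and the value recorded for the incoming
   register is

	(zero_extend:SI (clobber:QI (const_int 0)))

   which lets a later explicit zero extension of that register be
   recognized as redundant.  */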
1501
1502 /* Called via note_stores. If X is a pseudo that is narrower than
1503 HOST_BITS_PER_WIDE_INT and is being set, record what bits are known zero.
1504
1505 If we are setting only a portion of X and we can't figure out what
1506 portion, assume all bits will be used since we don't know what will
1507 be happening.
1508
1509 Similarly, set how many bits of X are known to be copies of the sign bit
1510 at all locations in the function. This is the smallest number implied
1511 by any set of X. */
1512
1513 static void
1514 set_nonzero_bits_and_sign_copies (rtx x, const_rtx set, void *data)
1515 {
1516 rtx insn = (rtx) data;
1517 unsigned int num;
1518
1519 if (REG_P (x)
1520 && REGNO (x) >= FIRST_PSEUDO_REGISTER
1521 /* If this register is undefined at the start of the file, we can't
1522 say what its contents were. */
1523 && ! REGNO_REG_SET_P
1524 (DF_LR_IN (ENTRY_BLOCK_PTR->next_bb), REGNO (x))
1525 && GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT)
1526 {
1527 reg_stat_type *rsp = VEC_index (reg_stat_type, reg_stat, REGNO (x));
1528
1529 if (set == 0 || GET_CODE (set) == CLOBBER)
1530 {
1531 rsp->nonzero_bits = GET_MODE_MASK (GET_MODE (x));
1532 rsp->sign_bit_copies = 1;
1533 return;
1534 }
1535
1536 /* If this register is being initialized using itself, and the
1537 register is uninitialized in this basic block, and there are
1538 no LOG_LINKS which set the register, then part of the
1539 register is uninitialized. In that case we can't assume
1540 anything about the number of nonzero bits.
1541
1542 ??? We could do better if we checked this in
1543 reg_{nonzero_bits,num_sign_bit_copies}_for_combine. Then we
1544 could avoid making assumptions about the insn which initially
1545 sets the register, while still using the information in other
1546 insns. We would have to be careful to check every insn
1547 involved in the combination. */
1548
1549 if (insn
1550 && reg_referenced_p (x, PATTERN (insn))
1551 && !REGNO_REG_SET_P (DF_LR_IN (BLOCK_FOR_INSN (insn)),
1552 REGNO (x)))
1553 {
1554 rtx link;
1555
1556 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
1557 {
1558 if (dead_or_set_p (XEXP (link, 0), x))
1559 break;
1560 }
1561 if (!link)
1562 {
1563 rsp->nonzero_bits = GET_MODE_MASK (GET_MODE (x));
1564 rsp->sign_bit_copies = 1;
1565 return;
1566 }
1567 }
1568
1569 /* If this is a complex assignment, see if we can convert it into a
1570 simple assignment. */
1571 set = expand_field_assignment (set);
1572
1573 /* If this is a simple assignment, or we have a paradoxical SUBREG,
1574 set what we know about X. */
1575
1576 if (SET_DEST (set) == x
1577 || (GET_CODE (SET_DEST (set)) == SUBREG
1578 && (GET_MODE_SIZE (GET_MODE (SET_DEST (set)))
1579 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (set)))))
1580 && SUBREG_REG (SET_DEST (set)) == x))
1581 {
1582 rtx src = SET_SRC (set);
1583
1584 #ifdef SHORT_IMMEDIATES_SIGN_EXTEND
1585 /* If X is narrower than a word and SRC is a non-negative
1586 constant that would appear negative in the mode of X,
1587 sign-extend it for use in reg_stat[].nonzero_bits because some
1588 machines (maybe most) will actually do the sign-extension
1589 and this is the conservative approach.
1590
1591 ??? For 2.5, try to tighten up the MD files in this regard
1592 instead of this kludge. */
1593
1594 if (GET_MODE_BITSIZE (GET_MODE (x)) < BITS_PER_WORD
1595 && CONST_INT_P (src)
1596 && INTVAL (src) > 0
1597 && 0 != (UINTVAL (src)
1598 & ((unsigned HOST_WIDE_INT) 1
1599 << (GET_MODE_BITSIZE (GET_MODE (x)) - 1))))
1600 src = GEN_INT (UINTVAL (src)
1601 | ((unsigned HOST_WIDE_INT) (-1)
1602 << GET_MODE_BITSIZE (GET_MODE (x))));
1603 #endif
1604
1605 /* Don't call nonzero_bits if it cannot change anything. */
1606 if (rsp->nonzero_bits != ~(unsigned HOST_WIDE_INT) 0)
1607 rsp->nonzero_bits |= nonzero_bits (src, nonzero_bits_mode);
1608 num = num_sign_bit_copies (SET_SRC (set), GET_MODE (x));
1609 if (rsp->sign_bit_copies == 0
1610 || rsp->sign_bit_copies > num)
1611 rsp->sign_bit_copies = num;
1612 }
1613 else
1614 {
1615 rsp->nonzero_bits = GET_MODE_MASK (GET_MODE (x));
1616 rsp->sign_bit_copies = 1;
1617 }
1618 }
1619 }
1620 \f
1621 /* See if INSN can be combined into I3. PRED, PRED2, SUCC and SUCC2 are
1622 optionally insns that were previously combined into I3 or that will be
1623 combined into the merger of INSN and I3. The order is PRED, PRED2,
1624 INSN, SUCC, SUCC2, I3.
1625
1626 Return 0 if the combination is not allowed for any reason.
1627
1628 If the combination is allowed, *PDEST will be set to the single
1629 destination of INSN and *PSRC to the single source, and this function
1630 will return 1. */
1631
1632 static int
1633 can_combine_p (rtx insn, rtx i3, rtx pred ATTRIBUTE_UNUSED,
1634 rtx pred2 ATTRIBUTE_UNUSED, rtx succ, rtx succ2,
1635 rtx *pdest, rtx *psrc)
1636 {
1637 int i;
1638 const_rtx set = 0;
1639 rtx src, dest;
1640 rtx p;
1641 #ifdef AUTO_INC_DEC
1642 rtx link;
1643 #endif
1644 bool all_adjacent = true;
1645
1646 if (succ)
1647 {
1648 if (succ2)
1649 {
1650 if (next_active_insn (succ2) != i3)
1651 all_adjacent = false;
1652 if (next_active_insn (succ) != succ2)
1653 all_adjacent = false;
1654 }
1655 else if (next_active_insn (succ) != i3)
1656 all_adjacent = false;
1657 if (next_active_insn (insn) != succ)
1658 all_adjacent = false;
1659 }
1660 else if (next_active_insn (insn) != i3)
1661 all_adjacent = false;
1662
 1663   /* Can combine only if previous insn is a SET of a REG, a SUBREG or CC0,
1664 or a PARALLEL consisting of such a SET and CLOBBERs.
1665
1666 If INSN has CLOBBER parallel parts, ignore them for our processing.
1667 By definition, these happen during the execution of the insn. When it
1668 is merged with another insn, all bets are off. If they are, in fact,
1669 needed and aren't also supplied in I3, they may be added by
1670 recog_for_combine. Otherwise, it won't match.
1671
1672 We can also ignore a SET whose SET_DEST is mentioned in a REG_UNUSED
1673 note.
1674
1675 Get the source and destination of INSN. If more than one, can't
1676 combine. */
1677
1678 if (GET_CODE (PATTERN (insn)) == SET)
1679 set = PATTERN (insn);
1680 else if (GET_CODE (PATTERN (insn)) == PARALLEL
1681 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET)
1682 {
1683 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
1684 {
1685 rtx elt = XVECEXP (PATTERN (insn), 0, i);
1686
1687 switch (GET_CODE (elt))
1688 {
1689 /* This is important to combine floating point insns
1690 for the SH4 port. */
1691 case USE:
1692 /* Combining an isolated USE doesn't make sense.
1693 We depend here on combinable_i3pat to reject them. */
1694 /* The code below this loop only verifies that the inputs of
1695 the SET in INSN do not change. We call reg_set_between_p
1696 to verify that the REG in the USE does not change between
1697 I3 and INSN.
1698 If the USE in INSN was for a pseudo register, the matching
1699 insn pattern will likely match any register; combining this
1700 with any other USE would only be safe if we knew that the
1701 used registers have identical values, or if there was
1702 something to tell them apart, e.g. different modes. For
1703 now, we forgo such complicated tests and simply disallow
1704 combining of USES of pseudo registers with any other USE. */
1705 if (REG_P (XEXP (elt, 0))
1706 && GET_CODE (PATTERN (i3)) == PARALLEL)
1707 {
1708 rtx i3pat = PATTERN (i3);
1709 int i = XVECLEN (i3pat, 0) - 1;
1710 unsigned int regno = REGNO (XEXP (elt, 0));
1711
1712 do
1713 {
1714 rtx i3elt = XVECEXP (i3pat, 0, i);
1715
1716 if (GET_CODE (i3elt) == USE
1717 && REG_P (XEXP (i3elt, 0))
1718 && (REGNO (XEXP (i3elt, 0)) == regno
1719 ? reg_set_between_p (XEXP (elt, 0),
1720 PREV_INSN (insn), i3)
1721 : regno >= FIRST_PSEUDO_REGISTER))
1722 return 0;
1723 }
1724 while (--i >= 0);
1725 }
1726 break;
1727
1728 /* We can ignore CLOBBERs. */
1729 case CLOBBER:
1730 break;
1731
1732 case SET:
1733 /* Ignore SETs whose result isn't used but not those that
1734 have side-effects. */
1735 if (find_reg_note (insn, REG_UNUSED, SET_DEST (elt))
1736 && insn_nothrow_p (insn)
1737 && !side_effects_p (elt))
1738 break;
1739
1740 /* If we have already found a SET, this is a second one and
1741 so we cannot combine with this insn. */
1742 if (set)
1743 return 0;
1744
1745 set = elt;
1746 break;
1747
1748 default:
1749 /* Anything else means we can't combine. */
1750 return 0;
1751 }
1752 }
1753
1754 if (set == 0
1755 /* If SET_SRC is an ASM_OPERANDS we can't throw away these CLOBBERs,
1756 so don't do anything with it. */
1757 || GET_CODE (SET_SRC (set)) == ASM_OPERANDS)
1758 return 0;
1759 }
1760 else
1761 return 0;
1762
1763 if (set == 0)
1764 return 0;
1765
1766 set = expand_field_assignment (set);
1767 src = SET_SRC (set), dest = SET_DEST (set);
1768
1769 /* Don't eliminate a store in the stack pointer. */
1770 if (dest == stack_pointer_rtx
1771 /* Don't combine with an insn that sets a register to itself if it has
1772 a REG_EQUAL note. This may be part of a LIBCALL sequence. */
1773 || (rtx_equal_p (src, dest) && find_reg_note (insn, REG_EQUAL, NULL_RTX))
1774 /* Can't merge an ASM_OPERANDS. */
1775 || GET_CODE (src) == ASM_OPERANDS
1776 /* Can't merge a function call. */
1777 || GET_CODE (src) == CALL
1778 /* Don't eliminate a function call argument. */
1779 || (CALL_P (i3)
1780 && (find_reg_fusage (i3, USE, dest)
1781 || (REG_P (dest)
1782 && REGNO (dest) < FIRST_PSEUDO_REGISTER
1783 && global_regs[REGNO (dest)])))
1784 /* Don't substitute into an incremented register. */
1785 || FIND_REG_INC_NOTE (i3, dest)
1786 || (succ && FIND_REG_INC_NOTE (succ, dest))
1787 || (succ2 && FIND_REG_INC_NOTE (succ2, dest))
1788 /* Don't substitute into a non-local goto, this confuses CFG. */
1789 || (JUMP_P (i3) && find_reg_note (i3, REG_NON_LOCAL_GOTO, NULL_RTX))
1790 /* Make sure that DEST is not used after SUCC but before I3. */
1791 || (!all_adjacent
1792 && ((succ2
1793 && (reg_used_between_p (dest, succ2, i3)
1794 || reg_used_between_p (dest, succ, succ2)))
1795 || (!succ2 && succ && reg_used_between_p (dest, succ, i3))))
1796 /* Make sure that the value that is to be substituted for the register
1797 does not use any registers whose values alter in between. However,
1798 if the insns are adjacent, a use can't cross a set even though we
1799 think it might (this can happen for a sequence of insns each setting
1800 the same destination; last_set of that register might point to
1801 a NOTE). If INSN has a REG_EQUIV note, the register is always
1802 equivalent to the memory so the substitution is valid even if there
1803 are intervening stores. Also, don't move a volatile asm or
1804 UNSPEC_VOLATILE across any other insns. */
1805 || (! all_adjacent
1806 && (((!MEM_P (src)
1807 || ! find_reg_note (insn, REG_EQUIV, src))
1808 && use_crosses_set_p (src, DF_INSN_LUID (insn)))
1809 || (GET_CODE (src) == ASM_OPERANDS && MEM_VOLATILE_P (src))
1810 || GET_CODE (src) == UNSPEC_VOLATILE))
1811 /* Don't combine across a CALL_INSN, because that would possibly
1812 change whether the life span of some REGs crosses calls or not,
1813 and it is a pain to update that information.
1814 Exception: if source is a constant, moving it later can't hurt.
1815 Accept that as a special case. */
1816 || (DF_INSN_LUID (insn) < last_call_luid && ! CONSTANT_P (src)))
1817 return 0;
1818
1819 /* DEST must either be a REG or CC0. */
1820 if (REG_P (dest))
1821 {
1822 /* If register alignment is being enforced for multi-word items in all
1823 cases except for parameters, it is possible to have a register copy
1824 insn referencing a hard register that is not allowed to contain the
1825 mode being copied and which would not be valid as an operand of most
1826 insns. Eliminate this problem by not combining with such an insn.
1827
1828 Also, on some machines we don't want to extend the life of a hard
1829 register. */
1830
1831 if (REG_P (src)
1832 && ((REGNO (dest) < FIRST_PSEUDO_REGISTER
1833 && ! HARD_REGNO_MODE_OK (REGNO (dest), GET_MODE (dest)))
1834 /* Don't extend the life of a hard register unless it is
1835 user variable (if we have few registers) or it can't
1836 fit into the desired register (meaning something special
1837 is going on).
1838 Also avoid substituting a return register into I3, because
1839 reload can't handle a conflict with constraints of other
1840 inputs. */
1841 || (REGNO (src) < FIRST_PSEUDO_REGISTER
1842 && ! HARD_REGNO_MODE_OK (REGNO (src), GET_MODE (src)))))
1843 return 0;
1844 }
1845 else if (GET_CODE (dest) != CC0)
1846 return 0;
1847
1848
1849 if (GET_CODE (PATTERN (i3)) == PARALLEL)
1850 for (i = XVECLEN (PATTERN (i3), 0) - 1; i >= 0; i--)
1851 if (GET_CODE (XVECEXP (PATTERN (i3), 0, i)) == CLOBBER)
1852 {
1853 /* Don't substitute for a register intended as a clobberable
1854 operand. */
1855 rtx reg = XEXP (XVECEXP (PATTERN (i3), 0, i), 0);
1856 if (rtx_equal_p (reg, dest))
1857 return 0;
1858
1859 /* If the clobber represents an earlyclobber operand, we must not
1860 substitute an expression containing the clobbered register.
1861 As we do not analyze the constraint strings here, we have to
1862 make the conservative assumption. However, if the register is
1863 a fixed hard reg, the clobber cannot represent any operand;
1864 we leave it up to the machine description to either accept or
1865 reject use-and-clobber patterns. */
1866 if (!REG_P (reg)
1867 || REGNO (reg) >= FIRST_PSEUDO_REGISTER
1868 || !fixed_regs[REGNO (reg)])
1869 if (reg_overlap_mentioned_p (reg, src))
1870 return 0;
1871 }
1872
1873 /* If INSN contains anything volatile, or is an `asm' (whether volatile
1874 or not), reject, unless nothing volatile comes between it and I3.  */
1875
1876 if (GET_CODE (src) == ASM_OPERANDS || volatile_refs_p (src))
1877 {
1878 /* Make sure neither succ nor succ2 contains a volatile reference. */
1879 if (succ2 != 0 && volatile_refs_p (PATTERN (succ2)))
1880 return 0;
1881 if (succ != 0 && volatile_refs_p (PATTERN (succ)))
1882 return 0;
1883 /* We'll check insns between INSN and I3 below. */
1884 }
1885
1886 /* If INSN is an asm, and DEST is a hard register, reject, since it has
1887 to be an explicit register variable, and was chosen for a reason. */
1888
1889 if (GET_CODE (src) == ASM_OPERANDS
1890 && REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER)
1891 return 0;
1892
1893 /* If there are any volatile insns between INSN and I3, reject, because
1894 they might affect machine state. */
1895
1896 for (p = NEXT_INSN (insn); p != i3; p = NEXT_INSN (p))
1897 if (INSN_P (p) && p != succ && p != succ2 && volatile_insn_p (PATTERN (p)))
1898 return 0;
1899
1900 /* If INSN contains an autoincrement or autodecrement, make sure that
1901 register is not used between there and I3, and not already used in
1902 I3 either. Neither must it be used in PRED or SUCC, if they exist.
1903 Also insist that I3 not be a jump; if it were one
1904 and the incremented register were spilled, we would lose. */
1905
1906 #ifdef AUTO_INC_DEC
1907 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1908 if (REG_NOTE_KIND (link) == REG_INC
1909 && (JUMP_P (i3)
1910 || reg_used_between_p (XEXP (link, 0), insn, i3)
1911 || (pred != NULL_RTX
1912 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred)))
1913 || (pred2 != NULL_RTX
1914 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred2)))
1915 || (succ != NULL_RTX
1916 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ)))
1917 || (succ2 != NULL_RTX
1918 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ2)))
1919 || reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i3))))
1920 return 0;
1921 #endif
1922
1923 #ifdef HAVE_cc0
1924 /* Don't combine an insn that follows a CC0-setting insn.
1925 An insn that uses CC0 must not be separated from the one that sets it.
1926 We do, however, allow I2 to follow a CC0-setting insn if that insn
1927 is passed as I1; in that case it will be deleted also.
1928 We also allow combining in this case if all the insns are adjacent
1929 because that would leave the two CC0 insns adjacent as well.
1930 It would be more logical to test whether CC0 occurs inside I1 or I2,
1931 but that would be much slower, and this ought to be equivalent. */
1932
1933 p = prev_nonnote_insn (insn);
1934 if (p && p != pred && NONJUMP_INSN_P (p) && sets_cc0_p (PATTERN (p))
1935 && ! all_adjacent)
1936 return 0;
1937 #endif
1938
1939 /* If we get here, we have passed all the tests and the combination is
1940 to be allowed. */
1941
1942 *pdest = dest;
1943 *psrc = src;
1944
1945 return 1;
1946 }
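/* Illustrative sketch only (not part of the pass): a pair of insns that
   can_combine_p would typically accept, assuming they are adjacent and
   (reg:SI 100) dies in the second one:

       (set (reg:SI 100) (plus:SI (reg:SI 101) (const_int 4)))    <- INSN
       (set (mem:SI (reg:SI 102)) (reg:SI 100))                   <- I3

   On success *PDEST is (reg:SI 100) and *PSRC is the PLUS expression,
   which try_combine will then substitute into I3.  */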
1947 \f
1948 /* LOC is the location within I3 that contains its pattern or the component
1949 of a PARALLEL of the pattern. We validate that it is valid for combining.
1950
1951 One problem is if I3 modifies its output, as opposed to replacing it
1952 entirely, we can't allow the output to contain I2DEST, I1DEST or I0DEST as
1953 doing so would produce an insn that is not equivalent to the original insns.
1954
1955 Consider:
1956
1957 (set (reg:DI 101) (reg:DI 100))
1958 (set (subreg:SI (reg:DI 101) 0) <foo>)
1959
1960 This is NOT equivalent to:
1961
1962 (parallel [(set (subreg:SI (reg:DI 100) 0) <foo>)
1963 (set (reg:DI 101) (reg:DI 100))])
1964
1965 Not only does this modify 100 (in which case it might still be valid
1966 if 100 were dead in I2), it sets 101 to the ORIGINAL value of 100.
1967
1968 We can also run into a problem if I2 sets a register that I1
1969 uses and I1 gets directly substituted into I3 (not via I2). In that
1970 case, we would be getting the wrong value of I2DEST into I3, so we
1971 must reject the combination. This case occurs when I2 and I1 both
1972 feed into I3, rather than when I1 feeds into I2, which feeds into I3.
1973 If I1_NOT_IN_SRC is nonzero, it means that finding I1 in the source
1974 of a SET must prevent combination from occurring. The same situation
1975 can occur for I0, in which case I0_NOT_IN_SRC is set.
1976
1977 Before doing the above check, we first try to expand a field assignment
1978 into a set of logical operations.
1979
1980 If PI3_DEST_KILLED is nonzero, it is a pointer to a location in which
1981 we place a register that is both set and used within I3. If more than one
1982 such register is detected, we fail.
1983
1984 Return 1 if the combination is valid, zero otherwise. */
1985
1986 static int
1987 combinable_i3pat (rtx i3, rtx *loc, rtx i2dest, rtx i1dest, rtx i0dest,
1988 int i1_not_in_src, int i0_not_in_src, rtx *pi3dest_killed)
1989 {
1990 rtx x = *loc;
1991
1992 if (GET_CODE (x) == SET)
1993 {
1994 rtx set = x;
1995 rtx dest = SET_DEST (set);
1996 rtx src = SET_SRC (set);
1997 rtx inner_dest = dest;
1998 rtx subdest;
1999
2000 while (GET_CODE (inner_dest) == STRICT_LOW_PART
2001 || GET_CODE (inner_dest) == SUBREG
2002 || GET_CODE (inner_dest) == ZERO_EXTRACT)
2003 inner_dest = XEXP (inner_dest, 0);
2004
2005 /* Check for the case where I3 modifies its output, as discussed
2006 above. We don't want to prevent pseudos from being combined
2007 into the address of a MEM, so only prevent the combination if
2008 i1 or i2 set the same MEM. */
2009 if ((inner_dest != dest &&
2010 (!MEM_P (inner_dest)
2011 || rtx_equal_p (i2dest, inner_dest)
2012 || (i1dest && rtx_equal_p (i1dest, inner_dest))
2013 || (i0dest && rtx_equal_p (i0dest, inner_dest)))
2014 && (reg_overlap_mentioned_p (i2dest, inner_dest)
2015 || (i1dest && reg_overlap_mentioned_p (i1dest, inner_dest))
2016 || (i0dest && reg_overlap_mentioned_p (i0dest, inner_dest))))
2017
2018 /* This is the same test done in can_combine_p except we can't test
2019 all_adjacent; we don't have to, since this instruction will stay
2020 in place, thus we are not considering increasing the lifetime of
2021 INNER_DEST.
2022
2023 Also, if this insn sets a function argument, combining it with
2024 something that might need a spill could clobber a previous
2025 function argument; the all_adjacent test in can_combine_p also
2026 checks this; here, we do a more specific test for this case. */
2027
2028 || (REG_P (inner_dest)
2029 && REGNO (inner_dest) < FIRST_PSEUDO_REGISTER
2030 && (! HARD_REGNO_MODE_OK (REGNO (inner_dest),
2031 GET_MODE (inner_dest))))
2032 || (i1_not_in_src && reg_overlap_mentioned_p (i1dest, src))
2033 || (i0_not_in_src && reg_overlap_mentioned_p (i0dest, src)))
2034 return 0;
2035
2036 /* If DEST is used in I3, it is being killed in this insn, so
2037 record that for later. We have to consider paradoxical
2038 subregs here, since they kill the whole register, but we
2039 ignore partial subregs, STRICT_LOW_PART, etc.
2040 Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
2041 STACK_POINTER_REGNUM, since these are always considered to be
2042 live. Similarly for ARG_POINTER_REGNUM if it is fixed. */
2043 subdest = dest;
2044 if (GET_CODE (subdest) == SUBREG
2045 && (GET_MODE_SIZE (GET_MODE (subdest))
2046 >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (subdest)))))
2047 subdest = SUBREG_REG (subdest);
2048 if (pi3dest_killed
2049 && REG_P (subdest)
2050 && reg_referenced_p (subdest, PATTERN (i3))
2051 && REGNO (subdest) != FRAME_POINTER_REGNUM
2052 #if !HARD_FRAME_POINTER_IS_FRAME_POINTER
2053 && REGNO (subdest) != HARD_FRAME_POINTER_REGNUM
2054 #endif
2055 #if ARG_POINTER_REGNUM != FRAME_POINTER_REGNUM
2056 && (REGNO (subdest) != ARG_POINTER_REGNUM
2057 || ! fixed_regs [REGNO (subdest)])
2058 #endif
2059 && REGNO (subdest) != STACK_POINTER_REGNUM)
2060 {
2061 if (*pi3dest_killed)
2062 return 0;
2063
2064 *pi3dest_killed = subdest;
2065 }
2066 }
2067
2068 else if (GET_CODE (x) == PARALLEL)
2069 {
2070 int i;
2071
2072 for (i = 0; i < XVECLEN (x, 0); i++)
2073 if (! combinable_i3pat (i3, &XVECEXP (x, 0, i), i2dest, i1dest, i0dest,
2074 i1_not_in_src, i0_not_in_src, pi3dest_killed))
2075 return 0;
2076 }
2077
2078 return 1;
2079 }
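/* Illustrative sketch only (not part of the pass): for a SET such as

       (set (strict_low_part (subreg:QI (reg:SI 100) 0)) (reg:QI 101))

   the loop above peels the STRICT_LOW_PART and SUBREG wrappers, so
   INNER_DEST becomes (reg:SI 100) while DEST is left unchanged; the
   combination is then rejected if I2DEST, I1DEST or I0DEST overlaps
   that inner register, since I3 only modifies part of it.  */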
2080 \f
2081 /* Return 1 if X is an arithmetic expression that contains a multiplication
2082 or a division. We don't count multiplications by powers of two here. */
2083
2084 static int
2085 contains_muldiv (rtx x)
2086 {
2087 switch (GET_CODE (x))
2088 {
2089 case MOD: case DIV: case UMOD: case UDIV:
2090 return 1;
2091
2092 case MULT:
2093 return ! (CONST_INT_P (XEXP (x, 1))
2094 && exact_log2 (UINTVAL (XEXP (x, 1))) >= 0);
2095 default:
2096 if (BINARY_P (x))
2097 return contains_muldiv (XEXP (x, 0))
2098 || contains_muldiv (XEXP (x, 1));
2099
2100 if (UNARY_P (x))
2101 return contains_muldiv (XEXP (x, 0));
2102
2103 return 0;
2104 }
2105 }
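/* Illustrative sketch only (not part of the pass): contains_muldiv
   returns 1 for

       (plus:SI (mult:SI (reg:SI 100) (reg:SI 101)) (const_int 1))

   but 0 for

       (plus:SI (mult:SI (reg:SI 100) (const_int 8)) (const_int 1))

   because a multiplication by 8, a power of two, is not counted.  */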
2106 \f
2107 /* Determine whether INSN can be used in a combination. Return nonzero if
2108 not. This is used in try_combine to detect early some cases where we
2109 can't perform combinations. */
2110
2111 static int
2112 cant_combine_insn_p (rtx insn)
2113 {
2114 rtx set;
2115 rtx src, dest;
2116
2117 /* If this isn't really an insn, we can't do anything.
2118 This can occur when flow deletes an insn that it has merged into an
2119 auto-increment address. */
2120 if (! INSN_P (insn))
2121 return 1;
2122
2123 /* Never combine loads and stores involving hard regs that are likely
2124 to be spilled. The register allocator can usually handle such
2125 reg-reg moves by tying. If we allow the combiner to make
2126 substitutions of likely-spilled regs, reload might die.
2127 As an exception, we allow combinations involving fixed regs; these are
2128 not available to the register allocator so there's no risk involved. */
2129
2130 set = single_set (insn);
2131 if (! set)
2132 return 0;
2133 src = SET_SRC (set);
2134 dest = SET_DEST (set);
2135 if (GET_CODE (src) == SUBREG)
2136 src = SUBREG_REG (src);
2137 if (GET_CODE (dest) == SUBREG)
2138 dest = SUBREG_REG (dest);
2139 if (REG_P (src) && REG_P (dest)
2140 && ((HARD_REGISTER_P (src)
2141 && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (src))
2142 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (src))))
2143 || (HARD_REGISTER_P (dest)
2144 && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (dest))
2145 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (dest))))))
2146 return 1;
2147
2148 return 0;
2149 }
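/* Illustrative sketch only, for a hypothetical target: a copy such as

       (set (reg:SI 100) (reg:SI 0))

   is rejected by cant_combine_insn_p when hard register 0 is not in
   fixed_reg_set and belongs to a class for which
   targetm.class_likely_spilled_p returns true, whereas a copy between
   two pseudos is never rejected here.  */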
2150
2151 struct likely_spilled_retval_info
2152 {
2153 unsigned regno, nregs;
2154 unsigned mask;
2155 };
2156
2157 /* Called via note_stores by likely_spilled_retval_p. Remove from info->mask
2158 hard registers that are known to be written to / clobbered in full. */
2159 static void
2160 likely_spilled_retval_1 (rtx x, const_rtx set, void *data)
2161 {
2162 struct likely_spilled_retval_info *const info =
2163 (struct likely_spilled_retval_info *) data;
2164 unsigned regno, nregs;
2165 unsigned new_mask;
2166
2167 if (!REG_P (XEXP (set, 0)))
2168 return;
2169 regno = REGNO (x);
2170 if (regno >= info->regno + info->nregs)
2171 return;
2172 nregs = hard_regno_nregs[regno][GET_MODE (x)];
2173 if (regno + nregs <= info->regno)
2174 return;
2175 new_mask = (2U << (nregs - 1)) - 1;
2176 if (regno < info->regno)
2177 new_mask >>= info->regno - regno;
2178 else
2179 new_mask <<= regno - info->regno;
2180 info->mask &= ~new_mask;
2181 }
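/* Illustrative sketch only: with nregs == 3 the expression
   (2U << (nregs - 1)) - 1 above evaluates to 0b111, one mask bit per
   hard register of the stored value; the following shifts then line
   that mask up with the bit numbering used by likely_spilled_retval_p,
   where bit 0 corresponds to info->regno.  */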
2182
2183 /* Return nonzero iff part of the return value is live during INSN, and
2184 it is likely spilled. This can happen when more than one insn is needed
2185 to copy the return value, e.g. when we consider combining into the
2186 second copy insn for a complex value. */
2187
2188 static int
2189 likely_spilled_retval_p (rtx insn)
2190 {
2191 rtx use = BB_END (this_basic_block);
2192 rtx reg, p;
2193 unsigned regno, nregs;
2194 /* We assume here that no machine mode needs more than
2195 32 hard registers when the value overlaps with a register
2196 for which TARGET_FUNCTION_VALUE_REGNO_P is true. */
2197 unsigned mask;
2198 struct likely_spilled_retval_info info;
2199
2200 if (!NONJUMP_INSN_P (use) || GET_CODE (PATTERN (use)) != USE || insn == use)
2201 return 0;
2202 reg = XEXP (PATTERN (use), 0);
2203 if (!REG_P (reg) || !targetm.calls.function_value_regno_p (REGNO (reg)))
2204 return 0;
2205 regno = REGNO (reg);
2206 nregs = hard_regno_nregs[regno][GET_MODE (reg)];
2207 if (nregs == 1)
2208 return 0;
2209 mask = (2U << (nregs - 1)) - 1;
2210
2211 /* Disregard parts of the return value that are set later. */
2212 info.regno = regno;
2213 info.nregs = nregs;
2214 info.mask = mask;
2215 for (p = PREV_INSN (use); info.mask && p != insn; p = PREV_INSN (p))
2216 if (INSN_P (p))
2217 note_stores (PATTERN (p), likely_spilled_retval_1, &info);
2218 mask = info.mask;
2219
2220 /* Check if any of the (probably) live return value registers is
2221 likely spilled. */
2222 nregs--;
2223 do
2224 {
2225 if ((mask & 1 << nregs)
2226 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (regno + nregs)))
2227 return 1;
2228 } while (nregs--);
2229 return 0;
2230 }
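/* Illustrative sketch only, for a hypothetical 32-bit target whose
   DImode return value lives in hard registers 0 and 1:

       (set (reg:SI 0) (reg:SI 100))       <- copies the low part
       (set (reg:SI 1) (reg:SI 101))       <- INSN, copies the high part
       (use (reg:DI 0))                    <- BB_END of this_basic_block

   Hard register 0 already holds part of the return value while INSN
   executes, so if its class is likely spilled the function returns 1
   and try_combine refuses to combine into INSN.  */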
2231
2232 /* Adjust INSN after we made a change to its destination.
2233
2234 Changing the destination can invalidate notes that say something about
2235 the results of the insn and a LOG_LINK pointing to the insn. */
2236
2237 static void
2238 adjust_for_new_dest (rtx insn)
2239 {
2240 /* For notes, be conservative and simply remove them. */
2241 remove_reg_equal_equiv_notes (insn);
2242
2243 /* The new insn will have a destination that was previously the destination
2244 of an insn just above it. Call distribute_links to make a LOG_LINK from
2245 the next use of that destination. */
2246 distribute_links (gen_rtx_INSN_LIST (VOIDmode, insn, NULL_RTX));
2247
2248 df_insn_rescan (insn);
2249 }
2250
2251 /* Return TRUE if combine can reuse reg X in mode MODE.
2252 ADDED_SETS is nonzero if the original set is still required. */
2253 static bool
2254 can_change_dest_mode (rtx x, int added_sets, enum machine_mode mode)
2255 {
2256 unsigned int regno;
2257
2258 if (!REG_P (x))
2259 return false;
2260
2261 regno = REGNO (x);
2262 /* Allow hard registers if the new mode is legal, and occupies no more
2263 registers than the old mode. */
2264 if (regno < FIRST_PSEUDO_REGISTER)
2265 return (HARD_REGNO_MODE_OK (regno, mode)
2266 && (hard_regno_nregs[regno][GET_MODE (x)]
2267 >= hard_regno_nregs[regno][mode]));
2268
2269 /* Or a pseudo that is only used once. */
2270 return (REG_N_SETS (regno) == 1 && !added_sets
2271 && !REG_USERVAR_P (x));
2272 }
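/* Illustrative sketch only, for a hypothetical target: changing
   (reg:SI 0) to (reg:CC 0) is allowed when HARD_REGNO_MODE_OK accepts
   CCmode in hard register 0 and CCmode needs no more hard registers
   than SImode did; for a pseudo, the change is allowed only when the
   pseudo is set exactly once, the original set is not being kept
   around (ADDED_SETS is zero), and it is not a user variable.  */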
2273
2274
2275 /* Check whether X, the destination of a set, refers to part of
2276 the register specified by REG. */
2277
2278 static bool
2279 reg_subword_p (rtx x, rtx reg)
2280 {
2281 /* Check that reg is an integer mode register. */
2282 if (!REG_P (reg) || GET_MODE_CLASS (GET_MODE (reg)) != MODE_INT)
2283 return false;
2284
2285 if (GET_CODE (x) == STRICT_LOW_PART
2286 || GET_CODE (x) == ZERO_EXTRACT)
2287 x = XEXP (x, 0);
2288
2289 return GET_CODE (x) == SUBREG
2290 && SUBREG_REG (x) == reg
2291 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT;
2292 }
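/* Illustrative sketch only: with REG being (reg:SI 100), reg_subword_p
   returns true for (subreg:HI (reg:SI 100) 0), and likewise when that
   SUBREG sits inside a STRICT_LOW_PART or ZERO_EXTRACT, but false for
   a SUBREG of some other register or for non-integer modes.  */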
2293
2294 #ifdef AUTO_INC_DEC
2295 /* Replace auto-increment addressing modes with explicit operations to access
2296 the same addresses without modifying the corresponding registers. */
2297
2298 static rtx
2299 cleanup_auto_inc_dec (rtx src, enum machine_mode mem_mode)
2300 {
2301 rtx x = src;
2302 const RTX_CODE code = GET_CODE (x);
2303 int i;
2304 const char *fmt;
2305
2306 switch (code)
2307 {
2308 case REG:
2309 case CONST_INT:
2310 case CONST_DOUBLE:
2311 case CONST_FIXED:
2312 case CONST_VECTOR:
2313 case SYMBOL_REF:
2314 case CODE_LABEL:
2315 case PC:
2316 case CC0:
2317 case SCRATCH:
2318 /* A SCRATCH must be shared because each one represents a distinct value. */
2319 return x;
2320 case CLOBBER:
2321 if (REG_P (XEXP (x, 0)) && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER)
2322 return x;
2323 break;
2324
2325 case CONST:
2326 if (shared_const_p (x))
2327 return x;
2328 break;
2329
2330 case MEM:
2331 mem_mode = GET_MODE (x);
2332 break;
2333
2334 case PRE_INC:
2335 case PRE_DEC:
2336 gcc_assert (mem_mode != VOIDmode && mem_mode != BLKmode);
2337 return gen_rtx_PLUS (GET_MODE (x),
2338 cleanup_auto_inc_dec (XEXP (x, 0), mem_mode),
2339 GEN_INT (code == PRE_INC
2340 ? GET_MODE_SIZE (mem_mode)
2341 : -GET_MODE_SIZE (mem_mode)));
2342
2343 case POST_INC:
2344 case POST_DEC:
2345 case PRE_MODIFY:
2346 case POST_MODIFY:
2347 return cleanup_auto_inc_dec (code == PRE_MODIFY
2348 ? XEXP (x, 1) : XEXP (x, 0),
2349 mem_mode);
2350
2351 default:
2352 break;
2353 }
2354
2355 /* Copy the various flags, fields, and other information. We assume
2356 that all fields need copying, and then clear the fields that should
2357 not be copied. That is the sensible default behavior, and forces
2358 us to explicitly document why we are *not* copying a flag. */
2359 x = shallow_copy_rtx (x);
2360
2361 /* We do not copy the USED flag, which is used as a mark bit during
2362 walks over the RTL. */
2363 RTX_FLAG (x, used) = 0;
2364
2365 /* We do not copy FRAME_RELATED for INSNs. */
2366 if (INSN_P (x))
2367 RTX_FLAG (x, frame_related) = 0;
2368
2369 fmt = GET_RTX_FORMAT (code);
2370 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2371 if (fmt[i] == 'e')
2372 XEXP (x, i) = cleanup_auto_inc_dec (XEXP (x, i), mem_mode);
2373 else if (fmt[i] == 'E' || fmt[i] == 'V')
2374 {
2375 int j;
2376 XVEC (x, i) = rtvec_alloc (XVECLEN (x, i));
2377 for (j = 0; j < XVECLEN (x, i); j++)
2378 XVECEXP (x, i, j)
2379 = cleanup_auto_inc_dec (XVECEXP (src, i, j), mem_mode);
2380 }
2381
2382 return x;
2383 }
2384 #endif
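/* Illustrative sketch only, assuming SImode is four bytes wide:
   cleanup_auto_inc_dec rewrites

       (mem:SI (post_inc:SI (reg:SI 100)))  into  (mem:SI (reg:SI 100))
       (mem:SI (pre_inc:SI (reg:SI 100)))   into
           (mem:SI (plus:SI (reg:SI 100) (const_int 4)))

   so the address is still correct but the register is no longer
   modified as a side effect.  */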
2385
2386 /* Auxiliary data structure for propagate_for_debug_stmt. */
2387
2388 struct rtx_subst_pair
2389 {
2390 rtx to;
2391 bool adjusted;
2392 };
2393
2394 /* DATA points to an rtx_subst_pair. Return the value that should be
2395 substituted. */
2396
2397 static rtx
2398 propagate_for_debug_subst (rtx from, const_rtx old_rtx, void *data)
2399 {
2400 struct rtx_subst_pair *pair = (struct rtx_subst_pair *)data;
2401
2402 if (!rtx_equal_p (from, old_rtx))
2403 return NULL_RTX;
2404 if (!pair->adjusted)
2405 {
2406 pair->adjusted = true;
2407 #ifdef AUTO_INC_DEC
2408 pair->to = cleanup_auto_inc_dec (pair->to, VOIDmode);
2409 #else
2410 pair->to = copy_rtx (pair->to);
2411 #endif
2412 pair->to = make_compound_operation (pair->to, SET);
2413 return pair->to;
2414 }
2415 return copy_rtx (pair->to);
2416 }
2417
2418 /* Replace all the occurrences of DEST with SRC in DEBUG_INSNs between INSN
2419 and LAST. */
2420
2421 static void
2422 propagate_for_debug (rtx insn, rtx last, rtx dest, rtx src)
2423 {
2424 rtx next, loc;
2425
2426 struct rtx_subst_pair p;
2427 p.to = src;
2428 p.adjusted = false;
2429
2430 next = NEXT_INSN (insn);
2431 while (next != last)
2432 {
2433 insn = next;
2434 next = NEXT_INSN (insn);
2435 if (DEBUG_INSN_P (insn))
2436 {
2437 loc = simplify_replace_fn_rtx (INSN_VAR_LOCATION_LOC (insn),
2438 dest, propagate_for_debug_subst, &p);
2439 if (loc == INSN_VAR_LOCATION_LOC (insn))
2440 continue;
2441 INSN_VAR_LOCATION_LOC (insn) = loc;
2442 df_insn_rescan (insn);
2443 }
2444 }
2445 }
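/* Illustrative sketch only: if (reg:SI 100) is being replaced by
   (plus:SI (reg:SI 101) (const_int 1)), a DEBUG_INSN between INSN and
   LAST whose location mentions (reg:SI 100) is rewritten to refer to
   the PLUS expression instead, so var-tracking still sees the value
   after the setting insn has been combined away.  */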
2446
2447 /* Delete the unconditional jump INSN and adjust the CFG correspondingly.
2448 Note that the INSN should be deleted *after* removing dead edges, so
2449 that the kept edge is the fallthrough edge for a (set (pc) (pc))
2450 but not for a (set (pc) (label_ref FOO)). */
2451
2452 static void
2453 update_cfg_for_uncondjump (rtx insn)
2454 {
2455 basic_block bb = BLOCK_FOR_INSN (insn);
2456 bool at_end = (BB_END (bb) == insn);
2457
2458 if (at_end)
2459 purge_dead_edges (bb);
2460
2461 delete_insn (insn);
2462 if (at_end && EDGE_COUNT (bb->succs) == 1)
2463 {
2464 rtx insn;
2465
2466 single_succ_edge (bb)->flags |= EDGE_FALLTHRU;
2467
2468 /* Remove barriers from the footer if there are any. */
2469 for (insn = bb->il.rtl->footer; insn; insn = NEXT_INSN (insn))
2470 if (BARRIER_P (insn))
2471 {
2472 if (PREV_INSN (insn))
2473 NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
2474 else
2475 bb->il.rtl->footer = NEXT_INSN (insn);
2476 if (NEXT_INSN (insn))
2477 PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
2478 }
2479 else if (LABEL_P (insn))
2480 break;
2481 }
2482 }
2483
2484 /* Try to combine the insns I0, I1 and I2 into I3.
2485 Here I0, I1 and I2 appear earlier than I3.
2486 I0 and I1 can be zero; then we combine just I2 into I3, or I1 and I2 into
2487 I3.
2488
2489 If we are combining more than two insns and the resulting insn is not
2490 recognized, try splitting it into two insns. If that happens, I2 and I3
2491 are retained and I1/I0 are pseudo-deleted by turning them into a NOTE.
2492 Otherwise, I0, I1 and I2 are pseudo-deleted.
2493
2494 Return 0 if the combination does not work. Then nothing is changed.
2495 If we did the combination, return the insn at which combine should
2496 resume scanning.
2497
2498 Set NEW_DIRECT_JUMP_P to a nonzero value if try_combine creates a
2499 new direct jump instruction. */
2500
2501 static rtx
2502 try_combine (rtx i3, rtx i2, rtx i1, rtx i0, int *new_direct_jump_p)
2503 {
2504 /* New patterns for I3 and I2, respectively. */
2505 rtx newpat, newi2pat = 0;
2506 rtvec newpat_vec_with_clobbers = 0;
2507 int substed_i2 = 0, substed_i1 = 0, substed_i0 = 0;
2508 /* Indicates need to preserve SET in I0, I1 or I2 in I3 if it is not
2509 dead. */
2510 int added_sets_0, added_sets_1, added_sets_2;
2511 /* Total number of SETs to put into I3. */
2512 int total_sets;
2513 /* Nonzero if I2's or I1's body now appears in I3. */
2514 int i2_is_used = 0, i1_is_used = 0;
2515 /* INSN_CODEs for new I3, new I2, and user of condition code. */
2516 int insn_code_number, i2_code_number = 0, other_code_number = 0;
2517 /* Contains I3 if the destination of I3 is used in its source, which means
2518 that the old life of I3 is being killed. If that usage is placed into
2519 I2 and not in I3, a REG_DEAD note must be made. */
2520 rtx i3dest_killed = 0;
2521 /* SET_DEST and SET_SRC of I2, I1 and I0. */
2522 rtx i2dest = 0, i2src = 0, i1dest = 0, i1src = 0, i0dest = 0, i0src = 0;
2523 /* Copy of SET_SRC of I1, if needed. */
2524 rtx i1src_copy = 0;
2525 /* Set if I2DEST was reused as a scratch register. */
2526 bool i2scratch = false;
2527 /* The PATTERNs of I0, I1, and I2, or a copy of them in certain cases. */
2528 rtx i0pat = 0, i1pat = 0, i2pat = 0;
2529 /* Indicates if I2DEST or I1DEST is in I2SRC or I1SRC. */
2530 int i2dest_in_i2src = 0, i1dest_in_i1src = 0, i2dest_in_i1src = 0;
2531 int i0dest_in_i0src = 0, i1dest_in_i0src = 0, i2dest_in_i0src = 0;
2532 int i2dest_killed = 0, i1dest_killed = 0, i0dest_killed = 0;
2533 int i1_feeds_i2_n = 0, i0_feeds_i2_n = 0, i0_feeds_i1_n = 0;
2534 /* Notes that must be added to REG_NOTES in I3 and I2. */
2535 rtx new_i3_notes, new_i2_notes;
2536 /* Notes that we substituted I3 into I2 instead of the normal case. */
2537 int i3_subst_into_i2 = 0;
2538 /* Notes that I1, I2 or I3 is a MULT operation. */
2539 int have_mult = 0;
2540 int swap_i2i3 = 0;
2541 int changed_i3_dest = 0;
2542
2543 int maxreg;
2544 rtx temp;
2545 rtx link;
2546 rtx other_pat = 0;
2547 rtx new_other_notes;
2548 int i;
2549
2550 /* Only try four-insn combinations when there's high likelihood of
2551 success. Look for simple insns, such as loads of constants or
2552 binary operations involving a constant. */
2553 if (i0)
2554 {
2555 int i;
2556 int ngood = 0;
2557 int nshift = 0;
2558
2559 if (!flag_expensive_optimizations)
2560 return 0;
2561
2562 for (i = 0; i < 4; i++)
2563 {
2564 rtx insn = i == 0 ? i0 : i == 1 ? i1 : i == 2 ? i2 : i3;
2565 rtx set = single_set (insn);
2566 rtx src;
2567 if (!set)
2568 continue;
2569 src = SET_SRC (set);
2570 if (CONSTANT_P (src))
2571 {
2572 ngood += 2;
2573 break;
2574 }
2575 else if (BINARY_P (src) && CONSTANT_P (XEXP (src, 1)))
2576 ngood++;
2577 else if (GET_CODE (src) == ASHIFT || GET_CODE (src) == ASHIFTRT
2578 || GET_CODE (src) == LSHIFTRT)
2579 nshift++;
2580 }
2581 if (ngood < 2 && nshift < 2)
2582 return 0;
2583 }
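  /* Illustrative sketch only: a constant load such as
         (set (reg:SI 100) (const_int 42))
     counts double towards NGOOD above, while
         (set (reg:SI 101) (ashift:SI (reg:SI 102) (const_int 3)))
     only bumps NSHIFT; the four-insn combination is attempted only when
     ngood >= 2 or nshift >= 2.  */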
2584
2585 /* Exit early if one of the insns involved can't be used for
2586 combinations. */
2587 if (cant_combine_insn_p (i3)
2588 || cant_combine_insn_p (i2)
2589 || (i1 && cant_combine_insn_p (i1))
2590 || (i0 && cant_combine_insn_p (i0))
2591 || likely_spilled_retval_p (i3))
2592 return 0;
2593
2594 combine_attempts++;
2595 undobuf.other_insn = 0;
2596
2597 /* Reset the hard register usage information. */
2598 CLEAR_HARD_REG_SET (newpat_used_regs);
2599
2600 if (dump_file && (dump_flags & TDF_DETAILS))
2601 {
2602 if (i0)
2603 fprintf (dump_file, "\nTrying %d, %d, %d -> %d:\n",
2604 INSN_UID (i0), INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
2605 else if (i1)
2606 fprintf (dump_file, "\nTrying %d, %d -> %d:\n",
2607 INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
2608 else
2609 fprintf (dump_file, "\nTrying %d -> %d:\n",
2610 INSN_UID (i2), INSN_UID (i3));
2611 }
2612
2613 /* If multiple insns feed into one of I2 or I3, they can be in any
2614 order. To simplify the code below, reorder them in sequence. */
2615 if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i2))
2616 temp = i2, i2 = i0, i0 = temp;
2617 if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i1))
2618 temp = i1, i1 = i0, i0 = temp;
2619 if (i1 && DF_INSN_LUID (i1) > DF_INSN_LUID (i2))
2620 temp = i1, i1 = i2, i2 = temp;
2621
2622 added_links_insn = 0;
2623
2624 /* First check for one important special case that the code below will
2625 not handle. Namely, the case where I1 is zero, I2 is a PARALLEL
2626 and I3 is a SET whose SET_SRC is a SET_DEST in I2. In that case,
2627 we may be able to replace that destination with the destination of I3.
2628 This occurs in the common code where we compute both a quotient and
2629 remainder into a structure, in which case we want to do the computation
2630 directly into the structure to avoid register-register copies.
2631
2632 Note that this case handles both multiple sets in I2 and also cases
2633 where I2 has a number of CLOBBERs inside the PARALLEL.
2634
2635 We make very conservative checks below and only try to handle the
2636 most common cases of this. For example, we only handle the case
2637 where I2 and I3 are adjacent to avoid making difficult register
2638 usage tests. */
2639
2640 if (i1 == 0 && NONJUMP_INSN_P (i3) && GET_CODE (PATTERN (i3)) == SET
2641 && REG_P (SET_SRC (PATTERN (i3)))
2642 && REGNO (SET_SRC (PATTERN (i3))) >= FIRST_PSEUDO_REGISTER
2643 && find_reg_note (i3, REG_DEAD, SET_SRC (PATTERN (i3)))
2644 && GET_CODE (PATTERN (i2)) == PARALLEL
2645 && ! side_effects_p (SET_DEST (PATTERN (i3)))
2646 /* If the dest of I3 is a ZERO_EXTRACT or STRICT_LOW_PART, the code
2647 below would need to check what is inside (and reg_overlap_mentioned_p
2648 doesn't support those codes anyway). Don't allow those destinations;
2649 the resulting insn isn't likely to be recognized anyway. */
2650 && GET_CODE (SET_DEST (PATTERN (i3))) != ZERO_EXTRACT
2651 && GET_CODE (SET_DEST (PATTERN (i3))) != STRICT_LOW_PART
2652 && ! reg_overlap_mentioned_p (SET_SRC (PATTERN (i3)),
2653 SET_DEST (PATTERN (i3)))
2654 && next_active_insn (i2) == i3)
2655 {
2656 rtx p2 = PATTERN (i2);
2657
2658 /* Make sure that the destination of I3,
2659 which we are going to substitute into one output of I2,
2660 is not used within another output of I2. We must avoid making this:
2661 (parallel [(set (mem (reg 69)) ...)
2662 (set (reg 69) ...)])
2663 which is not well-defined as to order of actions.
2664 (Besides, reload can't handle output reloads for this.)
2665
2666 The problem can also happen if the dest of I3 is a memory ref,
2667 if another dest in I2 is an indirect memory ref. */
2668 for (i = 0; i < XVECLEN (p2, 0); i++)
2669 if ((GET_CODE (XVECEXP (p2, 0, i)) == SET
2670 || GET_CODE (XVECEXP (p2, 0, i)) == CLOBBER)
2671 && reg_overlap_mentioned_p (SET_DEST (PATTERN (i3)),
2672 SET_DEST (XVECEXP (p2, 0, i))))
2673 break;
2674
2675 if (i == XVECLEN (p2, 0))
2676 for (i = 0; i < XVECLEN (p2, 0); i++)
2677 if (GET_CODE (XVECEXP (p2, 0, i)) == SET
2678 && SET_DEST (XVECEXP (p2, 0, i)) == SET_SRC (PATTERN (i3)))
2679 {
2680 combine_merges++;
2681
2682 subst_insn = i3;
2683 subst_low_luid = DF_INSN_LUID (i2);
2684
2685 added_sets_2 = added_sets_1 = added_sets_0 = 0;
2686 i2src = SET_SRC (XVECEXP (p2, 0, i));
2687 i2dest = SET_DEST (XVECEXP (p2, 0, i));
2688 i2dest_killed = dead_or_set_p (i2, i2dest);
2689
2690 /* Replace the dest in I2 with our dest and make the resulting
2691 insn the new pattern for I3. Then skip to where we validate
2692 the pattern. Everything was set up above. */
2693 SUBST (SET_DEST (XVECEXP (p2, 0, i)), SET_DEST (PATTERN (i3)));
2694 newpat = p2;
2695 i3_subst_into_i2 = 1;
2696 goto validate_replacement;
2697 }
2698 }
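  /* Illustrative sketch only: the special case above matches e.g.

         (parallel [(set (reg:SI 100) (div:SI (reg:SI 102) (reg:SI 103)))
                    (set (reg:SI 101) (mod:SI (reg:SI 102) (reg:SI 103)))])
         (set (mem:SI (reg:SI 104)) (reg:SI 101))

     assuming I2 and I3 are adjacent and (reg:SI 101) dies in I3; the MOD
     result is then stored directly into the MEM, avoiding the copy
     through (reg:SI 101).  */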
2699
2700 /* If I2 is setting a pseudo to a constant and I3 is setting some
2701 sub-part of it to another constant, merge them by making a new
2702 constant. */
2703 if (i1 == 0
2704 && (temp = single_set (i2)) != 0
2705 && (CONST_INT_P (SET_SRC (temp))
2706 || GET_CODE (SET_SRC (temp)) == CONST_DOUBLE)
2707 && GET_CODE (PATTERN (i3)) == SET
2708 && (CONST_INT_P (SET_SRC (PATTERN (i3)))
2709 || GET_CODE (SET_SRC (PATTERN (i3))) == CONST_DOUBLE)
2710 && reg_subword_p (SET_DEST (PATTERN (i3)), SET_DEST (temp)))
2711 {
2712 rtx dest = SET_DEST (PATTERN (i3));
2713 int offset = -1;
2714 int width = 0;
2715
2716 if (GET_CODE (dest) == ZERO_EXTRACT)
2717 {
2718 if (CONST_INT_P (XEXP (dest, 1))
2719 && CONST_INT_P (XEXP (dest, 2)))
2720 {
2721 width = INTVAL (XEXP (dest, 1));
2722 offset = INTVAL (XEXP (dest, 2));
2723 dest = XEXP (dest, 0);
2724 if (BITS_BIG_ENDIAN)
2725 offset = GET_MODE_BITSIZE (GET_MODE (dest)) - width - offset;
2726 }
2727 }
2728 else
2729 {
2730 if (GET_CODE (dest) == STRICT_LOW_PART)
2731 dest = XEXP (dest, 0);
2732 width = GET_MODE_BITSIZE (GET_MODE (dest));
2733 offset = 0;
2734 }
2735
2736 if (offset >= 0)
2737 {
2738 /* If this is the low part, we're done. */
2739 if (subreg_lowpart_p (dest))
2740 ;
2741 /* Handle the case where inner is twice the size of outer. */
2742 else if (GET_MODE_BITSIZE (GET_MODE (SET_DEST (temp)))
2743 == 2 * GET_MODE_BITSIZE (GET_MODE (dest)))
2744 offset += GET_MODE_BITSIZE (GET_MODE (dest));
2745 /* Otherwise give up for now. */
2746 else
2747 offset = -1;
2748 }
2749
2750 if (offset >= 0
2751 && (GET_MODE_BITSIZE (GET_MODE (SET_DEST (temp)))
2752 <= HOST_BITS_PER_DOUBLE_INT))
2753 {
2754 double_int m, o, i;
2755 rtx inner = SET_SRC (PATTERN (i3));
2756 rtx outer = SET_SRC (temp);
2757
2758 o = rtx_to_double_int (outer);
2759 i = rtx_to_double_int (inner);
2760
2761 m = double_int_mask (width);
2762 i = double_int_and (i, m);
2763 m = double_int_lshift (m, offset, HOST_BITS_PER_DOUBLE_INT, false);
2764 i = double_int_lshift (i, offset, HOST_BITS_PER_DOUBLE_INT, false);
2765 o = double_int_ior (double_int_and_not (o, m), i);
2766
2767 combine_merges++;
2768 subst_insn = i3;
2769 subst_low_luid = DF_INSN_LUID (i2);
2770 added_sets_2 = added_sets_1 = added_sets_0 = 0;
2771 i2dest = SET_DEST (temp);
2772 i2dest_killed = dead_or_set_p (i2, i2dest);
2773
2774 /* Replace the source in I2 with the new constant and make the
2775 resulting insn the new pattern for I3. Then skip to where we
2776 validate the pattern. Everything was set up above. */
2777 SUBST (SET_SRC (temp),
2778 immed_double_int_const (o, GET_MODE (SET_DEST (temp))));
2779
2780 newpat = PATTERN (i2);
2781
2782 /* The dest of I3 has been replaced with the dest of I2. */
2783 changed_i3_dest = 1;
2784 goto validate_replacement;
2785 }
2786 }
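  /* Illustrative sketch only, assuming a little-endian target: with

         (set (reg:DI 100) (const_int 0))                    <- I2
         (set (subreg:SI (reg:DI 100) 0) (const_int 42))     <- I3

     the code above merges the two constants, turning the pattern of I2
     into (set (reg:DI 100) (const_int 42)) and validating that as the
     replacement for I3.  */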
2787
2788 #ifndef HAVE_cc0
2789 /* If we have no I1 and I2 looks like:
2790 (parallel [(set (reg:CC X) (compare:CC OP (const_int 0)))
2791 (set Y OP)])
2792 make up a dummy I1 that is
2793 (set Y OP)
2794 and change I2 to be
2795 (set (reg:CC X) (compare:CC Y (const_int 0)))
2796
2797 (We can ignore any trailing CLOBBERs.)
2798
2799 This undoes a previous combination and allows us to match a branch-and-
2800 decrement insn. */
2801
2802 if (i1 == 0 && GET_CODE (PATTERN (i2)) == PARALLEL
2803 && XVECLEN (PATTERN (i2), 0) >= 2
2804 && GET_CODE (XVECEXP (PATTERN (i2), 0, 0)) == SET
2805 && (GET_MODE_CLASS (GET_MODE (SET_DEST (XVECEXP (PATTERN (i2), 0, 0))))
2806 == MODE_CC)
2807 && GET_CODE (SET_SRC (XVECEXP (PATTERN (i2), 0, 0))) == COMPARE
2808 && XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 1) == const0_rtx
2809 && GET_CODE (XVECEXP (PATTERN (i2), 0, 1)) == SET
2810 && REG_P (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)))
2811 && rtx_equal_p (XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 0),
2812 SET_SRC (XVECEXP (PATTERN (i2), 0, 1))))
2813 {
2814 for (i = XVECLEN (PATTERN (i2), 0) - 1; i >= 2; i--)
2815 if (GET_CODE (XVECEXP (PATTERN (i2), 0, i)) != CLOBBER)
2816 break;
2817
2818 if (i == 1)
2819 {
2820 /* We make I1 with the same INSN_UID as I2. This gives it
2821 the same DF_INSN_LUID for value tracking. Our fake I1 will
2822 never appear in the insn stream so giving it the same INSN_UID
2823 as I2 will not cause a problem. */
2824
2825 i1 = gen_rtx_INSN (VOIDmode, INSN_UID (i2), NULL_RTX, i2,
2826 BLOCK_FOR_INSN (i2), XVECEXP (PATTERN (i2), 0, 1),
2827 INSN_LOCATOR (i2), -1, NULL_RTX);
2828
2829 SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 0));
2830 SUBST (XEXP (SET_SRC (PATTERN (i2)), 0),
2831 SET_DEST (PATTERN (i1)));
2832 }
2833 }
2834 #endif
2835
2836 /* Verify that I2 and I1 are valid for combining. */
2837 if (! can_combine_p (i2, i3, i0, i1, NULL_RTX, NULL_RTX, &i2dest, &i2src)
2838 || (i1 && ! can_combine_p (i1, i3, i0, NULL_RTX, i2, NULL_RTX,
2839 &i1dest, &i1src))
2840 || (i0 && ! can_combine_p (i0, i3, NULL_RTX, NULL_RTX, i1, i2,
2841 &i0dest, &i0src)))
2842 {
2843 undo_all ();
2844 return 0;
2845 }
2846
2847 /* Record whether I2DEST is used in I2SRC and similarly for the other
2848 cases. Knowing this will help in register status updating below. */
2849 i2dest_in_i2src = reg_overlap_mentioned_p (i2dest, i2src);
2850 i1dest_in_i1src = i1 && reg_overlap_mentioned_p (i1dest, i1src);
2851 i2dest_in_i1src = i1 && reg_overlap_mentioned_p (i2dest, i1src);
2852 i0dest_in_i0src = i0 && reg_overlap_mentioned_p (i0dest, i0src);
2853 i1dest_in_i0src = i0 && reg_overlap_mentioned_p (i1dest, i0src);
2854 i2dest_in_i0src = i0 && reg_overlap_mentioned_p (i2dest, i0src);
2855 i2dest_killed = dead_or_set_p (i2, i2dest);
2856 i1dest_killed = i1 && dead_or_set_p (i1, i1dest);
2857 i0dest_killed = i0 && dead_or_set_p (i0, i0dest);
2858
2859 /* For the earlier insns, determine which of the subsequent ones they
2860 feed. */
2861 i1_feeds_i2_n = i1 && insn_a_feeds_b (i1, i2);
2862 i0_feeds_i1_n = i0 && insn_a_feeds_b (i0, i1);
2863 i0_feeds_i2_n = (i0 && (!i0_feeds_i1_n ? insn_a_feeds_b (i0, i2)
2864 : (!reg_overlap_mentioned_p (i1dest, i0dest)
2865 && reg_overlap_mentioned_p (i0dest, i2src))));
2866
2867 /* Ensure that I3's pattern can be the destination of combines. */
2868 if (! combinable_i3pat (i3, &PATTERN (i3), i2dest, i1dest, i0dest,
2869 i1 && i2dest_in_i1src && !i1_feeds_i2_n,
2870 i0 && ((i2dest_in_i0src && !i0_feeds_i2_n)
2871 || (i1dest_in_i0src && !i0_feeds_i1_n)),
2872 &i3dest_killed))
2873 {
2874 undo_all ();
2875 return 0;
2876 }
2877
2878 /* See if any of the insns is a MULT operation. Unless one is, we will
2879 reject a combination that is, since it must be slower. Be conservative
2880 here. */
2881 if (GET_CODE (i2src) == MULT
2882 || (i1 != 0 && GET_CODE (i1src) == MULT)
2883 || (i0 != 0 && GET_CODE (i0src) == MULT)
2884 || (GET_CODE (PATTERN (i3)) == SET
2885 && GET_CODE (SET_SRC (PATTERN (i3))) == MULT))
2886 have_mult = 1;
2887
2888 /* If I3 has an inc, then give up if I1 or I2 uses the reg that is inc'd.
2889 We used to do this EXCEPT in one case: I3 has a post-inc in an
2890 output operand. However, that exception can give rise to insns like
2891 mov r3,(r3)+
2892 which is a famous insn on the PDP-11 where the value of r3 used as the
2893 source was model-dependent. Avoid this sort of thing. */
2894
2895 #if 0
2896 if (!(GET_CODE (PATTERN (i3)) == SET
2897 && REG_P (SET_SRC (PATTERN (i3)))
2898 && MEM_P (SET_DEST (PATTERN (i3)))
2899 && (GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_INC
2900 || GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_DEC)))
2901 /* It's not the exception. */
2902 #endif
2903 #ifdef AUTO_INC_DEC
2904 for (link = REG_NOTES (i3); link; link = XEXP (link, 1))
2905 if (REG_NOTE_KIND (link) == REG_INC
2906 && (reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i2))
2907 || (i1 != 0
2908 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i1)))))
2909 {
2910 undo_all ();
2911 return 0;
2912 }
2913 #endif
2914
2915 /* See if the SETs in I1 or I2 need to be kept around in the merged
2916 instruction: whenever the value set there is still needed past I3.
2917 For the SETs in I2, this is easy: we see if I2DEST dies or is set in I3.
2918
2919 For the SET in I1, we have two cases: If I1 and I2 independently
2920 feed into I3, the set in I1 needs to be kept around if I1DEST dies
2921 or is set in I3. Otherwise (if I1 feeds I2 which feeds I3), the set
2922 in I1 needs to be kept around unless I1DEST dies or is set in either
2923 I2 or I3. The same consideration applies to I0. */
2924
2925 added_sets_2 = !dead_or_set_p (i3, i2dest);
2926
2927 if (i1)
2928 added_sets_1 = !(dead_or_set_p (i3, i1dest)
2929 || (i1_feeds_i2_n && dead_or_set_p (i2, i1dest)));
2930 else
2931 added_sets_1 = 0;
2932
2933 if (i0)
2934 added_sets_0 = !(dead_or_set_p (i3, i0dest)
2935 || (i0_feeds_i2_n && dead_or_set_p (i2, i0dest))
2936 || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest)));
2937 else
2938 added_sets_0 = 0;
2939
2940 /* We are about to copy insns for the case where they need to be kept
2941 around. Check that they can be copied in the merged instruction. */
2942
2943 if (targetm.cannot_copy_insn_p
2944 && ((added_sets_2 && targetm.cannot_copy_insn_p (i2))
2945 || (i1 && added_sets_1 && targetm.cannot_copy_insn_p (i1))
2946 || (i0 && added_sets_0 && targetm.cannot_copy_insn_p (i0))))
2947 {
2948 undo_all ();
2949 return 0;
2950 }
2951
2952 /* If the set in I2 needs to be kept around, we must make a copy of
2953 PATTERN (I2), so that when we substitute I1SRC for I1DEST in
2954 PATTERN (I2), we are only substituting for the original I1DEST, not into
2955 an already-substituted copy. This also prevents making self-referential
2956 rtx. If I2 is a PARALLEL, we just need the piece that assigns I2SRC to
2957 I2DEST. */
2958
2959 if (added_sets_2)
2960 {
2961 if (GET_CODE (PATTERN (i2)) == PARALLEL)
2962 i2pat = gen_rtx_SET (VOIDmode, i2dest, copy_rtx (i2src));
2963 else
2964 i2pat = copy_rtx (PATTERN (i2));
2965 }
2966
2967 if (added_sets_1)
2968 {
2969 if (GET_CODE (PATTERN (i1)) == PARALLEL)
2970 i1pat = gen_rtx_SET (VOIDmode, i1dest, copy_rtx (i1src));
2971 else
2972 i1pat = copy_rtx (PATTERN (i1));
2973 }
2974
2975 if (added_sets_0)
2976 {
2977 if (GET_CODE (PATTERN (i0)) == PARALLEL)
2978 i0pat = gen_rtx_SET (VOIDmode, i0dest, copy_rtx (i0src));
2979 else
2980 i0pat = copy_rtx (PATTERN (i0));
2981 }
2982
2983 combine_merges++;
2984
2985 /* Substitute in the latest insn for the regs set by the earlier ones. */
2986
2987 maxreg = max_reg_num ();
2988
2989 subst_insn = i3;
2990
2991 #ifndef HAVE_cc0
2992 /* Many machines that don't use CC0 have insns that can both perform an
2993 arithmetic operation and set the condition code. These operations will
2994 be represented as a PARALLEL with the first element of the vector
2995 being a COMPARE of an arithmetic operation with the constant zero.
2996 The second element of the vector will set some pseudo to the result
2997 of the same arithmetic operation. If we simplify the COMPARE, we won't
2998 match such a pattern and so will generate an extra insn. Here we test
2999 for this case, where both the comparison and the operation result are
3000 needed, and make the PARALLEL by just replacing I2DEST in I3SRC with
3001 I2SRC. Later we will make the PARALLEL that contains I2. */
3002
3003 if (i1 == 0 && added_sets_2 && GET_CODE (PATTERN (i3)) == SET
3004 && GET_CODE (SET_SRC (PATTERN (i3))) == COMPARE
3005 && XEXP (SET_SRC (PATTERN (i3)), 1) == const0_rtx
3006 && rtx_equal_p (XEXP (SET_SRC (PATTERN (i3)), 0), i2dest))
3007 {
3008 #ifdef SELECT_CC_MODE
3009 rtx *cc_use;
3010 enum machine_mode compare_mode;
3011 #endif
3012
3013 newpat = PATTERN (i3);
3014 SUBST (XEXP (SET_SRC (newpat), 0), i2src);
3015
3016 i2_is_used = 1;
3017
3018 #ifdef SELECT_CC_MODE
3019 /* See if a COMPARE with the operand we substituted in should be done
3020 with the mode that is currently being used. If not, do the same
3021 processing we do in `subst' for a SET; namely, if the destination
3022 is used only once, try to replace it with a register of the proper
3023 mode and also replace the COMPARE. */
3024 if (undobuf.other_insn == 0
3025 && (cc_use = find_single_use (SET_DEST (newpat), i3,
3026 &undobuf.other_insn))
3027 && ((compare_mode = SELECT_CC_MODE (GET_CODE (*cc_use),
3028 i2src, const0_rtx))
3029 != GET_MODE (SET_DEST (newpat))))
3030 {
3031 if (can_change_dest_mode (SET_DEST (newpat), added_sets_2,
3032 compare_mode))
3033 {
3034 unsigned int regno = REGNO (SET_DEST (newpat));
3035 rtx new_dest;
3036
3037 if (regno < FIRST_PSEUDO_REGISTER)
3038 new_dest = gen_rtx_REG (compare_mode, regno);
3039 else
3040 {
3041 SUBST_MODE (regno_reg_rtx[regno], compare_mode);
3042 new_dest = regno_reg_rtx[regno];
3043 }
3044
3045 SUBST (SET_DEST (newpat), new_dest);
3046 SUBST (XEXP (*cc_use, 0), new_dest);
3047 SUBST (SET_SRC (newpat),
3048 gen_rtx_COMPARE (compare_mode, i2src, const0_rtx));
3049 }
3050 else
3051 undobuf.other_insn = 0;
3052 }
3053 #endif
3054 }
3055 else
3056 #endif
3057 {
3058 /* It is possible that the source of I2 or I1 may be performing
3059 an unneeded operation, such as a ZERO_EXTEND of something
3060 that is known to have the high part zero. Handle that case
3061 by letting subst look at the innermost one of them.
3062
3063 Another way to do this would be to have a function that tries
3064 to simplify a single insn instead of merging two or more
3065 insns. We don't do this because of the potential of infinite
3066 loops and because of the potential extra memory required.
3067 However, doing it the way we are is a bit of a kludge and
3068 doesn't catch all cases.
3069
3070 But only do this if -fexpensive-optimizations since it slows
3071 things down and doesn't usually win.
3072
3073 This is not done in the COMPARE case above because the
3074 unmodified I2PAT is used in the PARALLEL and so a pattern
3075 with a modified I2SRC would not match. */
3076
3077 if (flag_expensive_optimizations)
3078 {
3079 /* Pass pc_rtx so no substitutions are done, just
3080 simplifications. */
3081 if (i1)
3082 {
3083 subst_low_luid = DF_INSN_LUID (i1);
3084 i1src = subst (i1src, pc_rtx, pc_rtx, 0, 0);
3085 }
3086 else
3087 {
3088 subst_low_luid = DF_INSN_LUID (i2);
3089 i2src = subst (i2src, pc_rtx, pc_rtx, 0, 0);
3090 }
3091 }
3092
3093 n_occurrences = 0; /* `subst' counts here */
3094 subst_low_luid = DF_INSN_LUID (i2);
3095
3096 /* If I1 feeds into I2 and I1DEST is in I1SRC, we need to make a unique
3097 copy of I2SRC each time we substitute it, in order to avoid creating
3098 self-referential RTL when we will be substituting I1SRC for I1DEST
3099 later. Likewise if I0 feeds into I2, either directly or indirectly
3100 through I1, and I0DEST is in I0SRC. */
3101 newpat = subst (PATTERN (i3), i2dest, i2src, 0,
3102 (i1_feeds_i2_n && i1dest_in_i1src)
3103 || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
3104 && i0dest_in_i0src));
3105 substed_i2 = 1;
3106
3107 /* Record whether I2's body now appears within I3's body. */
3108 i2_is_used = n_occurrences;
3109 }
3110
3111 /* If we already got a failure, don't try to do more. Otherwise, try to
3112 substitute I1 if we have it. */
3113
3114 if (i1 && GET_CODE (newpat) != CLOBBER)
3115 {
3116 /* Check that an autoincrement side-effect on I1 has not been lost.
3117 This happens if I1DEST is mentioned in I2 and dies there, and
3118 has disappeared from the new pattern. */
3119 if ((FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
3120 && i1_feeds_i2_n
3121 && dead_or_set_p (i2, i1dest)
3122 && !reg_overlap_mentioned_p (i1dest, newpat))
3123 /* Before we can do this substitution, we must redo the test done
3124 above (see detailed comments there) that ensures I1DEST isn't
3125 mentioned in any SETs in NEWPAT that are field assignments. */
3126 || !combinable_i3pat (NULL_RTX, &newpat, i1dest, NULL_RTX, NULL_RTX,
3127 0, 0, 0))
3128 {
3129 undo_all ();
3130 return 0;
3131 }
3132
3133 n_occurrences = 0;
3134 subst_low_luid = DF_INSN_LUID (i1);
3135
3136 /* If I0 feeds into I1 and I0DEST is in I0SRC, we need to make a unique
3137 copy of I1SRC each time we substitute it, in order to avoid creating
3138 self-referential RTL when we will be substituting I0SRC for I0DEST
3139 later. */
3140 newpat = subst (newpat, i1dest, i1src, 0,
3141 i0_feeds_i1_n && i0dest_in_i0src);
3142 substed_i1 = 1;
3143
3144 /* Record whether I1's body now appears within I3's body. */
3145 i1_is_used = n_occurrences;
3146 }
3147
3148 /* Likewise for I0 if we have it. */
3149
3150 if (i0 && GET_CODE (newpat) != CLOBBER)
3151 {
3152 if ((FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
3153 && ((i0_feeds_i2_n && dead_or_set_p (i2, i0dest))
3154 || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest)))
3155 && !reg_overlap_mentioned_p (i0dest, newpat))
3156 || !combinable_i3pat (NULL_RTX, &newpat, i0dest, NULL_RTX, NULL_RTX,
3157 0, 0, 0))
3158 {
3159 undo_all ();
3160 return 0;
3161 }
3162
3163 /* If the following substitution will modify I1SRC, make a copy of it
3164 for the case where it is substituted for I1DEST in I2PAT later. */
3165 if (i0_feeds_i1_n && added_sets_2 && i1_feeds_i2_n)
3166 i1src_copy = copy_rtx (i1src);
3167
3168 n_occurrences = 0;
3169 subst_low_luid = DF_INSN_LUID (i0);
3170 newpat = subst (newpat, i0dest, i0src, 0, 0);
3171 substed_i0 = 1;
3172 }
3173
3174 /* Fail if an autoincrement side-effect has been duplicated. Be careful
3175 to count all the ways that I2SRC and I1SRC can be used. */
3176 if ((FIND_REG_INC_NOTE (i2, NULL_RTX) != 0
3177 && i2_is_used + added_sets_2 > 1)
3178 || (i1 != 0 && FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
3179 && (i1_is_used + added_sets_1 + (added_sets_2 && i1_feeds_i2_n)
3180 > 1))
3181 || (i0 != 0 && FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
3182 && (n_occurrences + added_sets_0
3183 + (added_sets_1 && i0_feeds_i1_n)
3184 + (added_sets_2 && i0_feeds_i2_n)
3185 > 1))
3186 /* Fail if we tried to make a new register. */
3187 || max_reg_num () != maxreg
3188 /* Fail if we couldn't do something and have a CLOBBER. */
3189 || GET_CODE (newpat) == CLOBBER
3190 /* Fail if this new pattern is a MULT and we didn't have one before
3191 at the outer level. */
3192 || (GET_CODE (newpat) == SET && GET_CODE (SET_SRC (newpat)) == MULT
3193 && ! have_mult))
3194 {
3195 undo_all ();
3196 return 0;
3197 }
3198
3199 /* If the actions of the earlier insns must be kept
3200 in addition to substituting them into the latest one,
3201 we must make a new PARALLEL for the latest insn
3202 to hold the additional SETs. */
3203
3204 if (added_sets_0 || added_sets_1 || added_sets_2)
3205 {
3206 int extra_sets = added_sets_0 + added_sets_1 + added_sets_2;
3207 combine_extras++;
3208
3209 if (GET_CODE (newpat) == PARALLEL)
3210 {
3211 rtvec old = XVEC (newpat, 0);
3212 total_sets = XVECLEN (newpat, 0) + extra_sets;
3213 newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
3214 memcpy (XVEC (newpat, 0)->elem, &old->elem[0],
3215 sizeof (old->elem[0]) * old->num_elem);
3216 }
3217 else
3218 {
3219 rtx old = newpat;
3220 total_sets = 1 + extra_sets;
3221 newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
3222 XVECEXP (newpat, 0, 0) = old;
3223 }
3224
3225 if (added_sets_0)
3226 XVECEXP (newpat, 0, --total_sets) = i0pat;
3227
3228 if (added_sets_1)
3229 {
3230 rtx t = i1pat;
3231 if (i0_feeds_i1_n)
3232 t = subst (t, i0dest, i0src, 0, 0);
3233
3234 XVECEXP (newpat, 0, --total_sets) = t;
3235 }
3236 if (added_sets_2)
3237 {
3238 rtx t = i2pat;
3239 if (i1_feeds_i2_n)
3240 t = subst (t, i1dest, i1src_copy ? i1src_copy : i1src, 0,
3241 i0_feeds_i1_n && i0dest_in_i0src);
3242 if ((i0_feeds_i1_n && i1_feeds_i2_n) || i0_feeds_i2_n)
3243 t = subst (t, i0dest, i0src, 0, 0);
3244
3245 XVECEXP (newpat, 0, --total_sets) = t;
3246 }
3247 }
3248
3249 validate_replacement:
3250
3251 /* Note which hard regs this insn has as inputs. */
3252 mark_used_regs_combine (newpat);
3253
3254 /* If recog_for_combine fails, it strips existing clobbers. If we'll
3255 consider splitting this pattern, we might need these clobbers. */
3256 if (i1 && GET_CODE (newpat) == PARALLEL
3257 && GET_CODE (XVECEXP (newpat, 0, XVECLEN (newpat, 0) - 1)) == CLOBBER)
3258 {
3259 int len = XVECLEN (newpat, 0);
3260
3261 newpat_vec_with_clobbers = rtvec_alloc (len);
3262 for (i = 0; i < len; i++)
3263 RTVEC_ELT (newpat_vec_with_clobbers, i) = XVECEXP (newpat, 0, i);
3264 }
3265
3266 /* Is the result of combination a valid instruction? */
3267 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3268
3269 /* If the result isn't valid, see if it is a PARALLEL of two SETs where
3270 the second SET's destination is a register that is unused and isn't
3271 marked as an instruction that might trap in an EH region. In that case,
3272 we just need the first SET. This can occur when simplifying a divmod
3273 insn. We *must* test for this case here because the code below that
3274 splits two independent SETs doesn't handle this case correctly when it
3275 updates the register status.
3276
3277 It's pointless doing this if we originally had two sets, one from
3278 i3, and one from i2. Combining then splitting the parallel results
3279 in the original i2 again plus an invalid insn (which we delete).
3280 The net effect is only to move instructions around, which makes
3281 debug info less accurate.
3282
3283 Also check the case where the first SET's destination is unused.
3284 That would not cause incorrect code, but does cause an unneeded
3285 insn to remain. */
3286
3287 if (insn_code_number < 0
3288 && !(added_sets_2 && i1 == 0)
3289 && GET_CODE (newpat) == PARALLEL
3290 && XVECLEN (newpat, 0) == 2
3291 && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
3292 && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
3293 && asm_noperands (newpat) < 0)
3294 {
3295 rtx set0 = XVECEXP (newpat, 0, 0);
3296 rtx set1 = XVECEXP (newpat, 0, 1);
3297
3298 if (((REG_P (SET_DEST (set1))
3299 && find_reg_note (i3, REG_UNUSED, SET_DEST (set1)))
3300 || (GET_CODE (SET_DEST (set1)) == SUBREG
3301 && find_reg_note (i3, REG_UNUSED, SUBREG_REG (SET_DEST (set1)))))
3302 && insn_nothrow_p (i3)
3303 && !side_effects_p (SET_SRC (set1)))
3304 {
3305 newpat = set0;
3306 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3307 }
3308
3309 else if (((REG_P (SET_DEST (set0))
3310 && find_reg_note (i3, REG_UNUSED, SET_DEST (set0)))
3311 || (GET_CODE (SET_DEST (set0)) == SUBREG
3312 && find_reg_note (i3, REG_UNUSED,
3313 SUBREG_REG (SET_DEST (set0)))))
3314 && insn_nothrow_p (i3)
3315 && !side_effects_p (SET_SRC (set0)))
3316 {
3317 newpat = set1;
3318 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3319
3320 if (insn_code_number >= 0)
3321 changed_i3_dest = 1;
3322 }
3323 }
3324
3325 /* If we were combining three insns and the result is a simple SET
3326 with no ASM_OPERANDS that wasn't recognized, try to split it into two
3327 insns. There are two ways to do this. It can be split using a
3328 machine-specific method (like when you have an addition of a large
3329 constant) or by combine in the function find_split_point. */
3330
3331 if (i1 && insn_code_number < 0 && GET_CODE (newpat) == SET
3332 && asm_noperands (newpat) < 0)
3333 {
3334 rtx parallel, m_split, *split;
3335
3336 /* See if the MD file can split NEWPAT. If it can't, see if letting it
3337 use I2DEST as a scratch register will help. In the latter case,
3338 convert I2DEST to the mode of the source of NEWPAT if we can. */
3339
3340 m_split = combine_split_insns (newpat, i3);
3341
3342 /* We can only use I2DEST as a scratch reg if it doesn't overlap any
3343 inputs of NEWPAT. */
3344
3345 /* ??? If I2DEST is not safe, and I1DEST exists, then it would be
3346 possible to try that as a scratch reg. This would require adding
3347 more code to make it work though. */
3348
3349 if (m_split == 0 && ! reg_overlap_mentioned_p (i2dest, newpat))
3350 {
3351 enum machine_mode new_mode = GET_MODE (SET_DEST (newpat));
3352
3353 /* First try to split using the original register as a
3354 scratch register. */
3355 parallel = gen_rtx_PARALLEL (VOIDmode,
3356 gen_rtvec (2, newpat,
3357 gen_rtx_CLOBBER (VOIDmode,
3358 i2dest)));
3359 m_split = combine_split_insns (parallel, i3);
3360
3361 /* If that didn't work, try changing the mode of I2DEST if
3362 we can. */
3363 if (m_split == 0
3364 && new_mode != GET_MODE (i2dest)
3365 && new_mode != VOIDmode
3366 && can_change_dest_mode (i2dest, added_sets_2, new_mode))
3367 {
3368 enum machine_mode old_mode = GET_MODE (i2dest);
3369 rtx ni2dest;
3370
3371 if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
3372 ni2dest = gen_rtx_REG (new_mode, REGNO (i2dest));
3373 else
3374 {
3375 SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], new_mode);
3376 ni2dest = regno_reg_rtx[REGNO (i2dest)];
3377 }
3378
3379 parallel = (gen_rtx_PARALLEL
3380 (VOIDmode,
3381 gen_rtvec (2, newpat,
3382 gen_rtx_CLOBBER (VOIDmode,
3383 ni2dest))));
3384 m_split = combine_split_insns (parallel, i3);
3385
3386 if (m_split == 0
3387 && REGNO (i2dest) >= FIRST_PSEUDO_REGISTER)
3388 {
3389 struct undo *buf;
3390
3391 adjust_reg_mode (regno_reg_rtx[REGNO (i2dest)], old_mode);
3392 buf = undobuf.undos;
3393 undobuf.undos = buf->next;
3394 buf->next = undobuf.frees;
3395 undobuf.frees = buf;
3396 }
3397 }
3398
3399 i2scratch = m_split != 0;
3400 }
3401
3402 /* If recog_for_combine has discarded clobbers, try to use them
3403 again for the split. */
3404 if (m_split == 0 && newpat_vec_with_clobbers)
3405 {
3406 parallel = gen_rtx_PARALLEL (VOIDmode, newpat_vec_with_clobbers);
3407 m_split = combine_split_insns (parallel, i3);
3408 }
3409
3410 if (m_split && NEXT_INSN (m_split) == NULL_RTX)
3411 {
3412 m_split = PATTERN (m_split);
3413 insn_code_number = recog_for_combine (&m_split, i3, &new_i3_notes);
3414 if (insn_code_number >= 0)
3415 newpat = m_split;
3416 }
3417 else if (m_split && NEXT_INSN (NEXT_INSN (m_split)) == NULL_RTX
3418 && (next_real_insn (i2) == i3
3419 || ! use_crosses_set_p (PATTERN (m_split), DF_INSN_LUID (i2))))
3420 {
3421 rtx i2set, i3set;
3422 rtx newi3pat = PATTERN (NEXT_INSN (m_split));
3423 newi2pat = PATTERN (m_split);
3424
3425 i3set = single_set (NEXT_INSN (m_split));
3426 i2set = single_set (m_split);
3427
3428 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3429
3430 /* If I2 or I3 has multiple SETs, we won't know how to track
3431 register status, so don't use these insns. If I2's destination
3432 is used between I2 and I3, we also can't use these insns. */
3433
3434 if (i2_code_number >= 0 && i2set && i3set
3435 && (next_real_insn (i2) == i3
3436 || ! reg_used_between_p (SET_DEST (i2set), i2, i3)))
3437 insn_code_number = recog_for_combine (&newi3pat, i3,
3438 &new_i3_notes);
3439 if (insn_code_number >= 0)
3440 newpat = newi3pat;
3441
3442 /* It is possible that both insns now set the destination of I3.
3443 If so, we must show an extra use of it. */
3444
3445 if (insn_code_number >= 0)
3446 {
3447 rtx new_i3_dest = SET_DEST (i3set);
3448 rtx new_i2_dest = SET_DEST (i2set);
3449
3450 while (GET_CODE (new_i3_dest) == ZERO_EXTRACT
3451 || GET_CODE (new_i3_dest) == STRICT_LOW_PART
3452 || GET_CODE (new_i3_dest) == SUBREG)
3453 new_i3_dest = XEXP (new_i3_dest, 0);
3454
3455 while (GET_CODE (new_i2_dest) == ZERO_EXTRACT
3456 || GET_CODE (new_i2_dest) == STRICT_LOW_PART
3457 || GET_CODE (new_i2_dest) == SUBREG)
3458 new_i2_dest = XEXP (new_i2_dest, 0);
3459
3460 if (REG_P (new_i3_dest)
3461 && REG_P (new_i2_dest)
3462 && REGNO (new_i3_dest) == REGNO (new_i2_dest))
3463 INC_REG_N_SETS (REGNO (new_i2_dest), 1);
3464 }
3465 }
3466
3467 /* If we can split it and use I2DEST, go ahead and see if that
3468 helps things be recognized. Verify that none of the registers
3469 are set between I2 and I3. */
3470 if (insn_code_number < 0
3471 && (split = find_split_point (&newpat, i3, false)) != 0
3472 #ifdef HAVE_cc0
3473 && REG_P (i2dest)
3474 #endif
3475 /* We need I2DEST in the proper mode. If it is a hard register
3476 or the only use of a pseudo, we can change its mode.
3477 Make sure we don't change a hard register to have a mode that
3478 isn't valid for it, or change the number of registers. */
3479 && (GET_MODE (*split) == GET_MODE (i2dest)
3480 || GET_MODE (*split) == VOIDmode
3481 || can_change_dest_mode (i2dest, added_sets_2,
3482 GET_MODE (*split)))
3483 && (next_real_insn (i2) == i3
3484 || ! use_crosses_set_p (*split, DF_INSN_LUID (i2)))
3485 /* We can't overwrite I2DEST if its value is still used by
3486 NEWPAT. */
3487 && ! reg_referenced_p (i2dest, newpat))
3488 {
3489 rtx newdest = i2dest;
3490 enum rtx_code split_code = GET_CODE (*split);
3491 enum machine_mode split_mode = GET_MODE (*split);
3492 bool subst_done = false;
3493 newi2pat = NULL_RTX;
3494
3495 i2scratch = true;
3496
3497 /* *SPLIT may be part of I2SRC, so make sure we have the
3498 original expression around for later debug processing.
3499 We should not need I2SRC any more in other cases. */
3500 if (MAY_HAVE_DEBUG_INSNS)
3501 i2src = copy_rtx (i2src);
3502 else
3503 i2src = NULL;
3504
3505 /* Get NEWDEST as a register in the proper mode. We have already
3506 validated that we can do this. */
3507 if (GET_MODE (i2dest) != split_mode && split_mode != VOIDmode)
3508 {
3509 if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
3510 newdest = gen_rtx_REG (split_mode, REGNO (i2dest));
3511 else
3512 {
3513 SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], split_mode);
3514 newdest = regno_reg_rtx[REGNO (i2dest)];
3515 }
3516 }
3517
3518 /* If *SPLIT is a (mult FOO (const_int pow2)), convert it to
3519 an ASHIFT. This can occur if it was inside a PLUS and hence
3520 appeared to be a memory address. This is a kludge. */
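/* As an illustration (operands assumed): a *SPLIT of
   (mult (reg X) (const_int 8)) has exact_log2 equal to 3 and is
   rewritten here as (ashift (reg X) (const_int 3)).  */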
3521 if (split_code == MULT
3522 && CONST_INT_P (XEXP (*split, 1))
3523 && INTVAL (XEXP (*split, 1)) > 0
3524 && (i = exact_log2 (UINTVAL (XEXP (*split, 1)))) >= 0)
3525 {
3526 SUBST (*split, gen_rtx_ASHIFT (split_mode,
3527 XEXP (*split, 0), GEN_INT (i)));
3528 /* Update split_code because we may not have a multiply
3529 anymore. */
3530 split_code = GET_CODE (*split);
3531 }
3532
3533 #ifdef INSN_SCHEDULING
3534 /* If *SPLIT is a paradoxical SUBREG, when we split it, it should
3535 be written as a ZERO_EXTEND. */
3536 if (split_code == SUBREG && MEM_P (SUBREG_REG (*split)))
3537 {
3538 #ifdef LOAD_EXTEND_OP
3539 /* Or as a SIGN_EXTEND if LOAD_EXTEND_OP says that that's
3540 what it really is. */
3541 if (LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (*split)))
3542 == SIGN_EXTEND)
3543 SUBST (*split, gen_rtx_SIGN_EXTEND (split_mode,
3544 SUBREG_REG (*split)));
3545 else
3546 #endif
3547 SUBST (*split, gen_rtx_ZERO_EXTEND (split_mode,
3548 SUBREG_REG (*split)));
3549 }
3550 #endif
3551
3552 /* Attempt to split binary operators using arithmetic identities. */
3553 if (BINARY_P (SET_SRC (newpat))
3554 && split_mode == GET_MODE (SET_SRC (newpat))
3555 && ! side_effects_p (SET_SRC (newpat)))
3556 {
3557 rtx setsrc = SET_SRC (newpat);
3558 enum machine_mode mode = GET_MODE (setsrc);
3559 enum rtx_code code = GET_CODE (setsrc);
3560 rtx src_op0 = XEXP (setsrc, 0);
3561 rtx src_op1 = XEXP (setsrc, 1);
3562
3563 /* Split "X = Y op Y" as "Z = Y; X = Z op Z". */
3564 if (rtx_equal_p (src_op0, src_op1))
3565 {
3566 newi2pat = gen_rtx_SET (VOIDmode, newdest, src_op0);
3567 SUBST (XEXP (setsrc, 0), newdest);
3568 SUBST (XEXP (setsrc, 1), newdest);
3569 subst_done = true;
3570 }
3571 /* Split "((P op Q) op R) op S" where op is PLUS or MULT. */
3572 else if ((code == PLUS || code == MULT)
3573 && GET_CODE (src_op0) == code
3574 && GET_CODE (XEXP (src_op0, 0)) == code
3575 && (INTEGRAL_MODE_P (mode)
3576 || (FLOAT_MODE_P (mode)
3577 && flag_unsafe_math_optimizations)))
3578 {
3579 rtx p = XEXP (XEXP (src_op0, 0), 0);
3580 rtx q = XEXP (XEXP (src_op0, 0), 1);
3581 rtx r = XEXP (src_op0, 1);
3582 rtx s = src_op1;
3583
3584 /* Split both "((X op Y) op X) op Y" and
3585 "((X op Y) op Y) op X" as "T op T" where T is
3586 "X op Y". */
3587 if ((rtx_equal_p (p,r) && rtx_equal_p (q,s))
3588 || (rtx_equal_p (p,s) && rtx_equal_p (q,r)))
3589 {
3590 newi2pat = gen_rtx_SET (VOIDmode, newdest,
3591 XEXP (src_op0, 0));
3592 SUBST (XEXP (setsrc, 0), newdest);
3593 SUBST (XEXP (setsrc, 1), newdest);
3594 subst_done = true;
3595 }
3596 /* Split "((X op X) op Y) op Y)" as "T op T" where
3597 T is "X op Y". */
3598 else if (rtx_equal_p (p,q) && rtx_equal_p (r,s))
3599 {
3600 rtx tmp = simplify_gen_binary (code, mode, p, r);
3601 newi2pat = gen_rtx_SET (VOIDmode, newdest, tmp);
3602 SUBST (XEXP (setsrc, 0), newdest);
3603 SUBST (XEXP (setsrc, 1), newdest);
3604 subst_done = true;
3605 }
3606 }
3607 }
3608
3609 if (!subst_done)
3610 {
3611 newi2pat = gen_rtx_SET (VOIDmode, newdest, *split);
3612 SUBST (*split, newdest);
3613 }
3614
3615 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3616
3617 /* recog_for_combine might have added CLOBBERs to newi2pat.
3618 Make sure NEWPAT does not depend on the clobbered regs. */
3619 if (GET_CODE (newi2pat) == PARALLEL)
3620 for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
3621 if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
3622 {
3623 rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
3624 if (reg_overlap_mentioned_p (reg, newpat))
3625 {
3626 undo_all ();
3627 return 0;
3628 }
3629 }
3630
3631 /* If the split point was a MULT and we didn't have one before,
3632 don't use one now. */
3633 if (i2_code_number >= 0 && ! (split_code == MULT && ! have_mult))
3634 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3635 }
3636 }
3637
3638 /* Check for a case where we loaded from memory in a narrow mode and
3639 then sign extended it, but we need both registers. In that case,
3640 we have a PARALLEL with both loads from the same memory location.
3641 We can split this into a load from memory followed by a register-register
3642 copy. This saves at least one insn, more if register allocation can
3643 eliminate the copy.
3644
3645 We cannot do this if the destination of the first assignment is a
3646 condition code register or cc0. We eliminate this case by making sure
3647 the SET_DEST and SET_SRC have the same mode.
3648
3649 We cannot do this if the destination of the second assignment is
3650 a register that we have already assumed is zero-extended. Similarly
3651 for a SUBREG of such a register. */
3652
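/* Sketch with assumed operands: a PARALLEL of the form
     [(set (reg:SI A) (sign_extend:SI (mem:HI M)))
      (set (reg:HI B) (mem:HI M))]
   keeps the first SET as NEWI2PAT and turns the second into
   (set (reg:HI B) <lowpart of (reg:SI A)>), typically a SUBREG,
   so only one memory access remains.  */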
3653 else if (i1 && insn_code_number < 0 && asm_noperands (newpat) < 0
3654 && GET_CODE (newpat) == PARALLEL
3655 && XVECLEN (newpat, 0) == 2
3656 && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
3657 && GET_CODE (SET_SRC (XVECEXP (newpat, 0, 0))) == SIGN_EXTEND
3658 && (GET_MODE (SET_DEST (XVECEXP (newpat, 0, 0)))
3659 == GET_MODE (SET_SRC (XVECEXP (newpat, 0, 0))))
3660 && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
3661 && rtx_equal_p (SET_SRC (XVECEXP (newpat, 0, 1)),
3662 XEXP (SET_SRC (XVECEXP (newpat, 0, 0)), 0))
3663 && ! use_crosses_set_p (SET_SRC (XVECEXP (newpat, 0, 1)),
3664 DF_INSN_LUID (i2))
3665 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
3666 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
3667 && ! (temp = SET_DEST (XVECEXP (newpat, 0, 1)),
3668 (REG_P (temp)
3669 && VEC_index (reg_stat_type, reg_stat,
3670 REGNO (temp))->nonzero_bits != 0
3671 && GET_MODE_BITSIZE (GET_MODE (temp)) < BITS_PER_WORD
3672 && GET_MODE_BITSIZE (GET_MODE (temp)) < HOST_BITS_PER_INT
3673 && (VEC_index (reg_stat_type, reg_stat,
3674 REGNO (temp))->nonzero_bits
3675 != GET_MODE_MASK (word_mode))))
3676 && ! (GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) == SUBREG
3677 && (temp = SUBREG_REG (SET_DEST (XVECEXP (newpat, 0, 1))),
3678 (REG_P (temp)
3679 && VEC_index (reg_stat_type, reg_stat,
3680 REGNO (temp))->nonzero_bits != 0
3681 && GET_MODE_BITSIZE (GET_MODE (temp)) < BITS_PER_WORD
3682 && GET_MODE_BITSIZE (GET_MODE (temp)) < HOST_BITS_PER_INT
3683 && (VEC_index (reg_stat_type, reg_stat,
3684 REGNO (temp))->nonzero_bits
3685 != GET_MODE_MASK (word_mode)))))
3686 && ! reg_overlap_mentioned_p (SET_DEST (XVECEXP (newpat, 0, 1)),
3687 SET_SRC (XVECEXP (newpat, 0, 1)))
3688 && ! find_reg_note (i3, REG_UNUSED,
3689 SET_DEST (XVECEXP (newpat, 0, 0))))
3690 {
3691 rtx ni2dest;
3692
3693 newi2pat = XVECEXP (newpat, 0, 0);
3694 ni2dest = SET_DEST (XVECEXP (newpat, 0, 0));
3695 newpat = XVECEXP (newpat, 0, 1);
3696 SUBST (SET_SRC (newpat),
3697 gen_lowpart (GET_MODE (SET_SRC (newpat)), ni2dest));
3698 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3699
3700 if (i2_code_number >= 0)
3701 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3702
3703 if (insn_code_number >= 0)
3704 swap_i2i3 = 1;
3705 }
3706
3707 /* Similarly, check for a case where we have a PARALLEL of two independent
3708 SETs but we started with three insns. In this case, we can do the sets
3709 as two separate insns. This case occurs when some SET allows two
3710 other insns to combine, but the destination of that SET is still live. */
3711
3712 else if (i1 && insn_code_number < 0 && asm_noperands (newpat) < 0
3713 && GET_CODE (newpat) == PARALLEL
3714 && XVECLEN (newpat, 0) == 2
3715 && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
3716 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != ZERO_EXTRACT
3717 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != STRICT_LOW_PART
3718 && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
3719 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
3720 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
3721 && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 1)),
3722 XVECEXP (newpat, 0, 0))
3723 && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 0)),
3724 XVECEXP (newpat, 0, 1))
3725 && ! (contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 0)))
3726 && contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 1)))))
3727 {
3728 /* Normally, it doesn't matter which of the two is done first,
3729 but the one that references cc0 can't be the second, and
3730 one which uses any regs/memory set in between i2 and i3 can't
3731 be first. */
3732 if (!use_crosses_set_p (SET_SRC (XVECEXP (newpat, 0, 1)),
3733 DF_INSN_LUID (i2))
3734 #ifdef HAVE_cc0
3735 && !reg_referenced_p (cc0_rtx, XVECEXP (newpat, 0, 0))
3736 #endif
3737 )
3738 {
3739 newi2pat = XVECEXP (newpat, 0, 1);
3740 newpat = XVECEXP (newpat, 0, 0);
3741 }
3742 else if (!use_crosses_set_p (SET_SRC (XVECEXP (newpat, 0, 0)),
3743 DF_INSN_LUID (i2))
3744 #ifdef HAVE_cc0
3745 && !reg_referenced_p (cc0_rtx, XVECEXP (newpat, 0, 1))
3746 #endif
3747 )
3748 {
3749 newi2pat = XVECEXP (newpat, 0, 0);
3750 newpat = XVECEXP (newpat, 0, 1);
3751 }
3752 else
3753 {
3754 undo_all ();
3755 return 0;
3756 }
3757
3758 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3759
3760 if (i2_code_number >= 0)
3761 {
3762 /* recog_for_combine might have added CLOBBERs to newi2pat.
3763 Make sure NEWPAT does not depend on the clobbered regs. */
3764 if (GET_CODE (newi2pat) == PARALLEL)
3765 {
3766 for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
3767 if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
3768 {
3769 rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
3770 if (reg_overlap_mentioned_p (reg, newpat))
3771 {
3772 undo_all ();
3773 return 0;
3774 }
3775 }
3776 }
3777
3778 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3779 }
3780 }
3781
3782 /* If it still isn't recognized, fail and change things back the way they
3783 were. */
3784 if ((insn_code_number < 0
3785 /* Is the result a reasonable ASM_OPERANDS? */
3786 && (! check_asm_operands (newpat) || added_sets_1 || added_sets_2)))
3787 {
3788 undo_all ();
3789 return 0;
3790 }
3791
3792 /* If we had to change another insn, make sure it is valid also. */
3793 if (undobuf.other_insn)
3794 {
3795 CLEAR_HARD_REG_SET (newpat_used_regs);
3796
3797 other_pat = PATTERN (undobuf.other_insn);
3798 other_code_number = recog_for_combine (&other_pat, undobuf.other_insn,
3799 &new_other_notes);
3800
3801 if (other_code_number < 0 && ! check_asm_operands (other_pat))
3802 {
3803 undo_all ();
3804 return 0;
3805 }
3806 }
3807
3808 #ifdef HAVE_cc0
3809 /* If I2 is the CC0 setter and I3 is the CC0 user then check whether
3810 they are adjacent to each other or not. */
3811 {
3812 rtx p = prev_nonnote_insn (i3);
3813 if (p && p != i2 && NONJUMP_INSN_P (p) && newi2pat
3814 && sets_cc0_p (newi2pat))
3815 {
3816 undo_all ();
3817 return 0;
3818 }
3819 }
3820 #endif
3821
3822 /* Only allow this combination if insn_rtx_costs reports that the
3823 replacement instructions are cheaper than the originals. */
3824 if (!combine_validate_cost (i0, i1, i2, i3, newpat, newi2pat, other_pat))
3825 {
3826 undo_all ();
3827 return 0;
3828 }
3829
3830 if (MAY_HAVE_DEBUG_INSNS)
3831 {
3832 struct undo *undo;
3833
3834 for (undo = undobuf.undos; undo; undo = undo->next)
3835 if (undo->kind == UNDO_MODE)
3836 {
3837 rtx reg = *undo->where.r;
3838 enum machine_mode new_mode = GET_MODE (reg);
3839 enum machine_mode old_mode = undo->old_contents.m;
3840
3841 /* Temporarily revert mode back. */
3842 adjust_reg_mode (reg, old_mode);
3843
3844 if (reg == i2dest && i2scratch)
3845 {
3846 /* If we used i2dest as a scratch register with a
3847 different mode, substitute it for the original
3848 i2src while its original mode is temporarily
3849 restored, and then clear i2scratch so that we don't
3850 do it again later. */
3851 propagate_for_debug (i2, i3, reg, i2src);
3852 i2scratch = false;
3853 /* Put back the new mode. */
3854 adjust_reg_mode (reg, new_mode);
3855 }
3856 else
3857 {
3858 rtx tempreg = gen_raw_REG (old_mode, REGNO (reg));
3859 rtx first, last;
3860
3861 if (reg == i2dest)
3862 {
3863 first = i2;
3864 last = i3;
3865 }
3866 else
3867 {
3868 first = i3;
3869 last = undobuf.other_insn;
3870 gcc_assert (last);
3871 }
3872
3873 /* We're dealing with a reg that changed mode but not
3874 meaning, so we want to turn it into a subreg for
3875 the new mode. However, because of REG sharing and
3876 because its mode had already changed, we have to do
3877 it in two steps. First, replace any debug uses of
3878 reg, with its original mode temporarily restored,
3879 with this copy we have created; then, replace the
3880 copy with the SUBREG of the original shared reg,
3881 once again changed to the new mode. */
3882 propagate_for_debug (first, last, reg, tempreg);
3883 adjust_reg_mode (reg, new_mode);
3884 propagate_for_debug (first, last, tempreg,
3885 lowpart_subreg (old_mode, reg, new_mode));
3886 }
3887 }
3888 }
3889
3890 /* If we will be able to accept this, we have made a
3891 change to the destination of I3. This requires us to
3892 do a few adjustments. */
3893
3894 if (changed_i3_dest)
3895 {
3896 PATTERN (i3) = newpat;
3897 adjust_for_new_dest (i3);
3898 }
3899
3900 /* We now know that we can do this combination. Merge the insns and
3901 update the status of registers and LOG_LINKS. */
3902
3903 if (undobuf.other_insn)
3904 {
3905 rtx note, next;
3906
3907 PATTERN (undobuf.other_insn) = other_pat;
3908
3909 /* If any of the notes in OTHER_INSN were REG_UNUSED, ensure that they
3910 are still valid. Then add any non-duplicate notes added by
3911 recog_for_combine. */
3912 for (note = REG_NOTES (undobuf.other_insn); note; note = next)
3913 {
3914 next = XEXP (note, 1);
3915
3916 if (REG_NOTE_KIND (note) == REG_UNUSED
3917 && ! reg_set_p (XEXP (note, 0), PATTERN (undobuf.other_insn)))
3918 remove_note (undobuf.other_insn, note);
3919 }
3920
3921 distribute_notes (new_other_notes, undobuf.other_insn,
3922 undobuf.other_insn, NULL_RTX, NULL_RTX, NULL_RTX,
3923 NULL_RTX);
3924 }
3925
3926 if (swap_i2i3)
3927 {
3928 rtx insn;
3929 rtx link;
3930 rtx ni2dest;
3931
3932 /* I3 now uses what used to be its destination and which is now
3933 I2's destination. This requires us to do a few adjustments. */
3934 PATTERN (i3) = newpat;
3935 adjust_for_new_dest (i3);
3936
3937 /* We need a LOG_LINK from I3 to I2. But we used to have one,
3938 so we still will.
3939
3940 However, some later insn might be using I2's dest and have
3941 a LOG_LINK pointing at I3. We must remove this link.
3942 The simplest way to remove the link is to point it at I1,
3943 which we know will be a NOTE. */
3944
3945 /* newi2pat is usually a SET here; however, recog_for_combine might
3946 have added some clobbers. */
3947 if (GET_CODE (newi2pat) == PARALLEL)
3948 ni2dest = SET_DEST (XVECEXP (newi2pat, 0, 0));
3949 else
3950 ni2dest = SET_DEST (newi2pat);
3951
3952 for (insn = NEXT_INSN (i3);
3953 insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR
3954 || insn != BB_HEAD (this_basic_block->next_bb));
3955 insn = NEXT_INSN (insn))
3956 {
3957 if (INSN_P (insn) && reg_referenced_p (ni2dest, PATTERN (insn)))
3958 {
3959 for (link = LOG_LINKS (insn); link;
3960 link = XEXP (link, 1))
3961 if (XEXP (link, 0) == i3)
3962 XEXP (link, 0) = i1;
3963
3964 break;
3965 }
3966 }
3967 }
3968
3969 {
3970 rtx i3notes, i2notes, i1notes = 0, i0notes = 0;
3971 rtx i3links, i2links, i1links = 0, i0links = 0;
3972 rtx midnotes = 0;
3973 int from_luid;
3974 unsigned int regno;
3975 /* Compute which registers we expect to eliminate. newi2pat may be setting
3976 either i3dest or i2dest, so we must check it. Also, i1dest may be the
3977 same as i3dest, in which case newi2pat may be setting i1dest. */
3978 rtx elim_i2 = ((newi2pat && reg_set_p (i2dest, newi2pat))
3979 || i2dest_in_i2src || i2dest_in_i1src || i2dest_in_i0src
3980 || !i2dest_killed
3981 ? 0 : i2dest);
3982 rtx elim_i1 = (i1 == 0 || i1dest_in_i1src || i1dest_in_i0src
3983 || (newi2pat && reg_set_p (i1dest, newi2pat))
3984 || !i1dest_killed
3985 ? 0 : i1dest);
3986 rtx elim_i0 = (i0 == 0 || i0dest_in_i0src
3987 || (newi2pat && reg_set_p (i0dest, newi2pat))
3988 || !i0dest_killed
3989 ? 0 : i0dest);
3990
3991 /* Get the old REG_NOTES and LOG_LINKS from all our insns and
3992 clear them. */
3993 i3notes = REG_NOTES (i3), i3links = LOG_LINKS (i3);
3994 i2notes = REG_NOTES (i2), i2links = LOG_LINKS (i2);
3995 if (i1)
3996 i1notes = REG_NOTES (i1), i1links = LOG_LINKS (i1);
3997 if (i0)
3998 i0notes = REG_NOTES (i0), i0links = LOG_LINKS (i0);
3999
4000 /* Ensure that we do not have something that should not be shared but
4001 occurs multiple times in the new insns. Check this by first
4002 resetting all the `used' flags and then copying anything that is shared. */
4003
4004 reset_used_flags (i3notes);
4005 reset_used_flags (i2notes);
4006 reset_used_flags (i1notes);
4007 reset_used_flags (i0notes);
4008 reset_used_flags (newpat);
4009 reset_used_flags (newi2pat);
4010 if (undobuf.other_insn)
4011 reset_used_flags (PATTERN (undobuf.other_insn));
4012
4013 i3notes = copy_rtx_if_shared (i3notes);
4014 i2notes = copy_rtx_if_shared (i2notes);
4015 i1notes = copy_rtx_if_shared (i1notes);
4016 i0notes = copy_rtx_if_shared (i0notes);
4017 newpat = copy_rtx_if_shared (newpat);
4018 newi2pat = copy_rtx_if_shared (newi2pat);
4019 if (undobuf.other_insn)
4020 reset_used_flags (PATTERN (undobuf.other_insn));
4021
4022 INSN_CODE (i3) = insn_code_number;
4023 PATTERN (i3) = newpat;
4024
4025 if (CALL_P (i3) && CALL_INSN_FUNCTION_USAGE (i3))
4026 {
4027 rtx call_usage = CALL_INSN_FUNCTION_USAGE (i3);
4028
4029 reset_used_flags (call_usage);
4030 call_usage = copy_rtx (call_usage);
4031
4032 if (substed_i2)
4033 {
4034 /* I2SRC must still be meaningful at this point. Some splitting
4035 operations can invalidate I2SRC, but those operations do not
4036 apply to calls. */
4037 gcc_assert (i2src);
4038 replace_rtx (call_usage, i2dest, i2src);
4039 }
4040
4041 if (substed_i1)
4042 replace_rtx (call_usage, i1dest, i1src);
4043 if (substed_i0)
4044 replace_rtx (call_usage, i0dest, i0src);
4045
4046 CALL_INSN_FUNCTION_USAGE (i3) = call_usage;
4047 }
4048
4049 if (undobuf.other_insn)
4050 INSN_CODE (undobuf.other_insn) = other_code_number;
4051
4052 /* We had one special case above where I2 had more than one set and
4053 we replaced a destination of one of those sets with the destination
4054 of I3. In that case, we have to update LOG_LINKS of insns later
4055 in this basic block. Note that this (expensive) case is rare.
4056
4057 Also, in this case, we must pretend that all REG_NOTEs for I2
4058 actually came from I3, so that REG_UNUSED notes from I2 will be
4059 properly handled. */
4060
4061 if (i3_subst_into_i2)
4062 {
4063 for (i = 0; i < XVECLEN (PATTERN (i2), 0); i++)
4064 if ((GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == SET
4065 || GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == CLOBBER)
4066 && REG_P (SET_DEST (XVECEXP (PATTERN (i2), 0, i)))
4067 && SET_DEST (XVECEXP (PATTERN (i2), 0, i)) != i2dest
4068 && ! find_reg_note (i2, REG_UNUSED,
4069 SET_DEST (XVECEXP (PATTERN (i2), 0, i))))
4070 for (temp = NEXT_INSN (i2);
4071 temp && (this_basic_block->next_bb == EXIT_BLOCK_PTR
4072 || BB_HEAD (this_basic_block) != temp);
4073 temp = NEXT_INSN (temp))
4074 if (temp != i3 && INSN_P (temp))
4075 for (link = LOG_LINKS (temp); link; link = XEXP (link, 1))
4076 if (XEXP (link, 0) == i2)
4077 XEXP (link, 0) = i3;
4078
4079 if (i3notes)
4080 {
4081 rtx link = i3notes;
4082 while (XEXP (link, 1))
4083 link = XEXP (link, 1);
4084 XEXP (link, 1) = i2notes;
4085 }
4086 else
4087 i3notes = i2notes;
4088 i2notes = 0;
4089 }
4090
4091 LOG_LINKS (i3) = 0;
4092 REG_NOTES (i3) = 0;
4093 LOG_LINKS (i2) = 0;
4094 REG_NOTES (i2) = 0;
4095
4096 if (newi2pat)
4097 {
4098 if (MAY_HAVE_DEBUG_INSNS && i2scratch)
4099 propagate_for_debug (i2, i3, i2dest, i2src);
4100 INSN_CODE (i2) = i2_code_number;
4101 PATTERN (i2) = newi2pat;
4102 }
4103 else
4104 {
4105 if (MAY_HAVE_DEBUG_INSNS && i2src)
4106 propagate_for_debug (i2, i3, i2dest, i2src);
4107 SET_INSN_DELETED (i2);
4108 }
4109
4110 if (i1)
4111 {
4112 LOG_LINKS (i1) = 0;
4113 REG_NOTES (i1) = 0;
4114 if (MAY_HAVE_DEBUG_INSNS)
4115 propagate_for_debug (i1, i3, i1dest, i1src);
4116 SET_INSN_DELETED (i1);
4117 }
4118
4119 if (i0)
4120 {
4121 LOG_LINKS (i0) = 0;
4122 REG_NOTES (i0) = 0;
4123 if (MAY_HAVE_DEBUG_INSNS)
4124 propagate_for_debug (i0, i3, i0dest, i0src);
4125 SET_INSN_DELETED (i0);
4126 }
4127
4128 /* Get death notes for everything that is now used in either I3 or
4129 I2 and used to die in a previous insn. If we built two new
4130 patterns, move from I1 to I2 then I2 to I3 so that we get the
4131 proper movement on registers that I2 modifies. */
4132
4133 if (i0)
4134 from_luid = DF_INSN_LUID (i0);
4135 else if (i1)
4136 from_luid = DF_INSN_LUID (i1);
4137 else
4138 from_luid = DF_INSN_LUID (i2);
4139 if (newi2pat)
4140 move_deaths (newi2pat, NULL_RTX, from_luid, i2, &midnotes);
4141 move_deaths (newpat, newi2pat, from_luid, i3, &midnotes);
4142
4143 /* Distribute all the LOG_LINKS and REG_NOTES from I1, I2, and I3. */
4144 if (i3notes)
4145 distribute_notes (i3notes, i3, i3, newi2pat ? i2 : NULL_RTX,
4146 elim_i2, elim_i1, elim_i0);
4147 if (i2notes)
4148 distribute_notes (i2notes, i2, i3, newi2pat ? i2 : NULL_RTX,
4149 elim_i2, elim_i1, elim_i0);
4150 if (i1notes)
4151 distribute_notes (i1notes, i1, i3, newi2pat ? i2 : NULL_RTX,
4152 elim_i2, elim_i1, elim_i0);
4153 if (i0notes)
4154 distribute_notes (i0notes, i0, i3, newi2pat ? i2 : NULL_RTX,
4155 elim_i2, elim_i1, elim_i0);
4156 if (midnotes)
4157 distribute_notes (midnotes, NULL_RTX, i3, newi2pat ? i2 : NULL_RTX,
4158 elim_i2, elim_i1, elim_i0);
4159
4160 /* Distribute any notes added to I2 or I3 by recog_for_combine. We
4161 know these are REG_UNUSED and want them to go to the desired insn,
4162 so we always pass it as i3. */
4163
4164 if (newi2pat && new_i2_notes)
4165 distribute_notes (new_i2_notes, i2, i2, NULL_RTX, NULL_RTX, NULL_RTX,
4166 NULL_RTX);
4167
4168 if (new_i3_notes)
4169 distribute_notes (new_i3_notes, i3, i3, NULL_RTX, NULL_RTX, NULL_RTX,
4170 NULL_RTX);
4171
4172 /* If I3DEST was used in I3SRC, it really died in I3. We may need to
4173 put a REG_DEAD note for it somewhere. If NEWI2PAT exists and sets
4174 I3DEST, the death must be somewhere before I2, not I3. If we passed I3
4175 in that case, it might delete I2. Similarly for I2 and I1.
4176 Show an additional death due to the REG_DEAD note we make here. If
4177 we discard it in distribute_notes, we will decrement it again. */
4178
4179 if (i3dest_killed)
4180 {
4181 if (newi2pat && reg_set_p (i3dest_killed, newi2pat))
4182 distribute_notes (alloc_reg_note (REG_DEAD, i3dest_killed,
4183 NULL_RTX),
4184 NULL_RTX, i2, NULL_RTX, elim_i2, elim_i1, elim_i0);
4185 else
4186 distribute_notes (alloc_reg_note (REG_DEAD, i3dest_killed,
4187 NULL_RTX),
4188 NULL_RTX, i3, newi2pat ? i2 : NULL_RTX,
4189 elim_i2, elim_i1, elim_i0);
4190 }
4191
4192 if (i2dest_in_i2src)
4193 {
4194 rtx new_note = alloc_reg_note (REG_DEAD, i2dest, NULL_RTX);
4195 if (newi2pat && reg_set_p (i2dest, newi2pat))
4196 distribute_notes (new_note, NULL_RTX, i2, NULL_RTX, NULL_RTX,
4197 NULL_RTX, NULL_RTX);
4198 else
4199 distribute_notes (new_note, NULL_RTX, i3, newi2pat ? i2 : NULL_RTX,
4200 NULL_RTX, NULL_RTX, NULL_RTX);
4201 }
4202
4203 if (i1dest_in_i1src)
4204 {
4205 rtx new_note = alloc_reg_note (REG_DEAD, i1dest, NULL_RTX);
4206 if (newi2pat && reg_set_p (i1dest, newi2pat))
4207 distribute_notes (new_note, NULL_RTX, i2, NULL_RTX, NULL_RTX,
4208 NULL_RTX, NULL_RTX);
4209 else
4210 distribute_notes (new_note, NULL_RTX, i3, newi2pat ? i2 : NULL_RTX,
4211 NULL_RTX, NULL_RTX, NULL_RTX);
4212 }
4213
4214 if (i0dest_in_i0src)
4215 {
4216 rtx new_note = alloc_reg_note (REG_DEAD, i0dest, NULL_RTX);
4217 if (newi2pat && reg_set_p (i0dest, newi2pat))
4218 distribute_notes (new_note, NULL_RTX, i2, NULL_RTX, NULL_RTX,
4219 NULL_RTX, NULL_RTX);
4220 else
4221 distribute_notes (new_note, NULL_RTX, i3, newi2pat ? i2 : NULL_RTX,
4222 NULL_RTX, NULL_RTX, NULL_RTX);
4223 }
4224
4225 distribute_links (i3links);
4226 distribute_links (i2links);
4227 distribute_links (i1links);
4228 distribute_links (i0links);
4229
4230 if (REG_P (i2dest))
4231 {
4232 rtx link;
4233 rtx i2_insn = 0, i2_val = 0, set;
4234
4235 /* The insn that used to set this register doesn't exist, and
4236 this life of the register may not exist either. See if one of
4237 I3's links points to an insn that sets I2DEST. If it does,
4238 that is now the last known value for I2DEST. If we don't update
4239 this and I2 set the register to a value that depended on its old
4240 contents, we will get confused. If this insn is used, things
4241 will be set correctly in combine_instructions. */
4242
4243 for (link = LOG_LINKS (i3); link; link = XEXP (link, 1))
4244 if ((set = single_set (XEXP (link, 0))) != 0
4245 && rtx_equal_p (i2dest, SET_DEST (set)))
4246 i2_insn = XEXP (link, 0), i2_val = SET_SRC (set);
4247
4248 record_value_for_reg (i2dest, i2_insn, i2_val);
4249
4250 /* If the reg formerly set in I2 died only once and that was in I3,
4251 zero its use count so it won't make `reload' do any work. */
4252 if (! added_sets_2
4253 && (newi2pat == 0 || ! reg_mentioned_p (i2dest, newi2pat))
4254 && ! i2dest_in_i2src)
4255 {
4256 regno = REGNO (i2dest);
4257 INC_REG_N_SETS (regno, -1);
4258 }
4259 }
4260
4261 if (i1 && REG_P (i1dest))
4262 {
4263 rtx link;
4264 rtx i1_insn = 0, i1_val = 0, set;
4265
4266 for (link = LOG_LINKS (i3); link; link = XEXP (link, 1))
4267 if ((set = single_set (XEXP (link, 0))) != 0
4268 && rtx_equal_p (i1dest, SET_DEST (set)))
4269 i1_insn = XEXP (link, 0), i1_val = SET_SRC (set);
4270
4271 record_value_for_reg (i1dest, i1_insn, i1_val);
4272
4273 regno = REGNO (i1dest);
4274 if (! added_sets_1 && ! i1dest_in_i1src)
4275 INC_REG_N_SETS (regno, -1);
4276 }
4277
4278 if (i0 && REG_P (i0dest))
4279 {
4280 rtx link;
4281 rtx i0_insn = 0, i0_val = 0, set;
4282
4283 for (link = LOG_LINKS (i3); link; link = XEXP (link, 1))
4284 if ((set = single_set (XEXP (link, 0))) != 0
4285 && rtx_equal_p (i0dest, SET_DEST (set)))
4286 i0_insn = XEXP (link, 0), i0_val = SET_SRC (set);
4287
4288 record_value_for_reg (i0dest, i0_insn, i0_val);
4289
4290 regno = REGNO (i0dest);
4291 if (! added_sets_0 && ! i0dest_in_i0src)
4292 INC_REG_N_SETS (regno, -1);
4293 }
4294
4295 /* Update reg_stat[].nonzero_bits et al for any changes that may have
4296 been made to this insn. The order of
4297 set_nonzero_bits_and_sign_copies() is important because newi2pat
4298 can affect nonzero_bits of newpat.  */
4299 if (newi2pat)
4300 note_stores (newi2pat, set_nonzero_bits_and_sign_copies, NULL);
4301 note_stores (newpat, set_nonzero_bits_and_sign_copies, NULL);
4302 }
4303
4304 if (undobuf.other_insn != NULL_RTX)
4305 {
4306 if (dump_file)
4307 {
4308 fprintf (dump_file, "modifying other_insn ");
4309 dump_insn_slim (dump_file, undobuf.other_insn);
4310 }
4311 df_insn_rescan (undobuf.other_insn);
4312 }
4313
4314 if (i0 && !(NOTE_P(i0) && (NOTE_KIND (i0) == NOTE_INSN_DELETED)))
4315 {
4316 if (dump_file)
4317 {
4318 fprintf (dump_file, "modifying insn i0 ");
4319 dump_insn_slim (dump_file, i0);
4320 }
4321 df_insn_rescan (i0);
4322 }
4323
4324 if (i1 && !(NOTE_P(i1) && (NOTE_KIND (i1) == NOTE_INSN_DELETED)))
4325 {
4326 if (dump_file)
4327 {
4328 fprintf (dump_file, "modifying insn i1 ");
4329 dump_insn_slim (dump_file, i1);
4330 }
4331 df_insn_rescan (i1);
4332 }
4333
4334 if (i2 && !(NOTE_P(i2) && (NOTE_KIND (i2) == NOTE_INSN_DELETED)))
4335 {
4336 if (dump_file)
4337 {
4338 fprintf (dump_file, "modifying insn i2 ");
4339 dump_insn_slim (dump_file, i2);
4340 }
4341 df_insn_rescan (i2);
4342 }
4343
4344 if (i3 && !(NOTE_P(i3) && (NOTE_KIND (i3) == NOTE_INSN_DELETED)))
4345 {
4346 if (dump_file)
4347 {
4348 fprintf (dump_file, "modifying insn i3 ");
4349 dump_insn_slim (dump_file, i3);
4350 }
4351 df_insn_rescan (i3);
4352 }
4353
4354 /* Set new_direct_jump_p if a new return or simple jump instruction
4355 has been created. Adjust the CFG accordingly. */
4356
4357 if (returnjump_p (i3) || any_uncondjump_p (i3))
4358 {
4359 *new_direct_jump_p = 1;
4360 mark_jump_label (PATTERN (i3), i3, 0);
4361 update_cfg_for_uncondjump (i3);
4362 }
4363
4364 if (undobuf.other_insn != NULL_RTX
4365 && (returnjump_p (undobuf.other_insn)
4366 || any_uncondjump_p (undobuf.other_insn)))
4367 {
4368 *new_direct_jump_p = 1;
4369 update_cfg_for_uncondjump (undobuf.other_insn);
4370 }
4371
4372 /* A noop might also need cleaning up of CFG, if it comes from the
4373 simplification of a jump. */
4374 if (GET_CODE (newpat) == SET
4375 && SET_SRC (newpat) == pc_rtx
4376 && SET_DEST (newpat) == pc_rtx)
4377 {
4378 *new_direct_jump_p = 1;
4379 update_cfg_for_uncondjump (i3);
4380 }
4381
4382 combine_successes++;
4383 undo_commit ();
4384
4385 if (added_links_insn
4386 && (newi2pat == 0 || DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (i2))
4387 && DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (i3))
4388 return added_links_insn;
4389 else
4390 return newi2pat ? i2 : i3;
4391 }
4392 \f
4393 /* Undo all the modifications recorded in undobuf. */
4394
4395 static void
4396 undo_all (void)
4397 {
4398 struct undo *undo, *next;
4399
4400 for (undo = undobuf.undos; undo; undo = next)
4401 {
4402 next = undo->next;
4403 switch (undo->kind)
4404 {
4405 case UNDO_RTX:
4406 *undo->where.r = undo->old_contents.r;
4407 break;
4408 case UNDO_INT:
4409 *undo->where.i = undo->old_contents.i;
4410 break;
4411 case UNDO_MODE:
4412 adjust_reg_mode (*undo->where.r, undo->old_contents.m);
4413 break;
4414 default:
4415 gcc_unreachable ();
4416 }
4417
4418 undo->next = undobuf.frees;
4419 undobuf.frees = undo;
4420 }
4421
4422 undobuf.undos = 0;
4423 }
4424
4425 /* We've committed to accepting the changes we made. Move all
4426 of the undos to the free list. */
4427
4428 static void
4429 undo_commit (void)
4430 {
4431 struct undo *undo, *next;
4432
4433 for (undo = undobuf.undos; undo; undo = next)
4434 {
4435 next = undo->next;
4436 undo->next = undobuf.frees;
4437 undobuf.frees = undo;
4438 }
4439 undobuf.undos = 0;
4440 }
4441 \f
4442 /* Find the innermost point within the rtx at LOC, possibly LOC itself,
4443 where we have an arithmetic expression and return that point. LOC will
4444 be inside INSN.
4445
4446 try_combine will call this function to see if an insn can be split into
4447 two insns. */
4448
4449 static rtx *
4450 find_split_point (rtx *loc, rtx insn, bool set_src)
4451 {
4452 rtx x = *loc;
4453 enum rtx_code code = GET_CODE (x);
4454 rtx *split;
4455 unsigned HOST_WIDE_INT len = 0;
4456 HOST_WIDE_INT pos = 0;
4457 int unsignedp = 0;
4458 rtx inner = NULL_RTX;
4459
4460 /* First special-case some codes. */
4461 switch (code)
4462 {
4463 case SUBREG:
4464 #ifdef INSN_SCHEDULING
4465 /* If we are making a paradoxical SUBREG invalid, it becomes a split
4466 point. */
4467 if (MEM_P (SUBREG_REG (x)))
4468 return loc;
4469 #endif
4470 return find_split_point (&SUBREG_REG (x), insn, false);
4471
4472 case MEM:
4473 #ifdef HAVE_lo_sum
4474 /* If we have (mem (const ..)) or (mem (symbol_ref ...)), split it
4475 using LO_SUM and HIGH. */
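/* E.g. (mem (symbol_ref "foo")) becomes
   (mem (lo_sum (high (symbol_ref "foo")) (symbol_ref "foo")))
   and the HIGH term is returned as the split point.  */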
4476 if (GET_CODE (XEXP (x, 0)) == CONST
4477 || GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
4478 {
4479 enum machine_mode address_mode
4480 = targetm.addr_space.address_mode (MEM_ADDR_SPACE (x));
4481
4482 SUBST (XEXP (x, 0),
4483 gen_rtx_LO_SUM (address_mode,
4484 gen_rtx_HIGH (address_mode, XEXP (x, 0)),
4485 XEXP (x, 0)));
4486 return &XEXP (XEXP (x, 0), 0);
4487 }
4488 #endif
4489
4490 /* If we have a PLUS whose second operand is a constant and the
4491 address is not valid, perhaps we can split it up using
4492 the machine-specific way to split large constants. We use
4493 the first pseudo-reg (one of the virtual regs) as a placeholder;
4494 it will not remain in the result. */
4495 if (GET_CODE (XEXP (x, 0)) == PLUS
4496 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
4497 && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
4498 MEM_ADDR_SPACE (x)))
4499 {
4500 rtx reg = regno_reg_rtx[FIRST_PSEUDO_REGISTER];
4501 rtx seq = combine_split_insns (gen_rtx_SET (VOIDmode, reg,
4502 XEXP (x, 0)),
4503 subst_insn);
4504
4505 /* This should have produced two insns, each of which sets our
4506 placeholder. If the source of the second is a valid address,
4507 we can put both sources together and make a split point
4508 in the middle. */
4509
4510 if (seq
4511 && NEXT_INSN (seq) != NULL_RTX
4512 && NEXT_INSN (NEXT_INSN (seq)) == NULL_RTX
4513 && NONJUMP_INSN_P (seq)
4514 && GET_CODE (PATTERN (seq)) == SET
4515 && SET_DEST (PATTERN (seq)) == reg
4516 && ! reg_mentioned_p (reg,
4517 SET_SRC (PATTERN (seq)))
4518 && NONJUMP_INSN_P (NEXT_INSN (seq))
4519 && GET_CODE (PATTERN (NEXT_INSN (seq))) == SET
4520 && SET_DEST (PATTERN (NEXT_INSN (seq))) == reg
4521 && memory_address_addr_space_p
4522 (GET_MODE (x), SET_SRC (PATTERN (NEXT_INSN (seq))),
4523 MEM_ADDR_SPACE (x)))
4524 {
4525 rtx src1 = SET_SRC (PATTERN (seq));
4526 rtx src2 = SET_SRC (PATTERN (NEXT_INSN (seq)));
4527
4528 /* Replace the placeholder in SRC2 with SRC1. If we can
4529 find where in SRC2 it was placed, that can become our
4530 split point and we can replace this address with SRC2.
4531 Just try two obvious places. */
4532
4533 src2 = replace_rtx (src2, reg, src1);
4534 split = 0;
4535 if (XEXP (src2, 0) == src1)
4536 split = &XEXP (src2, 0);
4537 else if (GET_RTX_FORMAT (GET_CODE (XEXP (src2, 0)))[0] == 'e'
4538 && XEXP (XEXP (src2, 0), 0) == src1)
4539 split = &XEXP (XEXP (src2, 0), 0);
4540
4541 if (split)
4542 {
4543 SUBST (XEXP (x, 0), src2);
4544 return split;
4545 }
4546 }
4547
4548 /* If that didn't work, perhaps the first operand is complex and
4549 needs to be computed separately, so make a split point there.
4550 This will occur on machines that just support REG + CONST
4551 and have a constant moved through some previous computation. */
4552
4553 else if (!OBJECT_P (XEXP (XEXP (x, 0), 0))
4554 && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
4555 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
4556 return &XEXP (XEXP (x, 0), 0);
4557 }
4558
4559 /* If we have a PLUS whose first operand is complex, try computing it
4560 separately by making a split there. */
4561 if (GET_CODE (XEXP (x, 0)) == PLUS
4562 && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
4563 MEM_ADDR_SPACE (x))
4564 && ! OBJECT_P (XEXP (XEXP (x, 0), 0))
4565 && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
4566 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
4567 return &XEXP (XEXP (x, 0), 0);
4568 break;
4569
4570 case SET:
4571 #ifdef HAVE_cc0
4572 /* If SET_DEST is CC0 and SET_SRC is not an operand, a COMPARE, or a
4573 ZERO_EXTRACT, the most likely reason why this doesn't match is that
4574 we need to put the operand into a register. So split at that
4575 point. */
4576
4577 if (SET_DEST (x) == cc0_rtx
4578 && GET_CODE (SET_SRC (x)) != COMPARE
4579 && GET_CODE (SET_SRC (x)) != ZERO_EXTRACT
4580 && !OBJECT_P (SET_SRC (x))
4581 && ! (GET_CODE (SET_SRC (x)) == SUBREG
4582 && OBJECT_P (SUBREG_REG (SET_SRC (x)))))
4583 return &SET_SRC (x);
4584 #endif
4585
4586 /* See if we can split SET_SRC as it stands. */
4587 split = find_split_point (&SET_SRC (x), insn, true);
4588 if (split && split != &SET_SRC (x))
4589 return split;
4590
4591 /* See if we can split SET_DEST as it stands. */
4592 split = find_split_point (&SET_DEST (x), insn, false);
4593 if (split && split != &SET_DEST (x))
4594 return split;
4595
4596 /* See if this is a bitfield assignment with everything constant. If
4597 so, this is an IOR of an AND, so split it into that. */
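/* Illustrative operands, assuming !BITS_BIG_ENDIAN: the assignment
     (set (zero_extract:SI (reg:SI D) (const_int 3) (const_int 4))
          (const_int 5))
   is rewritten below as
     (set (reg:SI D) (ior:SI (and:SI (reg:SI D) (const_int -113))
                             (const_int 80)))
   since the field mask is 7 << 4 == 0x70 (~0x70 == -113) and the
   shifted source is 5 << 4 == 80.  */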
4598 if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
4599 && (GET_MODE_BITSIZE (GET_MODE (XEXP (SET_DEST (x), 0)))
4600 <= HOST_BITS_PER_WIDE_INT)
4601 && CONST_INT_P (XEXP (SET_DEST (x), 1))
4602 && CONST_INT_P (XEXP (SET_DEST (x), 2))
4603 && CONST_INT_P (SET_SRC (x))
4604 && ((INTVAL (XEXP (SET_DEST (x), 1))
4605 + INTVAL (XEXP (SET_DEST (x), 2)))
4606 <= GET_MODE_BITSIZE (GET_MODE (XEXP (SET_DEST (x), 0))))
4607 && ! side_effects_p (XEXP (SET_DEST (x), 0)))
4608 {
4609 HOST_WIDE_INT pos = INTVAL (XEXP (SET_DEST (x), 2));
4610 unsigned HOST_WIDE_INT len = INTVAL (XEXP (SET_DEST (x), 1));
4611 unsigned HOST_WIDE_INT src = INTVAL (SET_SRC (x));
4612 rtx dest = XEXP (SET_DEST (x), 0);
4613 enum machine_mode mode = GET_MODE (dest);
4614 unsigned HOST_WIDE_INT mask
4615 = ((unsigned HOST_WIDE_INT) 1 << len) - 1;
4616 rtx or_mask;
4617
4618 if (BITS_BIG_ENDIAN)
4619 pos = GET_MODE_BITSIZE (mode) - len - pos;
4620
4621 or_mask = gen_int_mode (src << pos, mode);
4622 if (src == mask)
4623 SUBST (SET_SRC (x),
4624 simplify_gen_binary (IOR, mode, dest, or_mask));
4625 else
4626 {
4627 rtx negmask = gen_int_mode (~(mask << pos), mode);
4628 SUBST (SET_SRC (x),
4629 simplify_gen_binary (IOR, mode,
4630 simplify_gen_binary (AND, mode,
4631 dest, negmask),
4632 or_mask));
4633 }
4634
4635 SUBST (SET_DEST (x), dest);
4636
4637 split = find_split_point (&SET_SRC (x), insn, true);
4638 if (split && split != &SET_SRC (x))
4639 return split;
4640 }
4641
4642 /* Otherwise, see if this is an operation that we can split into two.
4643 If so, try to split that. */
4644 code = GET_CODE (SET_SRC (x));
4645
4646 switch (code)
4647 {
4648 case AND:
4649 /* If we are AND'ing with a large constant that is only a single
4650 bit and the result is only being used in a context where we
4651 need to know if it is zero or nonzero, replace it with a bit
4652 extraction. This will avoid the large constant, which might
4653 have taken more than one insn to make. If the constant were
4654 not a valid argument to the AND but took only one insn to make,
4655 this is no worse, but if it took more than one insn, it will
4656 be better. */
4657
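/* For instance (operands assumed): if (set (reg T) (and (reg X)
   (const_int 4096))) is only used to test T against zero, the AND is
   replaced by a single-bit extraction at position 12, i.e.
   exact_log2 (4096), avoiding the large constant.  */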
4658 if (CONST_INT_P (XEXP (SET_SRC (x), 1))
4659 && REG_P (XEXP (SET_SRC (x), 0))
4660 && (pos = exact_log2 (UINTVAL (XEXP (SET_SRC (x), 1)))) >= 7
4661 && REG_P (SET_DEST (x))
4662 && (split = find_single_use (SET_DEST (x), insn, (rtx*) 0)) != 0
4663 && (GET_CODE (*split) == EQ || GET_CODE (*split) == NE)
4664 && XEXP (*split, 0) == SET_DEST (x)
4665 && XEXP (*split, 1) == const0_rtx)
4666 {
4667 rtx extraction = make_extraction (GET_MODE (SET_DEST (x)),
4668 XEXP (SET_SRC (x), 0),
4669 pos, NULL_RTX, 1, 1, 0, 0);
4670 if (extraction != 0)
4671 {
4672 SUBST (SET_SRC (x), extraction);
4673 return find_split_point (loc, insn, false);
4674 }
4675 }
4676 break;
4677
4678 case NE:
4679 /* If STORE_FLAG_VALUE is -1, this is (NE X 0) and only one bit of X
4680 is known to be on, this can be converted into a NEG of a shift. */
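/* E.g. if nonzero_bits shows that only bit 3 of X can be set,
   (ne X (const_int 0)) becomes (neg (lshiftrt X (const_int 3))),
   which is -1 when the bit is set and 0 otherwise, matching a
   STORE_FLAG_VALUE of -1.  */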
4681 if (STORE_FLAG_VALUE == -1 && XEXP (SET_SRC (x), 1) == const0_rtx
4682 && GET_MODE (SET_SRC (x)) == GET_MODE (XEXP (SET_SRC (x), 0))
4683 && 1 <= (pos = exact_log2
4684 (nonzero_bits (XEXP (SET_SRC (x), 0),
4685 GET_MODE (XEXP (SET_SRC (x), 0))))))
4686 {
4687 enum machine_mode mode = GET_MODE (XEXP (SET_SRC (x), 0));
4688
4689 SUBST (SET_SRC (x),
4690 gen_rtx_NEG (mode,
4691 gen_rtx_LSHIFTRT (mode,
4692 XEXP (SET_SRC (x), 0),
4693 GEN_INT (pos))));
4694
4695 split = find_split_point (&SET_SRC (x), insn, true);
4696 if (split && split != &SET_SRC (x))
4697 return split;
4698 }
4699 break;
4700
4701 case SIGN_EXTEND:
4702 inner = XEXP (SET_SRC (x), 0);
4703
4704 /* We can't optimize if either mode is a partial integer
4705 mode as we don't know how many bits are significant
4706 in those modes. */
4707 if (GET_MODE_CLASS (GET_MODE (inner)) == MODE_PARTIAL_INT
4708 || GET_MODE_CLASS (GET_MODE (SET_SRC (x))) == MODE_PARTIAL_INT)
4709 break;
4710
4711 pos = 0;
4712 len = GET_MODE_BITSIZE (GET_MODE (inner));
4713 unsignedp = 0;
4714 break;
4715
4716 case SIGN_EXTRACT:
4717 case ZERO_EXTRACT:
4718 if (CONST_INT_P (XEXP (SET_SRC (x), 1))
4719 && CONST_INT_P (XEXP (SET_SRC (x), 2)))
4720 {
4721 inner = XEXP (SET_SRC (x), 0);
4722 len = INTVAL (XEXP (SET_SRC (x), 1));
4723 pos = INTVAL (XEXP (SET_SRC (x), 2));
4724
4725 if (BITS_BIG_ENDIAN)
4726 pos = GET_MODE_BITSIZE (GET_MODE (inner)) - len - pos;
4727 unsignedp = (code == ZERO_EXTRACT);
4728 }
4729 break;
4730
4731 default:
4732 break;
4733 }
4734
4735 if (len && pos >= 0 && pos + len <= GET_MODE_BITSIZE (GET_MODE (inner)))
4736 {
4737 enum machine_mode mode = GET_MODE (SET_SRC (x));
4738
4739 /* For unsigned, we have a choice of a shift followed by an
4740 AND or two shifts. Use two shifts for field sizes where the
4741 constant might be too large. We assume here that we can
4742 always at least get 8-bit constants in an AND insn, which is
4743 true for every current RISC. */
4744
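/* Sketch with assumed operands, in SImode: an unsigned 3-bit field at
   bit 4 of INNER becomes
     (and (lshiftrt INNER (const_int 4)) (const_int 7))
   while a signed field of the same shape becomes
     (ashiftrt (ashift INNER (const_int 25)) (const_int 29)).  */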
4745 if (unsignedp && len <= 8)
4746 {
4747 SUBST (SET_SRC (x),
4748 gen_rtx_AND (mode,
4749 gen_rtx_LSHIFTRT
4750 (mode, gen_lowpart (mode, inner),
4751 GEN_INT (pos)),
4752 GEN_INT (((unsigned HOST_WIDE_INT) 1 << len)
4753 - 1)));
4754
4755 split = find_split_point (&SET_SRC (x), insn, true);
4756 if (split && split != &SET_SRC (x))
4757 return split;
4758 }
4759 else
4760 {
4761 SUBST (SET_SRC (x),
4762 gen_rtx_fmt_ee
4763 (unsignedp ? LSHIFTRT : ASHIFTRT, mode,
4764 gen_rtx_ASHIFT (mode,
4765 gen_lowpart (mode, inner),
4766 GEN_INT (GET_MODE_BITSIZE (mode)
4767 - len - pos)),
4768 GEN_INT (GET_MODE_BITSIZE (mode) - len)));
4769
4770 split = find_split_point (&SET_SRC (x), insn, true);
4771 if (split && split != &SET_SRC (x))
4772 return split;
4773 }
4774 }
4775
4776 /* See if this is a simple operation with a constant as the second
4777 operand. It might be that this constant is out of range and hence
4778 could be used as a split point. */
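/* E.g. (plus (reg A) (const_int 100000)) on a target whose
   add-immediate range is narrower (assumed): the constant operand is
   returned as the split point so it can be loaded into a register by
   the new first insn.  */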
4779 if (BINARY_P (SET_SRC (x))
4780 && CONSTANT_P (XEXP (SET_SRC (x), 1))
4781 && (OBJECT_P (XEXP (SET_SRC (x), 0))
4782 || (GET_CODE (XEXP (SET_SRC (x), 0)) == SUBREG
4783 && OBJECT_P (SUBREG_REG (XEXP (SET_SRC (x), 0))))))
4784 return &XEXP (SET_SRC (x), 1);
4785
4786 /* Finally, see if this is a simple operation with its first operand
4787 not in a register. The operation might require this operand in a
4788 register, so return it as a split point. We can always do this
4789 because if the first operand were another operation, we would have
4790 already found it as a split point. */
4791 if ((BINARY_P (SET_SRC (x)) || UNARY_P (SET_SRC (x)))
4792 && ! register_operand (XEXP (SET_SRC (x), 0), VOIDmode))
4793 return &XEXP (SET_SRC (x), 0);
4794
4795 return 0;
4796
4797 case AND:
4798 case IOR:
4799 /* We write NOR as (and (not A) (not B)), but if we don't have a NOR,
4800 it is better to write this as (not (ior A B)) so we can split it.
4801 Similarly for IOR. */
4802 if (GET_CODE (XEXP (x, 0)) == NOT && GET_CODE (XEXP (x, 1)) == NOT)
4803 {
4804 SUBST (*loc,
4805 gen_rtx_NOT (GET_MODE (x),
4806 gen_rtx_fmt_ee (code == IOR ? AND : IOR,
4807 GET_MODE (x),
4808 XEXP (XEXP (x, 0), 0),
4809 XEXP (XEXP (x, 1), 0))));
4810 return find_split_point (loc, insn, set_src);
4811 }
4812
4813 /* Many RISC machines have a large set of logical insns. If the
4814 second operand is a NOT, put it first so we will try to split the
4815 other operand first. */
4816 if (GET_CODE (XEXP (x, 1)) == NOT)
4817 {
4818 rtx tem = XEXP (x, 0);
4819 SUBST (XEXP (x, 0), XEXP (x, 1));
4820 SUBST (XEXP (x, 1), tem);
4821 }
4822 break;
4823
4824 case PLUS:
4825 case MINUS:
4826 /* Canonicalization can produce (minus A (mult B C)), where C is a
4827 constant. It may be better to try splitting (plus (mult B -C) A)
4828 instead if this isn't a multiply by a power of two. */
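/* For example, (minus A (mult B (const_int 3))) is rewritten as
   (plus (mult B (const_int -3)) A); a multiply by 4 is left alone
   since 4 is a power of two.  */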
4829 if (set_src && code == MINUS && GET_CODE (XEXP (x, 1)) == MULT
4830 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
4831 && exact_log2 (INTVAL (XEXP (XEXP (x, 1), 1))) < 0)
4832 {
4833 enum machine_mode mode = GET_MODE (x);
4834 unsigned HOST_WIDE_INT this_int = INTVAL (XEXP (XEXP (x, 1), 1));
4835 HOST_WIDE_INT other_int = trunc_int_for_mode (-this_int, mode);
4836 SUBST (*loc, gen_rtx_PLUS (mode, gen_rtx_MULT (mode,
4837 XEXP (XEXP (x, 1), 0),
4838 GEN_INT (other_int)),
4839 XEXP (x, 0)));
4840 return find_split_point (loc, insn, set_src);
4841 }
4842
4843 /* Split at a multiply-accumulate instruction. However if this is
4844 the SET_SRC, we likely do not have such an instruction and it's
4845 worthless to try this split. */
4846 if (!set_src && GET_CODE (XEXP (x, 0)) == MULT)
4847 return loc;
4848
4849 default:
4850 break;
4851 }
4852
4853 /* Otherwise, select our actions depending on our rtx class. */
4854 switch (GET_RTX_CLASS (code))
4855 {
4856 case RTX_BITFIELD_OPS: /* This is ZERO_EXTRACT and SIGN_EXTRACT. */
4857 case RTX_TERNARY:
4858 split = find_split_point (&XEXP (x, 2), insn, false);
4859 if (split)
4860 return split;
4861 /* ... fall through ... */
4862 case RTX_BIN_ARITH:
4863 case RTX_COMM_ARITH:
4864 case RTX_COMPARE:
4865 case RTX_COMM_COMPARE:
4866 split = find_split_point (&XEXP (x, 1), insn, false);
4867 if (split)
4868 return split;
4869 /* ... fall through ... */
4870 case RTX_UNARY:
4871 /* Some machines have (and (shift ...) ...) insns. If X is not
4872 an AND, but XEXP (X, 0) is, use it as our split point. */
4873 if (GET_CODE (x) != AND && GET_CODE (XEXP (x, 0)) == AND)
4874 return &XEXP (x, 0);
4875
4876 split = find_split_point (&XEXP (x, 0), insn, false);
4877 if (split)
4878 return split;
4879 return loc;
4880
4881 default:
4882 /* Otherwise, we don't have a split point. */
4883 return 0;
4884 }
4885 }
4886 \f
4887 /* Throughout X, replace FROM with TO, and return the result.
4888 The result is TO if X is FROM;
4889 otherwise the result is X, but its contents may have been modified.
4890 If they were modified, a record was made in undobuf so that
4891 undo_all will (among other things) return X to its original state.
4892
4893 If the number of changes necessary is too much to record to undo,
4894 the excess changes are not made, so the result is invalid.
4895 The changes already made can still be undone.
4896 undobuf.num_undo is incremented for such changes, so by testing that
4897 the caller can tell whether the result is valid.
4898
4899 `n_occurrences' is incremented each time FROM is replaced.
4900
4901 IN_DEST is nonzero if we are processing the SET_DEST of a SET.
4902
4903 UNIQUE_COPY is nonzero if each substitution must be unique. We do this
4904 by copying if `n_occurrences' is nonzero. */
4905
4906 static rtx
4907 subst (rtx x, rtx from, rtx to, int in_dest, int unique_copy)
4908 {
4909 enum rtx_code code = GET_CODE (x);
4910 enum machine_mode op0_mode = VOIDmode;
4911 const char *fmt;
4912 int len, i;
4913 rtx new_rtx;
4914
4915 /* Two expressions are equal if they are identical copies of a shared
4916 RTX or if they are both registers with the same register number
4917 and mode. */
4918
4919 #define COMBINE_RTX_EQUAL_P(X,Y) \
4920 ((X) == (Y) \
4921 || (REG_P (X) && REG_P (Y) \
4922 && REGNO (X) == REGNO (Y) && GET_MODE (X) == GET_MODE (Y)))
4923
4924 if (! in_dest && COMBINE_RTX_EQUAL_P (x, from))
4925 {
4926 n_occurrences++;
4927 return (unique_copy && n_occurrences > 1 ? copy_rtx (to) : to);
4928 }
4929
4930 /* If X and FROM are the same register but different modes, they
4931 will not have been seen as equal above. However, the log links code
4932 will make a LOG_LINKS entry for that case. If we do nothing, we
4933 will try to rerecognize our original insn and, when it succeeds,
4934 we will delete the feeding insn, which is incorrect.
4935
4936 So force this insn not to match in this (rare) case. */
4937 if (! in_dest && code == REG && REG_P (from)
4938 && reg_overlap_mentioned_p (x, from))
4939 return gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
4940
4941 /* If this is an object, we are done unless it is a MEM or LO_SUM, both
4942 of which may contain things that can be combined. */
4943 if (code != MEM && code != LO_SUM && OBJECT_P (x))
4944 return x;
4945
4946 /* It is possible to have a subexpression appear twice in the insn.
4947 Suppose that FROM is a register that appears within TO.
4948 Then, after that subexpression has been scanned once by `subst',
4949 the second time it is scanned, TO may be found. If we were
4950 to scan TO here, we would find FROM within it and create a
4951 self-referent rtl structure which is completely wrong. */
4952 if (COMBINE_RTX_EQUAL_P (x, to))
4953 return to;
4954
4955 /* Parallel asm_operands need special attention because all of the
4956 inputs are shared across the arms. Furthermore, unsharing the
4957 rtl results in recognition failures. Failure to handle this case
4958 specially can result in circular rtl.
4959
4960 Solve this by doing a normal pass across the first entry of the
4961 parallel, and only processing the SET_DESTs of the subsequent
4962 entries. Ug. */
4963
4964 if (code == PARALLEL
4965 && GET_CODE (XVECEXP (x, 0, 0)) == SET
4966 && GET_CODE (SET_SRC (XVECEXP (x, 0, 0))) == ASM_OPERANDS)
4967 {
4968 new_rtx = subst (XVECEXP (x, 0, 0), from, to, 0, unique_copy);
4969
4970 /* If this substitution failed, this whole thing fails. */
4971 if (GET_CODE (new_rtx) == CLOBBER
4972 && XEXP (new_rtx, 0) == const0_rtx)
4973 return new_rtx;
4974
4975 SUBST (XVECEXP (x, 0, 0), new_rtx);
4976
4977 for (i = XVECLEN (x, 0) - 1; i >= 1; i--)
4978 {
4979 rtx dest = SET_DEST (XVECEXP (x, 0, i));
4980
4981 if (!REG_P (dest)
4982 && GET_CODE (dest) != CC0
4983 && GET_CODE (dest) != PC)
4984 {
4985 new_rtx = subst (dest, from, to, 0, unique_copy);
4986
4987 /* If this substitution failed, this whole thing fails. */
4988 if (GET_CODE (new_rtx) == CLOBBER
4989 && XEXP (new_rtx, 0) == const0_rtx)
4990 return new_rtx;
4991
4992 SUBST (SET_DEST (XVECEXP (x, 0, i)), new_rtx);
4993 }
4994 }
4995 }
4996 else
4997 {
4998 len = GET_RTX_LENGTH (code);
4999 fmt = GET_RTX_FORMAT (code);
5000
5001 /* We don't need to process a SET_DEST that is a register, CC0,
5002 or PC, so set up to skip this common case. All other cases
5003 where we want to suppress replacing something inside a
5004 SET_SRC are handled via the IN_DEST operand. */
5005 if (code == SET
5006 && (REG_P (SET_DEST (x))
5007 || GET_CODE (SET_DEST (x)) == CC0
5008 || GET_CODE (SET_DEST (x)) == PC))
5009 fmt = "ie";
5010
5011 /* Get the mode of operand 0 in case X is now a SIGN_EXTEND of a
5012 constant. */
5013 if (fmt[0] == 'e')
5014 op0_mode = GET_MODE (XEXP (x, 0));
5015
5016 for (i = 0; i < len; i++)
5017 {
5018 if (fmt[i] == 'E')
5019 {
5020 int j;
5021 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5022 {
5023 if (COMBINE_RTX_EQUAL_P (XVECEXP (x, i, j), from))
5024 {
5025 new_rtx = (unique_copy && n_occurrences
5026 ? copy_rtx (to) : to);
5027 n_occurrences++;
5028 }
5029 else
5030 {
5031 new_rtx = subst (XVECEXP (x, i, j), from, to, 0,
5032 unique_copy);
5033
5034 /* If this substitution failed, this whole thing
5035 fails. */
5036 if (GET_CODE (new_rtx) == CLOBBER
5037 && XEXP (new_rtx, 0) == const0_rtx)
5038 return new_rtx;
5039 }
5040
5041 SUBST (XVECEXP (x, i, j), new_rtx);
5042 }
5043 }
5044 else if (fmt[i] == 'e')
5045 {
5046 /* If this is a register being set, ignore it. */
5047 new_rtx = XEXP (x, i);
5048 if (in_dest
5049 && i == 0
5050 && (((code == SUBREG || code == ZERO_EXTRACT)
5051 && REG_P (new_rtx))
5052 || code == STRICT_LOW_PART))
5053 ;
5054
5055 else if (COMBINE_RTX_EQUAL_P (XEXP (x, i), from))
5056 {
5057 /* In general, don't install a subreg involving two
5058 modes that are not tieable. It can worsen register
5059 allocation, and can even make invalid reload
5060 insns, since the reg inside may need to be copied
5061 from in the outside mode, and that may be invalid
5062 if it is an fp reg copied in integer mode.
5063
5064 We allow two exceptions to this: It is valid if
5065 it is inside another SUBREG and the mode of that
5066 SUBREG and the mode of the inside of TO are
5067 tieable, and it is valid if X is a SET that copies
5068 FROM to CC0. */
5069
5070 if (GET_CODE (to) == SUBREG
5071 && ! MODES_TIEABLE_P (GET_MODE (to),
5072 GET_MODE (SUBREG_REG (to)))
5073 && ! (code == SUBREG
5074 && MODES_TIEABLE_P (GET_MODE (x),
5075 GET_MODE (SUBREG_REG (to))))
5076 #ifdef HAVE_cc0
5077 && ! (code == SET && i == 1 && XEXP (x, 0) == cc0_rtx)
5078 #endif
5079 )
5080 return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5081
5082 #ifdef CANNOT_CHANGE_MODE_CLASS
5083 if (code == SUBREG
5084 && REG_P (to)
5085 && REGNO (to) < FIRST_PSEUDO_REGISTER
5086 && REG_CANNOT_CHANGE_MODE_P (REGNO (to),
5087 GET_MODE (to),
5088 GET_MODE (x)))
5089 return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5090 #endif
5091
5092 new_rtx = (unique_copy && n_occurrences ? copy_rtx (to) : to);
5093 n_occurrences++;
5094 }
5095 else
5096 /* If we are in a SET_DEST, suppress most cases unless we
5097 have gone inside a MEM, in which case we want to
5098 simplify the address. We assume here that things that
5099 are actually part of the destination have their inner
5100 parts in the first expression. This is true for SUBREG,
5101 STRICT_LOW_PART, and ZERO_EXTRACT, which are the only
5102 things aside from REG and MEM that should appear in a
5103 SET_DEST. */
5104 new_rtx = subst (XEXP (x, i), from, to,
5105 (((in_dest
5106 && (code == SUBREG || code == STRICT_LOW_PART
5107 || code == ZERO_EXTRACT))
5108 || code == SET)
5109 && i == 0), unique_copy);
5110
5111 /* If we found that we will have to reject this combination,
5112 indicate that by returning the CLOBBER ourselves, rather than
5113 an expression containing it. This will speed things up as
5114 well as prevent accidents where two CLOBBERs are considered
5115 to be equal, thus producing an incorrect simplification. */
5116
5117 if (GET_CODE (new_rtx) == CLOBBER && XEXP (new_rtx, 0) == const0_rtx)
5118 return new_rtx;
5119
5120 if (GET_CODE (x) == SUBREG
5121 && (CONST_INT_P (new_rtx)
5122 || GET_CODE (new_rtx) == CONST_DOUBLE))
5123 {
5124 enum machine_mode mode = GET_MODE (x);
5125
5126 x = simplify_subreg (GET_MODE (x), new_rtx,
5127 GET_MODE (SUBREG_REG (x)),
5128 SUBREG_BYTE (x));
5129 if (! x)
5130 x = gen_rtx_CLOBBER (mode, const0_rtx);
5131 }
5132 else if (CONST_INT_P (new_rtx)
5133 && GET_CODE (x) == ZERO_EXTEND)
5134 {
5135 x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
5136 new_rtx, GET_MODE (XEXP (x, 0)));
5137 gcc_assert (x);
5138 }
5139 else
5140 SUBST (XEXP (x, i), new_rtx);
5141 }
5142 }
5143 }
5144
5145 /* Check if we are loading something from the constant pool via float
5146 extension; in this case we would undo compress_float_constant
5147 optimization and degrade the constant load to an immediate value. */
5148 if (GET_CODE (x) == FLOAT_EXTEND
5149 && MEM_P (XEXP (x, 0))
5150 && MEM_READONLY_P (XEXP (x, 0)))
5151 {
5152 rtx tmp = avoid_constant_pool_reference (x);
5153 if (x != tmp)
5154 return x;
5155 }
5156
5157 /* Try to simplify X. If the simplification changed the code, it is likely
5158 that further simplification will help, so loop, but limit the number
5159 of repetitions that will be performed. */
5160
5161 for (i = 0; i < 4; i++)
5162 {
5163 /* If X is sufficiently simple, don't bother trying to do anything
5164 with it. */
5165 if (code != CONST_INT && code != REG && code != CLOBBER)
5166 x = combine_simplify_rtx (x, op0_mode, in_dest);
5167
5168 if (GET_CODE (x) == code)
5169 break;
5170
5171 code = GET_CODE (x);
5172
5173 /* We no longer know the original mode of operand 0 since we
5174 have changed the form of X.  */
5175 op0_mode = VOIDmode;
5176 }
5177
5178 return x;
5179 }
5180 \f
5181 /* Simplify X, a piece of RTL. We just operate on the expression at the
5182 outer level; call `subst' to simplify recursively. Return the new
5183 expression.
5184
5185 OP0_MODE is the original mode of XEXP (x, 0). IN_DEST is nonzero
5186 if we are inside a SET_DEST. */
5187
5188 static rtx
5189 combine_simplify_rtx (rtx x, enum machine_mode op0_mode, int in_dest)
5190 {
5191 enum rtx_code code = GET_CODE (x);
5192 enum machine_mode mode = GET_MODE (x);
5193 rtx temp;
5194 int i;
5195
5196 /* If this is a commutative operation, put a constant last and a complex
5197 expression first. We don't need to do this for comparisons here. */
5198 if (COMMUTATIVE_ARITH_P (x)
5199 && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5200 {
5201 temp = XEXP (x, 0);
5202 SUBST (XEXP (x, 0), XEXP (x, 1));
5203 SUBST (XEXP (x, 1), temp);
5204 }
5205
5206 /* If this is a simple operation applied to an IF_THEN_ELSE, try
5207 applying it to the arms of the IF_THEN_ELSE. This often simplifies
5208 things. Check for cases where both arms are testing the same
5209 condition.
5210
5211 Don't do anything if all operands are very simple. */
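/* For example (a sketch of the net effect, not of every intermediate step):
   (plus (if_then_else COND A B) (const_int 1)) can be rewritten as
   (if_then_else COND (plus A (const_int 1)) (plus B (const_int 1))),
   after which each arm is simplified on its own.  */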
5212
5213 if ((BINARY_P (x)
5214 && ((!OBJECT_P (XEXP (x, 0))
5215 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5216 && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))
5217 || (!OBJECT_P (XEXP (x, 1))
5218 && ! (GET_CODE (XEXP (x, 1)) == SUBREG
5219 && OBJECT_P (SUBREG_REG (XEXP (x, 1)))))))
5220 || (UNARY_P (x)
5221 && (!OBJECT_P (XEXP (x, 0))
5222 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5223 && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))))
5224 {
5225 rtx cond, true_rtx, false_rtx;
5226
5227 cond = if_then_else_cond (x, &true_rtx, &false_rtx);
5228 if (cond != 0
5229 /* If everything is a comparison, what we have is highly unlikely
5230 to be simpler, so don't use it. */
5231 && ! (COMPARISON_P (x)
5232 && (COMPARISON_P (true_rtx) || COMPARISON_P (false_rtx))))
5233 {
5234 rtx cop1 = const0_rtx;
5235 enum rtx_code cond_code = simplify_comparison (NE, &cond, &cop1);
5236
5237 if (cond_code == NE && COMPARISON_P (cond))
5238 return x;
5239
5240 /* Simplify the alternative arms; this may collapse the true and
5241 false arms to store-flag values. Be careful to use copy_rtx
5242 here since true_rtx or false_rtx might share RTL with x as a
5243 result of the if_then_else_cond call above. */
5244 true_rtx = subst (copy_rtx (true_rtx), pc_rtx, pc_rtx, 0, 0);
5245 false_rtx = subst (copy_rtx (false_rtx), pc_rtx, pc_rtx, 0, 0);
5246
5247 /* If true_rtx and false_rtx are not general_operands, an if_then_else
5248 is unlikely to be simpler. */
5249 if (general_operand (true_rtx, VOIDmode)
5250 && general_operand (false_rtx, VOIDmode))
5251 {
5252 enum rtx_code reversed;
5253
5254 /* Restarting if we generate a store-flag expression will cause
5255 us to loop. Just drop through in this case. */
5256
5257 /* If the result values are STORE_FLAG_VALUE and zero, we can
5258 just make the comparison operation. */
5259 if (true_rtx == const_true_rtx && false_rtx == const0_rtx)
5260 x = simplify_gen_relational (cond_code, mode, VOIDmode,
5261 cond, cop1);
5262 else if (true_rtx == const0_rtx && false_rtx == const_true_rtx
5263 && ((reversed = reversed_comparison_code_parts
5264 (cond_code, cond, cop1, NULL))
5265 != UNKNOWN))
5266 x = simplify_gen_relational (reversed, mode, VOIDmode,
5267 cond, cop1);
5268
5269 /* Likewise, we can make the negate of a comparison operation
5270 if the result values are - STORE_FLAG_VALUE and zero. */
5271 else if (CONST_INT_P (true_rtx)
5272 && INTVAL (true_rtx) == - STORE_FLAG_VALUE
5273 && false_rtx == const0_rtx)
5274 x = simplify_gen_unary (NEG, mode,
5275 simplify_gen_relational (cond_code,
5276 mode, VOIDmode,
5277 cond, cop1),
5278 mode);
5279 else if (CONST_INT_P (false_rtx)
5280 && INTVAL (false_rtx) == - STORE_FLAG_VALUE
5281 && true_rtx == const0_rtx
5282 && ((reversed = reversed_comparison_code_parts
5283 (cond_code, cond, cop1, NULL))
5284 != UNKNOWN))
5285 x = simplify_gen_unary (NEG, mode,
5286 simplify_gen_relational (reversed,
5287 mode, VOIDmode,
5288 cond, cop1),
5289 mode);
5290 else
5291 return gen_rtx_IF_THEN_ELSE (mode,
5292 simplify_gen_relational (cond_code,
5293 mode,
5294 VOIDmode,
5295 cond,
5296 cop1),
5297 true_rtx, false_rtx);
5298
5299 code = GET_CODE (x);
5300 op0_mode = VOIDmode;
5301 }
5302 }
5303 }
5304
5305 /* Try to fold this expression in case we have constants that weren't
5306 present before. */
5307 temp = 0;
5308 switch (GET_RTX_CLASS (code))
5309 {
5310 case RTX_UNARY:
5311 if (op0_mode == VOIDmode)
5312 op0_mode = GET_MODE (XEXP (x, 0));
5313 temp = simplify_unary_operation (code, mode, XEXP (x, 0), op0_mode);
5314 break;
5315 case RTX_COMPARE:
5316 case RTX_COMM_COMPARE:
5317 {
5318 enum machine_mode cmp_mode = GET_MODE (XEXP (x, 0));
5319 if (cmp_mode == VOIDmode)
5320 {
5321 cmp_mode = GET_MODE (XEXP (x, 1));
5322 if (cmp_mode == VOIDmode)
5323 cmp_mode = op0_mode;
5324 }
5325 temp = simplify_relational_operation (code, mode, cmp_mode,
5326 XEXP (x, 0), XEXP (x, 1));
5327 }
5328 break;
5329 case RTX_COMM_ARITH:
5330 case RTX_BIN_ARITH:
5331 temp = simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5332 break;
5333 case RTX_BITFIELD_OPS:
5334 case RTX_TERNARY:
5335 temp = simplify_ternary_operation (code, mode, op0_mode, XEXP (x, 0),
5336 XEXP (x, 1), XEXP (x, 2));
5337 break;
5338 default:
5339 break;
5340 }
5341
5342 if (temp)
5343 {
5344 x = temp;
5345 code = GET_CODE (temp);
5346 op0_mode = VOIDmode;
5347 mode = GET_MODE (temp);
5348 }
5349
5350 /* First see if we can apply the inverse distributive law. */
5351 if (code == PLUS || code == MINUS
5352 || code == AND || code == IOR || code == XOR)
5353 {
5354 x = apply_distributive_law (x);
5355 code = GET_CODE (x);
5356 op0_mode = VOIDmode;
5357 }
5358
5359 /* If CODE is an associative operation not otherwise handled, see if we
5360 can associate some operands. This can win if they are constants or
5361 if they are logically related (e.g. (a & b) & a).  */
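/* For example, (plus (plus A (const_int 3)) (const_int 5)) becomes
   (plus A (const_int 8)), and (and (and A B) A) becomes (and A B)
   because the repeated operand folds away.  */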
5362 if ((code == PLUS || code == MINUS || code == MULT || code == DIV
5363 || code == AND || code == IOR || code == XOR
5364 || code == SMAX || code == SMIN || code == UMAX || code == UMIN)
5365 && ((INTEGRAL_MODE_P (mode) && code != DIV)
5366 || (flag_associative_math && FLOAT_MODE_P (mode))))
5367 {
5368 if (GET_CODE (XEXP (x, 0)) == code)
5369 {
5370 rtx other = XEXP (XEXP (x, 0), 0);
5371 rtx inner_op0 = XEXP (XEXP (x, 0), 1);
5372 rtx inner_op1 = XEXP (x, 1);
5373 rtx inner;
5374
5375 /* Make sure we pass the constant operand if any as the second
5376 one if this is a commutative operation. */
5377 if (CONSTANT_P (inner_op0) && COMMUTATIVE_ARITH_P (x))
5378 {
5379 rtx tem = inner_op0;
5380 inner_op0 = inner_op1;
5381 inner_op1 = tem;
5382 }
5383 inner = simplify_binary_operation (code == MINUS ? PLUS
5384 : code == DIV ? MULT
5385 : code,
5386 mode, inner_op0, inner_op1);
5387
5388 /* For commutative operations, try the other pair if that one
5389 didn't simplify. */
5390 if (inner == 0 && COMMUTATIVE_ARITH_P (x))
5391 {
5392 other = XEXP (XEXP (x, 0), 1);
5393 inner = simplify_binary_operation (code, mode,
5394 XEXP (XEXP (x, 0), 0),
5395 XEXP (x, 1));
5396 }
5397
5398 if (inner)
5399 return simplify_gen_binary (code, mode, other, inner);
5400 }
5401 }
5402
5403 /* A little bit of algebraic simplification here. */
5404 switch (code)
5405 {
5406 case MEM:
5407 /* Ensure that our address has any ASHIFTs converted to MULT in case
5408 address-recognizing predicates are called later. */
5409 temp = make_compound_operation (XEXP (x, 0), MEM);
5410 SUBST (XEXP (x, 0), temp);
5411 break;
5412
5413 case SUBREG:
5414 if (op0_mode == VOIDmode)
5415 op0_mode = GET_MODE (SUBREG_REG (x));
5416
5417 /* See if this can be moved to simplify_subreg. */
5418 if (CONSTANT_P (SUBREG_REG (x))
5419 && subreg_lowpart_offset (mode, op0_mode) == SUBREG_BYTE (x)
5420 /* Don't call gen_lowpart if the inner mode
5421 is VOIDmode and we cannot simplify it, as SUBREG without
5422 inner mode is invalid. */
5423 && (GET_MODE (SUBREG_REG (x)) != VOIDmode
5424 || gen_lowpart_common (mode, SUBREG_REG (x))))
5425 return gen_lowpart (mode, SUBREG_REG (x));
5426
5427 if (GET_MODE_CLASS (GET_MODE (SUBREG_REG (x))) == MODE_CC)
5428 break;
5429 {
5430 rtx temp;
5431 temp = simplify_subreg (mode, SUBREG_REG (x), op0_mode,
5432 SUBREG_BYTE (x));
5433 if (temp)
5434 return temp;
5435 }
5436
5437 /* Don't change the mode of the MEM if that would change the meaning
5438 of the address. */
5439 if (MEM_P (SUBREG_REG (x))
5440 && (MEM_VOLATILE_P (SUBREG_REG (x))
5441 || mode_dependent_address_p (XEXP (SUBREG_REG (x), 0))))
5442 return gen_rtx_CLOBBER (mode, const0_rtx);
5443
5444 /* Note that we cannot do any narrowing for non-constants since
5445 we might have been counting on using the fact that some bits were
5446 zero. We now do this in the SET. */
5447
5448 break;
5449
5450 case NEG:
5451 temp = expand_compound_operation (XEXP (x, 0));
5452
5453 /* For C equal to the width of MODE minus 1, (neg (ashiftrt X C)) can be
5454 replaced by (lshiftrt X C). This will convert
5455 (neg (sign_extract X 1 Y)) to (zero_extract X 1 Y). */
5456
5457 if (GET_CODE (temp) == ASHIFTRT
5458 && CONST_INT_P (XEXP (temp, 1))
5459 && INTVAL (XEXP (temp, 1)) == GET_MODE_BITSIZE (mode) - 1)
5460 return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (temp, 0),
5461 INTVAL (XEXP (temp, 1)));
5462
5463 /* If X has only a single bit that might be nonzero, say, bit I, convert
5464 (neg X) to (ashiftrt (ashift X C-I) C-I) where C is the bitsize of
5465 MODE minus 1. This will convert (neg (zero_extract X 1 Y)) to
5466 (sign_extract X 1 Y). But only do this if TEMP isn't a register
5467 or a SUBREG of one since we'd be making the expression more
5468 complex if it was just a register. */
5469
5470 if (!REG_P (temp)
5471 && ! (GET_CODE (temp) == SUBREG
5472 && REG_P (SUBREG_REG (temp)))
5473 && (i = exact_log2 (nonzero_bits (temp, mode))) >= 0)
5474 {
5475 rtx temp1 = simplify_shift_const
5476 (NULL_RTX, ASHIFTRT, mode,
5477 simplify_shift_const (NULL_RTX, ASHIFT, mode, temp,
5478 GET_MODE_BITSIZE (mode) - 1 - i),
5479 GET_MODE_BITSIZE (mode) - 1 - i);
5480
5481 /* If all we did was surround TEMP with the two shifts, we
5482 haven't improved anything, so don't use it. Otherwise,
5483 we are better off with TEMP1. */
5484 if (GET_CODE (temp1) != ASHIFTRT
5485 || GET_CODE (XEXP (temp1, 0)) != ASHIFT
5486 || XEXP (XEXP (temp1, 0), 0) != temp)
5487 return temp1;
5488 }
5489 break;
5490
5491 case TRUNCATE:
5492 /* We can't handle truncation to a partial integer mode here
5493 because we don't know the real bitsize of the partial
5494 integer mode. */
5495 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
5496 break;
5497
5498 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
5499 SUBST (XEXP (x, 0),
5500 force_to_mode (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
5501 GET_MODE_MASK (mode), 0));
5502
5503 /* We can truncate a constant value and return it. */
5504 if (CONST_INT_P (XEXP (x, 0)))
5505 return gen_int_mode (INTVAL (XEXP (x, 0)), mode);
5506
5507 /* Similarly to what we do in simplify-rtx.c, a truncate of a register
5508 whose value is a comparison can be replaced with a subreg if
5509 STORE_FLAG_VALUE permits. */
5510 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
5511 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0
5512 && (temp = get_last_value (XEXP (x, 0)))
5513 && COMPARISON_P (temp))
5514 return gen_lowpart (mode, XEXP (x, 0));
5515 break;
5516
5517 case CONST:
5518 /* (const (const X)) can become (const X). Do it this way rather than
5519 returning the inner CONST since CONST can be shared with a
5520 REG_EQUAL note. */
5521 if (GET_CODE (XEXP (x, 0)) == CONST)
5522 SUBST (XEXP (x, 0), XEXP (XEXP (x, 0), 0));
5523 break;
5524
5525 #ifdef HAVE_lo_sum
5526 case LO_SUM:
5527 /* Convert (lo_sum (high FOO) FOO) to FOO. This is necessary so we
5528 can add in an offset. find_split_point will split this address up
5529 again if it doesn't match. */
5530 if (GET_CODE (XEXP (x, 0)) == HIGH
5531 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5532 return XEXP (x, 1);
5533 break;
5534 #endif
5535
5536 case PLUS:
5537 /* (plus (xor (and <foo> (const_int pow2 - 1)) <c>) <-c>)
5538 when c is (const_int (pow2 + 1) / 2) is a sign extension of a
5539 bit-field and can be replaced by either a sign_extend or a
5540 sign_extract. The `and' may be a zero_extend and the two
5541 <c>, -<c> constants may be reversed. */
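/* Concretely, assuming a 32-bit mode: pow2 == 0x100 gives the pattern
   (plus (xor (and X (const_int 0xff)) (const_int 0x80)) (const_int -0x80)),
   which sign-extends the low byte of X and is rewritten below as
   (ashiftrt (ashift X (const_int 24)) (const_int 24)).  */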
5542 if (GET_CODE (XEXP (x, 0)) == XOR
5543 && CONST_INT_P (XEXP (x, 1))
5544 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
5545 && INTVAL (XEXP (x, 1)) == -INTVAL (XEXP (XEXP (x, 0), 1))
5546 && ((i = exact_log2 (UINTVAL (XEXP (XEXP (x, 0), 1)))) >= 0
5547 || (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0)
5548 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
5549 && ((GET_CODE (XEXP (XEXP (x, 0), 0)) == AND
5550 && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
5551 && (UINTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1))
5552 == ((unsigned HOST_WIDE_INT) 1 << (i + 1)) - 1))
5553 || (GET_CODE (XEXP (XEXP (x, 0), 0)) == ZERO_EXTEND
5554 && (GET_MODE_BITSIZE (GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0)))
5555 == (unsigned int) i + 1))))
5556 return simplify_shift_const
5557 (NULL_RTX, ASHIFTRT, mode,
5558 simplify_shift_const (NULL_RTX, ASHIFT, mode,
5559 XEXP (XEXP (XEXP (x, 0), 0), 0),
5560 GET_MODE_BITSIZE (mode) - (i + 1)),
5561 GET_MODE_BITSIZE (mode) - (i + 1));
5562
5563 /* If only the low-order bit of X is possibly nonzero, (plus x -1)
5564 can become (ashiftrt (ashift (xor x 1) C) C) where C is
5565 the bitsize of the mode - 1. This allows simplification of
5566 "a = (b & 8) == 0;" */
5567 if (XEXP (x, 1) == constm1_rtx
5568 && !REG_P (XEXP (x, 0))
5569 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5570 && REG_P (SUBREG_REG (XEXP (x, 0))))
5571 && nonzero_bits (XEXP (x, 0), mode) == 1)
5572 return simplify_shift_const (NULL_RTX, ASHIFTRT, mode,
5573 simplify_shift_const (NULL_RTX, ASHIFT, mode,
5574 gen_rtx_XOR (mode, XEXP (x, 0), const1_rtx),
5575 GET_MODE_BITSIZE (mode) - 1),
5576 GET_MODE_BITSIZE (mode) - 1);
5577
5578 /* If we are adding two things that have no bits in common, convert
5579 the addition into an IOR. This will often be further simplified,
5580 for example in cases like ((a & 1) + (a & 2)), which can
5581 become a & 3. */
5582
5583 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
5584 && (nonzero_bits (XEXP (x, 0), mode)
5585 & nonzero_bits (XEXP (x, 1), mode)) == 0)
5586 {
5587 /* Try to simplify the expression further. */
5588 rtx tor = simplify_gen_binary (IOR, mode, XEXP (x, 0), XEXP (x, 1));
5589 temp = combine_simplify_rtx (tor, mode, in_dest);
5590
5591 /* If we could, great. If not, do not go ahead with the IOR
5592 replacement, since PLUS appears in many special purpose
5593 address arithmetic instructions. */
5594 if (GET_CODE (temp) != CLOBBER && temp != tor)
5595 return temp;
5596 }
5597 break;
5598
5599 case MINUS:
5600 /* (minus <foo> (and <foo> (const_int -pow2))) becomes
5601 (and <foo> (const_int pow2-1)) */
5602 if (GET_CODE (XEXP (x, 1)) == AND
5603 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
5604 && exact_log2 (-UINTVAL (XEXP (XEXP (x, 1), 1))) >= 0
5605 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
5606 return simplify_and_const_int (NULL_RTX, mode, XEXP (x, 0),
5607 -INTVAL (XEXP (XEXP (x, 1), 1)) - 1);
5608 break;
5609
5610 case MULT:
5611 /* If we have (mult (plus A B) C), apply the distributive law and then
5612 the inverse distributive law to see if things simplify. This
5613 occurs mostly in addresses, often when unrolling loops. */
5614
5615 if (GET_CODE (XEXP (x, 0)) == PLUS)
5616 {
5617 rtx result = distribute_and_simplify_rtx (x, 0);
5618 if (result)
5619 return result;
5620 }
5621
5622 /* Try to simplify a*(b/c) as (a*b)/c. */
5623 if (FLOAT_MODE_P (mode) && flag_associative_math
5624 && GET_CODE (XEXP (x, 0)) == DIV)
5625 {
5626 rtx tem = simplify_binary_operation (MULT, mode,
5627 XEXP (XEXP (x, 0), 0),
5628 XEXP (x, 1));
5629 if (tem)
5630 return simplify_gen_binary (DIV, mode, tem, XEXP (XEXP (x, 0), 1));
5631 }
5632 break;
5633
5634 case UDIV:
5635 /* If this is a divide by a power of two, treat it as a shift if
5636 its first operand is a shift. */
5637 if (CONST_INT_P (XEXP (x, 1))
5638 && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
5639 && (GET_CODE (XEXP (x, 0)) == ASHIFT
5640 || GET_CODE (XEXP (x, 0)) == LSHIFTRT
5641 || GET_CODE (XEXP (x, 0)) == ASHIFTRT
5642 || GET_CODE (XEXP (x, 0)) == ROTATE
5643 || GET_CODE (XEXP (x, 0)) == ROTATERT))
5644 return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (x, 0), i);
5645 break;
5646
5647 case EQ: case NE:
5648 case GT: case GTU: case GE: case GEU:
5649 case LT: case LTU: case LE: case LEU:
5650 case UNEQ: case LTGT:
5651 case UNGT: case UNGE:
5652 case UNLT: case UNLE:
5653 case UNORDERED: case ORDERED:
5654 /* If the first operand is a condition code, we can't do anything
5655 with it. */
5656 if (GET_CODE (XEXP (x, 0)) == COMPARE
5657 || (GET_MODE_CLASS (GET_MODE (XEXP (x, 0))) != MODE_CC
5658 && ! CC0_P (XEXP (x, 0))))
5659 {
5660 rtx op0 = XEXP (x, 0);
5661 rtx op1 = XEXP (x, 1);
5662 enum rtx_code new_code;
5663
5664 if (GET_CODE (op0) == COMPARE)
5665 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
5666
5667 /* Simplify our comparison, if possible. */
5668 new_code = simplify_comparison (code, &op0, &op1);
5669
5670 /* If STORE_FLAG_VALUE is 1, we can convert (ne x 0) to simply X
5671 if only the low-order bit is possibly nonzero in X (such as when
5672 X is a ZERO_EXTRACT of one bit). Similarly, we can convert EQ to
5673 (xor X 1) or (minus 1 X); we use the former. Finally, if X is
5674 known to be either 0 or -1, NE becomes a NEG and EQ becomes
5675 (plus X 1).
5676
5677 Remove any ZERO_EXTRACT we made when thinking this was a
5678 comparison. It may now be simpler to use, e.g., an AND. If a
5679 ZERO_EXTRACT is indeed appropriate, it will be placed back by
5680 the call to make_compound_operation in the SET case. */
5681
5682 if (STORE_FLAG_VALUE == 1
5683 && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
5684 && op1 == const0_rtx
5685 && mode == GET_MODE (op0)
5686 && nonzero_bits (op0, mode) == 1)
5687 return gen_lowpart (mode,
5688 expand_compound_operation (op0));
5689
5690 else if (STORE_FLAG_VALUE == 1
5691 && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
5692 && op1 == const0_rtx
5693 && mode == GET_MODE (op0)
5694 && (num_sign_bit_copies (op0, mode)
5695 == GET_MODE_BITSIZE (mode)))
5696 {
5697 op0 = expand_compound_operation (op0);
5698 return simplify_gen_unary (NEG, mode,
5699 gen_lowpart (mode, op0),
5700 mode);
5701 }
5702
5703 else if (STORE_FLAG_VALUE == 1
5704 && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
5705 && op1 == const0_rtx
5706 && mode == GET_MODE (op0)
5707 && nonzero_bits (op0, mode) == 1)
5708 {
5709 op0 = expand_compound_operation (op0);
5710 return simplify_gen_binary (XOR, mode,
5711 gen_lowpart (mode, op0),
5712 const1_rtx);
5713 }
5714
5715 else if (STORE_FLAG_VALUE == 1
5716 && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
5717 && op1 == const0_rtx
5718 && mode == GET_MODE (op0)
5719 && (num_sign_bit_copies (op0, mode)
5720 == GET_MODE_BITSIZE (mode)))
5721 {
5722 op0 = expand_compound_operation (op0);
5723 return plus_constant (gen_lowpart (mode, op0), 1);
5724 }
5725
5726 /* If STORE_FLAG_VALUE is -1, we have cases similar to
5727 those above. */
5728 if (STORE_FLAG_VALUE == -1
5729 && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
5730 && op1 == const0_rtx
5731 && (num_sign_bit_copies (op0, mode)
5732 == GET_MODE_BITSIZE (mode)))
5733 return gen_lowpart (mode,
5734 expand_compound_operation (op0));
5735
5736 else if (STORE_FLAG_VALUE == -1
5737 && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
5738 && op1 == const0_rtx
5739 && mode == GET_MODE (op0)
5740 && nonzero_bits (op0, mode) == 1)
5741 {
5742 op0 = expand_compound_operation (op0);
5743 return simplify_gen_unary (NEG, mode,
5744 gen_lowpart (mode, op0),
5745 mode);
5746 }
5747
5748 else if (STORE_FLAG_VALUE == -1
5749 && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
5750 && op1 == const0_rtx
5751 && mode == GET_MODE (op0)
5752 && (num_sign_bit_copies (op0, mode)
5753 == GET_MODE_BITSIZE (mode)))
5754 {
5755 op0 = expand_compound_operation (op0);
5756 return simplify_gen_unary (NOT, mode,
5757 gen_lowpart (mode, op0),
5758 mode);
5759 }
5760
5761 /* If X is 0/1, (eq X 0) is X-1. */
5762 else if (STORE_FLAG_VALUE == -1
5763 && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
5764 && op1 == const0_rtx
5765 && mode == GET_MODE (op0)
5766 && nonzero_bits (op0, mode) == 1)
5767 {
5768 op0 = expand_compound_operation (op0);
5769 return plus_constant (gen_lowpart (mode, op0), -1);
5770 }
5771
5772 /* If STORE_FLAG_VALUE says to just test the sign bit and X has just
5773 one bit that might be nonzero, we can convert (ne x 0) to
5774 (ashift x c) where C puts the bit in the sign bit. Remove any
5775 AND with STORE_FLAG_VALUE when we are done, since we are only
5776 going to test the sign bit. */
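/* For example, on a target whose STORE_FLAG_VALUE is the sign bit of a
   32-bit mode, if X is known to be either 0 or 8 (only bit 3 possibly
   nonzero), (ne X (const_int 0)) becomes (ashift X (const_int 28)),
   whose value is 0 or the sign-bit value, i.e. exactly STORE_FLAG_VALUE.  */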
5777 if (new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
5778 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
5779 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
5780 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
5781 && op1 == const0_rtx
5782 && mode == GET_MODE (op0)
5783 && (i = exact_log2 (nonzero_bits (op0, mode))) >= 0)
5784 {
5785 x = simplify_shift_const (NULL_RTX, ASHIFT, mode,
5786 expand_compound_operation (op0),
5787 GET_MODE_BITSIZE (mode) - 1 - i);
5788 if (GET_CODE (x) == AND && XEXP (x, 1) == const_true_rtx)
5789 return XEXP (x, 0);
5790 else
5791 return x;
5792 }
5793
5794 /* If the code changed, return a whole new comparison. */
5795 if (new_code != code)
5796 return gen_rtx_fmt_ee (new_code, mode, op0, op1);
5797
5798 /* Otherwise, keep this operation, but maybe change its operands.
5799 This also converts (ne (compare FOO BAR) 0) to (ne FOO BAR). */
5800 SUBST (XEXP (x, 0), op0);
5801 SUBST (XEXP (x, 1), op1);
5802 }
5803 break;
5804
5805 case IF_THEN_ELSE:
5806 return simplify_if_then_else (x);
5807
5808 case ZERO_EXTRACT:
5809 case SIGN_EXTRACT:
5810 case ZERO_EXTEND:
5811 case SIGN_EXTEND:
5812 /* If we are processing SET_DEST, we are done. */
5813 if (in_dest)
5814 return x;
5815
5816 return expand_compound_operation (x);
5817
5818 case SET:
5819 return simplify_set (x);
5820
5821 case AND:
5822 case IOR:
5823 return simplify_logical (x);
5824
5825 case ASHIFT:
5826 case LSHIFTRT:
5827 case ASHIFTRT:
5828 case ROTATE:
5829 case ROTATERT:
5830 /* If this is a shift by a constant amount, simplify it. */
5831 if (CONST_INT_P (XEXP (x, 1)))
5832 return simplify_shift_const (x, code, mode, XEXP (x, 0),
5833 INTVAL (XEXP (x, 1)));
5834
5835 else if (SHIFT_COUNT_TRUNCATED && !REG_P (XEXP (x, 1)))
5836 SUBST (XEXP (x, 1),
5837 force_to_mode (XEXP (x, 1), GET_MODE (XEXP (x, 1)),
5838 ((unsigned HOST_WIDE_INT) 1
5839 << exact_log2 (GET_MODE_BITSIZE (GET_MODE (x))))
5840 - 1,
5841 0));
5842 break;
5843
5844 default:
5845 break;
5846 }
5847
5848 return x;
5849 }
5850 \f
5851 /* Simplify X, an IF_THEN_ELSE expression. Return the new expression. */
5852
5853 static rtx
5854 simplify_if_then_else (rtx x)
5855 {
5856 enum machine_mode mode = GET_MODE (x);
5857 rtx cond = XEXP (x, 0);
5858 rtx true_rtx = XEXP (x, 1);
5859 rtx false_rtx = XEXP (x, 2);
5860 enum rtx_code true_code = GET_CODE (cond);
5861 int comparison_p = COMPARISON_P (cond);
5862 rtx temp;
5863 int i;
5864 enum rtx_code false_code;
5865 rtx reversed;
5866
5867 /* Simplify storing of the truth value. */
5868 if (comparison_p && true_rtx == const_true_rtx && false_rtx == const0_rtx)
5869 return simplify_gen_relational (true_code, mode, VOIDmode,
5870 XEXP (cond, 0), XEXP (cond, 1));
5871
5872 /* Also when the truth value has to be reversed. */
5873 if (comparison_p
5874 && true_rtx == const0_rtx && false_rtx == const_true_rtx
5875 && (reversed = reversed_comparison (cond, mode)))
5876 return reversed;
5877
5878 /* Sometimes we can simplify the arm of an IF_THEN_ELSE if a register used
5879 in it is being compared against certain values. Get the true and false
5880 comparisons and see if that says anything about the value of each arm. */
5881
5882 if (comparison_p
5883 && ((false_code = reversed_comparison_code (cond, NULL))
5884 != UNKNOWN)
5885 && REG_P (XEXP (cond, 0)))
5886 {
5887 HOST_WIDE_INT nzb;
5888 rtx from = XEXP (cond, 0);
5889 rtx true_val = XEXP (cond, 1);
5890 rtx false_val = true_val;
5891 int swapped = 0;
5892
5893 /* If FALSE_CODE is EQ, swap the codes and arms. */
5894
5895 if (false_code == EQ)
5896 {
5897 swapped = 1, true_code = EQ, false_code = NE;
5898 temp = true_rtx, true_rtx = false_rtx, false_rtx = temp;
5899 }
5900
5901 /* If we are comparing against zero and the expression being tested has
5902 only a single bit that might be nonzero, that is its value when it is
5903 not equal to zero. Similarly if it is known to be -1 or 0. */
5904
5905 if (true_code == EQ && true_val == const0_rtx
5906 && exact_log2 (nzb = nonzero_bits (from, GET_MODE (from))) >= 0)
5907 {
5908 false_code = EQ;
5909 false_val = GEN_INT (trunc_int_for_mode (nzb, GET_MODE (from)));
5910 }
5911 else if (true_code == EQ && true_val == const0_rtx
5912 && (num_sign_bit_copies (from, GET_MODE (from))
5913 == GET_MODE_BITSIZE (GET_MODE (from))))
5914 {
5915 false_code = EQ;
5916 false_val = constm1_rtx;
5917 }
5918
5919 /* Now simplify an arm if we know the value of the register in the
5920 branch and it is used in the arm. Be careful due to the potential
5921 of locally-shared RTL. */
5922
5923 if (reg_mentioned_p (from, true_rtx))
5924 true_rtx = subst (known_cond (copy_rtx (true_rtx), true_code,
5925 from, true_val),
5926 pc_rtx, pc_rtx, 0, 0);
5927 if (reg_mentioned_p (from, false_rtx))
5928 false_rtx = subst (known_cond (copy_rtx (false_rtx), false_code,
5929 from, false_val),
5930 pc_rtx, pc_rtx, 0, 0);
5931
5932 SUBST (XEXP (x, 1), swapped ? false_rtx : true_rtx);
5933 SUBST (XEXP (x, 2), swapped ? true_rtx : false_rtx);
5934
5935 true_rtx = XEXP (x, 1);
5936 false_rtx = XEXP (x, 2);
5937 true_code = GET_CODE (cond);
5938 }
5939
5940 /* If we have (if_then_else FOO (pc) (label_ref BAR)) and FOO can be
5941 reversed, do so to avoid needing two sets of patterns for
5942 subtract-and-branch insns. Similarly if we have a constant in the true
5943 arm, the false arm is the same as the first operand of the comparison, or
5944 the false arm is more complicated than the true arm. */
5945
5946 if (comparison_p
5947 && reversed_comparison_code (cond, NULL) != UNKNOWN
5948 && (true_rtx == pc_rtx
5949 || (CONSTANT_P (true_rtx)
5950 && !CONST_INT_P (false_rtx) && false_rtx != pc_rtx)
5951 || true_rtx == const0_rtx
5952 || (OBJECT_P (true_rtx) && !OBJECT_P (false_rtx))
5953 || (GET_CODE (true_rtx) == SUBREG && OBJECT_P (SUBREG_REG (true_rtx))
5954 && !OBJECT_P (false_rtx))
5955 || reg_mentioned_p (true_rtx, false_rtx)
5956 || rtx_equal_p (false_rtx, XEXP (cond, 0))))
5957 {
5958 true_code = reversed_comparison_code (cond, NULL);
5959 SUBST (XEXP (x, 0), reversed_comparison (cond, GET_MODE (cond)));
5960 SUBST (XEXP (x, 1), false_rtx);
5961 SUBST (XEXP (x, 2), true_rtx);
5962
5963 temp = true_rtx, true_rtx = false_rtx, false_rtx = temp;
5964 cond = XEXP (x, 0);
5965
5966 /* It is possible that the conditional has been simplified out. */
5967 true_code = GET_CODE (cond);
5968 comparison_p = COMPARISON_P (cond);
5969 }
5970
5971 /* If the two arms are identical, we don't need the comparison. */
5972
5973 if (rtx_equal_p (true_rtx, false_rtx) && ! side_effects_p (cond))
5974 return true_rtx;
5975
5976 /* Convert a == b ? b : a to "a". */
5977 if (true_code == EQ && ! side_effects_p (cond)
5978 && !HONOR_NANS (mode)
5979 && rtx_equal_p (XEXP (cond, 0), false_rtx)
5980 && rtx_equal_p (XEXP (cond, 1), true_rtx))
5981 return false_rtx;
5982 else if (true_code == NE && ! side_effects_p (cond)
5983 && !HONOR_NANS (mode)
5984 && rtx_equal_p (XEXP (cond, 0), true_rtx)
5985 && rtx_equal_p (XEXP (cond, 1), false_rtx))
5986 return true_rtx;
5987
5988 /* Look for cases where we have (abs x) or (neg (abs X)). */
5989
5990 if (GET_MODE_CLASS (mode) == MODE_INT
5991 && comparison_p
5992 && XEXP (cond, 1) == const0_rtx
5993 && GET_CODE (false_rtx) == NEG
5994 && rtx_equal_p (true_rtx, XEXP (false_rtx, 0))
5995 && rtx_equal_p (true_rtx, XEXP (cond, 0))
5996 && ! side_effects_p (true_rtx))
5997 switch (true_code)
5998 {
5999 case GT:
6000 case GE:
6001 return simplify_gen_unary (ABS, mode, true_rtx, mode);
6002 case LT:
6003 case LE:
6004 return
6005 simplify_gen_unary (NEG, mode,
6006 simplify_gen_unary (ABS, mode, true_rtx, mode),
6007 mode);
6008 default:
6009 break;
6010 }
6011
6012 /* Look for MIN or MAX. */
6013
6014 if ((! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
6015 && comparison_p
6016 && rtx_equal_p (XEXP (cond, 0), true_rtx)
6017 && rtx_equal_p (XEXP (cond, 1), false_rtx)
6018 && ! side_effects_p (cond))
6019 switch (true_code)
6020 {
6021 case GE:
6022 case GT:
6023 return simplify_gen_binary (SMAX, mode, true_rtx, false_rtx);
6024 case LE:
6025 case LT:
6026 return simplify_gen_binary (SMIN, mode, true_rtx, false_rtx);
6027 case GEU:
6028 case GTU:
6029 return simplify_gen_binary (UMAX, mode, true_rtx, false_rtx);
6030 case LEU:
6031 case LTU:
6032 return simplify_gen_binary (UMIN, mode, true_rtx, false_rtx);
6033 default:
6034 break;
6035 }
6036
6037 /* If we have (if_then_else COND (OP Z C1) Z) and OP is an identity when its
6038 second operand is zero, this can be done as (OP Z (mult COND C2)) where
6039 C2 = C1 * STORE_FLAG_VALUE. Similarly if OP has an outer ZERO_EXTEND or
6040 SIGN_EXTEND as long as Z is already extended (so we don't destroy it).
6041 We can do this kind of thing in some cases when STORE_FLAG_VALUE is
6042 neither 1 nor -1, but it isn't worth checking for. */
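/* For example, with STORE_FLAG_VALUE == 1:
   (if_then_else (ne A (const_int 0)) (plus B (const_int 4)) B)
   can become (plus B (mult (ne A (const_int 0)) (const_int 4))),
   since the multiply is 4 when the condition holds and 0 otherwise.  */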
6043
6044 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
6045 && comparison_p
6046 && GET_MODE_CLASS (mode) == MODE_INT
6047 && ! side_effects_p (x))
6048 {
6049 rtx t = make_compound_operation (true_rtx, SET);
6050 rtx f = make_compound_operation (false_rtx, SET);
6051 rtx cond_op0 = XEXP (cond, 0);
6052 rtx cond_op1 = XEXP (cond, 1);
6053 enum rtx_code op = UNKNOWN, extend_op = UNKNOWN;
6054 enum machine_mode m = mode;
6055 rtx z = 0, c1 = NULL_RTX;
6056
6057 if ((GET_CODE (t) == PLUS || GET_CODE (t) == MINUS
6058 || GET_CODE (t) == IOR || GET_CODE (t) == XOR
6059 || GET_CODE (t) == ASHIFT
6060 || GET_CODE (t) == LSHIFTRT || GET_CODE (t) == ASHIFTRT)
6061 && rtx_equal_p (XEXP (t, 0), f))
6062 c1 = XEXP (t, 1), op = GET_CODE (t), z = f;
6063
6064 /* If an identity-zero op is commutative, check whether there
6065 would be a match if we swapped the operands. */
6066 else if ((GET_CODE (t) == PLUS || GET_CODE (t) == IOR
6067 || GET_CODE (t) == XOR)
6068 && rtx_equal_p (XEXP (t, 1), f))
6069 c1 = XEXP (t, 0), op = GET_CODE (t), z = f;
6070 else if (GET_CODE (t) == SIGN_EXTEND
6071 && (GET_CODE (XEXP (t, 0)) == PLUS
6072 || GET_CODE (XEXP (t, 0)) == MINUS
6073 || GET_CODE (XEXP (t, 0)) == IOR
6074 || GET_CODE (XEXP (t, 0)) == XOR
6075 || GET_CODE (XEXP (t, 0)) == ASHIFT
6076 || GET_CODE (XEXP (t, 0)) == LSHIFTRT
6077 || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
6078 && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
6079 && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
6080 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
6081 && (num_sign_bit_copies (f, GET_MODE (f))
6082 > (unsigned int)
6083 (GET_MODE_BITSIZE (mode)
6084 - GET_MODE_BITSIZE (GET_MODE (XEXP (XEXP (t, 0), 0))))))
6085 {
6086 c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
6087 extend_op = SIGN_EXTEND;
6088 m = GET_MODE (XEXP (t, 0));
6089 }
6090 else if (GET_CODE (t) == SIGN_EXTEND
6091 && (GET_CODE (XEXP (t, 0)) == PLUS
6092 || GET_CODE (XEXP (t, 0)) == IOR
6093 || GET_CODE (XEXP (t, 0)) == XOR)
6094 && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
6095 && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
6096 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
6097 && (num_sign_bit_copies (f, GET_MODE (f))
6098 > (unsigned int)
6099 (GET_MODE_BITSIZE (mode)
6100 - GET_MODE_BITSIZE (GET_MODE (XEXP (XEXP (t, 0), 1))))))
6101 {
6102 c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
6103 extend_op = SIGN_EXTEND;
6104 m = GET_MODE (XEXP (t, 0));
6105 }
6106 else if (GET_CODE (t) == ZERO_EXTEND
6107 && (GET_CODE (XEXP (t, 0)) == PLUS
6108 || GET_CODE (XEXP (t, 0)) == MINUS
6109 || GET_CODE (XEXP (t, 0)) == IOR
6110 || GET_CODE (XEXP (t, 0)) == XOR
6111 || GET_CODE (XEXP (t, 0)) == ASHIFT
6112 || GET_CODE (XEXP (t, 0)) == LSHIFTRT
6113 || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
6114 && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
6115 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
6116 && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
6117 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
6118 && ((nonzero_bits (f, GET_MODE (f))
6119 & ~GET_MODE_MASK (GET_MODE (XEXP (XEXP (t, 0), 0))))
6120 == 0))
6121 {
6122 c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
6123 extend_op = ZERO_EXTEND;
6124 m = GET_MODE (XEXP (t, 0));
6125 }
6126 else if (GET_CODE (t) == ZERO_EXTEND
6127 && (GET_CODE (XEXP (t, 0)) == PLUS
6128 || GET_CODE (XEXP (t, 0)) == IOR
6129 || GET_CODE (XEXP (t, 0)) == XOR)
6130 && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
6131 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
6132 && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
6133 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
6134 && ((nonzero_bits (f, GET_MODE (f))
6135 & ~GET_MODE_MASK (GET_MODE (XEXP (XEXP (t, 0), 1))))
6136 == 0))
6137 {
6138 c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
6139 extend_op = ZERO_EXTEND;
6140 m = GET_MODE (XEXP (t, 0));
6141 }
6142
6143 if (z)
6144 {
6145 temp = subst (simplify_gen_relational (true_code, m, VOIDmode,
6146 cond_op0, cond_op1),
6147 pc_rtx, pc_rtx, 0, 0);
6148 temp = simplify_gen_binary (MULT, m, temp,
6149 simplify_gen_binary (MULT, m, c1,
6150 const_true_rtx));
6151 temp = subst (temp, pc_rtx, pc_rtx, 0, 0);
6152 temp = simplify_gen_binary (op, m, gen_lowpart (m, z), temp);
6153
6154 if (extend_op != UNKNOWN)
6155 temp = simplify_gen_unary (extend_op, mode, temp, m);
6156
6157 return temp;
6158 }
6159 }
6160
6161 /* If we have (if_then_else (ne A 0) C1 0) and either A is known to be 0 or
6162 1 and C1 is a single bit or A is known to be 0 or -1 and C1 is the
6163 negation of a single bit, we can convert this operation to a shift. We
6164 can actually do this more generally, but it doesn't seem worth it. */
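/* For instance, if A is known to be 0 or 1,
   (if_then_else (ne A (const_int 0)) (const_int 8) (const_int 0))
   becomes (ashift A (const_int 3)).  */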
6165
6166 if (true_code == NE && XEXP (cond, 1) == const0_rtx
6167 && false_rtx == const0_rtx && CONST_INT_P (true_rtx)
6168 && ((1 == nonzero_bits (XEXP (cond, 0), mode)
6169 && (i = exact_log2 (UINTVAL (true_rtx))) >= 0)
6170 || ((num_sign_bit_copies (XEXP (cond, 0), mode)
6171 == GET_MODE_BITSIZE (mode))
6172 && (i = exact_log2 (-UINTVAL (true_rtx))) >= 0)))
6173 return
6174 simplify_shift_const (NULL_RTX, ASHIFT, mode,
6175 gen_lowpart (mode, XEXP (cond, 0)), i);
6176
6177 /* (IF_THEN_ELSE (NE REG 0) (8) (0)) is REG when nonzero_bits (REG) == 8. */
6178 if (true_code == NE && XEXP (cond, 1) == const0_rtx
6179 && false_rtx == const0_rtx && CONST_INT_P (true_rtx)
6180 && GET_MODE (XEXP (cond, 0)) == mode
6181 && (UINTVAL (true_rtx) & GET_MODE_MASK (mode))
6182 == nonzero_bits (XEXP (cond, 0), mode)
6183 && (i = exact_log2 (UINTVAL (true_rtx) & GET_MODE_MASK (mode))) >= 0)
6184 return XEXP (cond, 0);
6185
6186 return x;
6187 }
6188 \f
6189 /* Simplify X, a SET expression. Return the new expression. */
6190
6191 static rtx
6192 simplify_set (rtx x)
6193 {
6194 rtx src = SET_SRC (x);
6195 rtx dest = SET_DEST (x);
6196 enum machine_mode mode
6197 = GET_MODE (src) != VOIDmode ? GET_MODE (src) : GET_MODE (dest);
6198 rtx other_insn;
6199 rtx *cc_use;
6200
6201 /* (set (pc) (return)) gets written as (return). */
6202 if (GET_CODE (dest) == PC && GET_CODE (src) == RETURN)
6203 return src;
6204
6205 /* Now that we know for sure which bits of SRC we are using, see if we can
6206 simplify the expression for the object knowing that we only need the
6207 low-order bits. */
6208
6209 if (GET_MODE_CLASS (mode) == MODE_INT
6210 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
6211 {
6212 src = force_to_mode (src, mode, ~(unsigned HOST_WIDE_INT) 0, 0);
6213 SUBST (SET_SRC (x), src);
6214 }
6215
6216 /* If we are setting CC0 or if the source is a COMPARE, look for the use of
6217 the comparison result and try to simplify it unless we already have used
6218 undobuf.other_insn. */
6219 if ((GET_MODE_CLASS (mode) == MODE_CC
6220 || GET_CODE (src) == COMPARE
6221 || CC0_P (dest))
6222 && (cc_use = find_single_use (dest, subst_insn, &other_insn)) != 0
6223 && (undobuf.other_insn == 0 || other_insn == undobuf.other_insn)
6224 && COMPARISON_P (*cc_use)
6225 && rtx_equal_p (XEXP (*cc_use, 0), dest))
6226 {
6227 enum rtx_code old_code = GET_CODE (*cc_use);
6228 enum rtx_code new_code;
6229 rtx op0, op1, tmp;
6230 int other_changed = 0;
6231 enum machine_mode compare_mode = GET_MODE (dest);
6232
6233 if (GET_CODE (src) == COMPARE)
6234 op0 = XEXP (src, 0), op1 = XEXP (src, 1);
6235 else
6236 op0 = src, op1 = CONST0_RTX (GET_MODE (src));
6237
6238 tmp = simplify_relational_operation (old_code, compare_mode, VOIDmode,
6239 op0, op1);
6240 if (!tmp)
6241 new_code = old_code;
6242 else if (!CONSTANT_P (tmp))
6243 {
6244 new_code = GET_CODE (tmp);
6245 op0 = XEXP (tmp, 0);
6246 op1 = XEXP (tmp, 1);
6247 }
6248 else
6249 {
6250 rtx pat = PATTERN (other_insn);
6251 undobuf.other_insn = other_insn;
6252 SUBST (*cc_use, tmp);
6253
6254 /* Attempt to simplify CC user. */
6255 if (GET_CODE (pat) == SET)
6256 {
6257 rtx new_rtx = simplify_rtx (SET_SRC (pat));
6258 if (new_rtx != NULL_RTX)
6259 SUBST (SET_SRC (pat), new_rtx);
6260 }
6261
6262 /* Convert X into a no-op move. */
6263 SUBST (SET_DEST (x), pc_rtx);
6264 SUBST (SET_SRC (x), pc_rtx);
6265 return x;
6266 }
6267
6268 /* Simplify our comparison, if possible. */
6269 new_code = simplify_comparison (new_code, &op0, &op1);
6270
6271 #ifdef SELECT_CC_MODE
6272 /* If this machine has CC modes other than CCmode, check to see if we
6273 need to use a different CC mode here. */
6274 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
6275 compare_mode = GET_MODE (op0);
6276 else
6277 compare_mode = SELECT_CC_MODE (new_code, op0, op1);
6278
6279 #ifndef HAVE_cc0
6280 /* If the mode changed, we have to change SET_DEST, the mode in the
6281 compare, and the mode in the place SET_DEST is used. If SET_DEST is
6282 a hard register, just build new versions with the proper mode. If it
6283 is a pseudo, we lose unless it is the only time we set the pseudo, in
6284 which case we can safely change its mode. */
6285 if (compare_mode != GET_MODE (dest))
6286 {
6287 if (can_change_dest_mode (dest, 0, compare_mode))
6288 {
6289 unsigned int regno = REGNO (dest);
6290 rtx new_dest;
6291
6292 if (regno < FIRST_PSEUDO_REGISTER)
6293 new_dest = gen_rtx_REG (compare_mode, regno);
6294 else
6295 {
6296 SUBST_MODE (regno_reg_rtx[regno], compare_mode);
6297 new_dest = regno_reg_rtx[regno];
6298 }
6299
6300 SUBST (SET_DEST (x), new_dest);
6301 SUBST (XEXP (*cc_use, 0), new_dest);
6302 other_changed = 1;
6303
6304 dest = new_dest;
6305 }
6306 }
6307 #endif /* cc0 */
6308 #endif /* SELECT_CC_MODE */
6309
6310 /* If the code changed, we have to build a new comparison in
6311 undobuf.other_insn. */
6312 if (new_code != old_code)
6313 {
6314 int other_changed_previously = other_changed;
6315 unsigned HOST_WIDE_INT mask;
6316 rtx old_cc_use = *cc_use;
6317
6318 SUBST (*cc_use, gen_rtx_fmt_ee (new_code, GET_MODE (*cc_use),
6319 dest, const0_rtx));
6320 other_changed = 1;
6321
6322 /* If the only change we made was to change an EQ into an NE or
6323 vice versa, OP0 has only one bit that might be nonzero, and OP1
6324 is zero, check if changing the user of the condition code will
6325 produce a valid insn. If it won't, we can keep the original code
6326 in that insn by surrounding our operation with an XOR. */
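/* For example, if OP0 is known to be either 0 or 8 and the simplification
   turned (ne OP0 (const_int 0)) into an EQ that the user insn cannot
   match, we keep the NE and test (xor OP0 (const_int 8)) instead, which
   is nonzero exactly when OP0 is zero.  */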
6327
6328 if (((old_code == NE && new_code == EQ)
6329 || (old_code == EQ && new_code == NE))
6330 && ! other_changed_previously && op1 == const0_rtx
6331 && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT
6332 && exact_log2 (mask = nonzero_bits (op0, GET_MODE (op0))) >= 0)
6333 {
6334 rtx pat = PATTERN (other_insn), note = 0;
6335
6336 if ((recog_for_combine (&pat, other_insn, &note) < 0
6337 && ! check_asm_operands (pat)))
6338 {
6339 *cc_use = old_cc_use;
6340 other_changed = 0;
6341
6342 op0 = simplify_gen_binary (XOR, GET_MODE (op0),
6343 op0, GEN_INT (mask));
6344 }
6345 }
6346 }
6347
6348 if (other_changed)
6349 undobuf.other_insn = other_insn;
6350
6351 /* Otherwise, if we didn't previously have a COMPARE in the
6352 correct mode, we need one. */
6353 if (GET_CODE (src) != COMPARE || GET_MODE (src) != compare_mode)
6354 {
6355 SUBST (SET_SRC (x), gen_rtx_COMPARE (compare_mode, op0, op1));
6356 src = SET_SRC (x);
6357 }
6358 else if (GET_MODE (op0) == compare_mode && op1 == const0_rtx)
6359 {
6360 SUBST (SET_SRC (x), op0);
6361 src = SET_SRC (x);
6362 }
6363 /* Otherwise, update the COMPARE if needed. */
6364 else if (XEXP (src, 0) != op0 || XEXP (src, 1) != op1)
6365 {
6366 SUBST (SET_SRC (x), gen_rtx_COMPARE (compare_mode, op0, op1));
6367 src = SET_SRC (x);
6368 }
6369 }
6370 else
6371 {
6372 /* Get SET_SRC in a form where we have placed back any
6373 compound expressions. Then do the checks below. */
6374 src = make_compound_operation (src, SET);
6375 SUBST (SET_SRC (x), src);
6376 }
6377
6378 /* If we have (set x (subreg:m1 (op:m2 ...) 0)) with OP being some operation,
6379 and X being a REG or (subreg (reg)), we may be able to convert this to
6380 (set (subreg:m2 x) (op)).
6381
6382 We can always do this if M1 is narrower than M2 because that means that
6383 we only care about the low bits of the result.
6384
6385 However, on machines without WORD_REGISTER_OPERATIONS defined, we cannot
6386 perform a narrower operation than requested since the high-order bits will
6387 be undefined. On machines where it is defined, this transformation is safe
6388 as long as M1 and M2 have the same number of words. */
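/* A sketch of the shape of this rewrite (modes chosen only for illustration,
   assuming QImode and SImode each fit in one word):
   (set (reg:QI D) (subreg:QI (plus:SI (reg:SI A) (const_int 1)) 0))
   becomes (set (subreg:SI (reg:QI D) 0) (plus:SI (reg:SI A) (const_int 1))).  */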
6389
6390 if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
6391 && !OBJECT_P (SUBREG_REG (src))
6392 && (((GET_MODE_SIZE (GET_MODE (src)) + (UNITS_PER_WORD - 1))
6393 / UNITS_PER_WORD)
6394 == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))
6395 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))
6396 #ifndef WORD_REGISTER_OPERATIONS
6397 && (GET_MODE_SIZE (GET_MODE (src))
6398 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (src))))
6399 #endif
6400 #ifdef CANNOT_CHANGE_MODE_CLASS
6401 && ! (REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER
6402 && REG_CANNOT_CHANGE_MODE_P (REGNO (dest),
6403 GET_MODE (SUBREG_REG (src)),
6404 GET_MODE (src)))
6405 #endif
6406 && (REG_P (dest)
6407 || (GET_CODE (dest) == SUBREG
6408 && REG_P (SUBREG_REG (dest)))))
6409 {
6410 SUBST (SET_DEST (x),
6411 gen_lowpart (GET_MODE (SUBREG_REG (src)),
6412 dest));
6413 SUBST (SET_SRC (x), SUBREG_REG (src));
6414
6415 src = SET_SRC (x), dest = SET_DEST (x);
6416 }
6417
6418 #ifdef HAVE_cc0
6419 /* If we have (set (cc0) (subreg ...)), we try to remove the subreg
6420 in SRC. */
6421 if (dest == cc0_rtx
6422 && GET_CODE (src) == SUBREG
6423 && subreg_lowpart_p (src)
6424 && (GET_MODE_BITSIZE (GET_MODE (src))
6425 < GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (src)))))
6426 {
6427 rtx inner = SUBREG_REG (src);
6428 enum machine_mode inner_mode = GET_MODE (inner);
6429
6430 /* Here we make sure that INNER cannot have the sign bit of the narrower mode (or any higher bit) set. */
6431 if (GET_MODE_BITSIZE (inner_mode) <= HOST_BITS_PER_WIDE_INT
6432 && (nonzero_bits (inner, inner_mode)
6433 < ((unsigned HOST_WIDE_INT) 1
6434 << (GET_MODE_BITSIZE (GET_MODE (src)) - 1))))
6435 {
6436 SUBST (SET_SRC (x), inner);
6437 src = SET_SRC (x);
6438 }
6439 }
6440 #endif
6441
6442 #ifdef LOAD_EXTEND_OP
6443 /* If we have (set FOO (subreg:M (mem:N BAR) 0)) with M wider than N, this
6444 would require a paradoxical subreg. Replace the subreg with the
6445 extension given by LOAD_EXTEND_OP to avoid the reload that would otherwise be required. */
6446
6447 if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
6448 && INTEGRAL_MODE_P (GET_MODE (SUBREG_REG (src)))
6449 && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (src))) != UNKNOWN
6450 && SUBREG_BYTE (src) == 0
6451 && (GET_MODE_SIZE (GET_MODE (src))
6452 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (src))))
6453 && MEM_P (SUBREG_REG (src)))
6454 {
6455 SUBST (SET_SRC (x),
6456 gen_rtx_fmt_e (LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (src))),
6457 GET_MODE (src), SUBREG_REG (src)));
6458
6459 src = SET_SRC (x);
6460 }
6461 #endif
6462
6463 /* If we don't have a conditional move, SET_SRC is an IF_THEN_ELSE, and we
6464 are comparing an item known to be 0 or -1 against 0, use a logical
6465 operation instead. Check for one of the arms being an IOR of the other
6466 arm with some value. We compute three terms to be IOR'ed together. In
6467 practice, at most two will be nonzero. Then we do the IOR's. */
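/* For example, if A is known to be 0 or -1:
   (if_then_else (ne A (const_int 0)) B C) turns into
   (ior (and A B) (and (not A) C)); the three terms computed below are
   (const_int 0), (and A B) and (and (not A) C), and the zero term
   disappears when the IORs are generated.  */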
6468
6469 if (GET_CODE (dest) != PC
6470 && GET_CODE (src) == IF_THEN_ELSE
6471 && GET_MODE_CLASS (GET_MODE (src)) == MODE_INT
6472 && (GET_CODE (XEXP (src, 0)) == EQ || GET_CODE (XEXP (src, 0)) == NE)
6473 && XEXP (XEXP (src, 0), 1) == const0_rtx
6474 && GET_MODE (src) == GET_MODE (XEXP (XEXP (src, 0), 0))
6475 #ifdef HAVE_conditional_move
6476 && ! can_conditionally_move_p (GET_MODE (src))
6477 #endif
6478 && (num_sign_bit_copies (XEXP (XEXP (src, 0), 0),
6479 GET_MODE (XEXP (XEXP (src, 0), 0)))
6480 == GET_MODE_BITSIZE (GET_MODE (XEXP (XEXP (src, 0), 0))))
6481 && ! side_effects_p (src))
6482 {
6483 rtx true_rtx = (GET_CODE (XEXP (src, 0)) == NE
6484 ? XEXP (src, 1) : XEXP (src, 2));
6485 rtx false_rtx = (GET_CODE (XEXP (src, 0)) == NE
6486 ? XEXP (src, 2) : XEXP (src, 1));
6487 rtx term1 = const0_rtx, term2, term3;
6488
6489 if (GET_CODE (true_rtx) == IOR
6490 && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
6491 term1 = false_rtx, true_rtx = XEXP (true_rtx, 1), false_rtx = const0_rtx;
6492 else if (GET_CODE (true_rtx) == IOR
6493 && rtx_equal_p (XEXP (true_rtx, 1), false_rtx))
6494 term1 = false_rtx, true_rtx = XEXP (true_rtx, 0), false_rtx = const0_rtx;
6495 else if (GET_CODE (false_rtx) == IOR
6496 && rtx_equal_p (XEXP (false_rtx, 0), true_rtx))
6497 term1 = true_rtx, false_rtx = XEXP (false_rtx, 1), true_rtx = const0_rtx;
6498 else if (GET_CODE (false_rtx) == IOR
6499 && rtx_equal_p (XEXP (false_rtx, 1), true_rtx))
6500 term1 = true_rtx, false_rtx = XEXP (false_rtx, 0), true_rtx = const0_rtx;
6501
6502 term2 = simplify_gen_binary (AND, GET_MODE (src),
6503 XEXP (XEXP (src, 0), 0), true_rtx);
6504 term3 = simplify_gen_binary (AND, GET_MODE (src),
6505 simplify_gen_unary (NOT, GET_MODE (src),
6506 XEXP (XEXP (src, 0), 0),
6507 GET_MODE (src)),
6508 false_rtx);
6509
6510 SUBST (SET_SRC (x),
6511 simplify_gen_binary (IOR, GET_MODE (src),
6512 simplify_gen_binary (IOR, GET_MODE (src),
6513 term1, term2),
6514 term3));
6515
6516 src = SET_SRC (x);
6517 }
6518
6519 /* If either SRC or DEST is a CLOBBER of (const_int 0), make this
6520 whole thing fail. */
6521 if (GET_CODE (src) == CLOBBER && XEXP (src, 0) == const0_rtx)
6522 return src;
6523 else if (GET_CODE (dest) == CLOBBER && XEXP (dest, 0) == const0_rtx)
6524 return dest;
6525 else
6526 /* Convert this into a field assignment operation, if possible. */
6527 return make_field_assignment (x);
6528 }
6529 \f
6530 /* Simplify X, an AND, IOR, or XOR operation, and return the simplified
6531 result. */
6532
6533 static rtx
6534 simplify_logical (rtx x)
6535 {
6536 enum machine_mode mode = GET_MODE (x);
6537 rtx op0 = XEXP (x, 0);
6538 rtx op1 = XEXP (x, 1);
6539
6540 switch (GET_CODE (x))
6541 {
6542 case AND:
6543 /* We can call simplify_and_const_int only if we don't lose
6544 any (sign) bits when converting INTVAL (op1) to
6545 "unsigned HOST_WIDE_INT". */
6546 if (CONST_INT_P (op1)
6547 && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
6548 || INTVAL (op1) > 0))
6549 {
6550 x = simplify_and_const_int (x, mode, op0, INTVAL (op1));
6551 if (GET_CODE (x) != AND)
6552 return x;
6553
6554 op0 = XEXP (x, 0);
6555 op1 = XEXP (x, 1);
6556 }
6557
6558 /* If we have any of (and (ior A B) C) or (and (xor A B) C),
6559 apply the distributive law and then the inverse distributive
6560 law to see if things simplify. */
6561 if (GET_CODE (op0) == IOR || GET_CODE (op0) == XOR)
6562 {
6563 rtx result = distribute_and_simplify_rtx (x, 0);
6564 if (result)
6565 return result;
6566 }
6567 if (GET_CODE (op1) == IOR || GET_CODE (op1) == XOR)
6568 {
6569 rtx result = distribute_and_simplify_rtx (x, 1);
6570 if (result)
6571 return result;
6572 }
6573 break;
6574
6575 case IOR:
6576 /* If we have (ior (and A B) C), apply the distributive law and then
6577 the inverse distributive law to see if things simplify. */
6578
6579 if (GET_CODE (op0) == AND)
6580 {
6581 rtx result = distribute_and_simplify_rtx (x, 0);
6582 if (result)
6583 return result;
6584 }
6585
6586 if (GET_CODE (op1) == AND)
6587 {
6588 rtx result = distribute_and_simplify_rtx (x, 1);
6589 if (result)
6590 return result;
6591 }
6592 break;
6593
6594 default:
6595 gcc_unreachable ();
6596 }
6597
6598 return x;
6599 }
6600 \f
6601 /* We consider ZERO_EXTRACT, SIGN_EXTRACT, and SIGN_EXTEND as "compound
6602 operations" because they can be replaced with two more basic operations.
6603 ZERO_EXTEND is also considered "compound" because it can be replaced with
6604 an AND operation, which is simpler, though only one operation.
6605
6606 The function expand_compound_operation is called with an rtx expression
6607 and will convert it to the appropriate shifts and AND operations,
6608 simplifying at each stage.
6609
6610 The function make_compound_operation is called to convert an expression
6611 consisting of shifts and ANDs into the equivalent compound expression.
6612 It is the inverse of this function, loosely speaking. */
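/* For instance, assuming a 32-bit SImode: (sign_extract:SI X (const_int 8)
   (const_int 0)) corresponds to (ashiftrt:SI (ashift:SI X (const_int 24))
   (const_int 24)), and a zero extension of an 8-bit value corresponds to an
   AND with (const_int 255); expand_compound_operation produces the shift/AND
   form and make_compound_operation rebuilds the compound form.  */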
6613
6614 static rtx
6615 expand_compound_operation (rtx x)
6616 {
6617 unsigned HOST_WIDE_INT pos = 0, len;
6618 int unsignedp = 0;
6619 unsigned int modewidth;
6620 rtx tem;
6621
6622 switch (GET_CODE (x))
6623 {
6624 case ZERO_EXTEND:
6625 unsignedp = 1;
6626 case SIGN_EXTEND:
6627 /* We can't necessarily use a const_int for a multiword mode;
6628 it depends on implicitly extending the value.
6629 Since we don't know the right way to extend it,
6630 we can't tell whether the implicit way is right.
6631
6632 Even for a mode that is no wider than a const_int,
6633 we can't win, because we need to sign extend one of its bits through
6634 the rest of it, and we don't know which bit. */
6635 if (CONST_INT_P (XEXP (x, 0)))
6636 return x;
6637
6638 /* Return if (subreg:MODE FROM 0) is not a safe replacement for
6639 (zero_extend:MODE FROM) or (sign_extend:MODE FROM). It is for any MEM
6640 because (SUBREG (MEM...)) is guaranteed to cause the MEM to be
6641 reloaded. If not for that, MEM's would very rarely be safe.
6642
6643 Reject MODEs bigger than a word, because we might not be able
6644 to reference a two-register group starting with an arbitrary register
6645 (and currently gen_lowpart might crash for a SUBREG). */
6646
6647 if (GET_MODE_SIZE (GET_MODE (XEXP (x, 0))) > UNITS_PER_WORD)
6648 return x;
6649
6650 /* Reject MODEs that aren't scalar integers because turning vector
6651 or complex modes into shifts causes problems. */
6652
6653 if (! SCALAR_INT_MODE_P (GET_MODE (XEXP (x, 0))))
6654 return x;
6655
6656 len = GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)));
6657 /* If the inner object has VOIDmode (the only way this can happen
6658 is if it is an ASM_OPERANDS), we can't do anything since we don't
6659 know how much masking to do. */
6660 if (len == 0)
6661 return x;
6662
6663 break;
6664
6665 case ZERO_EXTRACT:
6666 unsignedp = 1;
6667
6668 /* ... fall through ... */
6669
6670 case SIGN_EXTRACT:
6671 /* If the operand is a CLOBBER, just return it. */
6672 if (GET_CODE (XEXP (x, 0)) == CLOBBER)
6673 return XEXP (x, 0);
6674
6675 if (!CONST_INT_P (XEXP (x, 1))
6676 || !CONST_INT_P (XEXP (x, 2))
6677 || GET_MODE (XEXP (x, 0)) == VOIDmode)
6678 return x;
6679
6680 /* Reject MODEs that aren't scalar integers because turning vector
6681 or complex modes into shifts causes problems. */
6682
6683 if (! SCALAR_INT_MODE_P (GET_MODE (XEXP (x, 0))))
6684 return x;
6685
6686 len = INTVAL (XEXP (x, 1));
6687 pos = INTVAL (XEXP (x, 2));
6688
6689 /* This should stay within the object being extracted, fail otherwise. */
6690 if (len + pos > GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0))))
6691 return x;
6692
6693 if (BITS_BIG_ENDIAN)
6694 pos = GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0))) - len - pos;
6695
6696 break;
6697
6698 default:
6699 return x;
6700 }
6701 /* Convert sign extension to zero extension, if we know that the high
6702 bit is not set, as this is easier to optimize. It will be converted
6703 back to a cheaper alternative in make_extraction. */
6704 if (GET_CODE (x) == SIGN_EXTEND
6705 && (GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
6706 && ((nonzero_bits (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
6707 & ~(((unsigned HOST_WIDE_INT)
6708 GET_MODE_MASK (GET_MODE (XEXP (x, 0))))
6709 >> 1))
6710 == 0)))
6711 {
6712 rtx temp = gen_rtx_ZERO_EXTEND (GET_MODE (x), XEXP (x, 0));
6713 rtx temp2 = expand_compound_operation (temp);
6714
6715 /* Make sure this is a profitable operation. */
6716 if (rtx_cost (x, SET, optimize_this_for_speed_p)
6717 > rtx_cost (temp2, SET, optimize_this_for_speed_p))
6718 return temp2;
6719 else if (rtx_cost (x, SET, optimize_this_for_speed_p)
6720 > rtx_cost (temp, SET, optimize_this_for_speed_p))
6721 return temp;
6722 else
6723 return x;
6724 }
6725
6726 /* We can optimize some special cases of ZERO_EXTEND. */
6727 if (GET_CODE (x) == ZERO_EXTEND)
6728 {
6729 /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI if we
6730 know that the last value didn't have any inappropriate bits
6731 set. */
6732 if (GET_CODE (XEXP (x, 0)) == TRUNCATE
6733 && GET_MODE (XEXP (XEXP (x, 0), 0)) == GET_MODE (x)
6734 && GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
6735 && (nonzero_bits (XEXP (XEXP (x, 0), 0), GET_MODE (x))
6736 & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
6737 return XEXP (XEXP (x, 0), 0);
6738
6739 /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)). */
6740 if (GET_CODE (XEXP (x, 0)) == SUBREG
6741 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == GET_MODE (x)
6742 && subreg_lowpart_p (XEXP (x, 0))
6743 && GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
6744 && (nonzero_bits (SUBREG_REG (XEXP (x, 0)), GET_MODE (x))
6745 & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
6746 return SUBREG_REG (XEXP (x, 0));
6747
6748 /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI when foo
6749 is a comparison and STORE_FLAG_VALUE permits. This is like
6750 the first case, but it works even when GET_MODE (x) is larger
6751 than HOST_WIDE_INT. */
6752 if (GET_CODE (XEXP (x, 0)) == TRUNCATE
6753 && GET_MODE (XEXP (XEXP (x, 0), 0)) == GET_MODE (x)
6754 && COMPARISON_P (XEXP (XEXP (x, 0), 0))
6755 && (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))
6756 <= HOST_BITS_PER_WIDE_INT)
6757 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
6758 return XEXP (XEXP (x, 0), 0);
6759
6760 /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)). */
6761 if (GET_CODE (XEXP (x, 0)) == SUBREG
6762 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == GET_MODE (x)
6763 && subreg_lowpart_p (XEXP (x, 0))
6764 && COMPARISON_P (SUBREG_REG (XEXP (x, 0)))
6765 && (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))
6766 <= HOST_BITS_PER_WIDE_INT)
6767 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
6768 return SUBREG_REG (XEXP (x, 0));
6769
6770 }
6771
6772 /* If we reach here, we want to return a pair of shifts. The inner
6773 shift is a left shift of BITSIZE - POS - LEN bits. The outer
6774 shift is a right shift of BITSIZE - LEN bits. It is arithmetic or
6775 logical depending on the value of UNSIGNEDP.
6776
6777 If this was a ZERO_EXTEND or ZERO_EXTRACT, this pair of shifts will be
6778 converted into an AND of a shift.
6779
6780 We must check for the case where the left shift would have a negative
6781 count. This can happen in a case like (x >> 31) & 255 on machines
6782 that can't shift by a constant. On those machines, we would first
6783 combine the shift with the AND to produce a variable-position
6784 extraction. Then the constant of 31 would be substituted in
6785 to produce such a position. */
6786
6787 modewidth = GET_MODE_BITSIZE (GET_MODE (x));
6788 if (modewidth >= pos + len)
6789 {
6790 enum machine_mode mode = GET_MODE (x);
6791 tem = gen_lowpart (mode, XEXP (x, 0));
6792 if (!tem || GET_CODE (tem) == CLOBBER)
6793 return x;
6794 tem = simplify_shift_const (NULL_RTX, ASHIFT, mode,
6795 tem, modewidth - pos - len);
6796 tem = simplify_shift_const (NULL_RTX, unsignedp ? LSHIFTRT : ASHIFTRT,
6797 mode, tem, modewidth - len);
6798 }
6799 else if (unsignedp && len < HOST_BITS_PER_WIDE_INT)
6800 tem = simplify_and_const_int (NULL_RTX, GET_MODE (x),
6801 simplify_shift_const (NULL_RTX, LSHIFTRT,
6802 GET_MODE (x),
6803 XEXP (x, 0), pos),
6804 ((unsigned HOST_WIDE_INT) 1 << len) - 1);
6805 else
6806 /* We can't handle any other cases. */
6807 return x;
6808
6809 /* If we couldn't do this for some reason, return the original
6810 expression. */
6811 if (GET_CODE (tem) == CLOBBER)
6812 return x;
6813
6814 return tem;
6815 }
6816 \f
6817 /* X is a SET which contains an assignment of one object into
6818 a part of another (such as a bit-field assignment, STRICT_LOW_PART,
6819 or certain SUBREGS). If possible, convert it into a series of
6820 logical operations.
6821
6822 We half-heartedly support variable positions, but do not at all
6823 support variable lengths. */
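/* As a sketch of what the loop below builds (ignoring the constant
   folding done by the simplify_* routines), an assignment to a LEN-bit
   field of INNER at position POS becomes

       (set INNER (ior (and INNER (not (ashift MASK POS)))
                       (ashift (and SRC MASK) POS)))

   where MASK is the constant (1 << LEN) - 1: the AND clears the field
   and the shifted, masked source is IORed into it.  */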
6824
6825 static const_rtx
6826 expand_field_assignment (const_rtx x)
6827 {
6828 rtx inner;
6829 rtx pos; /* Always counts from low bit. */
6830 int len;
6831 rtx mask, cleared, masked;
6832 enum machine_mode compute_mode;
6833
6834 /* Loop until we find something we can't simplify. */
6835 while (1)
6836 {
6837 if (GET_CODE (SET_DEST (x)) == STRICT_LOW_PART
6838 && GET_CODE (XEXP (SET_DEST (x), 0)) == SUBREG)
6839 {
6840 inner = SUBREG_REG (XEXP (SET_DEST (x), 0));
6841 len = GET_MODE_BITSIZE (GET_MODE (XEXP (SET_DEST (x), 0)));
6842 pos = GEN_INT (subreg_lsb (XEXP (SET_DEST (x), 0)));
6843 }
6844 else if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
6845 && CONST_INT_P (XEXP (SET_DEST (x), 1)))
6846 {
6847 inner = XEXP (SET_DEST (x), 0);
6848 len = INTVAL (XEXP (SET_DEST (x), 1));
6849 pos = XEXP (SET_DEST (x), 2);
6850
6851 /* A constant position should stay within the width of INNER. */
6852 if (CONST_INT_P (pos)
6853 && INTVAL (pos) + len > GET_MODE_BITSIZE (GET_MODE (inner)))
6854 break;
6855
6856 if (BITS_BIG_ENDIAN)
6857 {
6858 if (CONST_INT_P (pos))
6859 pos = GEN_INT (GET_MODE_BITSIZE (GET_MODE (inner)) - len
6860 - INTVAL (pos));
6861 else if (GET_CODE (pos) == MINUS
6862 && CONST_INT_P (XEXP (pos, 1))
6863 && (INTVAL (XEXP (pos, 1))
6864 == GET_MODE_BITSIZE (GET_MODE (inner)) - len))
6865 /* If position is ADJUST - X, new position is X. */
6866 pos = XEXP (pos, 0);
6867 else
6868 pos = simplify_gen_binary (MINUS, GET_MODE (pos),
6869 GEN_INT (GET_MODE_BITSIZE (
6870 GET_MODE (inner))
6871 - len),
6872 pos);
6873 }
6874 }
6875
6876 /* A SUBREG between two modes that occupy the same numbers of words
6877 can be done by moving the SUBREG to the source. */
6878 else if (GET_CODE (SET_DEST (x)) == SUBREG
6879 /* We need SUBREGs to compute nonzero_bits properly. */
6880 && nonzero_sign_valid
6881 && (((GET_MODE_SIZE (GET_MODE (SET_DEST (x)))
6882 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
6883 == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (x))))
6884 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
6885 {
6886 x = gen_rtx_SET (VOIDmode, SUBREG_REG (SET_DEST (x)),
6887 gen_lowpart
6888 (GET_MODE (SUBREG_REG (SET_DEST (x))),
6889 SET_SRC (x)));
6890 continue;
6891 }
6892 else
6893 break;
6894
6895 while (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner))
6896 inner = SUBREG_REG (inner);
6897
6898 compute_mode = GET_MODE (inner);
6899
6900 /* Don't attempt bitwise arithmetic on non-scalar integer modes. */
6901 if (! SCALAR_INT_MODE_P (compute_mode))
6902 {
6903 enum machine_mode imode;
6904
6905 /* Don't do anything for vector or complex integral types. */
6906 if (! FLOAT_MODE_P (compute_mode))
6907 break;
6908
6909 /* Try to find an integral mode to pun with. */
6910 imode = mode_for_size (GET_MODE_BITSIZE (compute_mode), MODE_INT, 0);
6911 if (imode == BLKmode)
6912 break;
6913
6914 compute_mode = imode;
6915 inner = gen_lowpart (imode, inner);
6916 }
6917
6918 /* Compute a mask of LEN bits, if we can do this on the host machine. */
6919 if (len >= HOST_BITS_PER_WIDE_INT)
6920 break;
6921
6922 /* Now compute the equivalent expression. Make a copy of INNER
6923 for the SET_DEST in case it is a MEM into which we will substitute;
6924 we don't want shared RTL in that case. */
6925 mask = GEN_INT (((unsigned HOST_WIDE_INT) 1 << len) - 1);
6926 cleared = simplify_gen_binary (AND, compute_mode,
6927 simplify_gen_unary (NOT, compute_mode,
6928 simplify_gen_binary (ASHIFT,
6929 compute_mode,
6930 mask, pos),
6931 compute_mode),
6932 inner);
6933 masked = simplify_gen_binary (ASHIFT, compute_mode,
6934 simplify_gen_binary (
6935 AND, compute_mode,
6936 gen_lowpart (compute_mode, SET_SRC (x)),
6937 mask),
6938 pos);
6939
6940 x = gen_rtx_SET (VOIDmode, copy_rtx (inner),
6941 simplify_gen_binary (IOR, compute_mode,
6942 cleared, masked));
6943 }
6944
6945 return x;
6946 }
6947 \f
6948 /* Return an RTX for a reference to LEN bits of INNER. If POS_RTX is nonzero,
6949 it is an RTX that represents a variable starting position; otherwise,
6950 POS is the (constant) starting bit position (counted from the LSB).
6951
6952 UNSIGNEDP is nonzero for an unsigned reference and zero for a
6953 signed reference.
6954
6955 IN_DEST is nonzero if this is a reference in the destination of a
6956 SET. This is used when a ZERO_ or SIGN_EXTRACT isn't needed. If nonzero,
6957 a STRICT_LOW_PART will be used, if zero, ZERO_EXTEND or SIGN_EXTEND will
6958 be used.
6959
6960 IN_COMPARE is nonzero if we are in a COMPARE. This means that a
6961 ZERO_EXTRACT should be built even for bits starting at bit 0.
6962
6963 MODE is the desired mode of the result (if IN_DEST == 0).
6964
6965 The result is an RTX for the extraction or NULL_RTX if the target
6966 can't handle it. */
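/* For example, a caller wanting bits 8..15 of INNER as an unsigned
   SImode value would use

       make_extraction (SImode, inner, 8, NULL_RTX, 8, 1, 0, 0)

   and, depending on INNER and on the target's extraction patterns,
   might get back a ZERO_EXTRACT, a ZERO_EXTEND of a narrower piece,
   or NULL_RTX if it cannot be done.  */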
6967
6968 static rtx
6969 make_extraction (enum machine_mode mode, rtx inner, HOST_WIDE_INT pos,
6970 rtx pos_rtx, unsigned HOST_WIDE_INT len, int unsignedp,
6971 int in_dest, int in_compare)
6972 {
6973 /* This mode describes the size of the storage area
6974 to fetch the overall value from. Within that, we
6975 ignore the POS lowest bits, etc. */
6976 enum machine_mode is_mode = GET_MODE (inner);
6977 enum machine_mode inner_mode;
6978 enum machine_mode wanted_inner_mode;
6979 enum machine_mode wanted_inner_reg_mode = word_mode;
6980 enum machine_mode pos_mode = word_mode;
6981 enum machine_mode extraction_mode = word_mode;
6982 enum machine_mode tmode = mode_for_size (len, MODE_INT, 1);
6983 rtx new_rtx = 0;
6984 rtx orig_pos_rtx = pos_rtx;
6985 HOST_WIDE_INT orig_pos;
6986
6987 if (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner))
6988 {
6989 /* If going from (subreg:SI (mem:QI ...)) to (mem:QI ...),
6990 consider just the QI as the memory to extract from.
6991 The subreg adds or removes high bits; its mode is
6992 irrelevant to the meaning of this extraction,
6993 since POS and LEN count from the lsb. */
6994 if (MEM_P (SUBREG_REG (inner)))
6995 is_mode = GET_MODE (SUBREG_REG (inner));
6996 inner = SUBREG_REG (inner);
6997 }
6998 else if (GET_CODE (inner) == ASHIFT
6999 && CONST_INT_P (XEXP (inner, 1))
7000 && pos_rtx == 0 && pos == 0
7001 && len > UINTVAL (XEXP (inner, 1)))
7002 {
7003 /* We're extracting the least significant bits of an rtx
7004 (ashift X (const_int C)), where LEN > C. Extract the
7005 least significant (LEN - C) bits of X, giving an rtx
7006 whose mode is MODE, then shift it left C times. */
7007 new_rtx = make_extraction (mode, XEXP (inner, 0),
7008 0, 0, len - INTVAL (XEXP (inner, 1)),
7009 unsignedp, in_dest, in_compare);
7010 if (new_rtx != 0)
7011 return gen_rtx_ASHIFT (mode, new_rtx, XEXP (inner, 1));
7012 }
7013
7014 inner_mode = GET_MODE (inner);
7015
7016 if (pos_rtx && CONST_INT_P (pos_rtx))
7017 pos = INTVAL (pos_rtx), pos_rtx = 0;
7018
7019 /* See if this can be done without an extraction. We never can if the
7020 width of the field is not the same as that of some integer mode. For
7021 registers, we can only avoid the extraction if the position is at the
7022 low-order bit and this is either not in the destination or we have the
7023 appropriate STRICT_LOW_PART operation available.
7024
7025 For MEM, we can avoid an extract if the field starts on an appropriate
7026 boundary and we can change the mode of the memory reference. */
7027
7028 if (tmode != BLKmode
7029 && ((pos_rtx == 0 && (pos % BITS_PER_WORD) == 0
7030 && !MEM_P (inner)
7031 && (inner_mode == tmode
7032 || !REG_P (inner)
7033 || TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (tmode),
7034 GET_MODE_BITSIZE (inner_mode))
7035 || reg_truncated_to_mode (tmode, inner))
7036 && (! in_dest
7037 || (REG_P (inner)
7038 && have_insn_for (STRICT_LOW_PART, tmode))))
7039 || (MEM_P (inner) && pos_rtx == 0
7040 && (pos
7041 % (STRICT_ALIGNMENT ? GET_MODE_ALIGNMENT (tmode)
7042 : BITS_PER_UNIT)) == 0
7043 /* We can't do this if we are widening INNER_MODE (it
7044 may not be aligned, for one thing). */
7045 && GET_MODE_BITSIZE (inner_mode) >= GET_MODE_BITSIZE (tmode)
7046 && (inner_mode == tmode
7047 || (! mode_dependent_address_p (XEXP (inner, 0))
7048 && ! MEM_VOLATILE_P (inner))))))
7049 {
7050 /* If INNER is a MEM, make a new MEM that encompasses just the desired
7051 field. If the original and current mode are the same, we need not
7052 adjust the offset. Otherwise, we do if bytes big endian.
7053
7054 If INNER is not a MEM, get a piece consisting of just the field
7055 of interest (in this case POS % BITS_PER_WORD must be 0). */
7056
7057 if (MEM_P (inner))
7058 {
7059 HOST_WIDE_INT offset;
7060
7061 /* POS counts from lsb, but make OFFSET count in memory order. */
7062 if (BYTES_BIG_ENDIAN)
7063 offset = (GET_MODE_BITSIZE (is_mode) - len - pos) / BITS_PER_UNIT;
7064 else
7065 offset = pos / BITS_PER_UNIT;
7066
7067 new_rtx = adjust_address_nv (inner, tmode, offset);
7068 }
7069 else if (REG_P (inner))
7070 {
7071 if (tmode != inner_mode)
7072 {
7073 /* We can't call gen_lowpart in a DEST since we
7074 always want a SUBREG (see below) and it would sometimes
7075 return a new hard register. */
7076 if (pos || in_dest)
7077 {
7078 HOST_WIDE_INT final_word = pos / BITS_PER_WORD;
7079
7080 if (WORDS_BIG_ENDIAN
7081 && GET_MODE_SIZE (inner_mode) > UNITS_PER_WORD)
7082 final_word = ((GET_MODE_SIZE (inner_mode)
7083 - GET_MODE_SIZE (tmode))
7084 / UNITS_PER_WORD) - final_word;
7085
7086 final_word *= UNITS_PER_WORD;
7087 if (BYTES_BIG_ENDIAN &&
7088 GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (tmode))
7089 final_word += (GET_MODE_SIZE (inner_mode)
7090 - GET_MODE_SIZE (tmode)) % UNITS_PER_WORD;
7091
7092 /* Avoid creating invalid subregs, for example when
7093 simplifying (x>>32)&255. */
7094 if (!validate_subreg (tmode, inner_mode, inner, final_word))
7095 return NULL_RTX;
7096
7097 new_rtx = gen_rtx_SUBREG (tmode, inner, final_word);
7098 }
7099 else
7100 new_rtx = gen_lowpart (tmode, inner);
7101 }
7102 else
7103 new_rtx = inner;
7104 }
7105 else
7106 new_rtx = force_to_mode (inner, tmode,
7107 len >= HOST_BITS_PER_WIDE_INT
7108 ? ~(unsigned HOST_WIDE_INT) 0
7109 : ((unsigned HOST_WIDE_INT) 1 << len) - 1,
7110 0);
7111
7112 /* If this extraction is going into the destination of a SET,
7113 make a STRICT_LOW_PART unless we made a MEM. */
7114
7115 if (in_dest)
7116 return (MEM_P (new_rtx) ? new_rtx
7117 : (GET_CODE (new_rtx) != SUBREG
7118 ? gen_rtx_CLOBBER (tmode, const0_rtx)
7119 : gen_rtx_STRICT_LOW_PART (VOIDmode, new_rtx)));
7120
7121 if (mode == tmode)
7122 return new_rtx;
7123
7124 if (CONST_INT_P (new_rtx)
7125 || GET_CODE (new_rtx) == CONST_DOUBLE)
7126 return simplify_unary_operation (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
7127 mode, new_rtx, tmode);
7128
7129 /* If we know that no extraneous bits are set, and that the high
7130 bit is not set, convert the extraction to the cheaper of
7131 sign and zero extension, which are equivalent in these cases.
7132 if (flag_expensive_optimizations
7133 && (GET_MODE_BITSIZE (tmode) <= HOST_BITS_PER_WIDE_INT
7134 && ((nonzero_bits (new_rtx, tmode)
7135 & ~(((unsigned HOST_WIDE_INT)
7136 GET_MODE_MASK (tmode))
7137 >> 1))
7138 == 0)))
7139 {
7140 rtx temp = gen_rtx_ZERO_EXTEND (mode, new_rtx);
7141 rtx temp1 = gen_rtx_SIGN_EXTEND (mode, new_rtx);
7142
7143 /* Prefer ZERO_EXTENSION, since it gives more information to
7144 backends. */
7145 if (rtx_cost (temp, SET, optimize_this_for_speed_p)
7146 <= rtx_cost (temp1, SET, optimize_this_for_speed_p))
7147 return temp;
7148 return temp1;
7149 }
7150
7151 /* Otherwise, sign- or zero-extend unless we already are in the
7152 proper mode. */
7153
7154 return (gen_rtx_fmt_e (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
7155 mode, new_rtx));
7156 }
7157
7158 /* Unless this is a COMPARE or we have a funny memory reference,
7159 don't do anything with zero-extending field extracts starting at
7160 the low-order bit since they are simple AND operations. */
7161 if (pos_rtx == 0 && pos == 0 && ! in_dest
7162 && ! in_compare && unsignedp)
7163 return 0;
7164
7165 /* If INNER is a MEM, reject this if we would be spanning bytes or
7166 if the position is not a constant and the length is not 1. In all
7167 other cases, we would only be going outside our object in cases when
7168 an original shift would have been undefined. */
7169 if (MEM_P (inner)
7170 && ((pos_rtx == 0 && pos + len > GET_MODE_BITSIZE (is_mode))
7171 || (pos_rtx != 0 && len != 1)))
7172 return 0;
7173
7174 /* Get the mode to use should INNER not be a MEM, the mode for the position,
7175 and the mode for the result. */
7176 if (in_dest && mode_for_extraction (EP_insv, -1) != MAX_MACHINE_MODE)
7177 {
7178 wanted_inner_reg_mode = mode_for_extraction (EP_insv, 0);
7179 pos_mode = mode_for_extraction (EP_insv, 2);
7180 extraction_mode = mode_for_extraction (EP_insv, 3);
7181 }
7182
7183 if (! in_dest && unsignedp
7184 && mode_for_extraction (EP_extzv, -1) != MAX_MACHINE_MODE)
7185 {
7186 wanted_inner_reg_mode = mode_for_extraction (EP_extzv, 1);
7187 pos_mode = mode_for_extraction (EP_extzv, 3);
7188 extraction_mode = mode_for_extraction (EP_extzv, 0);
7189 }
7190
7191 if (! in_dest && ! unsignedp
7192 && mode_for_extraction (EP_extv, -1) != MAX_MACHINE_MODE)
7193 {
7194 wanted_inner_reg_mode = mode_for_extraction (EP_extv, 1);
7195 pos_mode = mode_for_extraction (EP_extv, 3);
7196 extraction_mode = mode_for_extraction (EP_extv, 0);
7197 }
7198
7199 /* Never narrow an object, since that might not be safe. */
7200
7201 if (mode != VOIDmode
7202 && GET_MODE_SIZE (extraction_mode) < GET_MODE_SIZE (mode))
7203 extraction_mode = mode;
7204
7205 if (pos_rtx && GET_MODE (pos_rtx) != VOIDmode
7206 && GET_MODE_SIZE (pos_mode) < GET_MODE_SIZE (GET_MODE (pos_rtx)))
7207 pos_mode = GET_MODE (pos_rtx);
7208
7209 /* If this is not from memory, the desired mode is the preferred mode
7210 for an extraction pattern's first input operand, or word_mode if there
7211 is none. */
7212 if (!MEM_P (inner))
7213 wanted_inner_mode = wanted_inner_reg_mode;
7214 else
7215 {
7216 /* Be careful not to go beyond the extracted object and maintain the
7217 natural alignment of the memory. */
7218 wanted_inner_mode = smallest_mode_for_size (len, MODE_INT);
7219 while (pos % GET_MODE_BITSIZE (wanted_inner_mode) + len
7220 > GET_MODE_BITSIZE (wanted_inner_mode))
7221 {
7222 wanted_inner_mode = GET_MODE_WIDER_MODE (wanted_inner_mode);
7223 gcc_assert (wanted_inner_mode != VOIDmode);
7224 }
7225
7226 /* If we have to change the mode of memory and cannot, the desired mode
7227 is EXTRACTION_MODE. */
7228 if (inner_mode != wanted_inner_mode
7229 && (mode_dependent_address_p (XEXP (inner, 0))
7230 || MEM_VOLATILE_P (inner)
7231 || pos_rtx))
7232 wanted_inner_mode = extraction_mode;
7233 }
7234
7235 orig_pos = pos;
7236
7237 if (BITS_BIG_ENDIAN)
7238 {
7239 /* POS is passed as if BITS_BIG_ENDIAN == 0, so we need to convert it to
7240 BITS_BIG_ENDIAN style. If position is constant, compute new
7241 position. Otherwise, build subtraction.
7242 Note that POS is relative to the mode of the original argument.
7243 If it's a MEM we need to recompute POS relative to that.
7244 However, if we're extracting from (or inserting into) a register,
7245 we want to recompute POS relative to wanted_inner_mode. */
7246 int width = (MEM_P (inner)
7247 ? GET_MODE_BITSIZE (is_mode)
7248 : GET_MODE_BITSIZE (wanted_inner_mode));
7249
7250 if (pos_rtx == 0)
7251 pos = width - len - pos;
7252 else
7253 pos_rtx
7254 = gen_rtx_MINUS (GET_MODE (pos_rtx), GEN_INT (width - len), pos_rtx);
7255 /* POS may be less than 0 now, but we check for that below.
7256 Note that it can only be less than 0 if !MEM_P (inner). */
7257 }
7258
7259 /* If INNER has a wider mode, and this is a constant extraction, try to
7260 make it smaller and adjust the byte to point to the byte containing
7261 the value. */
7262 if (wanted_inner_mode != VOIDmode
7263 && inner_mode != wanted_inner_mode
7264 && ! pos_rtx
7265 && GET_MODE_SIZE (wanted_inner_mode) < GET_MODE_SIZE (is_mode)
7266 && MEM_P (inner)
7267 && ! mode_dependent_address_p (XEXP (inner, 0))
7268 && ! MEM_VOLATILE_P (inner))
7269 {
7270 int offset = 0;
7271
7272 /* The computations below will be correct if the machine is big
7273 endian in both bits and bytes or little endian in bits and bytes.
7274 If it is mixed, we must adjust. */
7275
7276 /* If bytes are big endian and we had a paradoxical SUBREG, we must
7277 adjust OFFSET to compensate. */
7278 if (BYTES_BIG_ENDIAN
7279 && GET_MODE_SIZE (inner_mode) < GET_MODE_SIZE (is_mode))
7280 offset -= GET_MODE_SIZE (is_mode) - GET_MODE_SIZE (inner_mode);
7281
7282 /* We can now move to the desired byte. */
7283 offset += (pos / GET_MODE_BITSIZE (wanted_inner_mode))
7284 * GET_MODE_SIZE (wanted_inner_mode);
7285 pos %= GET_MODE_BITSIZE (wanted_inner_mode);
7286
7287 if (BYTES_BIG_ENDIAN != BITS_BIG_ENDIAN
7288 && is_mode != wanted_inner_mode)
7289 offset = (GET_MODE_SIZE (is_mode)
7290 - GET_MODE_SIZE (wanted_inner_mode) - offset);
7291
7292 inner = adjust_address_nv (inner, wanted_inner_mode, offset);
7293 }
7294
7295 /* If INNER is not memory, get it into the proper mode. If we are changing
7296 its mode, POS must be a constant and smaller than the size of the new
7297 mode. */
7298 else if (!MEM_P (inner))
7299 {
7300 /* On the LHS, don't create paradoxical subregs implicitly truncating
7301 the register unless TRULY_NOOP_TRUNCATION. */
7302 if (in_dest
7303 && !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (GET_MODE (inner)),
7304 GET_MODE_BITSIZE (wanted_inner_mode)))
7305 return NULL_RTX;
7306
7307 if (GET_MODE (inner) != wanted_inner_mode
7308 && (pos_rtx != 0
7309 || orig_pos + len > GET_MODE_BITSIZE (wanted_inner_mode)))
7310 return NULL_RTX;
7311
7312 if (orig_pos < 0)
7313 return NULL_RTX;
7314
7315 inner = force_to_mode (inner, wanted_inner_mode,
7316 pos_rtx
7317 || len + orig_pos >= HOST_BITS_PER_WIDE_INT
7318 ? ~(unsigned HOST_WIDE_INT) 0
7319 : ((((unsigned HOST_WIDE_INT) 1 << len) - 1)
7320 << orig_pos),
7321 0);
7322 }
7323
7324 /* Adjust mode of POS_RTX, if needed. If we want a wider mode, we
7325 have to zero extend. Otherwise, we can just use a SUBREG. */
7326 if (pos_rtx != 0
7327 && GET_MODE_SIZE (pos_mode) > GET_MODE_SIZE (GET_MODE (pos_rtx)))
7328 {
7329 rtx temp = gen_rtx_ZERO_EXTEND (pos_mode, pos_rtx);
7330
7331 /* If we know that no extraneous bits are set, and that the high
7332 bit is not set, convert the extraction to the cheaper one - either
7333 SIGN_EXTENSION or ZERO_EXTENSION, which are equivalent in these
7334 cases. */
7335 if (flag_expensive_optimizations
7336 && (GET_MODE_BITSIZE (GET_MODE (pos_rtx)) <= HOST_BITS_PER_WIDE_INT
7337 && ((nonzero_bits (pos_rtx, GET_MODE (pos_rtx))
7338 & ~(((unsigned HOST_WIDE_INT)
7339 GET_MODE_MASK (GET_MODE (pos_rtx)))
7340 >> 1))
7341 == 0)))
7342 {
7343 rtx temp1 = gen_rtx_SIGN_EXTEND (pos_mode, pos_rtx);
7344
7345 /* Prefer ZERO_EXTENSION, since it gives more information to
7346 backends. */
7347 if (rtx_cost (temp1, SET, optimize_this_for_speed_p)
7348 < rtx_cost (temp, SET, optimize_this_for_speed_p))
7349 temp = temp1;
7350 }
7351 pos_rtx = temp;
7352 }
7353 else if (pos_rtx != 0
7354 && GET_MODE_SIZE (pos_mode) < GET_MODE_SIZE (GET_MODE (pos_rtx)))
7355 pos_rtx = gen_lowpart (pos_mode, pos_rtx);
7356
7357 /* Make POS_RTX unless we already have it and it is correct. If we don't
7358 have a POS_RTX but we do have an ORIG_POS_RTX, the latter must
7359 be a CONST_INT. */
7360 if (pos_rtx == 0 && orig_pos_rtx != 0 && INTVAL (orig_pos_rtx) == pos)
7361 pos_rtx = orig_pos_rtx;
7362
7363 else if (pos_rtx == 0)
7364 pos_rtx = GEN_INT (pos);
7365
7366 /* Make the required operation. See if we can use existing rtx. */
7367 new_rtx = gen_rtx_fmt_eee (unsignedp ? ZERO_EXTRACT : SIGN_EXTRACT,
7368 extraction_mode, inner, GEN_INT (len), pos_rtx);
7369 if (! in_dest)
7370 new_rtx = gen_lowpart (mode, new_rtx);
7371
7372 return new_rtx;
7373 }
7374 \f
7375 /* See if X contains an ASHIFT of COUNT or more bits that can be commuted
7376 with any other operations in X. Return X without that shift if so. */
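/* For example, extract_left_shift ((plus (ashift Y (const_int 3))
   (const_int 16)), 3) returns (plus Y (const_int 2)): both terms can
   be divided by 8 because the low three bits of the constant are
   zero.  */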
7377
7378 static rtx
7379 extract_left_shift (rtx x, int count)
7380 {
7381 enum rtx_code code = GET_CODE (x);
7382 enum machine_mode mode = GET_MODE (x);
7383 rtx tem;
7384
7385 switch (code)
7386 {
7387 case ASHIFT:
7388 /* This is the shift itself. If it is wide enough, we will return
7389 either the value being shifted if the shift count is equal to
7390 COUNT or a shift for the difference. */
7391 if (CONST_INT_P (XEXP (x, 1))
7392 && INTVAL (XEXP (x, 1)) >= count)
7393 return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (x, 0),
7394 INTVAL (XEXP (x, 1)) - count);
7395 break;
7396
7397 case NEG: case NOT:
7398 if ((tem = extract_left_shift (XEXP (x, 0), count)) != 0)
7399 return simplify_gen_unary (code, mode, tem, mode);
7400
7401 break;
7402
7403 case PLUS: case IOR: case XOR: case AND:
7404 /* If we can safely shift this constant and we find the inner shift,
7405 make a new operation. */
7406 if (CONST_INT_P (XEXP (x, 1))
7407 && (UINTVAL (XEXP (x, 1))
7408 & ((((unsigned HOST_WIDE_INT) 1 << count)) - 1)) == 0
7409 && (tem = extract_left_shift (XEXP (x, 0), count)) != 0)
7410 return simplify_gen_binary (code, mode, tem,
7411 GEN_INT (INTVAL (XEXP (x, 1)) >> count));
7412
7413 break;
7414
7415 default:
7416 break;
7417 }
7418
7419 return 0;
7420 }
7421 \f
7422 /* Look at the expression rooted at X. Look for expressions
7423 equivalent to ZERO_EXTRACT, SIGN_EXTRACT, ZERO_EXTEND, SIGN_EXTEND.
7424 Form these expressions.
7425
7426 Return the new rtx, usually just X.
7427
7428 Also, for machines like the VAX that don't have logical shift insns,
7429 try to convert logical to arithmetic shift operations in cases where
7430 they are equivalent. This undoes the canonicalizations to logical
7431 shifts done elsewhere.
7432
7433 We try, as much as possible, to re-use rtl expressions to save memory.
7434
7435 IN_CODE says what kind of expression we are processing. Normally, it is
7436 SET. In a memory address (inside a MEM, PLUS or MINUS, the latter two
7437 being kludges), it is MEM. When processing the arguments of a comparison
7438 or a COMPARE against zero, it is COMPARE. */
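/* For example, on a target whose word_mode is SImode and where
   BITS_BIG_ENDIAN == 0, this can turn

       (and:SI (lshiftrt:SI X (const_int 8)) (const_int 255))

   into

       (zero_extract:SI X (const_int 8) (const_int 8))

   giving a machine bit-field extraction pattern a chance to match.  */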
7439
7440 static rtx
7441 make_compound_operation (rtx x, enum rtx_code in_code)
7442 {
7443 enum rtx_code code = GET_CODE (x);
7444 enum machine_mode mode = GET_MODE (x);
7445 int mode_width = GET_MODE_BITSIZE (mode);
7446 rtx rhs, lhs;
7447 enum rtx_code next_code;
7448 int i, j;
7449 rtx new_rtx = 0;
7450 rtx tem;
7451 const char *fmt;
7452
7453 /* Select the code to be used in recursive calls. Once we are inside an
7454 address, we stay there. If we have a comparison, set to COMPARE,
7455 but once inside, go back to our default of SET. */
7456
7457 next_code = (code == MEM ? MEM
7458 : ((code == PLUS || code == MINUS)
7459 && SCALAR_INT_MODE_P (mode)) ? MEM
7460 : ((code == COMPARE || COMPARISON_P (x))
7461 && XEXP (x, 1) == const0_rtx) ? COMPARE
7462 : in_code == COMPARE ? SET : in_code);
7463
7464 /* Process depending on the code of this operation. If NEW is set
7465 nonzero, it will be returned. */
7466
7467 switch (code)
7468 {
7469 case ASHIFT:
7470 /* Convert shifts by constants into multiplications if inside
7471 an address. */
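/* E.g. inside a MEM, (ashift X (const_int 2)) becomes
   (mult X (const_int 4)), the canonical form for scaled index
   addressing.  */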
7472 if (in_code == MEM && CONST_INT_P (XEXP (x, 1))
7473 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
7474 && INTVAL (XEXP (x, 1)) >= 0)
7475 {
7476 HOST_WIDE_INT count = INTVAL (XEXP (x, 1));
7477 HOST_WIDE_INT multval = (HOST_WIDE_INT) 1 << count;
7478
7479 new_rtx = make_compound_operation (XEXP (x, 0), next_code);
7480 if (GET_CODE (new_rtx) == NEG)
7481 {
7482 new_rtx = XEXP (new_rtx, 0);
7483 multval = -multval;
7484 }
7485 multval = trunc_int_for_mode (multval, mode);
7486 new_rtx = gen_rtx_MULT (mode, new_rtx, GEN_INT (multval));
7487 }
7488 break;
7489
7490 case PLUS:
7491 lhs = XEXP (x, 0);
7492 rhs = XEXP (x, 1);
7493 lhs = make_compound_operation (lhs, next_code);
7494 rhs = make_compound_operation (rhs, next_code);
7495 if (GET_CODE (lhs) == MULT && GET_CODE (XEXP (lhs, 0)) == NEG
7496 && SCALAR_INT_MODE_P (mode))
7497 {
7498 tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (lhs, 0), 0),
7499 XEXP (lhs, 1));
7500 new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
7501 }
7502 else if (GET_CODE (lhs) == MULT
7503 && (CONST_INT_P (XEXP (lhs, 1)) && INTVAL (XEXP (lhs, 1)) < 0))
7504 {
7505 tem = simplify_gen_binary (MULT, mode, XEXP (lhs, 0),
7506 simplify_gen_unary (NEG, mode,
7507 XEXP (lhs, 1),
7508 mode));
7509 new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
7510 }
7511 else
7512 {
7513 SUBST (XEXP (x, 0), lhs);
7514 SUBST (XEXP (x, 1), rhs);
7515 goto maybe_swap;
7516 }
7517 x = gen_lowpart (mode, new_rtx);
7518 goto maybe_swap;
7519
7520 case MINUS:
7521 lhs = XEXP (x, 0);
7522 rhs = XEXP (x, 1);
7523 lhs = make_compound_operation (lhs, next_code);
7524 rhs = make_compound_operation (rhs, next_code);
7525 if (GET_CODE (rhs) == MULT && GET_CODE (XEXP (rhs, 0)) == NEG
7526 && SCALAR_INT_MODE_P (mode))
7527 {
7528 tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (rhs, 0), 0),
7529 XEXP (rhs, 1));
7530 new_rtx = simplify_gen_binary (PLUS, mode, tem, lhs);
7531 }
7532 else if (GET_CODE (rhs) == MULT
7533 && (CONST_INT_P (XEXP (rhs, 1)) && INTVAL (XEXP (rhs, 1)) < 0))
7534 {
7535 tem = simplify_gen_binary (MULT, mode, XEXP (rhs, 0),
7536 simplify_gen_unary (NEG, mode,
7537 XEXP (rhs, 1),
7538 mode));
7539 new_rtx = simplify_gen_binary (PLUS, mode, tem, lhs);
7540 }
7541 else
7542 {
7543 SUBST (XEXP (x, 0), lhs);
7544 SUBST (XEXP (x, 1), rhs);
7545 return x;
7546 }
7547 return gen_lowpart (mode, new_rtx);
7548
7549 case AND:
7550 /* If the second operand is not a constant, we can't do anything
7551 with it. */
7552 if (!CONST_INT_P (XEXP (x, 1)))
7553 break;
7554
7555 /* If the constant is a power of two minus one and the first operand
7556 is a logical right shift, make an extraction. */
7557 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
7558 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
7559 {
7560 new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
7561 new_rtx = make_extraction (mode, new_rtx, 0, XEXP (XEXP (x, 0), 1), i, 1,
7562 0, in_code == COMPARE);
7563 }
7564
7565 /* Same as previous, but for (subreg (lshiftrt ...)) in first op. */
7566 else if (GET_CODE (XEXP (x, 0)) == SUBREG
7567 && subreg_lowpart_p (XEXP (x, 0))
7568 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == LSHIFTRT
7569 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
7570 {
7571 new_rtx = make_compound_operation (XEXP (SUBREG_REG (XEXP (x, 0)), 0),
7572 next_code);
7573 new_rtx = make_extraction (GET_MODE (SUBREG_REG (XEXP (x, 0))), new_rtx, 0,
7574 XEXP (SUBREG_REG (XEXP (x, 0)), 1), i, 1,
7575 0, in_code == COMPARE);
7576 }
7577 /* Same as previous, but for (xor/ior (lshiftrt...) (lshiftrt...)). */
7578 else if ((GET_CODE (XEXP (x, 0)) == XOR
7579 || GET_CODE (XEXP (x, 0)) == IOR)
7580 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LSHIFTRT
7581 && GET_CODE (XEXP (XEXP (x, 0), 1)) == LSHIFTRT
7582 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
7583 {
7584 /* Apply the distributive law, and then try to make extractions. */
7585 new_rtx = gen_rtx_fmt_ee (GET_CODE (XEXP (x, 0)), mode,
7586 gen_rtx_AND (mode, XEXP (XEXP (x, 0), 0),
7587 XEXP (x, 1)),
7588 gen_rtx_AND (mode, XEXP (XEXP (x, 0), 1),
7589 XEXP (x, 1)));
7590 new_rtx = make_compound_operation (new_rtx, in_code);
7591 }
7592
7593 /* If we have (and (rotate X C) M) and C is larger than the number
7594 of bits in M, this is an extraction. */
7595
7596 else if (GET_CODE (XEXP (x, 0)) == ROTATE
7597 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
7598 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0
7599 && i <= INTVAL (XEXP (XEXP (x, 0), 1)))
7600 {
7601 new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
7602 new_rtx = make_extraction (mode, new_rtx,
7603 (GET_MODE_BITSIZE (mode)
7604 - INTVAL (XEXP (XEXP (x, 0), 1))),
7605 NULL_RTX, i, 1, 0, in_code == COMPARE);
7606 }
7607
7608 /* On machines without logical shifts, if the operand of the AND is
7609 a logical shift and our mask turns off all the propagated sign
7610 bits, we can replace the logical shift with an arithmetic shift. */
7611 else if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
7612 && !have_insn_for (LSHIFTRT, mode)
7613 && have_insn_for (ASHIFTRT, mode)
7614 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
7615 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
7616 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
7617 && mode_width <= HOST_BITS_PER_WIDE_INT)
7618 {
7619 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
7620
7621 mask >>= INTVAL (XEXP (XEXP (x, 0), 1));
7622 if ((INTVAL (XEXP (x, 1)) & ~mask) == 0)
7623 SUBST (XEXP (x, 0),
7624 gen_rtx_ASHIFTRT (mode,
7625 make_compound_operation
7626 (XEXP (XEXP (x, 0), 0), next_code),
7627 XEXP (XEXP (x, 0), 1)));
7628 }
7629
7630 /* If the constant is one less than a power of two, this might be
7631 representable by an extraction even if no shift is present.
7632 If it doesn't end up being a ZERO_EXTEND, we will ignore it unless
7633 we are in a COMPARE. */
7634 else if ((i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
7635 new_rtx = make_extraction (mode,
7636 make_compound_operation (XEXP (x, 0),
7637 next_code),
7638 0, NULL_RTX, i, 1, 0, in_code == COMPARE);
7639
7640 /* If we are in a comparison and this is an AND with a power of two,
7641 convert this into the appropriate bit extract. */
7642 else if (in_code == COMPARE
7643 && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0)
7644 new_rtx = make_extraction (mode,
7645 make_compound_operation (XEXP (x, 0),
7646 next_code),
7647 i, NULL_RTX, 1, 1, 0, 1);
7648
7649 break;
7650
7651 case LSHIFTRT:
7652 /* If the sign bit is known to be zero, replace this with an
7653 arithmetic shift. */
7654 if (have_insn_for (ASHIFTRT, mode)
7655 && ! have_insn_for (LSHIFTRT, mode)
7656 && mode_width <= HOST_BITS_PER_WIDE_INT
7657 && (nonzero_bits (XEXP (x, 0), mode) & (1 << (mode_width - 1))) == 0)
7658 {
7659 new_rtx = gen_rtx_ASHIFTRT (mode,
7660 make_compound_operation (XEXP (x, 0),
7661 next_code),
7662 XEXP (x, 1));
7663 break;
7664 }
7665
7666 /* ... fall through ... */
7667
7668 case ASHIFTRT:
7669 lhs = XEXP (x, 0);
7670 rhs = XEXP (x, 1);
7671
7672 /* If we have (ashiftrt (ashift foo C1) C2) with C2 >= C1,
7673 this is a SIGN_EXTRACT. */
7674 if (CONST_INT_P (rhs)
7675 && GET_CODE (lhs) == ASHIFT
7676 && CONST_INT_P (XEXP (lhs, 1))
7677 && INTVAL (rhs) >= INTVAL (XEXP (lhs, 1))
7678 && INTVAL (rhs) < mode_width)
7679 {
7680 new_rtx = make_compound_operation (XEXP (lhs, 0), next_code);
7681 new_rtx = make_extraction (mode, new_rtx,
7682 INTVAL (rhs) - INTVAL (XEXP (lhs, 1)),
7683 NULL_RTX, mode_width - INTVAL (rhs),
7684 code == LSHIFTRT, 0, in_code == COMPARE);
7685 break;
7686 }
7687
7688 /* See if we have operations between an ASHIFTRT and an ASHIFT.
7689 If so, try to merge the shifts into a SIGN_EXTEND. We could
7690 also do this for some cases of SIGN_EXTRACT, but it doesn't
7691 seem worth the effort; the case checked for occurs on Alpha. */
7692
7693 if (!OBJECT_P (lhs)
7694 && ! (GET_CODE (lhs) == SUBREG
7695 && (OBJECT_P (SUBREG_REG (lhs))))
7696 && CONST_INT_P (rhs)
7697 && INTVAL (rhs) < HOST_BITS_PER_WIDE_INT
7698 && INTVAL (rhs) < mode_width
7699 && (new_rtx = extract_left_shift (lhs, INTVAL (rhs))) != 0)
7700 new_rtx = make_extraction (mode, make_compound_operation (new_rtx, next_code),
7701 0, NULL_RTX, mode_width - INTVAL (rhs),
7702 code == LSHIFTRT, 0, in_code == COMPARE);
7703
7704 break;
7705
7706 case SUBREG:
7707 /* Call ourselves recursively on the inner expression. If we are
7708 narrowing the object and it has a different RTL code from
7709 what it originally did, do this SUBREG as a force_to_mode. */
7710 {
7711 rtx inner = SUBREG_REG (x), simplified;
7712
7713 tem = make_compound_operation (inner, in_code);
7714
7715 simplified
7716 = simplify_subreg (mode, tem, GET_MODE (inner), SUBREG_BYTE (x));
7717 if (simplified)
7718 tem = simplified;
7719
7720 if (GET_CODE (tem) != GET_CODE (inner)
7721 && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (inner))
7722 && subreg_lowpart_p (x))
7723 {
7724 rtx newer
7725 = force_to_mode (tem, mode, ~(unsigned HOST_WIDE_INT) 0, 0);
7726
7727 /* If we have something other than a SUBREG, we might have
7728 done an expansion, so rerun ourselves. */
7729 if (GET_CODE (newer) != SUBREG)
7730 newer = make_compound_operation (newer, in_code);
7731
7732 /* force_to_mode can expand compounds. If it just re-expanded the
7733 compound, use gen_lowpart to convert to the desired mode. */
7734 if (rtx_equal_p (newer, x)
7735 /* Likewise if it re-expanded the compound only partially.
7736 This happens for SUBREG of ZERO_EXTRACT if they extract
7737 the same number of bits. */
7738 || (GET_CODE (newer) == SUBREG
7739 && (GET_CODE (SUBREG_REG (newer)) == LSHIFTRT
7740 || GET_CODE (SUBREG_REG (newer)) == ASHIFTRT)
7741 && GET_CODE (inner) == AND
7742 && rtx_equal_p (SUBREG_REG (newer), XEXP (inner, 0))))
7743 return gen_lowpart (GET_MODE (x), tem);
7744
7745 return newer;
7746 }
7747
7748 if (simplified)
7749 return tem;
7750 }
7751 break;
7752
7753 default:
7754 break;
7755 }
7756
7757 if (new_rtx)
7758 {
7759 x = gen_lowpart (mode, new_rtx);
7760 code = GET_CODE (x);
7761 }
7762
7763 /* Now recursively process each operand of this operation. */
7764 fmt = GET_RTX_FORMAT (code);
7765 for (i = 0; i < GET_RTX_LENGTH (code); i++)
7766 if (fmt[i] == 'e')
7767 {
7768 new_rtx = make_compound_operation (XEXP (x, i), next_code);
7769 SUBST (XEXP (x, i), new_rtx);
7770 }
7771 else if (fmt[i] == 'E')
7772 for (j = 0; j < XVECLEN (x, i); j++)
7773 {
7774 new_rtx = make_compound_operation (XVECEXP (x, i, j), next_code);
7775 SUBST (XVECEXP (x, i, j), new_rtx);
7776 }
7777
7778 maybe_swap:
7779 /* If this is a commutative operation, the changes to the operands
7780 may have made it noncanonical. */
7781 if (COMMUTATIVE_ARITH_P (x)
7782 && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
7783 {
7784 tem = XEXP (x, 0);
7785 SUBST (XEXP (x, 0), XEXP (x, 1));
7786 SUBST (XEXP (x, 1), tem);
7787 }
7788
7789 return x;
7790 }
7791 \f
7792 /* Given M, see if it is a value that would select a field of bits
7793 within an item, but not the entire word. Return -1 if not.
7794 Otherwise, return the starting position of the field, where 0 is the
7795 low-order bit.
7796
7797 *PLEN is set to the length of the field. */
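/* For example, M == 0x0ff0 selects an 8-bit field starting at bit 4,
   so this returns 4 and sets *PLEN to 8; M == 0x0f0f does not select
   a single contiguous field, so it returns -1.  */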
7798
7799 static int
7800 get_pos_from_mask (unsigned HOST_WIDE_INT m, unsigned HOST_WIDE_INT *plen)
7801 {
7802 /* Get the bit number of the first 1 bit from the right, -1 if none. */
7803 int pos = m ? ctz_hwi (m) : -1;
7804 int len = 0;
7805
7806 if (pos >= 0)
7807 /* Now shift off the low-order zero bits and see if we have a
7808 power of two minus 1. */
7809 len = exact_log2 ((m >> pos) + 1);
7810
7811 if (len <= 0)
7812 pos = -1;
7813
7814 *plen = len;
7815 return pos;
7816 }
7817 \f
7818 /* If X refers to a register that equals REG in value, replace these
7819 references with REG. */
7820 static rtx
7821 canon_reg_for_combine (rtx x, rtx reg)
7822 {
7823 rtx op0, op1, op2;
7824 const char *fmt;
7825 int i;
7826 bool copied;
7827
7828 enum rtx_code code = GET_CODE (x);
7829 switch (GET_RTX_CLASS (code))
7830 {
7831 case RTX_UNARY:
7832 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
7833 if (op0 != XEXP (x, 0))
7834 return simplify_gen_unary (GET_CODE (x), GET_MODE (x), op0,
7835 GET_MODE (reg));
7836 break;
7837
7838 case RTX_BIN_ARITH:
7839 case RTX_COMM_ARITH:
7840 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
7841 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
7842 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
7843 return simplify_gen_binary (GET_CODE (x), GET_MODE (x), op0, op1);
7844 break;
7845
7846 case RTX_COMPARE:
7847 case RTX_COMM_COMPARE:
7848 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
7849 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
7850 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
7851 return simplify_gen_relational (GET_CODE (x), GET_MODE (x),
7852 GET_MODE (op0), op0, op1);
7853 break;
7854
7855 case RTX_TERNARY:
7856 case RTX_BITFIELD_OPS:
7857 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
7858 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
7859 op2 = canon_reg_for_combine (XEXP (x, 2), reg);
7860 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1) || op2 != XEXP (x, 2))
7861 return simplify_gen_ternary (GET_CODE (x), GET_MODE (x),
7862 GET_MODE (op0), op0, op1, op2);
7863
7864 case RTX_OBJ:
7865 if (REG_P (x))
7866 {
7867 if (rtx_equal_p (get_last_value (reg), x)
7868 || rtx_equal_p (reg, get_last_value (x)))
7869 return reg;
7870 else
7871 break;
7872 }
7873
7874 /* fall through */
7875
7876 default:
7877 fmt = GET_RTX_FORMAT (code);
7878 copied = false;
7879 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
7880 if (fmt[i] == 'e')
7881 {
7882 rtx op = canon_reg_for_combine (XEXP (x, i), reg);
7883 if (op != XEXP (x, i))
7884 {
7885 if (!copied)
7886 {
7887 copied = true;
7888 x = copy_rtx (x);
7889 }
7890 XEXP (x, i) = op;
7891 }
7892 }
7893 else if (fmt[i] == 'E')
7894 {
7895 int j;
7896 for (j = 0; j < XVECLEN (x, i); j++)
7897 {
7898 rtx op = canon_reg_for_combine (XVECEXP (x, i, j), reg);
7899 if (op != XVECEXP (x, i, j))
7900 {
7901 if (!copied)
7902 {
7903 copied = true;
7904 x = copy_rtx (x);
7905 }
7906 XVECEXP (x, i, j) = op;
7907 }
7908 }
7909 }
7910
7911 break;
7912 }
7913
7914 return x;
7915 }
7916
7917 /* Return X converted to MODE. If the value is already truncated to
7918 MODE we can just return a subreg even though in the general case we
7919 would need an explicit truncation. */
7920
7921 static rtx
7922 gen_lowpart_or_truncate (enum machine_mode mode, rtx x)
7923 {
7924 if (!CONST_INT_P (x)
7925 && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (x))
7926 && !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
7927 GET_MODE_BITSIZE (GET_MODE (x)))
7928 && !(REG_P (x) && reg_truncated_to_mode (mode, x)))
7929 {
7930 /* Bit-cast X into an integer mode. */
7931 if (!SCALAR_INT_MODE_P (GET_MODE (x)))
7932 x = gen_lowpart (int_mode_for_mode (GET_MODE (x)), x);
7933 x = simplify_gen_unary (TRUNCATE, int_mode_for_mode (mode),
7934 x, GET_MODE (x));
7935 }
7936
7937 return gen_lowpart (mode, x);
7938 }
7939
7940 /* See if X can be simplified knowing that we will only refer to it in
7941 MODE and will only refer to those bits that are nonzero in MASK.
7942 If other bits are being computed or if masking operations are done
7943 that select a superset of the bits in MASK, they can sometimes be
7944 ignored.
7945
7946 Return a possibly simplified expression, but always convert X to
7947 MODE. If X is a CONST_INT, AND the CONST_INT with MASK.
7948
7949 If JUST_SELECT is nonzero, don't optimize by noticing that bits in MASK
7950 are all off in X. This is used when X will be complemented, by either
7951 NOT, NEG, or XOR. */
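/* For example, with MASK == 0xff, (ashift:SI Y (const_int 8)) has no
   nonzero bits within MASK and may be replaced by (const_int 0), while
   (and:SI Y (const_int 255)) may be replaced by just Y, since the two
   agree on every bit in MASK.  */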
7952
7953 static rtx
7954 force_to_mode (rtx x, enum machine_mode mode, unsigned HOST_WIDE_INT mask,
7955 int just_select)
7956 {
7957 enum rtx_code code = GET_CODE (x);
7958 int next_select = just_select || code == XOR || code == NOT || code == NEG;
7959 enum machine_mode op_mode;
7960 unsigned HOST_WIDE_INT fuller_mask, nonzero;
7961 rtx op0, op1, temp;
7962
7963 /* If this is a CALL or ASM_OPERANDS, don't do anything. Some of the
7964 code below will do the wrong thing since the mode of such an
7965 expression is VOIDmode.
7966
7967 Also do nothing if X is a CLOBBER; this can happen if X was
7968 the return value from a call to gen_lowpart. */
7969 if (code == CALL || code == ASM_OPERANDS || code == CLOBBER)
7970 return x;
7971
7972 /* We want to perform the operation in its present mode unless we know
7973 that the operation is valid in MODE, in which case we do the operation
7974 in MODE. */
7975 op_mode = ((GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (x))
7976 && have_insn_for (code, mode))
7977 ? mode : GET_MODE (x));
7978
7979 /* It is not valid to do a right-shift in a narrower mode
7980 than the one it came in with. */
7981 if ((code == LSHIFTRT || code == ASHIFTRT)
7982 && GET_MODE_BITSIZE (mode) < GET_MODE_BITSIZE (GET_MODE (x)))
7983 op_mode = GET_MODE (x);
7984
7985 /* Truncate MASK to fit OP_MODE. */
7986 if (op_mode)
7987 mask &= GET_MODE_MASK (op_mode);
7988
7989 /* When we have an arithmetic operation, or a shift whose count we
7990 do not know, we need to assume that all bits up to the highest-order
7991 bit in MASK will be needed. This is how we form such a mask. */
7992 if (mask & ((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)))
7993 fuller_mask = ~(unsigned HOST_WIDE_INT) 0;
7994 else
7995 fuller_mask = (((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mask) + 1))
7996 - 1);
7997
7998 /* Determine what bits of X are guaranteed to be (non)zero. */
7999 nonzero = nonzero_bits (x, mode);
8000
8001 /* If none of the bits in X are needed, return a zero. */
8002 if (!just_select && (nonzero & mask) == 0 && !side_effects_p (x))
8003 x = const0_rtx;
8004
8005 /* If X is a CONST_INT, return a new one. Do this here since the
8006 test below will fail. */
8007 if (CONST_INT_P (x))
8008 {
8009 if (SCALAR_INT_MODE_P (mode))
8010 return gen_int_mode (INTVAL (x) & mask, mode);
8011 else
8012 {
8013 x = GEN_INT (INTVAL (x) & mask);
8014 return gen_lowpart_common (mode, x);
8015 }
8016 }
8017
8018 /* If X is narrower than MODE and we want all the bits in X's mode, just
8019 get X in the proper mode. */
8020 if (GET_MODE_SIZE (GET_MODE (x)) < GET_MODE_SIZE (mode)
8021 && (GET_MODE_MASK (GET_MODE (x)) & ~mask) == 0)
8022 return gen_lowpart (mode, x);
8023
8024 /* We can ignore the effect of a SUBREG if it narrows the mode or
8025 if the constant masks to zero all the bits the mode doesn't have. */
8026 if (GET_CODE (x) == SUBREG
8027 && subreg_lowpart_p (x)
8028 && ((GET_MODE_SIZE (GET_MODE (x))
8029 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
8030 || (0 == (mask
8031 & GET_MODE_MASK (GET_MODE (x))
8032 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x)))))))
8033 return force_to_mode (SUBREG_REG (x), mode, mask, next_select);
8034
8035 /* The arithmetic simplifications here only work for scalar integer modes. */
8036 if (!SCALAR_INT_MODE_P (mode) || !SCALAR_INT_MODE_P (GET_MODE (x)))
8037 return gen_lowpart_or_truncate (mode, x);
8038
8039 switch (code)
8040 {
8041 case CLOBBER:
8042 /* If X is a (clobber (const_int)), return it since we know we are
8043 generating something that won't match. */
8044 return x;
8045
8046 case SIGN_EXTEND:
8047 case ZERO_EXTEND:
8048 case ZERO_EXTRACT:
8049 case SIGN_EXTRACT:
8050 x = expand_compound_operation (x);
8051 if (GET_CODE (x) != code)
8052 return force_to_mode (x, mode, mask, next_select);
8053 break;
8054
8055 case TRUNCATE:
8056 /* Similarly for a truncate. */
8057 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
8058
8059 case AND:
8060 /* If this is an AND with a constant, convert it into an AND
8061 whose constant is the AND of that constant with MASK. If it
8062 remains an AND of MASK, delete it since it is redundant. */
8063
8064 if (CONST_INT_P (XEXP (x, 1)))
8065 {
8066 x = simplify_and_const_int (x, op_mode, XEXP (x, 0),
8067 mask & INTVAL (XEXP (x, 1)));
8068
8069 /* If X is still an AND, see if it is an AND with a mask that
8070 is just some low-order bits. If so, and it is MASK, we don't
8071 need it. */
8072
8073 if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
8074 && ((INTVAL (XEXP (x, 1)) & GET_MODE_MASK (GET_MODE (x)))
8075 == mask))
8076 x = XEXP (x, 0);
8077
8078 /* If it remains an AND, try making another AND with the bits
8079 in the mode mask that aren't in MASK turned on. If the
8080 constant in the AND is wide enough, this might make a
8081 cheaper constant. */
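/* For instance, in SImode with MASK == 0xff, (and X (const_int 127))
   can become (and X (const_int -129)) (i.e. 0xffffff7f): the extra
   bits lie outside MASK and so do not matter, and the sign-extended
   constant may be cheaper to load on some targets.  */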
8082
8083 if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
8084 && GET_MODE_MASK (GET_MODE (x)) != mask
8085 && GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT)
8086 {
8087 unsigned HOST_WIDE_INT cval
8088 = UINTVAL (XEXP (x, 1))
8089 | (GET_MODE_MASK (GET_MODE (x)) & ~mask);
8090 int width = GET_MODE_BITSIZE (GET_MODE (x));
8091 rtx y;
8092
8093 /* If MODE is narrower than HOST_WIDE_INT and CVAL is a negative
8094 number, sign extend it. */
8095 if (width > 0 && width < HOST_BITS_PER_WIDE_INT
8096 && (cval & ((unsigned HOST_WIDE_INT) 1 << (width - 1))) != 0)
8097 cval |= (unsigned HOST_WIDE_INT) -1 << width;
8098
8099 y = simplify_gen_binary (AND, GET_MODE (x),
8100 XEXP (x, 0), GEN_INT (cval));
8101 if (rtx_cost (y, SET, optimize_this_for_speed_p)
8102 < rtx_cost (x, SET, optimize_this_for_speed_p))
8103 x = y;
8104 }
8105
8106 break;
8107 }
8108
8109 goto binop;
8110
8111 case PLUS:
8112 /* In (and (plus FOO C1) M), if M is a mask that just turns off
8113 low-order bits (as in an alignment operation) and FOO is already
8114 aligned to that boundary, mask C1 to that boundary as well.
8115 This may eliminate that PLUS and, later, the AND. */
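/* E.g. with MASK == -8 (an alignment mask) and FOO known to have its
   low three bits clear, (plus FOO (const_int 7)) reduces to FOO here:
   C1 is masked down to 7 & -8 == 0, which is safe because the addition
   cannot carry into any bit that MASK keeps.  */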
8116
8117 {
8118 unsigned int width = GET_MODE_BITSIZE (mode);
8119 unsigned HOST_WIDE_INT smask = mask;
8120
8121 /* If MODE is narrower than HOST_WIDE_INT and mask is a negative
8122 number, sign extend it. */
8123
8124 if (width < HOST_BITS_PER_WIDE_INT
8125 && (smask & ((unsigned HOST_WIDE_INT) 1 << (width - 1))) != 0)
8126 smask |= (unsigned HOST_WIDE_INT) (-1) << width;
8127
8128 if (CONST_INT_P (XEXP (x, 1))
8129 && exact_log2 (- smask) >= 0
8130 && (nonzero_bits (XEXP (x, 0), mode) & ~smask) == 0
8131 && (INTVAL (XEXP (x, 1)) & ~smask) != 0)
8132 return force_to_mode (plus_constant (XEXP (x, 0),
8133 (INTVAL (XEXP (x, 1)) & smask)),
8134 mode, smask, next_select);
8135 }
8136
8137 /* ... fall through ... */
8138
8139 case MULT:
8140 /* For PLUS, MINUS and MULT, we need any bits less significant than the
8141 most significant bit in MASK since carries from those bits will
8142 affect the bits we are interested in. */
8143 mask = fuller_mask;
8144 goto binop;
8145
8146 case MINUS:
8147 /* If X is (minus C Y) where C's least set bit is larger than any bit
8148 in the mask, then we may replace with (neg Y). */
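/* E.g. with MASK == 0xff, (minus (const_int 256) Y) can become (neg Y):
   256 - Y and -Y are congruent modulo 256, so they agree on every bit
   in MASK.  */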
8149 if (CONST_INT_P (XEXP (x, 0))
8150 && (((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 0))
8151 & -INTVAL (XEXP (x, 0))))
8152 > mask))
8153 {
8154 x = simplify_gen_unary (NEG, GET_MODE (x), XEXP (x, 1),
8155 GET_MODE (x));
8156 return force_to_mode (x, mode, mask, next_select);
8157 }
8158
8159 /* Similarly, if C contains every bit in the fuller_mask, then we may
8160 replace with (not Y). */
8161 if (CONST_INT_P (XEXP (x, 0))
8162 && ((UINTVAL (XEXP (x, 0)) | fuller_mask) == UINTVAL (XEXP (x, 0))))
8163 {
8164 x = simplify_gen_unary (NOT, GET_MODE (x),
8165 XEXP (x, 1), GET_MODE (x));
8166 return force_to_mode (x, mode, mask, next_select);
8167 }
8168
8169 mask = fuller_mask;
8170 goto binop;
8171
8172 case IOR:
8173 case XOR:
8174 /* If X is (ior (lshiftrt FOO C1) C2), try to commute the IOR and
8175 LSHIFTRT so we end up with an (and (lshiftrt (ior ...) ...) ...)
8176 operation which may be a bitfield extraction. Ensure that the
8177 constant we form is not wider than the mode of X. */
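/* E.g. (ior (lshiftrt FOO (const_int 8)) (const_int 15)) can become
   (lshiftrt (ior FOO (const_int 3840)) (const_int 8)) when the checks
   below allow it.  */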
8178
8179 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8180 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8181 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
8182 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
8183 && CONST_INT_P (XEXP (x, 1))
8184 && ((INTVAL (XEXP (XEXP (x, 0), 1))
8185 + floor_log2 (INTVAL (XEXP (x, 1))))
8186 < GET_MODE_BITSIZE (GET_MODE (x)))
8187 && (UINTVAL (XEXP (x, 1))
8188 & ~nonzero_bits (XEXP (x, 0), GET_MODE (x))) == 0)
8189 {
8190 temp = GEN_INT ((INTVAL (XEXP (x, 1)) & mask)
8191 << INTVAL (XEXP (XEXP (x, 0), 1)));
8192 temp = simplify_gen_binary (GET_CODE (x), GET_MODE (x),
8193 XEXP (XEXP (x, 0), 0), temp);
8194 x = simplify_gen_binary (LSHIFTRT, GET_MODE (x), temp,
8195 XEXP (XEXP (x, 0), 1));
8196 return force_to_mode (x, mode, mask, next_select);
8197 }
8198
8199 binop:
8200 /* For most binary operations, just propagate into the operation and
8201 change the mode if we have an operation of that mode. */
8202
8203 op0 = force_to_mode (XEXP (x, 0), mode, mask, next_select);
8204 op1 = force_to_mode (XEXP (x, 1), mode, mask, next_select);
8205
8206 /* If we ended up truncating both operands, truncate the result of the
8207 operation instead. */
8208 if (GET_CODE (op0) == TRUNCATE
8209 && GET_CODE (op1) == TRUNCATE)
8210 {
8211 op0 = XEXP (op0, 0);
8212 op1 = XEXP (op1, 0);
8213 }
8214
8215 op0 = gen_lowpart_or_truncate (op_mode, op0);
8216 op1 = gen_lowpart_or_truncate (op_mode, op1);
8217
8218 if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8219 x = simplify_gen_binary (code, op_mode, op0, op1);
8220 break;
8221
8222 case ASHIFT:
8223 /* For left shifts, do the same, but just for the first operand.
8224 However, we cannot do anything with shifts where we cannot
8225 guarantee that the counts are smaller than the size of the mode
8226 because such a count will have a different meaning in a
8227 wider mode. */
8228
8229 if (! (CONST_INT_P (XEXP (x, 1))
8230 && INTVAL (XEXP (x, 1)) >= 0
8231 && INTVAL (XEXP (x, 1)) < GET_MODE_BITSIZE (mode))
8232 && ! (GET_MODE (XEXP (x, 1)) != VOIDmode
8233 && (nonzero_bits (XEXP (x, 1), GET_MODE (XEXP (x, 1)))
8234 < (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode))))
8235 break;
8236
8237 /* If the shift count is a constant and we can do arithmetic in
8238 the mode of the shift, refine which bits we need. Otherwise, use the
8239 conservative form of the mask. */
8240 if (CONST_INT_P (XEXP (x, 1))
8241 && INTVAL (XEXP (x, 1)) >= 0
8242 && INTVAL (XEXP (x, 1)) < GET_MODE_BITSIZE (op_mode)
8243 && GET_MODE_BITSIZE (op_mode) <= HOST_BITS_PER_WIDE_INT)
8244 mask >>= INTVAL (XEXP (x, 1));
8245 else
8246 mask = fuller_mask;
8247
8248 op0 = gen_lowpart_or_truncate (op_mode,
8249 force_to_mode (XEXP (x, 0), op_mode,
8250 mask, next_select));
8251
8252 if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0))
8253 x = simplify_gen_binary (code, op_mode, op0, XEXP (x, 1));
8254 break;
8255
8256 case LSHIFTRT:
8257 /* Here we can only do something if the shift count is a constant,
8258 this shift constant is valid for the host, and we can do arithmetic
8259 in OP_MODE. */
8260
8261 if (CONST_INT_P (XEXP (x, 1))
8262 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
8263 && GET_MODE_BITSIZE (op_mode) <= HOST_BITS_PER_WIDE_INT)
8264 {
8265 rtx inner = XEXP (x, 0);
8266 unsigned HOST_WIDE_INT inner_mask;
8267
8268 /* Select the mask of the bits we need for the shift operand. */
8269 inner_mask = mask << INTVAL (XEXP (x, 1));
8270
8271 /* We can only change the mode of the shift if we can do arithmetic
8272 in the mode of the shift and INNER_MASK is no wider than the
8273 width of X's mode. */
8274 if ((inner_mask & ~GET_MODE_MASK (GET_MODE (x))) != 0)
8275 op_mode = GET_MODE (x);
8276
8277 inner = force_to_mode (inner, op_mode, inner_mask, next_select);
8278
8279 if (GET_MODE (x) != op_mode || inner != XEXP (x, 0))
8280 x = simplify_gen_binary (LSHIFTRT, op_mode, inner, XEXP (x, 1));
8281 }
8282
8283 /* If we have (and (lshiftrt FOO C1) C2) where the combination of the
8284 shift and AND produces only copies of the sign bit (C2 is one less
8285 than a power of two), we can do this with just a shift. */
8286
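/* For instance (illustrative constants): in SImode, if FOO is known to
   be all sign-bit copies (0 or -1), then (lshiftrt FOO 30) under
   MASK == 3 can only yield 0 or 3, so the implied AND adds nothing and
   a plain shift suffices.  */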
8287 if (GET_CODE (x) == LSHIFTRT
8288 && CONST_INT_P (XEXP (x, 1))
8289 /* The shift puts one of the sign bit copies in the least significant
8290 bit. */
8291 && ((INTVAL (XEXP (x, 1))
8292 + num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
8293 >= GET_MODE_BITSIZE (GET_MODE (x)))
8294 && exact_log2 (mask + 1) >= 0
8295 /* Number of bits left after the shift must be more than the mask
8296 needs. */
8297 && ((INTVAL (XEXP (x, 1)) + exact_log2 (mask + 1))
8298 <= GET_MODE_BITSIZE (GET_MODE (x)))
8299 /* Must be more sign bit copies than the mask needs. */
8300 && ((int) num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
8301 >= exact_log2 (mask + 1)))
8302 x = simplify_gen_binary (LSHIFTRT, GET_MODE (x), XEXP (x, 0),
8303 GEN_INT (GET_MODE_BITSIZE (GET_MODE (x))
8304 - exact_log2 (mask + 1)));
8305
8306 goto shiftrt;
8307
8308 case ASHIFTRT:
8309 /* If we are just looking for the sign bit, we don't need this shift at
8310 all, even if it has a variable count. */
8311 if (GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
8312 && (mask == ((unsigned HOST_WIDE_INT) 1
8313 << (GET_MODE_BITSIZE (GET_MODE (x)) - 1))))
8314 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
8315
8316 /* If this is a shift by a constant, get a mask that contains those bits
8317 that are not copies of the sign bit. We then have two cases: If
8318 MASK only includes those bits, this can be a logical shift, which may
8319 allow simplifications. If MASK is a single-bit field not within
8320 those bits, we are requesting a copy of the sign bit and hence can
8321 shift the sign bit to the appropriate location. */
8322
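/* For instance (illustrative constants): in SImode, (ashiftrt FOO 24)
   has nonzero bits 0xff.  If MASK fits inside those bits (say 0x7f),
   the shift below becomes an LSHIFTRT; if MASK is the single bit 0x100,
   we instead form (lshiftrt FOO 23), which moves the sign bit to
   bit 8.  */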
8323 if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) >= 0
8324 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
8325 {
8326 int i;
8327
8328 /* If the considered data is wider than HOST_WIDE_INT, we can't
8329 represent a mask for all its bits in a single scalar.
8330 But we only care about the lower bits, so calculate these. */
8331
8332 if (GET_MODE_BITSIZE (GET_MODE (x)) > HOST_BITS_PER_WIDE_INT)
8333 {
8334 nonzero = ~(unsigned HOST_WIDE_INT) 0;
8335
8336 /* GET_MODE_BITSIZE (GET_MODE (x)) - INTVAL (XEXP (x, 1))
8337 is the number of bits a full-width mask would have set.
8338 We need only shift if these are fewer than nonzero can
8339 hold. If not, we must keep all bits set in nonzero. */
8340
8341 if (GET_MODE_BITSIZE (GET_MODE (x)) - INTVAL (XEXP (x, 1))
8342 < HOST_BITS_PER_WIDE_INT)
8343 nonzero >>= INTVAL (XEXP (x, 1))
8344 + HOST_BITS_PER_WIDE_INT
8345 - GET_MODE_BITSIZE (GET_MODE (x));
8346 }
8347 else
8348 {
8349 nonzero = GET_MODE_MASK (GET_MODE (x));
8350 nonzero >>= INTVAL (XEXP (x, 1));
8351 }
8352
8353 if ((mask & ~nonzero) == 0)
8354 {
8355 x = simplify_shift_const (NULL_RTX, LSHIFTRT, GET_MODE (x),
8356 XEXP (x, 0), INTVAL (XEXP (x, 1)));
8357 if (GET_CODE (x) != ASHIFTRT)
8358 return force_to_mode (x, mode, mask, next_select);
8359 }
8360
8361 else if ((i = exact_log2 (mask)) >= 0)
8362 {
8363 x = simplify_shift_const
8364 (NULL_RTX, LSHIFTRT, GET_MODE (x), XEXP (x, 0),
8365 GET_MODE_BITSIZE (GET_MODE (x)) - 1 - i);
8366
8367 if (GET_CODE (x) != ASHIFTRT)
8368 return force_to_mode (x, mode, mask, next_select);
8369 }
8370 }
8371
8372 /* If MASK is 1, convert this to an LSHIFTRT. This can be done
8373 even if the shift count isn't a constant. */
8374 if (mask == 1)
8375 x = simplify_gen_binary (LSHIFTRT, GET_MODE (x),
8376 XEXP (x, 0), XEXP (x, 1));
8377
8378 shiftrt:
8379
8380 /* If this is a zero- or sign-extension operation that just affects bits
8381 we don't care about, remove it. Be sure the call above returned
8382 something that is still a shift. */
8383
8384 if ((GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ASHIFTRT)
8385 && CONST_INT_P (XEXP (x, 1))
8386 && INTVAL (XEXP (x, 1)) >= 0
8387 && (INTVAL (XEXP (x, 1))
8388 <= GET_MODE_BITSIZE (GET_MODE (x)) - (floor_log2 (mask) + 1))
8389 && GET_CODE (XEXP (x, 0)) == ASHIFT
8390 && XEXP (XEXP (x, 0), 1) == XEXP (x, 1))
8391 return force_to_mode (XEXP (XEXP (x, 0), 0), mode, mask,
8392 next_select);
8393
8394 break;
8395
8396 case ROTATE:
8397 case ROTATERT:
8398 /* If the shift count is constant and we can do computations
8399 in the mode of X, compute where the bits we care about are.
8400 Otherwise, we can't do anything. Don't change the mode of
8401 the shift or propagate MODE into the shift, though. */
8402 if (CONST_INT_P (XEXP (x, 1))
8403 && INTVAL (XEXP (x, 1)) >= 0)
8404 {
8405 temp = simplify_binary_operation (code == ROTATE ? ROTATERT : ROTATE,
8406 GET_MODE (x), GEN_INT (mask),
8407 XEXP (x, 1));
8408 if (temp && CONST_INT_P (temp))
8409 SUBST (XEXP (x, 0),
8410 force_to_mode (XEXP (x, 0), GET_MODE (x),
8411 INTVAL (temp), next_select));
8412 }
8413 break;
8414
8415 case NEG:
8416 /* If we just want the low-order bit, the NEG isn't needed since it
8417 won't change the low-order bit. */
8418 if (mask == 1)
8419 return force_to_mode (XEXP (x, 0), mode, mask, just_select);
8420
8421 /* We need any bits less significant than the most significant bit in
8422 MASK since carries from those bits will affect the bits we are
8423 interested in. */
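/* E.g. bit 3 of (neg FOO) depends on bits 0..3 of FOO, because a
   borrow out of the low bits changes it; hence the fuller mask.  */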
8424 mask = fuller_mask;
8425 goto unop;
8426
8427 case NOT:
8428 /* (not FOO) is (xor FOO CONST), so if FOO is an LSHIFTRT, we can do the
8429 same as the XOR case above. Ensure that the constant we form is not
8430 wider than the mode of X. */
8431
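/* For instance (illustrative constants): with MASK == 0x0f in QImode,
   (not (lshiftrt FOO 4)) becomes (lshiftrt (xor FOO 0xf0) 4);
   complementing after the shift and XOR-ing the shifted-up mask before
   it agree on the bits we care about.  */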
8432 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8433 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8434 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
8435 && (INTVAL (XEXP (XEXP (x, 0), 1)) + floor_log2 (mask)
8436 < GET_MODE_BITSIZE (GET_MODE (x)))
8437 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT)
8438 {
8439 temp = gen_int_mode (mask << INTVAL (XEXP (XEXP (x, 0), 1)),
8440 GET_MODE (x));
8441 temp = simplify_gen_binary (XOR, GET_MODE (x),
8442 XEXP (XEXP (x, 0), 0), temp);
8443 x = simplify_gen_binary (LSHIFTRT, GET_MODE (x),
8444 temp, XEXP (XEXP (x, 0), 1));
8445
8446 return force_to_mode (x, mode, mask, next_select);
8447 }
8448
8449 /* (and (not FOO) CONST) is (not (or FOO (not CONST))), so we must
8450 use the full mask inside the NOT. */
8451 mask = fuller_mask;
8452
8453 unop:
8454 op0 = gen_lowpart_or_truncate (op_mode,
8455 force_to_mode (XEXP (x, 0), mode, mask,
8456 next_select));
8457 if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0))
8458 x = simplify_gen_unary (code, op_mode, op0, op_mode);
8459 break;
8460
8461 case NE:
8462 /* (and (ne FOO 0) CONST) can be (and FOO CONST) if CONST is included
8463 in STORE_FLAG_VALUE and FOO has a single bit that might be nonzero,
8464 which is equal to STORE_FLAG_VALUE. */
8465 if ((mask & ~STORE_FLAG_VALUE) == 0
8466 && XEXP (x, 1) == const0_rtx
8467 && GET_MODE (XEXP (x, 0)) == mode
8468 && exact_log2 (nonzero_bits (XEXP (x, 0), mode)) >= 0
8469 && (nonzero_bits (XEXP (x, 0), mode)
8470 == (unsigned HOST_WIDE_INT) STORE_FLAG_VALUE))
8471 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
8472
8473 break;
8474
8475 case IF_THEN_ELSE:
8476 /* We have no way of knowing if the IF_THEN_ELSE can itself be
8477 written in a narrower mode. We play it safe and do not do so. */
8478
8479 SUBST (XEXP (x, 1),
8480 gen_lowpart_or_truncate (GET_MODE (x),
8481 force_to_mode (XEXP (x, 1), mode,
8482 mask, next_select)));
8483 SUBST (XEXP (x, 2),
8484 gen_lowpart_or_truncate (GET_MODE (x),
8485 force_to_mode (XEXP (x, 2), mode,
8486 mask, next_select)));
8487 break;
8488
8489 default:
8490 break;
8491 }
8492
8493 /* Ensure we return a value of the proper mode. */
8494 return gen_lowpart_or_truncate (mode, x);
8495 }
8496 \f
8497 /* Return nonzero if X is an expression that has one of two values depending on
8498 whether some other value is zero or nonzero. In that case, we return the
8499 value that is being tested, *PTRUE is set to the value of X when the rtx
8500 being returned is nonzero, and *PFALSE is set to the other alternative.
8501
8502 If we return zero, we set *PTRUE and *PFALSE to X. */
8503
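/* For example: if X is (ne A (const_int 0)) we return A with
   *PTRUE == const_true_rtx and *PFALSE == const0_rtx; if X is already
   an IF_THEN_ELSE we hand back its condition and its two arms.  */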
8504 static rtx
8505 if_then_else_cond (rtx x, rtx *ptrue, rtx *pfalse)
8506 {
8507 enum machine_mode mode = GET_MODE (x);
8508 enum rtx_code code = GET_CODE (x);
8509 rtx cond0, cond1, true0, true1, false0, false1;
8510 unsigned HOST_WIDE_INT nz;
8511
8512 /* If we are comparing a value against zero, we are done. */
8513 if ((code == NE || code == EQ)
8514 && XEXP (x, 1) == const0_rtx)
8515 {
8516 *ptrue = (code == NE) ? const_true_rtx : const0_rtx;
8517 *pfalse = (code == NE) ? const0_rtx : const_true_rtx;
8518 return XEXP (x, 0);
8519 }
8520
8521 /* If this is a unary operation whose operand has one of two values, apply
8522 our opcode to compute those values. */
8523 else if (UNARY_P (x)
8524 && (cond0 = if_then_else_cond (XEXP (x, 0), &true0, &false0)) != 0)
8525 {
8526 *ptrue = simplify_gen_unary (code, mode, true0, GET_MODE (XEXP (x, 0)));
8527 *pfalse = simplify_gen_unary (code, mode, false0,
8528 GET_MODE (XEXP (x, 0)));
8529 return cond0;
8530 }
8531
8532 /* If this is a COMPARE, do nothing, since the IF_THEN_ELSE we would
8533 make can't possibly match and would suppress other optimizations. */
8534 else if (code == COMPARE)
8535 ;
8536
8537 /* If this is a binary operation, see if either side has only one of two
8538 values. If either one does or if both do and they are conditional on
8539 the same value, compute the new true and false values. */
8540 else if (BINARY_P (x))
8541 {
8542 cond0 = if_then_else_cond (XEXP (x, 0), &true0, &false0);
8543 cond1 = if_then_else_cond (XEXP (x, 1), &true1, &false1);
8544
8545 if ((cond0 != 0 || cond1 != 0)
8546 && ! (cond0 != 0 && cond1 != 0 && ! rtx_equal_p (cond0, cond1)))
8547 {
8548 /* If if_then_else_cond returned zero, then true/false are the
8549 same rtl. We must copy one of them to prevent invalid rtl
8550 sharing. */
8551 if (cond0 == 0)
8552 true0 = copy_rtx (true0);
8553 else if (cond1 == 0)
8554 true1 = copy_rtx (true1);
8555
8556 if (COMPARISON_P (x))
8557 {
8558 *ptrue = simplify_gen_relational (code, mode, VOIDmode,
8559 true0, true1);
8560 *pfalse = simplify_gen_relational (code, mode, VOIDmode,
8561 false0, false1);
8562 }
8563 else
8564 {
8565 *ptrue = simplify_gen_binary (code, mode, true0, true1);
8566 *pfalse = simplify_gen_binary (code, mode, false0, false1);
8567 }
8568
8569 return cond0 ? cond0 : cond1;
8570 }
8571
8572 /* See if we have PLUS, IOR, XOR, MINUS or UMAX, where one of the
8573 operands is zero when the other is nonzero, and vice-versa,
8574 and STORE_FLAG_VALUE is 1 or -1. */
8575
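/* For example (assuming STORE_FLAG_VALUE == 1): for
   (ior (mult (eq A B) C) (mult (ne A B) D)) the two comparisons are
   reverses of each other, so we return (eq A B) with *PTRUE == C and
   *PFALSE == D, each formed as a MULT by const_true_rtx and then
   simplified.  */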
8576 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
8577 && (code == PLUS || code == IOR || code == XOR || code == MINUS
8578 || code == UMAX)
8579 && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
8580 {
8581 rtx op0 = XEXP (XEXP (x, 0), 1);
8582 rtx op1 = XEXP (XEXP (x, 1), 1);
8583
8584 cond0 = XEXP (XEXP (x, 0), 0);
8585 cond1 = XEXP (XEXP (x, 1), 0);
8586
8587 if (COMPARISON_P (cond0)
8588 && COMPARISON_P (cond1)
8589 && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
8590 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
8591 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
8592 || ((swap_condition (GET_CODE (cond0))
8593 == reversed_comparison_code (cond1, NULL))
8594 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
8595 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
8596 && ! side_effects_p (x))
8597 {
8598 *ptrue = simplify_gen_binary (MULT, mode, op0, const_true_rtx);
8599 *pfalse = simplify_gen_binary (MULT, mode,
8600 (code == MINUS
8601 ? simplify_gen_unary (NEG, mode,
8602 op1, mode)
8603 : op1),
8604 const_true_rtx);
8605 return cond0;
8606 }
8607 }
8608
8609 /* Similarly for MULT, AND and UMIN, except that for these the result
8610 is always zero. */
8611 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
8612 && (code == MULT || code == AND || code == UMIN)
8613 && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
8614 {
8615 cond0 = XEXP (XEXP (x, 0), 0);
8616 cond1 = XEXP (XEXP (x, 1), 0);
8617
8618 if (COMPARISON_P (cond0)
8619 && COMPARISON_P (cond1)
8620 && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
8621 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
8622 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
8623 || ((swap_condition (GET_CODE (cond0))
8624 == reversed_comparison_code (cond1, NULL))
8625 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
8626 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
8627 && ! side_effects_p (x))
8628 {
8629 *ptrue = *pfalse = const0_rtx;
8630 return cond0;
8631 }
8632 }
8633 }
8634
8635 else if (code == IF_THEN_ELSE)
8636 {
8637 /* If we have IF_THEN_ELSE already, extract the condition and
8638 canonicalize it if it is NE or EQ. */
8639 cond0 = XEXP (x, 0);
8640 *ptrue = XEXP (x, 1), *pfalse = XEXP (x, 2);
8641 if (GET_CODE (cond0) == NE && XEXP (cond0, 1) == const0_rtx)
8642 return XEXP (cond0, 0);
8643 else if (GET_CODE (cond0) == EQ && XEXP (cond0, 1) == const0_rtx)
8644 {
8645 *ptrue = XEXP (x, 2), *pfalse = XEXP (x, 1);
8646 return XEXP (cond0, 0);
8647 }
8648 else
8649 return cond0;
8650 }
8651
8652 /* If X is a SUBREG, we can narrow both the true and false values
8653 of the inner expression, if there is a condition. */
8654 else if (code == SUBREG
8655 && 0 != (cond0 = if_then_else_cond (SUBREG_REG (x),
8656 &true0, &false0)))
8657 {
8658 true0 = simplify_gen_subreg (mode, true0,
8659 GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
8660 false0 = simplify_gen_subreg (mode, false0,
8661 GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
8662 if (true0 && false0)
8663 {
8664 *ptrue = true0;
8665 *pfalse = false0;
8666 return cond0;
8667 }
8668 }
8669
8670 /* If X is a constant, this isn't special and will cause confusions
8671 if we treat it as such. Likewise if it is equivalent to a constant. */
8672 else if (CONSTANT_P (x)
8673 || ((cond0 = get_last_value (x)) != 0 && CONSTANT_P (cond0)))
8674 ;
8675
8676 /* If we're in BImode, canonicalize on 0 and STORE_FLAG_VALUE, as that
8677 will be least confusing to the rest of the compiler. */
8678 else if (mode == BImode)
8679 {
8680 *ptrue = GEN_INT (STORE_FLAG_VALUE), *pfalse = const0_rtx;
8681 return x;
8682 }
8683
8684 /* If X is known to be either 0 or -1, those are the true and
8685 false values when testing X. */
8686 else if (x == constm1_rtx || x == const0_rtx
8687 || (mode != VOIDmode
8688 && num_sign_bit_copies (x, mode) == GET_MODE_BITSIZE (mode)))
8689 {
8690 *ptrue = constm1_rtx, *pfalse = const0_rtx;
8691 return x;
8692 }
8693
8694 /* Likewise for 0 or a single bit. */
8695 else if (SCALAR_INT_MODE_P (mode)
8696 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
8697 && exact_log2 (nz = nonzero_bits (x, mode)) >= 0)
8698 {
8699 *ptrue = gen_int_mode (nz, mode), *pfalse = const0_rtx;
8700 return x;
8701 }
8702
8703 /* Otherwise fail; show no condition with true and false values the same. */
8704 *ptrue = *pfalse = x;
8705 return 0;
8706 }
8707 \f
8708 /* Return the value of expression X given the fact that condition COND
8709 is known to be true when applied to REG as its first operand and VAL
8710 as its second. X is known to not be shared and so can be modified in
8711 place.
8712
8713 We only handle the simplest cases, and specifically those cases that
8714 arise with IF_THEN_ELSE expressions. */
8715
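/* For example: with COND == GE, VAL == const0_rtx and X == (abs REG),
   X is known to equal REG; with COND == LT or LE it is known to equal
   (neg REG).  */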
8716 static rtx
8717 known_cond (rtx x, enum rtx_code cond, rtx reg, rtx val)
8718 {
8719 enum rtx_code code = GET_CODE (x);
8720 rtx temp;
8721 const char *fmt;
8722 int i, j;
8723
8724 if (side_effects_p (x))
8725 return x;
8726
8727 /* If either operand of the condition is a floating point value,
8728 then we have to avoid collapsing an EQ comparison. */
8729 if (cond == EQ
8730 && rtx_equal_p (x, reg)
8731 && ! FLOAT_MODE_P (GET_MODE (x))
8732 && ! FLOAT_MODE_P (GET_MODE (val)))
8733 return val;
8734
8735 if (cond == UNEQ && rtx_equal_p (x, reg))
8736 return val;
8737
8738 /* If X is (abs REG) and we know something about REG's relationship
8739 with zero, we may be able to simplify this. */
8740
8741 if (code == ABS && rtx_equal_p (XEXP (x, 0), reg) && val == const0_rtx)
8742 switch (cond)
8743 {
8744 case GE: case GT: case EQ:
8745 return XEXP (x, 0);
8746 case LT: case LE:
8747 return simplify_gen_unary (NEG, GET_MODE (XEXP (x, 0)),
8748 XEXP (x, 0),
8749 GET_MODE (XEXP (x, 0)));
8750 default:
8751 break;
8752 }
8753
8754 /* The only other cases we handle are MIN, MAX, and comparisons if the
8755 operands are the same as REG and VAL. */
8756
8757 else if (COMPARISON_P (x) || COMMUTATIVE_ARITH_P (x))
8758 {
8759 if (rtx_equal_p (XEXP (x, 0), val))
8760 cond = swap_condition (cond), temp = val, val = reg, reg = temp;
8761
8762 if (rtx_equal_p (XEXP (x, 0), reg) && rtx_equal_p (XEXP (x, 1), val))
8763 {
8764 if (COMPARISON_P (x))
8765 {
8766 if (comparison_dominates_p (cond, code))
8767 return const_true_rtx;
8768
8769 code = reversed_comparison_code (x, NULL);
8770 if (code != UNKNOWN
8771 && comparison_dominates_p (cond, code))
8772 return const0_rtx;
8773 else
8774 return x;
8775 }
8776 else if (code == SMAX || code == SMIN
8777 || code == UMIN || code == UMAX)
8778 {
8779 int unsignedp = (code == UMIN || code == UMAX);
8780
8781 /* Do not reverse the condition when it is NE or EQ.
8782 This is because we cannot conclude anything about
8783 the value of 'SMAX (x, y)' when x is not equal to y,
8784 but we can when x equals y. */
8785 if ((code == SMAX || code == UMAX)
8786 && ! (cond == EQ || cond == NE))
8787 cond = reverse_condition (cond);
8788
8789 switch (cond)
8790 {
8791 case GE: case GT:
8792 return unsignedp ? x : XEXP (x, 1);
8793 case LE: case LT:
8794 return unsignedp ? x : XEXP (x, 0);
8795 case GEU: case GTU:
8796 return unsignedp ? XEXP (x, 1) : x;
8797 case LEU: case LTU:
8798 return unsignedp ? XEXP (x, 0) : x;
8799 default:
8800 break;
8801 }
8802 }
8803 }
8804 }
8805 else if (code == SUBREG)
8806 {
8807 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (x));
8808 rtx new_rtx, r = known_cond (SUBREG_REG (x), cond, reg, val);
8809
8810 if (SUBREG_REG (x) != r)
8811 {
8812 /* We must simplify subreg here, before we lose track of the
8813 original inner_mode. */
8814 new_rtx = simplify_subreg (GET_MODE (x), r,
8815 inner_mode, SUBREG_BYTE (x));
8816 if (new_rtx)
8817 return new_rtx;
8818 else
8819 SUBST (SUBREG_REG (x), r);
8820 }
8821
8822 return x;
8823 }
8824 /* We don't have to handle SIGN_EXTEND here, because even in the
8825 case of replacing something with a modeless CONST_INT, a
8826 CONST_INT is already (supposed to be) a valid sign extension for
8827 its narrower mode, which implies it's already properly
8828 sign-extended for the wider mode. Now, for ZERO_EXTEND, the
8829 story is different. */
8830 else if (code == ZERO_EXTEND)
8831 {
8832 enum machine_mode inner_mode = GET_MODE (XEXP (x, 0));
8833 rtx new_rtx, r = known_cond (XEXP (x, 0), cond, reg, val);
8834
8835 if (XEXP (x, 0) != r)
8836 {
8837 /* We must simplify the zero_extend here, before we lose
8838 track of the original inner_mode. */
8839 new_rtx = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
8840 r, inner_mode);
8841 if (new_rtx)
8842 return new_rtx;
8843 else
8844 SUBST (XEXP (x, 0), r);
8845 }
8846
8847 return x;
8848 }
8849
8850 fmt = GET_RTX_FORMAT (code);
8851 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
8852 {
8853 if (fmt[i] == 'e')
8854 SUBST (XEXP (x, i), known_cond (XEXP (x, i), cond, reg, val));
8855 else if (fmt[i] == 'E')
8856 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8857 SUBST (XVECEXP (x, i, j), known_cond (XVECEXP (x, i, j),
8858 cond, reg, val));
8859 }
8860
8861 return x;
8862 }
8863 \f
8864 /* See if X and Y are equal for the purposes of seeing if we can rewrite an
8865 assignment as a field assignment. */
8866
8867 static int
8868 rtx_equal_for_field_assignment_p (rtx x, rtx y)
8869 {
8870 if (x == y || rtx_equal_p (x, y))
8871 return 1;
8872
8873 if (x == 0 || y == 0 || GET_MODE (x) != GET_MODE (y))
8874 return 0;
8875
8876 /* Check for a paradoxical SUBREG of a MEM compared with the MEM.
8877 Note that all SUBREGs of MEM are paradoxical; otherwise they
8878 would have been rewritten. */
8879 if (MEM_P (x) && GET_CODE (y) == SUBREG
8880 && MEM_P (SUBREG_REG (y))
8881 && rtx_equal_p (SUBREG_REG (y),
8882 gen_lowpart (GET_MODE (SUBREG_REG (y)), x)))
8883 return 1;
8884
8885 if (MEM_P (y) && GET_CODE (x) == SUBREG
8886 && MEM_P (SUBREG_REG (x))
8887 && rtx_equal_p (SUBREG_REG (x),
8888 gen_lowpart (GET_MODE (SUBREG_REG (x)), y)))
8889 return 1;
8890
8891 /* We used to see if get_last_value of X and Y were the same but that's
8892 not correct. In one direction, we'll cause the assignment to have
8893 the wrong destination and in the other case, we'll import a register into
8894 this insn that might already have been dead. So fail if none of the
8895 above cases are true. */
8896 return 0;
8897 }
8898 \f
8899 /* See if X, a SET operation, can be rewritten as a bit-field assignment.
8900 Return that assignment if so.
8901
8902 We only handle the most common cases. */
8903
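/* For example: (set DEST (and (rotate (const_int -2) POS) DEST))
   clears one bit and becomes (set (zero_extract DEST 1 POS)
   (const_int 0)); the analogous IOR of (ashift (const_int 1) POS)
   sets that bit instead.  */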
8904 static rtx
8905 make_field_assignment (rtx x)
8906 {
8907 rtx dest = SET_DEST (x);
8908 rtx src = SET_SRC (x);
8909 rtx assign;
8910 rtx rhs, lhs;
8911 HOST_WIDE_INT c1;
8912 HOST_WIDE_INT pos;
8913 unsigned HOST_WIDE_INT len;
8914 rtx other;
8915 enum machine_mode mode;
8916
8917 /* If SRC was (and (not (ashift (const_int 1) POS)) DEST), this is
8918 a clear of a one-bit field. We will have changed it to
8919 (and (rotate (const_int -2) POS) DEST), so check for that. Also check
8920 for a SUBREG. */
8921
8922 if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == ROTATE
8923 && CONST_INT_P (XEXP (XEXP (src, 0), 0))
8924 && INTVAL (XEXP (XEXP (src, 0), 0)) == -2
8925 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
8926 {
8927 assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
8928 1, 1, 1, 0);
8929 if (assign != 0)
8930 return gen_rtx_SET (VOIDmode, assign, const0_rtx);
8931 return x;
8932 }
8933
8934 if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == SUBREG
8935 && subreg_lowpart_p (XEXP (src, 0))
8936 && (GET_MODE_SIZE (GET_MODE (XEXP (src, 0)))
8937 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (XEXP (src, 0)))))
8938 && GET_CODE (SUBREG_REG (XEXP (src, 0))) == ROTATE
8939 && CONST_INT_P (XEXP (SUBREG_REG (XEXP (src, 0)), 0))
8940 && INTVAL (XEXP (SUBREG_REG (XEXP (src, 0)), 0)) == -2
8941 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
8942 {
8943 assign = make_extraction (VOIDmode, dest, 0,
8944 XEXP (SUBREG_REG (XEXP (src, 0)), 1),
8945 1, 1, 1, 0);
8946 if (assign != 0)
8947 return gen_rtx_SET (VOIDmode, assign, const0_rtx);
8948 return x;
8949 }
8950
8951 /* If SRC is (ior (ashift (const_int 1) POS) DEST), this is a set of a
8952 one-bit field. */
8953 if (GET_CODE (src) == IOR && GET_CODE (XEXP (src, 0)) == ASHIFT
8954 && XEXP (XEXP (src, 0), 0) == const1_rtx
8955 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
8956 {
8957 assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
8958 1, 1, 1, 0);
8959 if (assign != 0)
8960 return gen_rtx_SET (VOIDmode, assign, const1_rtx);
8961 return x;
8962 }
8963
8964 /* If DEST is already a field assignment, i.e. ZERO_EXTRACT, and the
8965 SRC is an AND with all bits of that field set, then we can discard
8966 the AND. */
8967 if (GET_CODE (dest) == ZERO_EXTRACT
8968 && CONST_INT_P (XEXP (dest, 1))
8969 && GET_CODE (src) == AND
8970 && CONST_INT_P (XEXP (src, 1)))
8971 {
8972 HOST_WIDE_INT width = INTVAL (XEXP (dest, 1));
8973 unsigned HOST_WIDE_INT and_mask = INTVAL (XEXP (src, 1));
8974 unsigned HOST_WIDE_INT ze_mask;
8975
8976 if (width >= HOST_BITS_PER_WIDE_INT)
8977 ze_mask = -1;
8978 else
8979 ze_mask = ((unsigned HOST_WIDE_INT)1 << width) - 1;
8980
8981 /* Complete overlap. We can remove the source AND. */
8982 if ((and_mask & ze_mask) == ze_mask)
8983 return gen_rtx_SET (VOIDmode, dest, XEXP (src, 0));
8984
8985 /* Partial overlap. We can reduce the source AND. */
8986 if ((and_mask & ze_mask) != and_mask)
8987 {
8988 mode = GET_MODE (src);
8989 src = gen_rtx_AND (mode, XEXP (src, 0),
8990 gen_int_mode (and_mask & ze_mask, mode));
8991 return gen_rtx_SET (VOIDmode, dest, src);
8992 }
8993 }
8994
8995 /* The other case we handle is assignments into a constant-position
8996 field. They look like (ior/xor (and DEST C1) OTHER). If C1 represents
8997 a mask that has all one bits except for a group of zero bits and
8998 OTHER is known to have zeros where C1 has ones, this is such an
8999 assignment. Compute the position and length from C1. Shift OTHER
9000 to the appropriate position, force it to the required mode, and
9001 make the extraction. Check for the AND in both operands. */
9002
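/* For example (illustrative constants): with DEST in SImode,
   SRC == (ior (and DEST 0xffff00ff) OTHER) and OTHER known to be
   nonzero only in bits 8..15, we get POS == 8 and LEN == 8 and produce
   (set (zero_extract DEST 8 8) (lshiftrt OTHER 8)).  */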
9003 if (GET_CODE (src) != IOR && GET_CODE (src) != XOR)
9004 return x;
9005
9006 rhs = expand_compound_operation (XEXP (src, 0));
9007 lhs = expand_compound_operation (XEXP (src, 1));
9008
9009 if (GET_CODE (rhs) == AND
9010 && CONST_INT_P (XEXP (rhs, 1))
9011 && rtx_equal_for_field_assignment_p (XEXP (rhs, 0), dest))
9012 c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
9013 else if (GET_CODE (lhs) == AND
9014 && CONST_INT_P (XEXP (lhs, 1))
9015 && rtx_equal_for_field_assignment_p (XEXP (lhs, 0), dest))
9016 c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
9017 else
9018 return x;
9019
9020 pos = get_pos_from_mask ((~c1) & GET_MODE_MASK (GET_MODE (dest)), &len);
9021 if (pos < 0 || pos + len > GET_MODE_BITSIZE (GET_MODE (dest))
9022 || GET_MODE_BITSIZE (GET_MODE (dest)) > HOST_BITS_PER_WIDE_INT
9023 || (c1 & nonzero_bits (other, GET_MODE (dest))) != 0)
9024 return x;
9025
9026 assign = make_extraction (VOIDmode, dest, pos, NULL_RTX, len, 1, 1, 0);
9027 if (assign == 0)
9028 return x;
9029
9030 /* The mode to use for the source is the mode of the assignment, or of
9031 what is inside a possible STRICT_LOW_PART. */
9032 mode = (GET_CODE (assign) == STRICT_LOW_PART
9033 ? GET_MODE (XEXP (assign, 0)) : GET_MODE (assign));
9034
9035 /* Shift OTHER right POS places and make it the source, restricting it
9036 to the proper length and mode. */
9037
9038 src = canon_reg_for_combine (simplify_shift_const (NULL_RTX, LSHIFTRT,
9039 GET_MODE (src),
9040 other, pos),
9041 dest);
9042 src = force_to_mode (src, mode,
9043 GET_MODE_BITSIZE (mode) >= HOST_BITS_PER_WIDE_INT
9044 ? ~(unsigned HOST_WIDE_INT) 0
9045 : ((unsigned HOST_WIDE_INT) 1 << len) - 1,
9046 0);
9047
9048 /* If SRC is masked by an AND that does not make a difference in
9049 the value being stored, strip it. */
9050 if (GET_CODE (assign) == ZERO_EXTRACT
9051 && CONST_INT_P (XEXP (assign, 1))
9052 && INTVAL (XEXP (assign, 1)) < HOST_BITS_PER_WIDE_INT
9053 && GET_CODE (src) == AND
9054 && CONST_INT_P (XEXP (src, 1))
9055 && UINTVAL (XEXP (src, 1))
9056 == ((unsigned HOST_WIDE_INT) 1 << INTVAL (XEXP (assign, 1))) - 1)
9057 src = XEXP (src, 0);
9058
9059 return gen_rtx_SET (VOIDmode, assign, src);
9060 }
9061 \f
9062 /* See if X is of the form (+ (* a c) (* b c)) and convert to (* (+ a b) c)
9063 if so. */
9064
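/* For example: (ior (and A C) (and B C)) becomes (and (ior A B) C).
   The one twist, handled below, is (xor (ior A C) (ior B C)), which
   becomes (and (xor A B) (not C)).  */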
9065 static rtx
9066 apply_distributive_law (rtx x)
9067 {
9068 enum rtx_code code = GET_CODE (x);
9069 enum rtx_code inner_code;
9070 rtx lhs, rhs, other;
9071 rtx tem;
9072
9073 /* Distributivity is not true for floating point as it can change the
9074 value. So we don't do it unless -funsafe-math-optimizations. */
9075 if (FLOAT_MODE_P (GET_MODE (x))
9076 && ! flag_unsafe_math_optimizations)
9077 return x;
9078
9079 /* The outer operation can only be one of the following: */
9080 if (code != IOR && code != AND && code != XOR
9081 && code != PLUS && code != MINUS)
9082 return x;
9083
9084 lhs = XEXP (x, 0);
9085 rhs = XEXP (x, 1);
9086
9087 /* If either operand is a primitive we can't do anything, so get out
9088 fast. */
9089 if (OBJECT_P (lhs) || OBJECT_P (rhs))
9090 return x;
9091
9092 lhs = expand_compound_operation (lhs);
9093 rhs = expand_compound_operation (rhs);
9094 inner_code = GET_CODE (lhs);
9095 if (inner_code != GET_CODE (rhs))
9096 return x;
9097
9098 /* See if the inner and outer operations distribute. */
9099 switch (inner_code)
9100 {
9101 case LSHIFTRT:
9102 case ASHIFTRT:
9103 case AND:
9104 case IOR:
9105 /* These all distribute except over PLUS. */
9106 if (code == PLUS || code == MINUS)
9107 return x;
9108 break;
9109
9110 case MULT:
9111 if (code != PLUS && code != MINUS)
9112 return x;
9113 break;
9114
9115 case ASHIFT:
9116 /* This is also a multiply, so it distributes over everything. */
9117 break;
9118
9119 case SUBREG:
9120 /* Non-paradoxical SUBREGs distribute over all operations,
9121 provided the inner modes and byte offsets are the same, this
9122 is an extraction of a low-order part, we don't convert an fp
9123 operation to int or vice versa, this is not a vector mode,
9124 and we would not be converting a single-word operation into a
9125 multi-word operation. The latter test is not required, but
9126 it prevents generating unneeded multi-word operations. Some
9127 of the previous tests are redundant given the latter test,
9128 but are retained because they are required for correctness.
9129
9130 We produce the result slightly differently in this case. */
9131
9132 if (GET_MODE (SUBREG_REG (lhs)) != GET_MODE (SUBREG_REG (rhs))
9133 || SUBREG_BYTE (lhs) != SUBREG_BYTE (rhs)
9134 || ! subreg_lowpart_p (lhs)
9135 || (GET_MODE_CLASS (GET_MODE (lhs))
9136 != GET_MODE_CLASS (GET_MODE (SUBREG_REG (lhs))))
9137 || (GET_MODE_SIZE (GET_MODE (lhs))
9138 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (lhs))))
9139 || VECTOR_MODE_P (GET_MODE (lhs))
9140 || GET_MODE_SIZE (GET_MODE (SUBREG_REG (lhs))) > UNITS_PER_WORD
9141 /* Result might need to be truncated. Don't change mode if
9142 explicit truncation is needed. */
9143 || !TRULY_NOOP_TRUNCATION
9144 (GET_MODE_BITSIZE (GET_MODE (x)),
9145 GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (lhs)))))
9146 return x;
9147
9148 tem = simplify_gen_binary (code, GET_MODE (SUBREG_REG (lhs)),
9149 SUBREG_REG (lhs), SUBREG_REG (rhs));
9150 return gen_lowpart (GET_MODE (x), tem);
9151
9152 default:
9153 return x;
9154 }
9155
9156 /* Set LHS and RHS to the inner operands (A and B in the example
9157 above) and set OTHER to the common operand (C in the example).
9158 There is only one way to do this unless the inner operation is
9159 commutative. */
9160 if (COMMUTATIVE_ARITH_P (lhs)
9161 && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 0)))
9162 other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 1);
9163 else if (COMMUTATIVE_ARITH_P (lhs)
9164 && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 1)))
9165 other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 0);
9166 else if (COMMUTATIVE_ARITH_P (lhs)
9167 && rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 0)))
9168 other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 1);
9169 else if (rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 1)))
9170 other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 0);
9171 else
9172 return x;
9173
9174 /* Form the new inner operation, seeing if it simplifies first. */
9175 tem = simplify_gen_binary (code, GET_MODE (x), lhs, rhs);
9176
9177 /* There is one exception to the general way of distributing:
9178 (a | c) ^ (b | c) -> (a ^ b) & ~c */
9179 if (code == XOR && inner_code == IOR)
9180 {
9181 inner_code = AND;
9182 other = simplify_gen_unary (NOT, GET_MODE (x), other, GET_MODE (x));
9183 }
9184
9185 /* We may be able to continue distributing the result, so call
9186 ourselves recursively on the inner operation before forming the
9187 outer operation, which we return. */
9188 return simplify_gen_binary (inner_code, GET_MODE (x),
9189 apply_distributive_law (tem), other);
9190 }
9191
9192 /* See if X is of the form (* (+ A B) C), and if so convert to
9193 (+ (* A C) (* B C)) and try to simplify.
9194
9195 Most of the time, this results in no change. However, if some of
9196 the operands are the same or inverses of each other, simplifications
9197 will result.
9198
9199 For example, (and (ior A B) (not B)) can occur as the result of
9200 expanding a bit field assignment. When we apply the distributive
9201 law to this, we get (ior (and A (not B)) (and B (not B))),
9202 which then simplifies to (and A (not B)).
9203
9204 Note that no checks happen on the validity of applying the inverse
9205 distributive law. This is pointless since we can do it in the
9206 few places where this routine is called.
9207
9208 N is the index of the term that is decomposed (the arithmetic operation,
9209 i.e. (+ A B) in the first example above). !N is the index of the term that
9210 is distributed, i.e. of C in the first example above. */
9211 static rtx
9212 distribute_and_simplify_rtx (rtx x, int n)
9213 {
9214 enum machine_mode mode;
9215 enum rtx_code outer_code, inner_code;
9216 rtx decomposed, distributed, inner_op0, inner_op1, new_op0, new_op1, tmp;
9217
9218 /* Distributivity is not true for floating point as it can change the
9219 value. So we don't do it unless -funsafe-math-optimizations. */
9220 if (FLOAT_MODE_P (GET_MODE (x))
9221 && ! flag_unsafe_math_optimizations)
9222 return NULL_RTX;
9223
9224 decomposed = XEXP (x, n);
9225 if (!ARITHMETIC_P (decomposed))
9226 return NULL_RTX;
9227
9228 mode = GET_MODE (x);
9229 outer_code = GET_CODE (x);
9230 distributed = XEXP (x, !n);
9231
9232 inner_code = GET_CODE (decomposed);
9233 inner_op0 = XEXP (decomposed, 0);
9234 inner_op1 = XEXP (decomposed, 1);
9235
9236 /* Special case (and (xor B C) (not A)), which is equivalent to
9237 (xor (ior A B) (ior A C)) */
9238 if (outer_code == AND && inner_code == XOR && GET_CODE (distributed) == NOT)
9239 {
9240 distributed = XEXP (distributed, 0);
9241 outer_code = IOR;
9242 }
9243
9244 if (n == 0)
9245 {
9246 /* Distribute the second term. */
9247 new_op0 = simplify_gen_binary (outer_code, mode, inner_op0, distributed);
9248 new_op1 = simplify_gen_binary (outer_code, mode, inner_op1, distributed);
9249 }
9250 else
9251 {
9252 /* Distribute the first term. */
9253 new_op0 = simplify_gen_binary (outer_code, mode, distributed, inner_op0);
9254 new_op1 = simplify_gen_binary (outer_code, mode, distributed, inner_op1);
9255 }
9256
9257 tmp = apply_distributive_law (simplify_gen_binary (inner_code, mode,
9258 new_op0, new_op1));
9259 if (GET_CODE (tmp) != outer_code
9260 && rtx_cost (tmp, SET, optimize_this_for_speed_p)
9261 < rtx_cost (x, SET, optimize_this_for_speed_p))
9262 return tmp;
9263
9264 return NULL_RTX;
9265 }
9266 \f
9267 /* Simplify a logical `and' of VAROP with the constant CONSTOP, to be done
9268 in MODE. Return an equivalent form, if different from (and VAROP
9269 (const_int CONSTOP)). Otherwise, return NULL_RTX. */
9270
9271 static rtx
9272 simplify_and_const_int_1 (enum machine_mode mode, rtx varop,
9273 unsigned HOST_WIDE_INT constop)
9274 {
9275 unsigned HOST_WIDE_INT nonzero;
9276 unsigned HOST_WIDE_INT orig_constop;
9277 rtx orig_varop;
9278 int i;
9279
9280 orig_varop = varop;
9281 orig_constop = constop;
9282 if (GET_CODE (varop) == CLOBBER)
9283 return NULL_RTX;
9284
9285 /* Simplify VAROP knowing that we will be only looking at some of the
9286 bits in it.
9287
9288 Note by passing in CONSTOP, we guarantee that the bits not set in
9289 CONSTOP are not significant and will never be examined. We must
9290 ensure that is the case by explicitly masking out those bits
9291 before returning. */
9292 varop = force_to_mode (varop, mode, constop, 0);
9293
9294 /* If VAROP is a CLOBBER, we will fail so return it. */
9295 if (GET_CODE (varop) == CLOBBER)
9296 return varop;
9297
9298 /* If VAROP is a CONST_INT, then we need to apply the mask in CONSTOP
9299 to VAROP and return the new constant. */
9300 if (CONST_INT_P (varop))
9301 return gen_int_mode (INTVAL (varop) & constop, mode);
9302
9303 /* See what bits may be nonzero in VAROP. Unlike the general case of
9304 a call to nonzero_bits, here we don't care about bits outside
9305 MODE. */
9306
9307 nonzero = nonzero_bits (varop, mode) & GET_MODE_MASK (mode);
9308
9309 /* Turn off all bits in the constant that are known to already be zero.
9310 Thus, if the AND isn't needed at all, we will have CONSTOP == NONZERO_BITS
9311 which is tested below. */
9312
9313 constop &= nonzero;
9314
9315 /* If we don't have any bits left, return zero. */
9316 if (constop == 0)
9317 return const0_rtx;
9318
9319 /* If VAROP is a NEG of something known to be zero or 1 and CONSTOP is
9320 a power of two, we can replace this with an ASHIFT. */
9321 if (GET_CODE (varop) == NEG && nonzero_bits (XEXP (varop, 0), mode) == 1
9322 && (i = exact_log2 (constop)) >= 0)
9323 return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (varop, 0), i);
9324
9325 /* If VAROP is an IOR or XOR, apply the AND to both branches of the IOR
9326 or XOR, then try to apply the distributive law. This may eliminate
9327 operations if either branch can be simplified because of the AND.
9328 It may also make some cases more complex, but those cases probably
9329 won't match a pattern either with or without this. */
9330
9331 if (GET_CODE (varop) == IOR || GET_CODE (varop) == XOR)
9332 return
9333 gen_lowpart
9334 (mode,
9335 apply_distributive_law
9336 (simplify_gen_binary (GET_CODE (varop), GET_MODE (varop),
9337 simplify_and_const_int (NULL_RTX,
9338 GET_MODE (varop),
9339 XEXP (varop, 0),
9340 constop),
9341 simplify_and_const_int (NULL_RTX,
9342 GET_MODE (varop),
9343 XEXP (varop, 1),
9344 constop))));
9345
9346 /* If VAROP is PLUS, and the constant is a mask of low bits, distribute
9347 the AND and see if one of the operands simplifies to zero. If so, we
9348 may eliminate it. */
9349
9350 if (GET_CODE (varop) == PLUS
9351 && exact_log2 (constop + 1) >= 0)
9352 {
9353 rtx o0, o1;
9354
9355 o0 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 0), constop);
9356 o1 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 1), constop);
9357 if (o0 == const0_rtx)
9358 return o1;
9359 if (o1 == const0_rtx)
9360 return o0;
9361 }
9362
9363 /* Make a SUBREG if necessary. If we can't make it, fail. */
9364 varop = gen_lowpart (mode, varop);
9365 if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
9366 return NULL_RTX;
9367
9368 /* If we are only masking insignificant bits, return VAROP. */
9369 if (constop == nonzero)
9370 return varop;
9371
9372 if (varop == orig_varop && constop == orig_constop)
9373 return NULL_RTX;
9374
9375 /* Otherwise, return an AND. */
9376 return simplify_gen_binary (AND, mode, varop, gen_int_mode (constop, mode));
9377 }
9378
9379
9380 /* We have X, a logical `and' of VAROP with the constant CONSTOP, to be done
9381 in MODE.
9382
9383 Return an equivalent form, if different from X. Otherwise, return X. If
9384 X is zero, we are to always construct the equivalent form. */
9385
9386 static rtx
9387 simplify_and_const_int (rtx x, enum machine_mode mode, rtx varop,
9388 unsigned HOST_WIDE_INT constop)
9389 {
9390 rtx tem = simplify_and_const_int_1 (mode, varop, constop);
9391 if (tem)
9392 return tem;
9393
9394 if (!x)
9395 x = simplify_gen_binary (AND, GET_MODE (varop), varop,
9396 gen_int_mode (constop, mode));
9397 if (GET_MODE (x) != mode)
9398 x = gen_lowpart (mode, x);
9399 return x;
9400 }
9401 \f
9402 /* Given a REG, X, compute which bits in X can be nonzero.
9403 We don't care about bits outside of those defined in MODE.
9404
9405 For most X this is simply GET_MODE_MASK (GET_MODE (X)), but if X is
9406 a shift, AND, or zero_extract, we can do better. */
9407
9408 static rtx
9409 reg_nonzero_bits_for_combine (const_rtx x, enum machine_mode mode,
9410 const_rtx known_x ATTRIBUTE_UNUSED,
9411 enum machine_mode known_mode ATTRIBUTE_UNUSED,
9412 unsigned HOST_WIDE_INT known_ret ATTRIBUTE_UNUSED,
9413 unsigned HOST_WIDE_INT *nonzero)
9414 {
9415 rtx tem;
9416 reg_stat_type *rsp;
9417
9418 /* If X is a register whose nonzero bits value is current, use it.
9419 Otherwise, if X is a register whose value we can find, use that
9420 value. Otherwise, use the previously-computed global nonzero bits
9421 for this register. */
9422
9423 rsp = VEC_index (reg_stat_type, reg_stat, REGNO (x));
9424 if (rsp->last_set_value != 0
9425 && (rsp->last_set_mode == mode
9426 || (GET_MODE_CLASS (rsp->last_set_mode) == MODE_INT
9427 && GET_MODE_CLASS (mode) == MODE_INT))
9428 && ((rsp->last_set_label >= label_tick_ebb_start
9429 && rsp->last_set_label < label_tick)
9430 || (rsp->last_set_label == label_tick
9431 && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
9432 || (REGNO (x) >= FIRST_PSEUDO_REGISTER
9433 && REG_N_SETS (REGNO (x)) == 1
9434 && !REGNO_REG_SET_P
9435 (DF_LR_IN (ENTRY_BLOCK_PTR->next_bb), REGNO (x)))))
9436 {
9437 *nonzero &= rsp->last_set_nonzero_bits;
9438 return NULL;
9439 }
9440
9441 tem = get_last_value (x);
9442
9443 if (tem)
9444 {
9445 #ifdef SHORT_IMMEDIATES_SIGN_EXTEND
9446 /* If X is narrower than MODE and TEM is a non-negative
9447 constant that would appear negative in the mode of X,
9448 sign-extend it for use in reg_nonzero_bits because some
9449 machines (maybe most) will actually do the sign-extension
9450 and this is the conservative approach.
9451
9452 ??? For 2.5, try to tighten up the MD files in this regard
9453 instead of this kludge. */
9454
9455 if (GET_MODE_BITSIZE (GET_MODE (x)) < GET_MODE_BITSIZE (mode)
9456 && CONST_INT_P (tem)
9457 && INTVAL (tem) > 0
9458 && 0 != (UINTVAL (tem)
9459 & ((unsigned HOST_WIDE_INT) 1
9460 << (GET_MODE_BITSIZE (GET_MODE (x)) - 1))))
9461 tem = GEN_INT (UINTVAL (tem)
9462 | ((unsigned HOST_WIDE_INT) (-1)
9463 << GET_MODE_BITSIZE (GET_MODE (x))));
9464 #endif
9465 return tem;
9466 }
9467 else if (nonzero_sign_valid && rsp->nonzero_bits)
9468 {
9469 unsigned HOST_WIDE_INT mask = rsp->nonzero_bits;
9470
9471 if (GET_MODE_BITSIZE (GET_MODE (x)) < GET_MODE_BITSIZE (mode))
9472 /* We don't know anything about the upper bits. */
9473 mask |= GET_MODE_MASK (mode) ^ GET_MODE_MASK (GET_MODE (x));
9474 *nonzero &= mask;
9475 }
9476
9477 return NULL;
9478 }
9479
9480 /* Return the number of bits at the high-order end of X that are known to
9481 be equal to the sign bit. X will be used in mode MODE; if MODE is
9482 VOIDmode, X will be used in its own mode. The returned value will always
9483 be between 1 and the number of bits in MODE. */
9484
9485 static rtx
9486 reg_num_sign_bit_copies_for_combine (const_rtx x, enum machine_mode mode,
9487 const_rtx known_x ATTRIBUTE_UNUSED,
9488 enum machine_mode known_mode
9489 ATTRIBUTE_UNUSED,
9490 unsigned int known_ret ATTRIBUTE_UNUSED,
9491 unsigned int *result)
9492 {
9493 rtx tem;
9494 reg_stat_type *rsp;
9495
9496 rsp = VEC_index (reg_stat_type, reg_stat, REGNO (x));
9497 if (rsp->last_set_value != 0
9498 && rsp->last_set_mode == mode
9499 && ((rsp->last_set_label >= label_tick_ebb_start
9500 && rsp->last_set_label < label_tick)
9501 || (rsp->last_set_label == label_tick
9502 && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
9503 || (REGNO (x) >= FIRST_PSEUDO_REGISTER
9504 && REG_N_SETS (REGNO (x)) == 1
9505 && !REGNO_REG_SET_P
9506 (DF_LR_IN (ENTRY_BLOCK_PTR->next_bb), REGNO (x)))))
9507 {
9508 *result = rsp->last_set_sign_bit_copies;
9509 return NULL;
9510 }
9511
9512 tem = get_last_value (x);
9513 if (tem != 0)
9514 return tem;
9515
9516 if (nonzero_sign_valid && rsp->sign_bit_copies != 0
9517 && GET_MODE_BITSIZE (GET_MODE (x)) == GET_MODE_BITSIZE (mode))
9518 *result = rsp->sign_bit_copies;
9519
9520 return NULL;
9521 }
9522 \f
9523 /* Return the number of "extended" bits there are in X, when interpreted
9524 as a quantity in MODE whose signedness is indicated by UNSIGNEDP. For
9525 unsigned quantities, this is the number of high-order zero bits.
9526 For signed quantities, this is the number of copies of the sign bit
9527 minus 1. In both cases, this function returns the number of "spare"
9528 bits. For example, if two quantities for which this function returns
9529 at least 1 are added, the addition is known not to overflow.
9530
9531 This function will always return 0 unless called during combine, which
9532 implies that it must be called from a define_split. */
9533
9534 unsigned int
9535 extended_count (const_rtx x, enum machine_mode mode, int unsignedp)
9536 {
9537 if (nonzero_sign_valid == 0)
9538 return 0;
9539
9540 return (unsignedp
9541 ? (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
9542 ? (unsigned int) (GET_MODE_BITSIZE (mode) - 1
9543 - floor_log2 (nonzero_bits (x, mode)))
9544 : 0)
9545 : num_sign_bit_copies (x, mode) - 1);
9546 }
9547 \f
9548 /* This function is called from `simplify_shift_const' to merge two
9549 outer operations. Specifically, we have already found that we need
9550 to perform operation *POP0 with constant *PCONST0 at the outermost
9551 position. We would now like to also perform OP1 with constant CONST1
9552 (with *POP0 being done last).
9553
9554 Return 1 if we can do the operation and update *POP0 and *PCONST0 with
9555 the resulting operation. *PCOMP_P is set to 1 if we would need to
9556 complement the innermost operand, otherwise it is unchanged.
9557
9558 MODE is the mode in which the operation will be done. No bits outside
9559 the width of this mode matter. It is assumed that the width of this mode
9560 is smaller than or equal to HOST_BITS_PER_WIDE_INT.
9561
9562 If *POP0 or OP1 is UNKNOWN, it means no operation is required. Only NEG, PLUS,
9563 IOR, XOR, and AND are supported. We may set *POP0 to SET if the proper
9564 result is simply *PCONST0.
9565
9566 If the resulting operation cannot be expressed as one operation, we
9567 return 0 and do not change *POP0, *PCONST0, and *PCOMP_P. */
9568
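/* For example: if *POP0 is IOR with *PCONST0 == C and OP1 is AND with
   CONST1 == C, the result of ((a & C) | C) is simply C, so *POP0
   becomes SET; if both operations are IOR, the two constants are
   simply ORed together.  */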
9569 static int
9570 merge_outer_ops (enum rtx_code *pop0, HOST_WIDE_INT *pconst0, enum rtx_code op1,
		 HOST_WIDE_INT const1, enum machine_mode mode, int *pcomp_p)
9571 {
9572 enum rtx_code op0 = *pop0;
9573 HOST_WIDE_INT const0 = *pconst0;
9574
9575 const0 &= GET_MODE_MASK (mode);
9576 const1 &= GET_MODE_MASK (mode);
9577
9578 /* If OP0 is an AND, clear unimportant bits in CONST1. */
9579 if (op0 == AND)
9580 const1 &= const0;
9581
9582 /* If OP0 or OP1 is UNKNOWN, this is easy. Similarly if they are the same or
9583 if OP0 is SET. */
9584
9585 if (op1 == UNKNOWN || op0 == SET)
9586 return 1;
9587
9588 else if (op0 == UNKNOWN)
9589 op0 = op1, const0 = const1;
9590
9591 else if (op0 == op1)
9592 {
9593 switch (op0)
9594 {
9595 case AND:
9596 const0 &= const1;
9597 break;
9598 case IOR:
9599 const0 |= const1;
9600 break;
9601 case XOR:
9602 const0 ^= const1;
9603 break;
9604 case PLUS:
9605 const0 += const1;
9606 break;
9607 case NEG:
9608 op0 = UNKNOWN;
9609 break;
9610 default:
9611 break;
9612 }
9613 }
9614
9615 /* Otherwise, if either is a PLUS or NEG, we can't do anything. */
9616 else if (op0 == PLUS || op1 == PLUS || op0 == NEG || op1 == NEG)
9617 return 0;
9618
9619 /* If the two constants aren't the same, we can't do anything. The
9620 remaining six cases can all be done. */
9621 else if (const0 != const1)
9622 return 0;
9623
9624 else
9625 switch (op0)
9626 {
9627 case IOR:
9628 if (op1 == AND)
9629 /* (a & b) | b == b */
9630 op0 = SET;
9631 else /* op1 == XOR */
9632 /* (a ^ b) | b == a | b */
9633 {;}
9634 break;
9635
9636 case XOR:
9637 if (op1 == AND)
9638 /* (a & b) ^ b == (~a) & b */
9639 op0 = AND, *pcomp_p = 1;
9640 else /* op1 == IOR */
9641 /* (a | b) ^ b == a & ~b */
9642 op0 = AND, const0 = ~const0;
9643 break;
9644
9645 case AND:
9646 if (op1 == IOR)
9647 /* (a | b) & b == b */
9648 op0 = SET;
9649 else /* op1 == XOR */
9650 /* (a ^ b) & b == (~a) & b */
9651 *pcomp_p = 1;
9652 break;
9653 default:
9654 break;
9655 }
9656
9657 /* Check for NO-OP cases. */
9658 const0 &= GET_MODE_MASK (mode);
9659 if (const0 == 0
9660 && (op0 == IOR || op0 == XOR || op0 == PLUS))
9661 op0 = UNKNOWN;
9662 else if (const0 == 0 && op0 == AND)
9663 op0 = SET;
9664 else if ((unsigned HOST_WIDE_INT) const0 == GET_MODE_MASK (mode)
9665 && op0 == AND)
9666 op0 = UNKNOWN;
9667
9668 *pop0 = op0;
9669
9670 /* ??? Slightly redundant with the above mask, but not entirely.
9671 Moving this above means we'd have to sign-extend the mode mask
9672 for the final test. */
9673 if (op0 != UNKNOWN && op0 != NEG)
9674 *pconst0 = trunc_int_for_mode (const0, mode);
9675
9676 return 1;
9677 }
9678 \f
9679 /* A helper to simplify_shift_const_1 to determine the mode we can perform
9680 the shift in. The original shift operation CODE is performed on OP in
9681 ORIG_MODE. Return the wider mode MODE if we can perform the operation
9682 in that mode. Return ORIG_MODE otherwise. We can also assume that the
9683 result of the shift is subject to operation OUTER_CODE with operand
9684 OUTER_CONST. */
9685
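/* For example (illustrative modes and constants): an LSHIFTRT of a
   QImode value by 4 can be widened to SImode when the result feeds an
   outer AND with 0x0f, since the extra bits brought in are masked off
   again (low_bitmask_len gives 4 care bits and 8 - 4 >= 4).  */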
9686 static enum machine_mode
9687 try_widen_shift_mode (enum rtx_code code, rtx op, int count,
9688 enum machine_mode orig_mode, enum machine_mode mode,
9689 enum rtx_code outer_code, HOST_WIDE_INT outer_const)
9690 {
9691 if (orig_mode == mode)
9692 return mode;
9693 gcc_assert (GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (orig_mode));
9694
9695 /* In general we can't perform the shift in a wider mode for right shifts and rotates. */
9696 switch (code)
9697 {
9698 case ASHIFTRT:
9699 /* We can still widen if the bits brought in from the left are identical
9700 to the sign bit of ORIG_MODE. */
9701 if (num_sign_bit_copies (op, mode)
9702 > (unsigned) (GET_MODE_BITSIZE (mode)
9703 - GET_MODE_BITSIZE (orig_mode)))
9704 return mode;
9705 return orig_mode;
9706
9707 case LSHIFTRT:
9708 /* Similarly here but with zero bits. */
9709 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
9710 && (nonzero_bits (op, mode) & ~GET_MODE_MASK (orig_mode)) == 0)
9711 return mode;
9712
9713 /* We can also widen if the bits brought in will be masked off. This
9714 operation is performed in ORIG_MODE. */
9715 if (outer_code == AND)
9716 {
9717 int care_bits = low_bitmask_len (orig_mode, outer_const);
9718
9719 if (care_bits >= 0
9720 && GET_MODE_BITSIZE (orig_mode) - care_bits >= count)
9721 return mode;
9722 }
9723 /* fall through */
9724
9725 case ROTATE:
9726 return orig_mode;
9727
9728 case ROTATERT:
9729 gcc_unreachable ();
9730
9731 default:
9732 return mode;
9733 }
9734 }
9735
9736 /* Simplify a shift of VAROP by COUNT bits. CODE says what kind of shift.
9737 The shift's result has mode RESULT_MODE. Return NULL_RTX if we cannot
9738 simplify it. Otherwise, return a simplified value.
9739
9740 The shift is normally computed in the widest mode we find in VAROP, as
9741 long as it isn't a different number of words than RESULT_MODE. Exceptions
9742 are ASHIFTRT and ROTATE, which are always done in their original mode. */
9743
9744 static rtx
9745 simplify_shift_const_1 (enum rtx_code code, enum machine_mode result_mode,
9746 rtx varop, int orig_count)
9747 {
9748 enum rtx_code orig_code = code;
9749 rtx orig_varop = varop;
9750 int count;
9751 enum machine_mode mode = result_mode;
9752 enum machine_mode shift_mode, tmode;
9753 unsigned int mode_words
9754 = (GET_MODE_SIZE (mode) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD;
9755 /* We form (outer_op (code varop count) (outer_const)). */
9756 enum rtx_code outer_op = UNKNOWN;
9757 HOST_WIDE_INT outer_const = 0;
9758 int complement_p = 0;
9759 rtx new_rtx, x;
9760
9761 /* Make sure to truncate the "natural" shift on the way in. We don't
9762 want to do this inside the loop as it makes it more difficult to
9763 combine shifts. */
9764 if (SHIFT_COUNT_TRUNCATED)
9765 orig_count &= GET_MODE_BITSIZE (mode) - 1;
9766
9767 /* If we were given an invalid count, don't do anything except exactly
9768 what was requested. */
9769
9770 if (orig_count < 0 || orig_count >= (int) GET_MODE_BITSIZE (mode))
9771 return NULL_RTX;
9772
9773 count = orig_count;
9774
9775 /* Unless one of the branches of the `if' in this loop does a `continue',
9776 we will `break' the loop after the `if'. */
9777
9778 while (count != 0)
9779 {
9780 /* If we have an operand of (clobber (const_int 0)), fail. */
9781 if (GET_CODE (varop) == CLOBBER)
9782 return NULL_RTX;
9783
9784 /* Convert ROTATERT to ROTATE. */
9785 if (code == ROTATERT)
9786 {
9787 unsigned int bitsize = GET_MODE_BITSIZE (result_mode);
9788 code = ROTATE;
9789 if (VECTOR_MODE_P (result_mode))
9790 count = bitsize / GET_MODE_NUNITS (result_mode) - count;
9791 else
9792 count = bitsize - count;
9793 }
9794
9795 shift_mode = try_widen_shift_mode (code, varop, count, result_mode,
9796 mode, outer_op, outer_const);
9797
9798 /* Handle cases where the count is greater than the size of the mode
9799 minus 1. For ASHIFT, use the size minus one as the count (this can
9800 occur when simplifying (lshiftrt (ashiftrt ..))). For rotates,
9801 take the count modulo the size. For other shifts, the result is
9802 zero.
9803
9804 Since these shifts are being produced by the compiler by combining
9805 multiple operations, each of which are defined, we know what the
9806 result is supposed to be. */
9807
9808 if (count > (GET_MODE_BITSIZE (shift_mode) - 1))
9809 {
9810 if (code == ASHIFTRT)
9811 count = GET_MODE_BITSIZE (shift_mode) - 1;
9812 else if (code == ROTATE || code == ROTATERT)
9813 count %= GET_MODE_BITSIZE (shift_mode);
9814 else
9815 {
9816 /* We can't simply return zero because there may be an
9817 outer op. */
9818 varop = const0_rtx;
9819 count = 0;
9820 break;
9821 }
9822 }
9823
9824 /* If we discovered we had to complement VAROP, leave. Making a NOT
9825 here would cause an infinite loop. */
9826 if (complement_p)
9827 break;
9828
9829 /* An arithmetic right shift of a quantity known to be -1 or 0
9830 is a no-op. */
9831 if (code == ASHIFTRT
9832 && (num_sign_bit_copies (varop, shift_mode)
9833 == GET_MODE_BITSIZE (shift_mode)))
9834 {
9835 count = 0;
9836 break;
9837 }
9838
9839 /* If we are doing an arithmetic right shift and discarding all but
9840 the sign bit copies, this is equivalent to doing a shift by the
9841 bitsize minus one. Convert it into that shift because it will often
9842 allow other simplifications. */
9843
9844 if (code == ASHIFTRT
9845 && (count + num_sign_bit_copies (varop, shift_mode)
9846 >= GET_MODE_BITSIZE (shift_mode)))
9847 count = GET_MODE_BITSIZE (shift_mode) - 1;
9848
9849 /* We simplify the tests below and elsewhere by converting
9850 ASHIFTRT to LSHIFTRT if we know the sign bit is clear.
9851 `make_compound_operation' will convert it to an ASHIFTRT for
9852 those machines (such as VAX) that don't have an LSHIFTRT. */
9853 if (GET_MODE_BITSIZE (shift_mode) <= HOST_BITS_PER_WIDE_INT
9854 && code == ASHIFTRT
9855 && ((nonzero_bits (varop, shift_mode)
9856 & ((unsigned HOST_WIDE_INT) 1
9857 << (GET_MODE_BITSIZE (shift_mode) - 1))) == 0))
9858 code = LSHIFTRT;
9859
9860 if (((code == LSHIFTRT
9861 && GET_MODE_BITSIZE (shift_mode) <= HOST_BITS_PER_WIDE_INT
9862 && !(nonzero_bits (varop, shift_mode) >> count))
9863 || (code == ASHIFT
9864 && GET_MODE_BITSIZE (shift_mode) <= HOST_BITS_PER_WIDE_INT
9865 && !((nonzero_bits (varop, shift_mode) << count)
9866 & GET_MODE_MASK (shift_mode))))
9867 && !side_effects_p (varop))
9868 varop = const0_rtx;
9869
9870 switch (GET_CODE (varop))
9871 {
9872 case SIGN_EXTEND:
9873 case ZERO_EXTEND:
9874 case SIGN_EXTRACT:
9875 case ZERO_EXTRACT:
9876 new_rtx = expand_compound_operation (varop);
9877 if (new_rtx != varop)
9878 {
9879 varop = new_rtx;
9880 continue;
9881 }
9882 break;
9883
9884 case MEM:
9885 /* If we have (xshiftrt (mem ...) C) and C is MODE_WIDTH
9886 minus the width of a smaller mode, we can do this with a
9887 SIGN_EXTEND or ZERO_EXTEND from the narrower memory location. */
9888 if ((code == ASHIFTRT || code == LSHIFTRT)
9889 && ! mode_dependent_address_p (XEXP (varop, 0))
9890 && ! MEM_VOLATILE_P (varop)
9891 && (tmode = mode_for_size (GET_MODE_BITSIZE (mode) - count,
9892 MODE_INT, 1)) != BLKmode)
9893 {
9894 new_rtx = adjust_address_nv (varop, tmode,
9895 BYTES_BIG_ENDIAN ? 0
9896 : count / BITS_PER_UNIT);
9897
9898 varop = gen_rtx_fmt_e (code == ASHIFTRT ? SIGN_EXTEND
9899 : ZERO_EXTEND, mode, new_rtx);
9900 count = 0;
9901 continue;
9902 }
9903 break;
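	  /* A sketch of the MEM narrowing, assuming 32-bit SImode, 16-bit
	     HImode, a little-endian target, and an address that is neither
	     volatile nor mode-dependent (assumptions for exposition):
	     (lshiftrt:SI (mem:SI ADDR) (const_int 16)) becomes
	     (zero_extend:SI (mem:HI <ADDR plus 2>)); an ASHIFTRT would use
	     SIGN_EXTEND instead, and a big-endian target byte offset 0.  */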
9904
9905 case SUBREG:
9906 /* If VAROP is a SUBREG, strip it as long as the inner operand has
9907 the same number of words as what we've seen so far. Then store
9908 the widest mode in MODE. */
9909 if (subreg_lowpart_p (varop)
9910 && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (varop)))
9911 > GET_MODE_SIZE (GET_MODE (varop)))
9912 && (unsigned int) ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (varop)))
9913 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
9914 == mode_words
9915 && GET_MODE_CLASS (GET_MODE (varop)) == MODE_INT
9916 && GET_MODE_CLASS (GET_MODE (SUBREG_REG (varop))) == MODE_INT)
9917 {
9918 varop = SUBREG_REG (varop);
9919 if (GET_MODE_SIZE (GET_MODE (varop)) > GET_MODE_SIZE (mode))
9920 mode = GET_MODE (varop);
9921 continue;
9922 }
9923 break;
9924
9925 case MULT:
9926 /* Some machines use MULT instead of ASHIFT because MULT
9927 is cheaper. But it is still better on those machines to
9928 merge two shifts into one. */
9929 if (CONST_INT_P (XEXP (varop, 1))
9930 && exact_log2 (UINTVAL (XEXP (varop, 1))) >= 0)
9931 {
9932 varop
9933 = simplify_gen_binary (ASHIFT, GET_MODE (varop),
9934 XEXP (varop, 0),
9935 GEN_INT (exact_log2 (
9936 UINTVAL (XEXP (varop, 1)))));
9937 continue;
9938 }
9939 break;
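	  /* For instance, (mult:SI X (const_int 8)) is rewritten here as
	     (ashift:SI X (const_int 3)), which lets it merge with the
	     outer shift on the next iteration (illustration only).  */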
9940
9941 case UDIV:
9942 /* Similar, for when divides are cheaper. */
9943 if (CONST_INT_P (XEXP (varop, 1))
9944 && exact_log2 (UINTVAL (XEXP (varop, 1))) >= 0)
9945 {
9946 varop
9947 = simplify_gen_binary (LSHIFTRT, GET_MODE (varop),
9948 XEXP (varop, 0),
9949 GEN_INT (exact_log2 (
9950 UINTVAL (XEXP (varop, 1)))));
9951 continue;
9952 }
9953 break;
9954
9955 case ASHIFTRT:
9956 /* If we are extracting just the sign bit of an arithmetic
9957 right shift, that shift is not needed. However, the sign
9958 bit of a wider mode may be different from what would be
9959 interpreted as the sign bit in a narrower mode, so, if
9960 the result is narrower, don't discard the shift. */
9961 if (code == LSHIFTRT
9962 && count == (GET_MODE_BITSIZE (result_mode) - 1)
9963 && (GET_MODE_BITSIZE (result_mode)
9964 >= GET_MODE_BITSIZE (GET_MODE (varop))))
9965 {
9966 varop = XEXP (varop, 0);
9967 continue;
9968 }
9969
9970 /* ... fall through ... */
9971
9972 case LSHIFTRT:
9973 case ASHIFT:
9974 case ROTATE:
9975 /* Here we have two nested shifts. The result is usually the
9976 AND of a new shift with a mask. We compute the result below. */
9977 if (CONST_INT_P (XEXP (varop, 1))
9978 && INTVAL (XEXP (varop, 1)) >= 0
9979 && INTVAL (XEXP (varop, 1)) < GET_MODE_BITSIZE (GET_MODE (varop))
9980 && GET_MODE_BITSIZE (result_mode) <= HOST_BITS_PER_WIDE_INT
9981 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
9982 && !VECTOR_MODE_P (result_mode))
9983 {
9984 enum rtx_code first_code = GET_CODE (varop);
9985 unsigned int first_count = INTVAL (XEXP (varop, 1));
9986 unsigned HOST_WIDE_INT mask;
9987 rtx mask_rtx;
9988
9989 /* We have one common special case. We can't do any merging if
9990 the inner code is an ASHIFTRT of a smaller mode. However, if
9991 we have (ashift:M1 (subreg:M1 (ashiftrt:M2 FOO C1) 0) C2)
9992 with C2 == GET_MODE_BITSIZE (M1) - GET_MODE_BITSIZE (M2),
9993 we can convert it to
9994 (ashiftrt:M1 (ashift:M1 (and:M1 (subreg:M1 FOO 0) C3) C2) C1).
9995 This simplifies certain SIGN_EXTEND operations. */
9996 if (code == ASHIFT && first_code == ASHIFTRT
9997 && count == (GET_MODE_BITSIZE (result_mode)
9998 - GET_MODE_BITSIZE (GET_MODE (varop))))
9999 {
10000 /* C3 has the low-order C1 bits zero. */
10001
10002 mask = GET_MODE_MASK (mode)
10003 & ~(((unsigned HOST_WIDE_INT) 1 << first_count) - 1);
10004
10005 varop = simplify_and_const_int (NULL_RTX, result_mode,
10006 XEXP (varop, 0), mask);
10007 varop = simplify_shift_const (NULL_RTX, ASHIFT, result_mode,
10008 varop, count);
10009 count = first_count;
10010 code = ASHIFTRT;
10011 continue;
10012 }
10013
10014 /* If this was (ashiftrt (ashift foo C1) C2) and FOO has more
10015 than C1 high-order bits equal to the sign bit, we can convert
10016 this to either an ASHIFT or an ASHIFTRT depending on the
10017 two counts.
10018
10019 We cannot do this if VAROP's mode is not SHIFT_MODE. */
10020
10021 if (code == ASHIFTRT && first_code == ASHIFT
10022 && GET_MODE (varop) == shift_mode
10023 && (num_sign_bit_copies (XEXP (varop, 0), shift_mode)
10024 > first_count))
10025 {
10026 varop = XEXP (varop, 0);
10027 count -= first_count;
10028 if (count < 0)
10029 {
10030 count = -count;
10031 code = ASHIFT;
10032 }
10033
10034 continue;
10035 }
10036
10037 /* There are some cases we can't do. If CODE is ASHIFTRT,
10038 we can only do this if FIRST_CODE is also ASHIFTRT.
10039
10040 We can't do the case when CODE is ROTATE and FIRST_CODE is
10041 ASHIFTRT.
10042
10043 If the mode of this shift is not the mode of the outer shift,
10044 we can't do this if either shift is a right shift or ROTATE.
10045
10046 Finally, we can't do any of these if the mode is too wide
10047 unless the codes are the same.
10048
10049 Handle the case where the shift codes are the same
10050 first. */
10051
10052 if (code == first_code)
10053 {
10054 if (GET_MODE (varop) != result_mode
10055 && (code == ASHIFTRT || code == LSHIFTRT
10056 || code == ROTATE))
10057 break;
10058
10059 count += first_count;
10060 varop = XEXP (varop, 0);
10061 continue;
10062 }
10063
10064 if (code == ASHIFTRT
10065 || (code == ROTATE && first_code == ASHIFTRT)
10066 || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT
10067 || (GET_MODE (varop) != result_mode
10068 && (first_code == ASHIFTRT || first_code == LSHIFTRT
10069 || first_code == ROTATE
10070 || code == ROTATE)))
10071 break;
10072
10073 /* To compute the mask to apply after the shift, shift the
10074 nonzero bits of the inner shift the same way the
10075 outer shift will. */
10076
10077 mask_rtx = GEN_INT (nonzero_bits (varop, GET_MODE (varop)));
10078
10079 mask_rtx
10080 = simplify_const_binary_operation (code, result_mode, mask_rtx,
10081 GEN_INT (count));
10082
10083 /* Give up if we can't compute an outer operation to use. */
10084 if (mask_rtx == 0
10085 || !CONST_INT_P (mask_rtx)
10086 || ! merge_outer_ops (&outer_op, &outer_const, AND,
10087 INTVAL (mask_rtx),
10088 result_mode, &complement_p))
10089 break;
10090
10091 /* If the shifts are in the same direction, we add the
10092 counts. Otherwise, we subtract them. */
10093 if ((code == ASHIFTRT || code == LSHIFTRT)
10094 == (first_code == ASHIFTRT || first_code == LSHIFTRT))
10095 count += first_count;
10096 else
10097 count -= first_count;
10098
10099 /* If COUNT is positive, the new shift is usually CODE,
10100 except for the two cases checked below, in which case it is
10101 FIRST_CODE. If the count is negative, FIRST_CODE should
10102 always be used.  */
10103 if (count > 0
10104 && ((first_code == ROTATE && code == ASHIFT)
10105 || (first_code == ASHIFTRT && code == LSHIFTRT)))
10106 code = first_code;
10107 else if (count < 0)
10108 code = first_code, count = -count;
10109
10110 varop = XEXP (varop, 0);
10111 continue;
10112 }
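	      /* Two illustrative outcomes of the merging above, assuming
		 32-bit SImode and no other outer operation pending: shifts
		 in the same direction add their counts, e.g.
		 (lshiftrt:SI (lshiftrt:SI X (const_int 2)) (const_int 3))
		 becomes a single shift by 5; for opposite directions,
		 (ashift:SI (lshiftrt:SI X (const_int 2)) (const_int 3))
		 becomes (ashift:SI X (const_int 1)) with an outer AND that
		 clears the low three bits (mask 0xfffffff8).  */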
10113
10114 /* If we have (A << B << C) for any shift, we can convert this to
10115 (A << C << B). This wins if A is a constant. Only try this if
10116 B is not a constant. */
10117
10118 else if (GET_CODE (varop) == code
10119 && CONST_INT_P (XEXP (varop, 0))
10120 && !CONST_INT_P (XEXP (varop, 1)))
10121 {
10122 rtx new_rtx = simplify_const_binary_operation (code, mode,
10123 XEXP (varop, 0),
10124 GEN_INT (count));
10125 varop = gen_rtx_fmt_ee (code, mode, new_rtx, XEXP (varop, 1));
10126 count = 0;
10127 continue;
10128 }
10129 break;
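	  /* For example, (ashift:SI (ashift:SI (const_int 3) X)
	     (const_int 2)) becomes (ashift:SI (const_int 12) X), since the
	     constant can absorb the known shift count (illustration only).  */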
10130
10131 case NOT:
10132 if (VECTOR_MODE_P (mode))
10133 break;
10134
10135 /* Make this fit the case below. */
10136 varop = gen_rtx_XOR (mode, XEXP (varop, 0),
10137 GEN_INT (GET_MODE_MASK (mode)));
10138 continue;
10139
10140 case IOR:
10141 case AND:
10142 case XOR:
10143 /* If we have (xshiftrt (ior (plus X (const_int -1)) X) C)
10144 with C the size of VAROP - 1 and the shift is logical if
10145 STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
10146 we have an (le X 0) operation. If we have an arithmetic shift
10147 and STORE_FLAG_VALUE is 1 or we have a logical shift with
10148 STORE_FLAG_VALUE of -1, we have a (neg (le X 0)) operation. */
10149
10150 if (GET_CODE (varop) == IOR && GET_CODE (XEXP (varop, 0)) == PLUS
10151 && XEXP (XEXP (varop, 0), 1) == constm1_rtx
10152 && (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
10153 && (code == LSHIFTRT || code == ASHIFTRT)
10154 && count == (GET_MODE_BITSIZE (GET_MODE (varop)) - 1)
10155 && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
10156 {
10157 count = 0;
10158 varop = gen_rtx_LE (GET_MODE (varop), XEXP (varop, 1),
10159 const0_rtx);
10160
10161 if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
10162 varop = gen_rtx_NEG (GET_MODE (varop), varop);
10163
10164 continue;
10165 }
10166
10167 /* If we have (shift (logical)), move the logical to the outside
10168 to allow it to possibly combine with another logical and the
10169 shift to combine with another shift. This also canonicalizes to
10170 what a ZERO_EXTRACT looks like. Also, some machines have
10171 (and (shift)) insns. */
10172
10173 if (CONST_INT_P (XEXP (varop, 1))
10174 /* We can't do this if we have (ashiftrt (xor)) and the
10175 constant has its sign bit set in shift_mode. */
10176 && !(code == ASHIFTRT && GET_CODE (varop) == XOR
10177 && 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
10178 shift_mode))
10179 && (new_rtx = simplify_const_binary_operation (code, result_mode,
10180 XEXP (varop, 1),
10181 GEN_INT (count))) != 0
10182 && CONST_INT_P (new_rtx)
10183 && merge_outer_ops (&outer_op, &outer_const, GET_CODE (varop),
10184 INTVAL (new_rtx), result_mode, &complement_p))
10185 {
10186 varop = XEXP (varop, 0);
10187 continue;
10188 }
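	  /* For instance (illustration only),
	     (lshiftrt:SI (and:SI X (const_int 0xff00)) (const_int 8))
	     is handled by shifting the AND constant as well, yielding
	     (and:SI (lshiftrt:SI X (const_int 8)) (const_int 255))
	     once the outer operation is applied.  */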
10189
10190 /* If we can't do that, try to simplify the shift in each arm of the
10191 logical expression, make a new logical expression, and apply
10192 the inverse distributive law. This also can't be done
10193 for some (ashiftrt (xor)). */
10194 if (CONST_INT_P (XEXP (varop, 1))
10195 && !(code == ASHIFTRT && GET_CODE (varop) == XOR
10196 && 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
10197 shift_mode)))
10198 {
10199 rtx lhs = simplify_shift_const (NULL_RTX, code, shift_mode,
10200 XEXP (varop, 0), count);
10201 rtx rhs = simplify_shift_const (NULL_RTX, code, shift_mode,
10202 XEXP (varop, 1), count);
10203
10204 varop = simplify_gen_binary (GET_CODE (varop), shift_mode,
10205 lhs, rhs);
10206 varop = apply_distributive_law (varop);
10207
10208 count = 0;
10209 continue;
10210 }
10211 break;
10212
10213 case EQ:
10214 /* Convert (lshiftrt (eq FOO 0) C) to (xor FOO 1) if STORE_FLAG_VALUE
10215 says that the sign bit can be tested, FOO has mode MODE, C is
10216 GET_MODE_BITSIZE (MODE) - 1, and FOO has only its low-order bit
10217 that may be nonzero. */
10218 if (code == LSHIFTRT
10219 && XEXP (varop, 1) == const0_rtx
10220 && GET_MODE (XEXP (varop, 0)) == result_mode
10221 && count == (GET_MODE_BITSIZE (result_mode) - 1)
10222 && GET_MODE_BITSIZE (result_mode) <= HOST_BITS_PER_WIDE_INT
10223 && STORE_FLAG_VALUE == -1
10224 && nonzero_bits (XEXP (varop, 0), result_mode) == 1
10225 && merge_outer_ops (&outer_op, &outer_const, XOR, 1, result_mode,
10226 &complement_p))
10227 {
10228 varop = XEXP (varop, 0);
10229 count = 0;
10230 continue;
10231 }
10232 break;
10233
10234 case NEG:
10235 /* (lshiftrt (neg A) C) where A is either 0 or 1 and C is one less
10236 than the number of bits in the mode is equivalent to A. */
10237 if (code == LSHIFTRT
10238 && count == (GET_MODE_BITSIZE (result_mode) - 1)
10239 && nonzero_bits (XEXP (varop, 0), result_mode) == 1)
10240 {
10241 varop = XEXP (varop, 0);
10242 count = 0;
10243 continue;
10244 }
10245
10246 /* NEG commutes with ASHIFT since it is multiplication. Move the
10247 NEG outside to allow shifts to combine. */
10248 if (code == ASHIFT
10249 && merge_outer_ops (&outer_op, &outer_const, NEG, 0, result_mode,
10250 &complement_p))
10251 {
10252 varop = XEXP (varop, 0);
10253 continue;
10254 }
10255 break;
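	  /* Illustrations, assuming 32-bit SImode: if A is known to be 0 or
	     1, (lshiftrt:SI (neg:SI A) (const_int 31)) reduces to A; and
	     (ashift:SI (neg:SI X) (const_int 2)) becomes
	     (neg:SI (ashift:SI X (const_int 2))) once the outer NEG is
	     applied.  */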
10256
10257 case PLUS:
10258 /* (lshiftrt (plus A -1) C) where A is either 0 or 1 and C
10259 is one less than the number of bits in the mode is
10260 equivalent to (xor A 1). */
10261 if (code == LSHIFTRT
10262 && count == (GET_MODE_BITSIZE (result_mode) - 1)
10263 && XEXP (varop, 1) == constm1_rtx
10264 && nonzero_bits (XEXP (varop, 0), result_mode) == 1
10265 && merge_outer_ops (&outer_op, &outer_const, XOR, 1, result_mode,
10266 &complement_p))
10267 {
10268 count = 0;
10269 varop = XEXP (varop, 0);
10270 continue;
10271 }
10272
10273 /* If we have (xshiftrt (plus FOO BAR) C), and the only bits
10274 that might be nonzero in BAR are those being shifted out and those
10275 bits are known zero in FOO, we can replace the PLUS with FOO.
10276 Similarly in the other operand order. This code occurs when
10277 we are computing the size of a variable-size array. */
10278
10279 if ((code == ASHIFTRT || code == LSHIFTRT)
10280 && count < HOST_BITS_PER_WIDE_INT
10281 && nonzero_bits (XEXP (varop, 1), result_mode) >> count == 0
10282 && (nonzero_bits (XEXP (varop, 1), result_mode)
10283 & nonzero_bits (XEXP (varop, 0), result_mode)) == 0)
10284 {
10285 varop = XEXP (varop, 0);
10286 continue;
10287 }
10288 else if ((code == ASHIFTRT || code == LSHIFTRT)
10289 && count < HOST_BITS_PER_WIDE_INT
10290 && GET_MODE_BITSIZE (result_mode) <= HOST_BITS_PER_WIDE_INT
10291 && 0 == (nonzero_bits (XEXP (varop, 0), result_mode)
10292 >> count)
10293 && 0 == (nonzero_bits (XEXP (varop, 0), result_mode)
10294 & nonzero_bits (XEXP (varop, 1),
10295 result_mode)))
10296 {
10297 varop = XEXP (varop, 1);
10298 continue;
10299 }
10300
10301 /* (ashift (plus foo C) N) is (plus (ashift foo N) C'). */
10302 if (code == ASHIFT
10303 && CONST_INT_P (XEXP (varop, 1))
10304 && (new_rtx = simplify_const_binary_operation (ASHIFT, result_mode,
10305 XEXP (varop, 1),
10306 GEN_INT (count))) != 0
10307 && CONST_INT_P (new_rtx)
10308 && merge_outer_ops (&outer_op, &outer_const, PLUS,
10309 INTVAL (new_rtx), result_mode, &complement_p))
10310 {
10311 varop = XEXP (varop, 0);
10312 continue;
10313 }
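	  /* For example, (ashift:SI (plus:SI X (const_int 3)) (const_int 2))
	     becomes (plus:SI (ashift:SI X (const_int 2)) (const_int 12)),
	     i.e. (x + 3) << 2 == (x << 2) + 12 (illustration only).  */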
10314
10315 /* Check for 'PLUS signbit', which is the canonical form of 'XOR
10316 signbit', and attempt to change the PLUS to an XOR and move it to
10317 the outer operation, as is done above for the (shift (logical))
10318 case in the AND/IOR/XOR handling.  See that handling for the
10319 reasoning behind doing so.  */
10320 if (code == LSHIFTRT
10321 && CONST_INT_P (XEXP (varop, 1))
10322 && mode_signbit_p (result_mode, XEXP (varop, 1))
10323 && (new_rtx = simplify_const_binary_operation (code, result_mode,
10324 XEXP (varop, 1),
10325 GEN_INT (count))) != 0
10326 && CONST_INT_P (new_rtx)
10327 && merge_outer_ops (&outer_op, &outer_const, XOR,
10328 INTVAL (new_rtx), result_mode, &complement_p))
10329 {
10330 varop = XEXP (varop, 0);
10331 continue;
10332 }
10333
10334 break;
10335
10336 case MINUS:
10337 /* If we have (xshiftrt (minus (ashiftrt X C) X) C)
10338 with C the size of VAROP - 1 and the shift is logical if
10339 STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
10340 we have a (gt X 0) operation. If the shift is arithmetic with
10341 STORE_FLAG_VALUE of 1 or logical with STORE_FLAG_VALUE == -1,
10342 we have a (neg (gt X 0)) operation. */
10343
10344 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
10345 && GET_CODE (XEXP (varop, 0)) == ASHIFTRT
10346 && count == (GET_MODE_BITSIZE (GET_MODE (varop)) - 1)
10347 && (code == LSHIFTRT || code == ASHIFTRT)
10348 && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
10349 && INTVAL (XEXP (XEXP (varop, 0), 1)) == count
10350 && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
10351 {
10352 count = 0;
10353 varop = gen_rtx_GT (GET_MODE (varop), XEXP (varop, 1),
10354 const0_rtx);
10355
10356 if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
10357 varop = gen_rtx_NEG (GET_MODE (varop), varop);
10358
10359 continue;
10360 }
10361 break;
10362
10363 case TRUNCATE:
10364 /* Change (lshiftrt (truncate (lshiftrt))) to (truncate (lshiftrt))
10365 if the truncate does not affect the value. */
10366 if (code == LSHIFTRT
10367 && GET_CODE (XEXP (varop, 0)) == LSHIFTRT
10368 && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
10369 && (INTVAL (XEXP (XEXP (varop, 0), 1))
10370 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (varop, 0)))
10371 - GET_MODE_BITSIZE (GET_MODE (varop)))))
10372 {
10373 rtx varop_inner = XEXP (varop, 0);
10374
10375 varop_inner
10376 = gen_rtx_LSHIFTRT (GET_MODE (varop_inner),
10377 XEXP (varop_inner, 0),
10378 GEN_INT
10379 (count + INTVAL (XEXP (varop_inner, 1))));
10380 varop = gen_rtx_TRUNCATE (GET_MODE (varop), varop_inner);
10381 count = 0;
10382 continue;
10383 }
10384 break;
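	  /* A sketch, assuming 64-bit DImode and 32-bit SImode:
	     (lshiftrt:SI (truncate:SI (lshiftrt:DI Y (const_int 32)))
	     (const_int 3)) becomes
	     (truncate:SI (lshiftrt:DI Y (const_int 35))), since the bits
	     dropped by the truncation were already shifted out.  */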
10385
10386 default:
10387 break;
10388 }
10389
10390 break;
10391 }
10392
10393 shift_mode = try_widen_shift_mode (code, varop, count, result_mode, mode,
10394 outer_op, outer_const);
10395
10396 /* We have now finished analyzing the shift. The result should be
10397 a shift of type CODE with SHIFT_MODE shifting VAROP COUNT places. If
10398 OUTER_OP is non-UNKNOWN, it is an operation that needs to be applied
10399 to the result of the shift. OUTER_CONST is the relevant constant,
10400 but we must turn off all bits turned off in the shift. */
10401
10402 if (outer_op == UNKNOWN
10403 && orig_code == code && orig_count == count
10404 && varop == orig_varop
10405 && shift_mode == GET_MODE (varop))
10406 return NULL_RTX;
10407
10408 /* Make a SUBREG if necessary. If we can't make it, fail. */
10409 varop = gen_lowpart (shift_mode, varop);
10410 if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
10411 return NULL_RTX;
10412
10413 /* If we have an outer operation and we just made a shift, it is
10414 possible that we could have simplified the shift were it not
10415 for the outer operation. So try to do the simplification
10416 recursively. */
10417
10418 if (outer_op != UNKNOWN)
10419 x = simplify_shift_const_1 (code, shift_mode, varop, count);
10420 else
10421 x = NULL_RTX;
10422
10423 if (x == NULL_RTX)
10424 x = simplify_gen_binary (code, shift_mode, varop, GEN_INT (count));
10425
10426 /* If we were doing an LSHIFTRT in a wider mode than it was originally,
10427 turn off all the bits that the shift would have turned off. */
10428 if (orig_code == LSHIFTRT && result_mode != shift_mode)
10429 x = simplify_and_const_int (NULL_RTX, shift_mode, x,
10430 GET_MODE_MASK (result_mode) >> orig_count);
10431
10432 /* Do the remainder of the processing in RESULT_MODE. */
10433 x = gen_lowpart_or_truncate (result_mode, x);
10434
10435 /* If COMPLEMENT_P is set, we have to complement X before doing the outer
10436 operation. */
10437 if (complement_p)
10438 x = simplify_gen_unary (NOT, result_mode, x, result_mode);
10439
10440 if (outer_op != UNKNOWN)
10441 {
10442 if (GET_RTX_CLASS (outer_op) != RTX_UNARY
10443 && GET_MODE_BITSIZE (result_mode) < HOST_BITS_PER_WIDE_INT)
10444 outer_const = trunc_int_for_mode (outer_const, result_mode);
10445
10446 if (outer_op == AND)
10447 x = simplify_and_const_int (NULL_RTX, result_mode, x, outer_const);
10448 else if (outer_op == SET)
10449 {
10450 /* This means that we have determined that the result is
10451 equivalent to a constant. This should be rare. */
10452 if (!side_effects_p (x))
10453 x = GEN_INT (outer_const);
10454 }
10455 else if (GET_RTX_CLASS (outer_op) == RTX_UNARY)
10456 x = simplify_gen_unary (outer_op, result_mode, x, result_mode);
10457 else
10458 x = simplify_gen_binary (outer_op, result_mode, x,
10459 GEN_INT (outer_const));
10460 }
10461
10462 return x;
10463 }
10464
10465 /* Simplify a shift of VAROP by COUNT bits. CODE says what kind of shift.
10466 The result of the shift is RESULT_MODE. If we cannot simplify it,
10467 return X or, if it is NULL, synthesize the expression with
10468 simplify_gen_binary. Otherwise, return a simplified value.
10469
10470 The shift is normally computed in the widest mode we find in VAROP, as
10471 long as it isn't a different number of words than RESULT_MODE. Exceptions
10472 are ASHIFTRT and ROTATE, which are always done in their original mode. */
10473
10474 static rtx
10475 simplify_shift_const (rtx x, enum rtx_code code, enum machine_mode result_mode,
10476 rtx varop, int count)
10477 {
10478 rtx tem = simplify_shift_const_1 (code, result_mode, varop, count);
10479 if (tem)
10480 return tem;
10481
10482 if (!x)
10483 x = simplify_gen_binary (code, GET_MODE (varop), varop, GEN_INT (count));
10484 if (GET_MODE (x) != result_mode)
10485 x = gen_lowpart (result_mode, x);
10486 return x;
10487 }
10488
10489 \f
10490 /* Like recog, but we receive the address of a pointer to a new pattern.
10491 We try to match the rtx that the pointer points to.
10492 If that fails, we may try to modify or replace the pattern,
10493 storing the replacement into the same pointer object.
10494
10495 Modifications include deletion or addition of CLOBBERs.
10496
10497 PNOTES is a pointer to a location where any REG_UNUSED notes added for
10498 the CLOBBERs are placed.
10499
10500 The value is the final insn code from the pattern ultimately matched,
10501 or -1. */
10502
10503 static int
10504 recog_for_combine (rtx *pnewpat, rtx insn, rtx *pnotes)
10505 {
10506 rtx pat = *pnewpat;
10507 int insn_code_number;
10508 int num_clobbers_to_add = 0;
10509 int i;
10510 rtx notes = 0;
10511 rtx old_notes, old_pat;
10512
10513 /* If PAT is a PARALLEL, check to see if it contains the CLOBBER
10514 we use to indicate that something didn't match. If we find such a
10515 thing, force rejection. */
10516 if (GET_CODE (pat) == PARALLEL)
10517 for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
10518 if (GET_CODE (XVECEXP (pat, 0, i)) == CLOBBER
10519 && XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
10520 return -1;
10521
10522 old_pat = PATTERN (insn);
10523 old_notes = REG_NOTES (insn);
10524 PATTERN (insn) = pat;
10525 REG_NOTES (insn) = 0;
10526
10527 insn_code_number = recog (pat, insn, &num_clobbers_to_add);
10528 if (dump_file && (dump_flags & TDF_DETAILS))
10529 {
10530 if (insn_code_number < 0)
10531 fputs ("Failed to match this instruction:\n", dump_file);
10532 else
10533 fputs ("Successfully matched this instruction:\n", dump_file);
10534 print_rtl_single (dump_file, pat);
10535 }
10536
10537 /* If the pattern wasn't recognized, we may previously have had an insn
10538 that clobbered some register as a side effect, but the combined
10539 insn doesn't need to do that. So try once more without the clobbers
10540 unless this represents an ASM insn. */
10541
10542 if (insn_code_number < 0 && ! check_asm_operands (pat)
10543 && GET_CODE (pat) == PARALLEL)
10544 {
10545 int pos;
10546
10547 for (pos = 0, i = 0; i < XVECLEN (pat, 0); i++)
10548 if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER)
10549 {
10550 if (i != pos)
10551 SUBST (XVECEXP (pat, 0, pos), XVECEXP (pat, 0, i));
10552 pos++;
10553 }
10554
10555 SUBST_INT (XVECLEN (pat, 0), pos);
10556
10557 if (pos == 1)
10558 pat = XVECEXP (pat, 0, 0);
10559
10560 PATTERN (insn) = pat;
10561 insn_code_number = recog (pat, insn, &num_clobbers_to_add);
10562 if (dump_file && (dump_flags & TDF_DETAILS))
10563 {
10564 if (insn_code_number < 0)
10565 fputs ("Failed to match this instruction:\n", dump_file);
10566 else
10567 fputs ("Successfully matched this instruction:\n", dump_file);
10568 print_rtl_single (dump_file, pat);
10569 }
10570 }
10571 PATTERN (insn) = old_pat;
10572 REG_NOTES (insn) = old_notes;
10573
10574 /* Recognize all noop sets; these will be killed by a followup pass. */
10575 if (insn_code_number < 0 && GET_CODE (pat) == SET && set_noop_p (pat))
10576 insn_code_number = NOOP_MOVE_INSN_CODE, num_clobbers_to_add = 0;
10577
10578 /* If we had any clobbers to add, make a new pattern that contains
10579 them. Then check to make sure that all of them are dead. */
10580 if (num_clobbers_to_add)
10581 {
10582 rtx newpat = gen_rtx_PARALLEL (VOIDmode,
10583 rtvec_alloc (GET_CODE (pat) == PARALLEL
10584 ? (XVECLEN (pat, 0)
10585 + num_clobbers_to_add)
10586 : num_clobbers_to_add + 1));
10587
10588 if (GET_CODE (pat) == PARALLEL)
10589 for (i = 0; i < XVECLEN (pat, 0); i++)
10590 XVECEXP (newpat, 0, i) = XVECEXP (pat, 0, i);
10591 else
10592 XVECEXP (newpat, 0, 0) = pat;
10593
10594 add_clobbers (newpat, insn_code_number);
10595
10596 for (i = XVECLEN (newpat, 0) - num_clobbers_to_add;
10597 i < XVECLEN (newpat, 0); i++)
10598 {
10599 if (REG_P (XEXP (XVECEXP (newpat, 0, i), 0))
10600 && ! reg_dead_at_p (XEXP (XVECEXP (newpat, 0, i), 0), insn))
10601 return -1;
10602 if (GET_CODE (XEXP (XVECEXP (newpat, 0, i), 0)) != SCRATCH)
10603 {
10604 gcc_assert (REG_P (XEXP (XVECEXP (newpat, 0, i), 0)));
10605 notes = alloc_reg_note (REG_UNUSED,
10606 XEXP (XVECEXP (newpat, 0, i), 0), notes);
10607 }
10608 }
10609 pat = newpat;
10610 }
10611
10612 *pnewpat = pat;
10613 *pnotes = notes;
10614
10615 return insn_code_number;
10616 }
10617 \f
10618 /* Like gen_lowpart_general but for use by combine. In combine it
10619 is not possible to create any new pseudoregs. However, it is
10620 safe to create invalid memory addresses, because combine will
10621 try to recognize them and all they will do is make the combine
10622 attempt fail.
10623
10624 If for some reason this cannot do its job, an rtx
10625 (clobber (const_int 0)) is returned.
10626 An insn containing that will not be recognized. */
10627
10628 static rtx
10629 gen_lowpart_for_combine (enum machine_mode omode, rtx x)
10630 {
10631 enum machine_mode imode = GET_MODE (x);
10632 unsigned int osize = GET_MODE_SIZE (omode);
10633 unsigned int isize = GET_MODE_SIZE (imode);
10634 rtx result;
10635
10636 if (omode == imode)
10637 return x;
10638
10639 /* Return identity if this is a CONST or symbolic reference. */
10640 if (omode == Pmode
10641 && (GET_CODE (x) == CONST
10642 || GET_CODE (x) == SYMBOL_REF
10643 || GET_CODE (x) == LABEL_REF))
10644 return x;
10645
10646 /* We can only support OMODE being wider than a word if X is a
10647 constant integer or has a mode of the same size. */
10648 if (GET_MODE_SIZE (omode) > UNITS_PER_WORD
10649 && ! ((imode == VOIDmode
10650 && (CONST_INT_P (x)
10651 || GET_CODE (x) == CONST_DOUBLE))
10652 || isize == osize))
10653 goto fail;
10654
10655 /* X might be a paradoxical (subreg (mem)). In that case, gen_lowpart
10656 won't know what to do. So we will strip off the SUBREG here and
10657 process normally. */
10658 if (GET_CODE (x) == SUBREG && MEM_P (SUBREG_REG (x)))
10659 {
10660 x = SUBREG_REG (x);
10661
10662 /* In case we fall through to the address adjustments further
10663 below, we need to adjust the known mode and size of x
10664 (imode and isize), since we just changed x. */
10665 imode = GET_MODE (x);
10666
10667 if (imode == omode)
10668 return x;
10669
10670 isize = GET_MODE_SIZE (imode);
10671 }
10672
10673 result = gen_lowpart_common (omode, x);
10674
10675 if (result)
10676 return result;
10677
10678 if (MEM_P (x))
10679 {
10680 int offset = 0;
10681
10682 /* Refuse to work on a volatile memory ref or one with a mode-dependent
10683 address. */
10684 if (MEM_VOLATILE_P (x) || mode_dependent_address_p (XEXP (x, 0)))
10685 goto fail;
10686
10687 /* If we want to refer to something bigger than the original memref,
10688 generate a paradoxical subreg instead. That will force a reload
10689 of the original memref X. */
10690 if (isize < osize)
10691 return gen_rtx_SUBREG (omode, x, 0);
10692
10693 if (WORDS_BIG_ENDIAN)
10694 offset = MAX (isize, UNITS_PER_WORD) - MAX (osize, UNITS_PER_WORD);
10695
10696 /* Adjust the address so that the address-after-the-data is
10697 unchanged. */
10698 if (BYTES_BIG_ENDIAN)
10699 offset -= MIN (UNITS_PER_WORD, osize) - MIN (UNITS_PER_WORD, isize);
10700
10701 return adjust_address_nv (x, omode, offset);
10702 }
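  /* A worked example of the offset computation above (illustration only),
     assuming BYTES_BIG_ENDIAN, WORDS_BIG_ENDIAN and UNITS_PER_WORD == 4:
     taking the QImode low part of a SImode MEM gives
       offset = (MAX (4, 4) - MAX (1, 4)) - (MIN (4, 1) - MIN (4, 4))
	      = 0 - (1 - 4) = 3,
     i.e. the least significant byte of the big-endian word.  */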
10703
10704 /* If X is a comparison operator, rewrite it in a new mode. This
10705 probably won't match, but may allow further simplifications. */
10706 else if (COMPARISON_P (x))
10707 return gen_rtx_fmt_ee (GET_CODE (x), omode, XEXP (x, 0), XEXP (x, 1));
10708
10709 /* If we couldn't simplify X any other way, just enclose it in a
10710 SUBREG. Normally, this SUBREG won't match, but some patterns may
10711 include an explicit SUBREG or we may simplify it further in combine. */
10712 else
10713 {
10714 int offset = 0;
10715 rtx res;
10716
10717 offset = subreg_lowpart_offset (omode, imode);
10718 if (imode == VOIDmode)
10719 {
10720 imode = int_mode_for_mode (omode);
10721 x = gen_lowpart_common (imode, x);
10722 if (x == NULL)
10723 goto fail;
10724 }
10725 res = simplify_gen_subreg (omode, x, imode, offset);
10726 if (res)
10727 return res;
10728 }
10729
10730 fail:
10731 return gen_rtx_CLOBBER (omode, const0_rtx);
10732 }
10733 \f
10734 /* Simplify a comparison between *POP0 and *POP1 where CODE is the
10735 comparison code that will be tested.
10736
10737 The result is a possibly different comparison code to use. *POP0 and
10738 *POP1 may be updated.
10739
10740 It is possible that we might detect that a comparison is either always
10741 true or always false. However, we do not perform general constant
10742 folding in combine, so this knowledge isn't useful. Such tautologies
10743 should have been detected earlier. Hence we ignore all such cases. */
10744
10745 static enum rtx_code
10746 simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
10747 {
10748 rtx op0 = *pop0;
10749 rtx op1 = *pop1;
10750 rtx tem, tem1;
10751 int i;
10752 enum machine_mode mode, tmode;
10753
10754 /* Try a few ways of applying the same transformation to both operands. */
10755 while (1)
10756 {
10757 #ifndef WORD_REGISTER_OPERATIONS
10758 /* The test below this one won't handle SIGN_EXTENDs on these machines,
10759 so check specially. */
10760 if (code != GTU && code != GEU && code != LTU && code != LEU
10761 && GET_CODE (op0) == ASHIFTRT && GET_CODE (op1) == ASHIFTRT
10762 && GET_CODE (XEXP (op0, 0)) == ASHIFT
10763 && GET_CODE (XEXP (op1, 0)) == ASHIFT
10764 && GET_CODE (XEXP (XEXP (op0, 0), 0)) == SUBREG
10765 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SUBREG
10766 && (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0)))
10767 == GET_MODE (SUBREG_REG (XEXP (XEXP (op1, 0), 0))))
10768 && CONST_INT_P (XEXP (op0, 1))
10769 && XEXP (op0, 1) == XEXP (op1, 1)
10770 && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
10771 && XEXP (op0, 1) == XEXP (XEXP (op1, 0), 1)
10772 && (INTVAL (XEXP (op0, 1))
10773 == (GET_MODE_BITSIZE (GET_MODE (op0))
10774 - (GET_MODE_BITSIZE
10775 (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0))))))))
10776 {
10777 op0 = SUBREG_REG (XEXP (XEXP (op0, 0), 0));
10778 op1 = SUBREG_REG (XEXP (XEXP (op1, 0), 0));
10779 }
10780 #endif
10781
10782 /* If both operands are the same constant shift, see if we can ignore the
10783 shift. We can if the shift is a rotate or if the bits shifted out of
10784 this shift are known to be zero for both inputs and if the type of
10785 comparison is compatible with the shift. */
10786 if (GET_CODE (op0) == GET_CODE (op1)
10787 && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT
10788 && ((GET_CODE (op0) == ROTATE && (code == NE || code == EQ))
10789 || ((GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFT)
10790 && (code != GT && code != LT && code != GE && code != LE))
10791 || (GET_CODE (op0) == ASHIFTRT
10792 && (code != GTU && code != LTU
10793 && code != GEU && code != LEU)))
10794 && CONST_INT_P (XEXP (op0, 1))
10795 && INTVAL (XEXP (op0, 1)) >= 0
10796 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
10797 && XEXP (op0, 1) == XEXP (op1, 1))
10798 {
10799 enum machine_mode mode = GET_MODE (op0);
10800 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
10801 int shift_count = INTVAL (XEXP (op0, 1));
10802
10803 if (GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFTRT)
10804 mask &= (mask >> shift_count) << shift_count;
10805 else if (GET_CODE (op0) == ASHIFT)
10806 mask = (mask & (mask << shift_count)) >> shift_count;
10807
10808 if ((nonzero_bits (XEXP (op0, 0), mode) & ~mask) == 0
10809 && (nonzero_bits (XEXP (op1, 0), mode) & ~mask) == 0)
10810 op0 = XEXP (op0, 0), op1 = XEXP (op1, 0);
10811 else
10812 break;
10813 }
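      /* For instance (illustration only), when comparing
	 (lshiftrt:SI A (const_int 2)) with (lshiftrt:SI B (const_int 2))
	 for equality and the low two bits of both A and B are known to be
	 zero, the shifts are dropped and A is compared directly with B.  */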
10814
10815 /* If both operands are AND's of a paradoxical SUBREG by constant, the
10816 SUBREGs are of the same mode, and, in both cases, the AND would
10817 be redundant if the comparison was done in the narrower mode,
10818 do the comparison in the narrower mode (e.g., we are AND'ing with 1
10819 and the operand's possibly nonzero bits are 0xffffff01; in that case
10820 if we only care about QImode, we don't need the AND). This case
10821 occurs if the output mode of an scc insn is not SImode and
10822 STORE_FLAG_VALUE == 1 (e.g., the 386).
10823
10824 Similarly, check for a case where the AND's are ZERO_EXTEND
10825 operations from some narrower mode even though a SUBREG is not
10826 present. */
10827
10828 else if (GET_CODE (op0) == AND && GET_CODE (op1) == AND
10829 && CONST_INT_P (XEXP (op0, 1))
10830 && CONST_INT_P (XEXP (op1, 1)))
10831 {
10832 rtx inner_op0 = XEXP (op0, 0);
10833 rtx inner_op1 = XEXP (op1, 0);
10834 HOST_WIDE_INT c0 = INTVAL (XEXP (op0, 1));
10835 HOST_WIDE_INT c1 = INTVAL (XEXP (op1, 1));
10836 int changed = 0;
10837
10838 if (GET_CODE (inner_op0) == SUBREG && GET_CODE (inner_op1) == SUBREG
10839 && (GET_MODE_SIZE (GET_MODE (inner_op0))
10840 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner_op0))))
10841 && (GET_MODE (SUBREG_REG (inner_op0))
10842 == GET_MODE (SUBREG_REG (inner_op1)))
10843 && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (inner_op0)))
10844 <= HOST_BITS_PER_WIDE_INT)
10845 && (0 == ((~c0) & nonzero_bits (SUBREG_REG (inner_op0),
10846 GET_MODE (SUBREG_REG (inner_op0)))))
10847 && (0 == ((~c1) & nonzero_bits (SUBREG_REG (inner_op1),
10848 GET_MODE (SUBREG_REG (inner_op1))))))
10849 {
10850 op0 = SUBREG_REG (inner_op0);
10851 op1 = SUBREG_REG (inner_op1);
10852
10853 /* The resulting comparison is always unsigned since we masked
10854 off the original sign bit. */
10855 code = unsigned_condition (code);
10856
10857 changed = 1;
10858 }
10859
10860 else if (c0 == c1)
10861 for (tmode = GET_CLASS_NARROWEST_MODE
10862 (GET_MODE_CLASS (GET_MODE (op0)));
10863 tmode != GET_MODE (op0); tmode = GET_MODE_WIDER_MODE (tmode))
10864 if ((unsigned HOST_WIDE_INT) c0 == GET_MODE_MASK (tmode))
10865 {
10866 op0 = gen_lowpart (tmode, inner_op0);
10867 op1 = gen_lowpart (tmode, inner_op1);
10868 code = unsigned_condition (code);
10869 changed = 1;
10870 break;
10871 }
10872
10873 if (! changed)
10874 break;
10875 }
10876
10877 /* If both operands are NOT, we can strip off the outer operation
10878 and adjust the comparison code for swapped operands; similarly for
10879 NEG, except that this must be an equality comparison. */
10880 else if ((GET_CODE (op0) == NOT && GET_CODE (op1) == NOT)
10881 || (GET_CODE (op0) == NEG && GET_CODE (op1) == NEG
10882 && (code == EQ || code == NE)))
10883 op0 = XEXP (op0, 0), op1 = XEXP (op1, 0), code = swap_condition (code);
10884
10885 else
10886 break;
10887 }
10888
10889 /* If the first operand is a constant, swap the operands and adjust the
10890 comparison code appropriately, but don't do this if the second operand
10891 is already a constant integer. */
10892 if (swap_commutative_operands_p (op0, op1))
10893 {
10894 tem = op0, op0 = op1, op1 = tem;
10895 code = swap_condition (code);
10896 }
10897
10898 /* We now enter a loop during which we will try to simplify the comparison.
10899 For the most part, we are only concerned with comparisons with zero,
10900 but some things may really be comparisons with zero but not start
10901 out looking that way. */
10902
10903 while (CONST_INT_P (op1))
10904 {
10905 enum machine_mode mode = GET_MODE (op0);
10906 unsigned int mode_width = GET_MODE_BITSIZE (mode);
10907 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
10908 int equality_comparison_p;
10909 int sign_bit_comparison_p;
10910 int unsigned_comparison_p;
10911 HOST_WIDE_INT const_op;
10912
10913 /* We only want to handle integral modes. This catches VOIDmode,
10914 CCmode, and the floating-point modes. An exception is that we
10915 can handle VOIDmode if OP0 is a COMPARE or a comparison
10916 operation. */
10917
10918 if (GET_MODE_CLASS (mode) != MODE_INT
10919 && ! (mode == VOIDmode
10920 && (GET_CODE (op0) == COMPARE || COMPARISON_P (op0))))
10921 break;
10922
10923 /* Get the constant we are comparing against and turn off all bits
10924 not on in our mode. */
10925 const_op = INTVAL (op1);
10926 if (mode != VOIDmode)
10927 const_op = trunc_int_for_mode (const_op, mode);
10928 op1 = GEN_INT (const_op);
10929
10930 /* If we are comparing against a constant power of two and the value
10931 being compared can only have that single bit nonzero (e.g., it was
10932 `and'ed with that bit), we can replace this with a comparison
10933 with zero. */
10934 if (const_op
10935 && (code == EQ || code == NE || code == GE || code == GEU
10936 || code == LT || code == LTU)
10937 && mode_width <= HOST_BITS_PER_WIDE_INT
10938 && exact_log2 (const_op) >= 0
10939 && nonzero_bits (op0, mode) == (unsigned HOST_WIDE_INT) const_op)
10940 {
10941 code = (code == EQ || code == GE || code == GEU ? NE : EQ);
10942 op1 = const0_rtx, const_op = 0;
10943 }
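      /* For example (illustration only), if OP0 is (and:SI X (const_int 8)),
	 then (eq OP0 (const_int 8)) is rewritten as (ne OP0 (const_int 0)),
	 since bit 3 is the only bit that can be nonzero in OP0.  */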
10944
10945 /* Similarly, if we are comparing a value known to be either -1 or
10946 0 with -1, change it to the opposite comparison against zero. */
10947
10948 if (const_op == -1
10949 && (code == EQ || code == NE || code == GT || code == LE
10950 || code == GEU || code == LTU)
10951 && num_sign_bit_copies (op0, mode) == mode_width)
10952 {
10953 code = (code == EQ || code == LE || code == GEU ? NE : EQ);
10954 op1 = const0_rtx, const_op = 0;
10955 }
10956
10957 /* Do some canonicalizations based on the comparison code. We prefer
10958 comparisons against zero and then prefer equality comparisons.
10959 If we can reduce the size of a constant, we will do that too. */
10960
10961 switch (code)
10962 {
10963 case LT:
10964 /* < C is equivalent to <= (C - 1) */
10965 if (const_op > 0)
10966 {
10967 const_op -= 1;
10968 op1 = GEN_INT (const_op);
10969 code = LE;
10970 /* ... fall through to LE case below. */
10971 }
10972 else
10973 break;
10974
10975 case LE:
10976 /* <= C is equivalent to < (C + 1); we do this for C < 0 */
10977 if (const_op < 0)
10978 {
10979 const_op += 1;
10980 op1 = GEN_INT (const_op);
10981 code = LT;
10982 }
10983
10984 /* If we are doing a <= 0 comparison on a value known to have
10985 a zero sign bit, we can replace this with == 0. */
10986 else if (const_op == 0
10987 && mode_width <= HOST_BITS_PER_WIDE_INT
10988 && (nonzero_bits (op0, mode)
10989 & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
10990 == 0)
10991 code = EQ;
10992 break;
10993
10994 case GE:
10995 /* >= C is equivalent to > (C - 1). */
10996 if (const_op > 0)
10997 {
10998 const_op -= 1;
10999 op1 = GEN_INT (const_op);
11000 code = GT;
11001 /* ... fall through to GT below. */
11002 }
11003 else
11004 break;
11005
11006 case GT:
11007 /* > C is equivalent to >= (C + 1); we do this for C < 0. */
11008 if (const_op < 0)
11009 {
11010 const_op += 1;
11011 op1 = GEN_INT (const_op);
11012 code = GE;
11013 }
11014
11015 /* If we are doing a > 0 comparison on a value known to have
11016 a zero sign bit, we can replace this with != 0. */
11017 else if (const_op == 0
11018 && mode_width <= HOST_BITS_PER_WIDE_INT
11019 && (nonzero_bits (op0, mode)
11020 & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
11021 == 0)
11022 code = NE;
11023 break;
11024
11025 case LTU:
11026 /* < C is equivalent to <= (C - 1). */
11027 if (const_op > 0)
11028 {
11029 const_op -= 1;
11030 op1 = GEN_INT (const_op);
11031 code = LEU;
11032 /* ... fall through ... */
11033 }
11034
11035 /* (unsigned) < 0x80000000 is equivalent to >= 0. */
11036 else if (mode_width <= HOST_BITS_PER_WIDE_INT
11037 && (unsigned HOST_WIDE_INT) const_op
11038 == (unsigned HOST_WIDE_INT) 1 << (mode_width - 1))
11039 {
11040 const_op = 0, op1 = const0_rtx;
11041 code = GE;
11042 break;
11043 }
11044 else
11045 break;
11046
11047 case LEU:
11048 /* unsigned <= 0 is equivalent to == 0 */
11049 if (const_op == 0)
11050 code = EQ;
11051
11052 /* (unsigned) <= 0x7fffffff is equivalent to >= 0. */
11053 else if (mode_width <= HOST_BITS_PER_WIDE_INT
11054 && (unsigned HOST_WIDE_INT) const_op
11055 == ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)) - 1)
11056 {
11057 const_op = 0, op1 = const0_rtx;
11058 code = GE;
11059 }
11060 break;
11061
11062 case GEU:
11063 /* >= C is equivalent to > (C - 1). */
11064 if (const_op > 1)
11065 {
11066 const_op -= 1;
11067 op1 = GEN_INT (const_op);
11068 code = GTU;
11069 /* ... fall through ... */
11070 }
11071
11072 /* (unsigned) >= 0x80000000 is equivalent to < 0. */
11073 else if (mode_width <= HOST_BITS_PER_WIDE_INT
11074 && (unsigned HOST_WIDE_INT) const_op
11075 == (unsigned HOST_WIDE_INT) 1 << (mode_width - 1))
11076 {
11077 const_op = 0, op1 = const0_rtx;
11078 code = LT;
11079 break;
11080 }
11081 else
11082 break;
11083
11084 case GTU:
11085 /* unsigned > 0 is equivalent to != 0 */
11086 if (const_op == 0)
11087 code = NE;
11088
11089 /* (unsigned) > 0x7fffffff is equivalent to < 0. */
11090 else if (mode_width <= HOST_BITS_PER_WIDE_INT
11091 && (unsigned HOST_WIDE_INT) const_op
11092 == ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)) - 1)
11093 {
11094 const_op = 0, op1 = const0_rtx;
11095 code = LT;
11096 }
11097 break;
11098
11099 default:
11100 break;
11101 }
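      /* A few sample canonicalizations produced by the switch above
	 (illustration only):
	   (lt X (const_int 5))   ->  (le X (const_int 4))
	   (geu X (const_int 5))  ->  (gtu X (const_int 4))
	   (gtu X (const_int 0))  ->  (ne X (const_int 0))  */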
11102
11103 /* Compute some predicates to simplify code below. */
11104
11105 equality_comparison_p = (code == EQ || code == NE);
11106 sign_bit_comparison_p = ((code == LT || code == GE) && const_op == 0);
11107 unsigned_comparison_p = (code == LTU || code == LEU || code == GTU
11108 || code == GEU);
11109
11110 /* If this is a sign bit comparison and we can do arithmetic in
11111 MODE, say that we will only be needing the sign bit of OP0. */
11112 if (sign_bit_comparison_p
11113 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
11114 op0 = force_to_mode (op0, mode,
11115 (unsigned HOST_WIDE_INT) 1
11116 << (GET_MODE_BITSIZE (mode) - 1),
11117 0);
11118
11119 /* Now try cases based on the opcode of OP0. If none of the cases
11120 does a "continue", we exit this loop immediately after the
11121 switch. */
11122
11123 switch (GET_CODE (op0))
11124 {
11125 case ZERO_EXTRACT:
11126 /* If we are extracting a single bit from a variable position in
11127 a constant that has only a single bit set and are comparing it
11128 with zero, we can convert this into an equality comparison
11129 between the position and the location of the single bit. */
11130 /* Except we can't if SHIFT_COUNT_TRUNCATED is set, since we might
11131 have already reduced the shift count modulo the word size. */
11132 if (!SHIFT_COUNT_TRUNCATED
11133 && CONST_INT_P (XEXP (op0, 0))
11134 && XEXP (op0, 1) == const1_rtx
11135 && equality_comparison_p && const_op == 0
11136 && (i = exact_log2 (UINTVAL (XEXP (op0, 0)))) >= 0)
11137 {
11138 if (BITS_BIG_ENDIAN)
11139 {
11140 enum machine_mode new_mode
11141 = mode_for_extraction (EP_extzv, 1);
11142 if (new_mode == MAX_MACHINE_MODE)
11143 i = BITS_PER_WORD - 1 - i;
11144 else
11145 {
11146 mode = new_mode;
11147 i = (GET_MODE_BITSIZE (mode) - 1 - i);
11148 }
11149 }
11150
11151 op0 = XEXP (op0, 2);
11152 op1 = GEN_INT (i);
11153 const_op = i;
11154
11155 /* Result is nonzero iff shift count is equal to I. */
11156 code = reverse_condition (code);
11157 continue;
11158 }
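	  /* For instance (illustration only, assuming !BITS_BIG_ENDIAN and
	     !SHIFT_COUNT_TRUNCATED),
	     (eq (zero_extract:SI (const_int 4) (const_int 1) POS)
		 (const_int 0))
	     becomes (ne POS (const_int 2)), since bit 2 is the only bit
	     set in the constant.  */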
11159
11160 /* ... fall through ... */
11161
11162 case SIGN_EXTRACT:
11163 tem = expand_compound_operation (op0);
11164 if (tem != op0)
11165 {
11166 op0 = tem;
11167 continue;
11168 }
11169 break;
11170
11171 case NOT:
11172 /* If testing for equality, we can take the NOT of the constant. */
11173 if (equality_comparison_p
11174 && (tem = simplify_unary_operation (NOT, mode, op1, mode)) != 0)
11175 {
11176 op0 = XEXP (op0, 0);
11177 op1 = tem;
11178 continue;
11179 }
11180
11181 /* If just looking at the sign bit, reverse the sense of the
11182 comparison. */
11183 if (sign_bit_comparison_p)
11184 {
11185 op0 = XEXP (op0, 0);
11186 code = (code == GE ? LT : GE);
11187 continue;
11188 }
11189 break;
11190
11191 case NEG:
11192 /* If testing for equality, we can take the NEG of the constant. */
11193 if (equality_comparison_p
11194 && (tem = simplify_unary_operation (NEG, mode, op1, mode)) != 0)
11195 {
11196 op0 = XEXP (op0, 0);
11197 op1 = tem;
11198 continue;
11199 }
11200
11201 /* The remaining cases only apply to comparisons with zero. */
11202 if (const_op != 0)
11203 break;
11204
11205 /* When X is ABS or is known positive,
11206 (neg X) is < 0 if and only if X != 0. */
11207
11208 if (sign_bit_comparison_p
11209 && (GET_CODE (XEXP (op0, 0)) == ABS
11210 || (mode_width <= HOST_BITS_PER_WIDE_INT
11211 && (nonzero_bits (XEXP (op0, 0), mode)
11212 & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
11213 == 0)))
11214 {
11215 op0 = XEXP (op0, 0);
11216 code = (code == LT ? NE : EQ);
11217 continue;
11218 }
11219
11220 /* If we have NEG of something whose two high-order bits are the
11221 same, we know that "(-a) < 0" is equivalent to "a > 0". */
11222 if (num_sign_bit_copies (op0, mode) >= 2)
11223 {
11224 op0 = XEXP (op0, 0);
11225 code = swap_condition (code);
11226 continue;
11227 }
11228 break;
11229
11230 case ROTATE:
11231 /* If we are testing equality and our count is a constant, we
11232 can perform the inverse operation on our RHS. */
11233 if (equality_comparison_p && CONST_INT_P (XEXP (op0, 1))
11234 && (tem = simplify_binary_operation (ROTATERT, mode,
11235 op1, XEXP (op0, 1))) != 0)
11236 {
11237 op0 = XEXP (op0, 0);
11238 op1 = tem;
11239 continue;
11240 }
11241
11242 /* If we are doing a < 0 or >= 0 comparison, it means we are testing
11243 a particular bit. Convert it to an AND of a constant of that
11244 bit. This will be converted into a ZERO_EXTRACT. */
11245 if (const_op == 0 && sign_bit_comparison_p
11246 && CONST_INT_P (XEXP (op0, 1))
11247 && mode_width <= HOST_BITS_PER_WIDE_INT)
11248 {
11249 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
11250 ((unsigned HOST_WIDE_INT) 1
11251 << (mode_width - 1
11252 - INTVAL (XEXP (op0, 1)))));
11253 code = (code == LT ? NE : EQ);
11254 continue;
11255 }
11256
11257 /* Fall through. */
11258
11259 case ABS:
11260 /* ABS is ignorable inside an equality comparison with zero. */
11261 if (const_op == 0 && equality_comparison_p)
11262 {
11263 op0 = XEXP (op0, 0);
11264 continue;
11265 }
11266 break;
11267
11268 case SIGN_EXTEND:
11269 /* Can simplify (compare (zero/sign_extend FOO) CONST) to
11270 (compare FOO CONST) if CONST fits in FOO's mode and we
11271 are either testing inequality or have an unsigned
11272 comparison with ZERO_EXTEND or a signed comparison with
11273 SIGN_EXTEND. But don't do it if we don't have a compare
11274 insn of the given mode, since we'd have to revert it
11275 later on, and then we wouldn't know whether to sign- or
11276 zero-extend. */
11277 mode = GET_MODE (XEXP (op0, 0));
11278 if (mode != VOIDmode && GET_MODE_CLASS (mode) == MODE_INT
11279 && ! unsigned_comparison_p
11280 && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
11281 && ((unsigned HOST_WIDE_INT) const_op
11282 < (((unsigned HOST_WIDE_INT) 1
11283 << (GET_MODE_BITSIZE (mode) - 1))))
11284 && have_insn_for (COMPARE, mode))
11285 {
11286 op0 = XEXP (op0, 0);
11287 continue;
11288 }
11289 break;
11290
11291 case SUBREG:
11292 /* Check for the case where we are comparing A - C1 with C2, that is
11293
11294 (subreg:MODE (plus (A) (-C1))) op (C2)
11295
11296 with C1 a constant, and try to lift the SUBREG, i.e. to do the
11297 comparison in the wider mode. One of the following two conditions
11298 must be true in order for this to be valid:
11299
11300 1. The mode extension results in the same bit pattern being added
11301 on both sides and the comparison is equality or unsigned. As
11302 C2 has been truncated to fit in MODE, the pattern can only be
11303 all 0s or all 1s.
11304
11305 2. The mode extension results in the sign bit being copied on
11306 each side.
11307
11308 The difficulty here is that we have predicates for A but not for
11309 (A - C1) so we need to check that C1 is within proper bounds so
11310 as to perturb A as little as possible. */
11311
11312 if (mode_width <= HOST_BITS_PER_WIDE_INT
11313 && subreg_lowpart_p (op0)
11314 && GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0))) > mode_width
11315 && GET_CODE (SUBREG_REG (op0)) == PLUS
11316 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1)))
11317 {
11318 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
11319 rtx a = XEXP (SUBREG_REG (op0), 0);
11320 HOST_WIDE_INT c1 = -INTVAL (XEXP (SUBREG_REG (op0), 1));
11321
11322 if ((c1 > 0
11323 && (unsigned HOST_WIDE_INT) c1
11324 < (unsigned HOST_WIDE_INT) 1 << (mode_width - 1)
11325 && (equality_comparison_p || unsigned_comparison_p)
11326 /* (A - C1) zero-extends if it is positive and sign-extends
11327 if it is negative, C2 both zero- and sign-extends. */
11328 && ((0 == (nonzero_bits (a, inner_mode)
11329 & ~GET_MODE_MASK (mode))
11330 && const_op >= 0)
11331 /* (A - C1) sign-extends if it is positive and 1-extends
11332 if it is negative, C2 both sign- and 1-extends. */
11333 || (num_sign_bit_copies (a, inner_mode)
11334 > (unsigned int) (GET_MODE_BITSIZE (inner_mode)
11335 - mode_width)
11336 && const_op < 0)))
11337 || ((unsigned HOST_WIDE_INT) c1
11338 < (unsigned HOST_WIDE_INT) 1 << (mode_width - 2)
11339 /* (A - C1) always sign-extends, like C2. */
11340 && num_sign_bit_copies (a, inner_mode)
11341 > (unsigned int) (GET_MODE_BITSIZE (inner_mode)
11342 - (mode_width - 1))))
11343 {
11344 op0 = SUBREG_REG (op0);
11345 continue;
11346 }
11347 }
11348
11349 /* If the inner mode is narrower and we are extracting the low part,
11350 we can treat the SUBREG as if it were a ZERO_EXTEND. */
11351 if (subreg_lowpart_p (op0)
11352 && GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0))) < mode_width)
11353 /* Fall through */ ;
11354 else
11355 break;
11356
11357 /* ... fall through ... */
11358
11359 case ZERO_EXTEND:
11360 mode = GET_MODE (XEXP (op0, 0));
11361 if (mode != VOIDmode && GET_MODE_CLASS (mode) == MODE_INT
11362 && (unsigned_comparison_p || equality_comparison_p)
11363 && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
11364 && ((unsigned HOST_WIDE_INT) const_op < GET_MODE_MASK (mode))
11365 && have_insn_for (COMPARE, mode))
11366 {
11367 op0 = XEXP (op0, 0);
11368 continue;
11369 }
11370 break;
11371
11372 case PLUS:
11373 /* (eq (plus X A) B) -> (eq X (minus B A)). We can only do
11374 this for equality comparisons due to pathological cases involving
11375 overflows. */
11376 if (equality_comparison_p
11377 && 0 != (tem = simplify_binary_operation (MINUS, mode,
11378 op1, XEXP (op0, 1))))
11379 {
11380 op0 = XEXP (op0, 0);
11381 op1 = tem;
11382 continue;
11383 }
11384
11385 /* (plus (abs X) (const_int -1)) is < 0 if and only if X == 0. */
11386 if (const_op == 0 && XEXP (op0, 1) == constm1_rtx
11387 && GET_CODE (XEXP (op0, 0)) == ABS && sign_bit_comparison_p)
11388 {
11389 op0 = XEXP (XEXP (op0, 0), 0);
11390 code = (code == LT ? EQ : NE);
11391 continue;
11392 }
11393 break;
11394
11395 case MINUS:
11396 /* We used to optimize signed comparisons against zero, but that
11397 was incorrect. Unsigned comparisons against zero (GTU, LEU)
11398 arrive here as equality comparisons, or (GEU, LTU) are
11399 optimized away. No need to special-case them. */
11400
11401 /* (eq (minus A B) C) -> (eq A (plus B C)) or
11402 (eq B (minus A C)), whichever simplifies. We can only do
11403 this for equality comparisons due to pathological cases involving
11404 overflows. */
11405 if (equality_comparison_p
11406 && 0 != (tem = simplify_binary_operation (PLUS, mode,
11407 XEXP (op0, 1), op1)))
11408 {
11409 op0 = XEXP (op0, 0);
11410 op1 = tem;
11411 continue;
11412 }
11413
11414 if (equality_comparison_p
11415 && 0 != (tem = simplify_binary_operation (MINUS, mode,
11416 XEXP (op0, 0), op1)))
11417 {
11418 op0 = XEXP (op0, 1);
11419 op1 = tem;
11420 continue;
11421 }
11422
11423 /* The sign bit of (minus (ashiftrt X C) X), where C is the number
11424 of bits in X minus 1, is one iff X > 0. */
11425 if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == ASHIFTRT
11426 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
11427 && UINTVAL (XEXP (XEXP (op0, 0), 1)) == mode_width - 1
11428 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
11429 {
11430 op0 = XEXP (op0, 1);
11431 code = (code == GE ? LE : GT);
11432 continue;
11433 }
11434 break;
11435
11436 case XOR:
11437 /* (eq (xor A B) C) -> (eq A (xor B C)). This is a simplification
11438 if C is zero or B is a constant. */
11439 if (equality_comparison_p
11440 && 0 != (tem = simplify_binary_operation (XOR, mode,
11441 XEXP (op0, 1), op1)))
11442 {
11443 op0 = XEXP (op0, 0);
11444 op1 = tem;
11445 continue;
11446 }
11447 break;
11448
11449 case EQ: case NE:
11450 case UNEQ: case LTGT:
11451 case LT: case LTU: case UNLT: case LE: case LEU: case UNLE:
11452 case GT: case GTU: case UNGT: case GE: case GEU: case UNGE:
11453 case UNORDERED: case ORDERED:
11454 /* We can't do anything if OP0 is a condition code value, rather
11455 than an actual data value. */
11456 if (const_op != 0
11457 || CC0_P (XEXP (op0, 0))
11458 || GET_MODE_CLASS (GET_MODE (XEXP (op0, 0))) == MODE_CC)
11459 break;
11460
11461 /* Get the two operands being compared. */
11462 if (GET_CODE (XEXP (op0, 0)) == COMPARE)
11463 tem = XEXP (XEXP (op0, 0), 0), tem1 = XEXP (XEXP (op0, 0), 1);
11464 else
11465 tem = XEXP (op0, 0), tem1 = XEXP (op0, 1);
11466
11467 /* Check for the cases where we simply want the result of the
11468 earlier test or the opposite of that result. */
11469 if (code == NE || code == EQ
11470 || (GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT
11471 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT
11472 && (STORE_FLAG_VALUE
11473 & (((unsigned HOST_WIDE_INT) 1
11474 << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
11475 && (code == LT || code == GE)))
11476 {
11477 enum rtx_code new_code;
11478 if (code == LT || code == NE)
11479 new_code = GET_CODE (op0);
11480 else
11481 new_code = reversed_comparison_code (op0, NULL);
11482
11483 if (new_code != UNKNOWN)
11484 {
11485 code = new_code;
11486 op0 = tem;
11487 op1 = tem1;
11488 continue;
11489 }
11490 }
11491 break;
11492
11493 case IOR:
11494 /* The sign bit of (ior (plus X (const_int -1)) X) is nonzero
11495 iff X <= 0. */
11496 if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == PLUS
11497 && XEXP (XEXP (op0, 0), 1) == constm1_rtx
11498 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
11499 {
11500 op0 = XEXP (op0, 1);
11501 code = (code == GE ? GT : LE);
11502 continue;
11503 }
11504 break;
11505
11506 case AND:
11507 /* Convert (and (xshift 1 X) Y) to (and (lshiftrt Y X) 1). This
11508 will be converted to a ZERO_EXTRACT later. */
11509 if (const_op == 0 && equality_comparison_p
11510 && GET_CODE (XEXP (op0, 0)) == ASHIFT
11511 && XEXP (XEXP (op0, 0), 0) == const1_rtx)
11512 {
11513 op0 = gen_rtx_LSHIFTRT (mode, XEXP (op0, 1),
11514 XEXP (XEXP (op0, 0), 1));
11515 op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
11516 continue;
11517 }
11518
11519 /* If we are comparing (and (lshiftrt X C1) C2) for equality with
11520 zero and X is a comparison and C1 and C2 describe only bits set
11521 in STORE_FLAG_VALUE, we can compare with X. */
11522 if (const_op == 0 && equality_comparison_p
11523 && mode_width <= HOST_BITS_PER_WIDE_INT
11524 && CONST_INT_P (XEXP (op0, 1))
11525 && GET_CODE (XEXP (op0, 0)) == LSHIFTRT
11526 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
11527 && INTVAL (XEXP (XEXP (op0, 0), 1)) >= 0
11528 && INTVAL (XEXP (XEXP (op0, 0), 1)) < HOST_BITS_PER_WIDE_INT)
11529 {
11530 mask = ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
11531 << INTVAL (XEXP (XEXP (op0, 0), 1)));
11532 if ((~STORE_FLAG_VALUE & mask) == 0
11533 && (COMPARISON_P (XEXP (XEXP (op0, 0), 0))
11534 || ((tem = get_last_value (XEXP (XEXP (op0, 0), 0))) != 0
11535 && COMPARISON_P (tem))))
11536 {
11537 op0 = XEXP (XEXP (op0, 0), 0);
11538 continue;
11539 }
11540 }
11541
11542 /* If we are doing an equality comparison of an AND of a bit equal
11543 to the sign bit, replace this with a LT or GE comparison of
11544 the underlying value. */
11545 if (equality_comparison_p
11546 && const_op == 0
11547 && CONST_INT_P (XEXP (op0, 1))
11548 && mode_width <= HOST_BITS_PER_WIDE_INT
11549 && ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
11550 == (unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
11551 {
11552 op0 = XEXP (op0, 0);
11553 code = (code == EQ ? GE : LT);
11554 continue;
11555 }
11556
11557 /* If this AND operation is really a ZERO_EXTEND from a narrower
11558 mode, the constant fits within that mode, and this is either an
11559 equality or unsigned comparison, try to do this comparison in
11560 the narrower mode.
11561
11562 Note that in:
11563
11564 (ne:DI (and:DI (reg:DI 4) (const_int 0xffffffff)) (const_int 0))
11565 -> (ne:DI (reg:SI 4) (const_int 0))
11566
11567 unless TRULY_NOOP_TRUNCATION allows it or the register is
11568 known to hold a value of the required mode, the
11569 transformation is invalid. */
11570 if ((equality_comparison_p || unsigned_comparison_p)
11571 && CONST_INT_P (XEXP (op0, 1))
11572 && (i = exact_log2 ((UINTVAL (XEXP (op0, 1))
11573 & GET_MODE_MASK (mode))
11574 + 1)) >= 0
11575 && const_op >> i == 0
11576 && (tmode = mode_for_size (i, MODE_INT, 1)) != BLKmode
11577 && (TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (tmode),
11578 GET_MODE_BITSIZE (GET_MODE (op0)))
11579 || (REG_P (XEXP (op0, 0))
11580 && reg_truncated_to_mode (tmode, XEXP (op0, 0)))))
11581 {
11582 op0 = gen_lowpart (tmode, XEXP (op0, 0));
11583 continue;
11584 }
11585
11586 /* If this is (and:M1 (subreg:M2 X 0) (const_int C1)) where C1
11587 fits in both M1 and M2 and the SUBREG is either paradoxical
11588 or represents the low part, permute the SUBREG and the AND
11589 and try again. */
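	  /* A hypothetical instance: (and:SI (subreg:SI (reg:DI R) 0)
	     (const_int 255)) can be rewritten as the SImode low part of
	     (and:DI (reg:DI R) (const_int 255)), letting the comparison
	     be retried on the wider AND.  */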
11590 if (GET_CODE (XEXP (op0, 0)) == SUBREG)
11591 {
11592 unsigned HOST_WIDE_INT c1;
11593 tmode = GET_MODE (SUBREG_REG (XEXP (op0, 0)));
11594 /* Require an integral mode, to avoid creating something like
11595 (AND:SF ...). */
11596 if (SCALAR_INT_MODE_P (tmode)
11597 /* It is unsafe to commute the AND into the SUBREG if the
11598 SUBREG is paradoxical and WORD_REGISTER_OPERATIONS is
11599 not defined. As originally written the upper bits
11600 have a defined value due to the AND operation.
11601 However, if we commute the AND inside the SUBREG then
11602 they no longer have defined values and the meaning of
11603 the code has been changed. */
11604 && (0
11605 #ifdef WORD_REGISTER_OPERATIONS
11606 || (mode_width > GET_MODE_BITSIZE (tmode)
11607 && mode_width <= BITS_PER_WORD)
11608 #endif
11609 || (mode_width <= GET_MODE_BITSIZE (tmode)
11610 && subreg_lowpart_p (XEXP (op0, 0))))
11611 && CONST_INT_P (XEXP (op0, 1))
11612 && mode_width <= HOST_BITS_PER_WIDE_INT
11613 && GET_MODE_BITSIZE (tmode) <= HOST_BITS_PER_WIDE_INT
11614 && ((c1 = INTVAL (XEXP (op0, 1))) & ~mask) == 0
11615 && (c1 & ~GET_MODE_MASK (tmode)) == 0
11616 && c1 != mask
11617 && c1 != GET_MODE_MASK (tmode))
11618 {
11619 op0 = simplify_gen_binary (AND, tmode,
11620 SUBREG_REG (XEXP (op0, 0)),
11621 gen_int_mode (c1, tmode));
11622 op0 = gen_lowpart (mode, op0);
11623 continue;
11624 }
11625 }
11626
11627 /* Convert (ne (and (not X) 1) 0) to (eq (and X 1) 0). */
11628 if (const_op == 0 && equality_comparison_p
11629 && XEXP (op0, 1) == const1_rtx
11630 && GET_CODE (XEXP (op0, 0)) == NOT)
11631 {
11632 op0 = simplify_and_const_int (NULL_RTX, mode,
11633 XEXP (XEXP (op0, 0), 0), 1);
11634 code = (code == NE ? EQ : NE);
11635 continue;
11636 }
11637
11638 /* Convert (ne (and (lshiftrt (not X)) 1) 0) to
11639 (eq (and (lshiftrt X) 1) 0).
11640 Also handle the case where (not X) is expressed using xor. */
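	  /* In the xor form only the tested bit needs to be inverted; e.g.
	     (a sketch) (ne (and:SI (lshiftrt:SI (xor:SI X (const_int 4))
	     (const_int 2)) (const_int 1)) (const_int 0)) becomes
	     (eq (and:SI (lshiftrt:SI X (const_int 2)) (const_int 1))
	     (const_int 0)), since 4 is exactly 1 << 2.  */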
11641 if (const_op == 0 && equality_comparison_p
11642 && XEXP (op0, 1) == const1_rtx
11643 && GET_CODE (XEXP (op0, 0)) == LSHIFTRT)
11644 {
11645 rtx shift_op = XEXP (XEXP (op0, 0), 0);
11646 rtx shift_count = XEXP (XEXP (op0, 0), 1);
11647
11648 if (GET_CODE (shift_op) == NOT
11649 || (GET_CODE (shift_op) == XOR
11650 && CONST_INT_P (XEXP (shift_op, 1))
11651 && CONST_INT_P (shift_count)
11652 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
11653 && (UINTVAL (XEXP (shift_op, 1))
11654 == (unsigned HOST_WIDE_INT) 1
11655 << INTVAL (shift_count))))
11656 {
11657 op0
11658 = gen_rtx_LSHIFTRT (mode, XEXP (shift_op, 0), shift_count);
11659 op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
11660 code = (code == NE ? EQ : NE);
11661 continue;
11662 }
11663 }
11664 break;
11665
11666 case ASHIFT:
11667 /* If we have (compare (ashift FOO N) (const_int C)) and
11668 the high order N bits of FOO (N+1 if an inequality comparison)
11669 are known to be zero, we can do this by comparing FOO with C
11670 shifted right N bits so long as the low-order N bits of C are
11671 zero. */
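	  /* For example (assuming the high 8 bits of FOO are known zero):
	     (eq (ashift:SI FOO (const_int 8)) (const_int 0x1200))
	     becomes (eq FOO (const_int 0x12)), since the low 8 bits of
	     0x1200 are zero.  */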
11672 if (CONST_INT_P (XEXP (op0, 1))
11673 && INTVAL (XEXP (op0, 1)) >= 0
11674 && ((INTVAL (XEXP (op0, 1)) + ! equality_comparison_p)
11675 < HOST_BITS_PER_WIDE_INT)
11676 && (((unsigned HOST_WIDE_INT) const_op
11677 & (((unsigned HOST_WIDE_INT) 1 << INTVAL (XEXP (op0, 1)))
11678 - 1)) == 0)
11679 && mode_width <= HOST_BITS_PER_WIDE_INT
11680 && (nonzero_bits (XEXP (op0, 0), mode)
11681 & ~(mask >> (INTVAL (XEXP (op0, 1))
11682 + ! equality_comparison_p))) == 0)
11683 {
11684 /* We must perform a logical shift, not an arithmetic one,
11685 as we want the top N bits of C to be zero. */
11686 unsigned HOST_WIDE_INT temp = const_op & GET_MODE_MASK (mode);
11687
11688 temp >>= INTVAL (XEXP (op0, 1));
11689 op1 = gen_int_mode (temp, mode);
11690 op0 = XEXP (op0, 0);
11691 continue;
11692 }
11693
11694 /* If we are doing a sign bit comparison, it means we are testing
11695 a particular bit. Convert it to the appropriate AND. */
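	  /* E.g. (an illustrative SImode case):
	     (lt (ashift:SI X (const_int 4)) (const_int 0)) only looks at
	     bit 27 of X, so it becomes
	     (ne (and:SI X (const_int 0x8000000)) (const_int 0)).  */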
11696 if (sign_bit_comparison_p && CONST_INT_P (XEXP (op0, 1))
11697 && mode_width <= HOST_BITS_PER_WIDE_INT)
11698 {
11699 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
11700 ((unsigned HOST_WIDE_INT) 1
11701 << (mode_width - 1
11702 - INTVAL (XEXP (op0, 1)))));
11703 code = (code == LT ? NE : EQ);
11704 continue;
11705 }
11706
11707 /* If this is an equality comparison with zero and we are shifting
11708 the low bit to the sign bit, we can convert this to an AND of the
11709 low-order bit. */
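	  /* E.g. (eq (ashift:SI X (const_int 31)) (const_int 0)) is
	     equivalent to (eq (and:SI X (const_int 1)) (const_int 0)):
	     only bit 0 of X survives the shift.  */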
11710 if (const_op == 0 && equality_comparison_p
11711 && CONST_INT_P (XEXP (op0, 1))
11712 && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
11713 {
11714 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0), 1);
11715 continue;
11716 }
11717 break;
11718
11719 case ASHIFTRT:
11720 /* If this is an equality comparison with zero, we can do this
11721 as a logical shift, which might be much simpler. */
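	  /* For instance (ne (ashiftrt:SI X (const_int 3)) (const_int 0))
	     can be treated as (ne (lshiftrt:SI X (const_int 3))
	     (const_int 0)); both are nonzero exactly when some bit of X
	     above bit 2 is set.  */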
11722 if (equality_comparison_p && const_op == 0
11723 && CONST_INT_P (XEXP (op0, 1)))
11724 {
11725 op0 = simplify_shift_const (NULL_RTX, LSHIFTRT, mode,
11726 XEXP (op0, 0),
11727 INTVAL (XEXP (op0, 1)));
11728 continue;
11729 }
11730
11731 /* If OP0 is a sign extension and CODE is not an unsigned comparison,
11732 do the comparison in a narrower mode. */
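	  /* E.g. a QImode value sign-extended as
	     (ashiftrt:SI (ashift:SI X (const_int 24)) (const_int 24))
	     compared (signed) against (const_int 5), which fits in QImode,
	     can instead be compared as the QImode low part of X.  */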
11733 if (! unsigned_comparison_p
11734 && CONST_INT_P (XEXP (op0, 1))
11735 && GET_CODE (XEXP (op0, 0)) == ASHIFT
11736 && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
11737 && (tmode = mode_for_size (mode_width - INTVAL (XEXP (op0, 1)),
11738 MODE_INT, 1)) != BLKmode
11739 && (((unsigned HOST_WIDE_INT) const_op
11740 + (GET_MODE_MASK (tmode) >> 1) + 1)
11741 <= GET_MODE_MASK (tmode)))
11742 {
11743 op0 = gen_lowpart (tmode, XEXP (XEXP (op0, 0), 0));
11744 continue;
11745 }
11746
11747 /* Likewise if OP0 is a PLUS of a sign extension with a
11748 constant, which is usually represented with the PLUS
11749 between the shifts. */
11750 if (! unsigned_comparison_p
11751 && CONST_INT_P (XEXP (op0, 1))
11752 && GET_CODE (XEXP (op0, 0)) == PLUS
11753 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
11754 && GET_CODE (XEXP (XEXP (op0, 0), 0)) == ASHIFT
11755 && XEXP (op0, 1) == XEXP (XEXP (XEXP (op0, 0), 0), 1)
11756 && (tmode = mode_for_size (mode_width - INTVAL (XEXP (op0, 1)),
11757 MODE_INT, 1)) != BLKmode
11758 && (((unsigned HOST_WIDE_INT) const_op
11759 + (GET_MODE_MASK (tmode) >> 1) + 1)
11760 <= GET_MODE_MASK (tmode)))
11761 {
11762 rtx inner = XEXP (XEXP (XEXP (op0, 0), 0), 0);
11763 rtx add_const = XEXP (XEXP (op0, 0), 1);
11764 rtx new_const = simplify_gen_binary (ASHIFTRT, GET_MODE (op0),
11765 add_const, XEXP (op0, 1));
11766
11767 op0 = simplify_gen_binary (PLUS, tmode,
11768 gen_lowpart (tmode, inner),
11769 new_const);
11770 continue;
11771 }
11772
11773 /* ... fall through ... */
11774 case LSHIFTRT:
11775 /* If we have (compare (xshiftrt FOO N) (const_int C)) and
11776 the low order N bits of FOO are known to be zero, we can do this
11777 by comparing FOO with C shifted left N bits so long as no
11778 overflow occurs. Even if the low order N bits of FOO aren't known
11779 to be zero, we can still use the same optimization when the
11780 comparison is >= or <, and for > or <= by also setting all the
11781 low-order N bits in the comparison constant. */
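	  /* For instance (geu (lshiftrt:SI X (const_int 2)) (const_int 4))
	     is equivalent to (geu X (const_int 16)); in the GTU case, with
	     the low two bits of X not known to be zero, those bits are set
	     in the constant as well, giving (gtu X (const_int 19)).  */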
11782 if (CONST_INT_P (XEXP (op0, 1))
11783 && INTVAL (XEXP (op0, 1)) > 0
11784 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
11785 && mode_width <= HOST_BITS_PER_WIDE_INT
11786 && (((unsigned HOST_WIDE_INT) const_op
11787 + (GET_CODE (op0) != LSHIFTRT
11788 ? ((GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1)) >> 1)
11789 + 1)
11790 : 0))
11791 <= GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1))))
11792 {
11793 unsigned HOST_WIDE_INT low_bits
11794 = (nonzero_bits (XEXP (op0, 0), mode)
11795 & (((unsigned HOST_WIDE_INT) 1
11796 << INTVAL (XEXP (op0, 1))) - 1));
11797 if (low_bits == 0 || !equality_comparison_p)
11798 {
11799 /* If the shift was logical, then we must make the condition
11800 unsigned. */
11801 if (GET_CODE (op0) == LSHIFTRT)
11802 code = unsigned_condition (code);
11803
11804 const_op <<= INTVAL (XEXP (op0, 1));
11805 if (low_bits != 0
11806 && (code == GT || code == GTU
11807 || code == LE || code == LEU))
11808 const_op
11809 |= (((HOST_WIDE_INT) 1 << INTVAL (XEXP (op0, 1))) - 1);
11810 op1 = GEN_INT (const_op);
11811 op0 = XEXP (op0, 0);
11812 continue;
11813 }
11814 }
11815
11816 /* If we are using this shift to extract just the sign bit, we
11817 can replace this with an LT or GE comparison. */
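	  /* E.g. (ne (lshiftrt:SI X (const_int 31)) (const_int 0))
	     becomes (lt X (const_int 0)), and the EQ form becomes GE.  */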
11818 if (const_op == 0
11819 && (equality_comparison_p || sign_bit_comparison_p)
11820 && CONST_INT_P (XEXP (op0, 1))
11821 && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
11822 {
11823 op0 = XEXP (op0, 0);
11824 code = (code == NE || code == GT ? LT : GE);
11825 continue;
11826 }
11827 break;
11828
11829 default:
11830 break;
11831 }
11832
11833 break;
11834 }
11835
11836 /* Now make any compound operations involved in this comparison. Then,
11837 check for an outermost SUBREG on OP0 that is not doing anything or is
11838 paradoxical. The latter transformation must only be performed when
11839 it is known that the "extra" bits will be the same in op0 and op1 or
11840 that they don't matter. There are three cases to consider:
11841
11842 1. SUBREG_REG (op0) is a register. In this case the bits are don't
11843 care bits and we can assume they have any convenient value. So
11844 making the transformation is safe.
11845
11846 2. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is not defined.
11847 In this case the upper bits of op0 are undefined. We should not make
11848 the simplification in that case as we do not know the contents of
11849 those bits.
11850
11851 3. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is defined and not
11852 UNKNOWN. In that case we know those bits are zeros or ones. We must
11853 also be sure that they are the same as the upper bits of op1.
11854
11855 We can never remove a SUBREG for a non-equality comparison because
11856 the sign bit is in a different place in the underlying object. */
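   /* For instance (a hypothetical case): if the upper 24 bits of
      (reg:SI R) are known to be zero, (eq (subreg:QI (reg:SI R) 0)
      (const_int 3)) can be rewritten as (eq (reg:SI R) (const_int 3)).  */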
11857
11858 op0 = make_compound_operation (op0, op1 == const0_rtx ? COMPARE : SET);
11859 op1 = make_compound_operation (op1, SET);
11860
11861 if (GET_CODE (op0) == SUBREG && subreg_lowpart_p (op0)
11862 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT
11863 && GET_MODE_CLASS (GET_MODE (SUBREG_REG (op0))) == MODE_INT
11864 && (code == NE || code == EQ))
11865 {
11866 if (GET_MODE_SIZE (GET_MODE (op0))
11867 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0))))
11868 {
11869 /* For paradoxical subregs, allow case 1 as above. Case 3 isn't
11870 implemented. */
11871 if (REG_P (SUBREG_REG (op0)))
11872 {
11873 op0 = SUBREG_REG (op0);
11874 op1 = gen_lowpart (GET_MODE (op0), op1);
11875 }
11876 }
11877 else if ((GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
11878 <= HOST_BITS_PER_WIDE_INT)
11879 && (nonzero_bits (SUBREG_REG (op0),
11880 GET_MODE (SUBREG_REG (op0)))
11881 & ~GET_MODE_MASK (GET_MODE (op0))) == 0)
11882 {
11883 tem = gen_lowpart (GET_MODE (SUBREG_REG (op0)), op1);
11884
11885 if ((nonzero_bits (tem, GET_MODE (SUBREG_REG (op0)))
11886 & ~GET_MODE_MASK (GET_MODE (op0))) == 0)
11887 op0 = SUBREG_REG (op0), op1 = tem;
11888 }
11889 }
11890
11891 /* We now do the opposite procedure: Some machines don't have compare
11892 insns in all modes. If OP0's mode is an integer mode smaller than a
11893 word and we can't do a compare in that mode, see if there is a larger
11894 mode for which we can do the compare. There are a number of cases in
11895 which we can use the wider mode. */
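  /* A hypothetical case: on a target lacking a QImode compare but having
     an SImode one, (lt (reg:QI R) (const_int 0)) can become
     (ne (and:SI (subreg:SI (reg:QI R) 0) (const_int 128)) (const_int 0)),
     an explicit test of the QImode sign bit done in SImode.  */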
11896
11897 mode = GET_MODE (op0);
11898 if (mode != VOIDmode && GET_MODE_CLASS (mode) == MODE_INT
11899 && GET_MODE_SIZE (mode) < UNITS_PER_WORD
11900 && ! have_insn_for (COMPARE, mode))
11901 for (tmode = GET_MODE_WIDER_MODE (mode);
11902 (tmode != VOIDmode
11903 && GET_MODE_BITSIZE (tmode) <= HOST_BITS_PER_WIDE_INT);
11904 tmode = GET_MODE_WIDER_MODE (tmode))
11905 if (have_insn_for (COMPARE, tmode))
11906 {
11907 int zero_extended;
11908
11909 /* If this is a test for negative, we can make an explicit
11910 test of the sign bit. Test this first so we can use
11911 a paradoxical subreg to extend OP0. */
11912
11913 if (op1 == const0_rtx && (code == LT || code == GE)
11914 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
11915 {
11916 op0 = simplify_gen_binary (AND, tmode,
11917 gen_lowpart (tmode, op0),
11918 GEN_INT ((unsigned HOST_WIDE_INT) 1
11919 << (GET_MODE_BITSIZE (mode)
11920 - 1)));
11921 code = (code == LT) ? NE : EQ;
11922 break;
11923 }
11924
11925 /* If the only nonzero bits in OP0 and OP1 are those in the
11926 narrower mode and this is an equality or unsigned comparison,
11927 we can use the wider mode. Similarly for sign-extended
11928 values, in which case it is true for all comparisons. */
11929 zero_extended = ((code == EQ || code == NE
11930 || code == GEU || code == GTU
11931 || code == LEU || code == LTU)
11932 && (nonzero_bits (op0, tmode)
11933 & ~GET_MODE_MASK (mode)) == 0
11934 && ((CONST_INT_P (op1)
11935 || (nonzero_bits (op1, tmode)
11936 & ~GET_MODE_MASK (mode)) == 0)));
11937
11938 if (zero_extended
11939 || ((num_sign_bit_copies (op0, tmode)
11940 > (unsigned int) (GET_MODE_BITSIZE (tmode)
11941 - GET_MODE_BITSIZE (mode)))
11942 && (num_sign_bit_copies (op1, tmode)
11943 > (unsigned int) (GET_MODE_BITSIZE (tmode)
11944 - GET_MODE_BITSIZE (mode)))))
11945 {
11946 /* If OP0 is an AND and we don't have an AND in MODE either,
11947 make a new AND in the proper mode. */
11948 if (GET_CODE (op0) == AND
11949 && !have_insn_for (AND, mode))
11950 op0 = simplify_gen_binary (AND, tmode,
11951 gen_lowpart (tmode,
11952 XEXP (op0, 0)),
11953 gen_lowpart (tmode,
11954 XEXP (op0, 1)));
11955 else
11956 {
11957 if (zero_extended)
11958 {
11959 op0 = simplify_gen_unary (ZERO_EXTEND, tmode, op0, mode);
11960 op1 = simplify_gen_unary (ZERO_EXTEND, tmode, op1, mode);
11961 }
11962 else
11963 {
11964 op0 = simplify_gen_unary (SIGN_EXTEND, tmode, op0, mode);
11965 op1 = simplify_gen_unary (SIGN_EXTEND, tmode, op1, mode);
11966 }
11967 break;
11968 }
11969 }
11970 }
11971
11972 #ifdef CANONICALIZE_COMPARISON
11973 /* If this machine only supports a subset of valid comparisons, see if we
11974 can convert an unsupported one into a supported one. */
11975 CANONICALIZE_COMPARISON (code, op0, op1);
11976 #endif
11977
11978 *pop0 = op0;
11979 *pop1 = op1;
11980
11981 return code;
11982 }
11983 \f
11984 /* Utility function for record_value_for_reg. Count number of
11985 rtxs in X. */
11986 static int
11987 count_rtxs (rtx x)
11988 {
11989 enum rtx_code code = GET_CODE (x);
11990 const char *fmt;
11991 int i, j, ret = 1;
11992
11993 if (GET_RTX_CLASS (code) == RTX_BIN_ARITH
11994 || GET_RTX_CLASS (code) == RTX_COMM_ARITH)
11995 {
11996 rtx x0 = XEXP (x, 0);
11997 rtx x1 = XEXP (x, 1);
11998
11999 if (x0 == x1)
12000 return 1 + 2 * count_rtxs (x0);
12001
12002 if ((GET_RTX_CLASS (GET_CODE (x1)) == RTX_BIN_ARITH
12003 || GET_RTX_CLASS (GET_CODE (x1)) == RTX_COMM_ARITH)
12004 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
12005 return 2 + 2 * count_rtxs (x0)
12006 + count_rtxs (x0 == XEXP (x1, 0)
12007 ? XEXP (x1, 1) : XEXP (x1, 0));
12008
12009 if ((GET_RTX_CLASS (GET_CODE (x0)) == RTX_BIN_ARITH
12010 || GET_RTX_CLASS (GET_CODE (x0)) == RTX_COMM_ARITH)
12011 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
12012 return 2 + 2 * count_rtxs (x1)
12013 + count_rtxs (x1 == XEXP (x0, 0)
12014 ? XEXP (x0, 1) : XEXP (x0, 0));
12015 }
12016
12017 fmt = GET_RTX_FORMAT (code);
12018 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
12019 if (fmt[i] == 'e')
12020 ret += count_rtxs (XEXP (x, i));
12021 else if (fmt[i] == 'E')
12022 for (j = 0; j < XVECLEN (x, i); j++)
12023 ret += count_rtxs (XVECEXP (x, i, j));
12024
12025 return ret;
12026 }
12027 \f
12028 /* Utility function for the following routine. Called when X is part of a value
12029 being stored into last_set_value. Sets last_set_table_tick
12030 for each register mentioned. Similar to mention_regs in cse.c. */
12031
12032 static void
12033 update_table_tick (rtx x)
12034 {
12035 enum rtx_code code = GET_CODE (x);
12036 const char *fmt = GET_RTX_FORMAT (code);
12037 int i, j;
12038
12039 if (code == REG)
12040 {
12041 unsigned int regno = REGNO (x);
12042 unsigned int endregno = END_REGNO (x);
12043 unsigned int r;
12044
12045 for (r = regno; r < endregno; r++)
12046 {
12047 reg_stat_type *rsp = VEC_index (reg_stat_type, reg_stat, r);
12048 rsp->last_set_table_tick = label_tick;
12049 }
12050
12051 return;
12052 }
12053
12054 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
12055 if (fmt[i] == 'e')
12056 {
12057 /* Check for identical subexpressions. If x contains
12058 identical subexpressions we only have to traverse one of
12059 them. */
12060 if (i == 0 && ARITHMETIC_P (x))
12061 {
12062 /* Note that at this point x1 has already been
12063 processed. */
12064 rtx x0 = XEXP (x, 0);
12065 rtx x1 = XEXP (x, 1);
12066
12067 /* If x0 and x1 are identical then there is no need to
12068 process x0. */
12069 if (x0 == x1)
12070 break;
12071
12072 /* If x0 is identical to a subexpression of x1 then while
12073 processing x1, x0 has already been processed. Thus we
12074 are done with x. */
12075 if (ARITHMETIC_P (x1)
12076 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
12077 break;
12078
12079 /* If x1 is identical to a subexpression of x0 then we
12080 still have to process the rest of x0. */
12081 if (ARITHMETIC_P (x0)
12082 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
12083 {
12084 update_table_tick (XEXP (x0, x1 == XEXP (x0, 0) ? 1 : 0));
12085 break;
12086 }
12087 }
12088
12089 update_table_tick (XEXP (x, i));
12090 }
12091 else if (fmt[i] == 'E')
12092 for (j = 0; j < XVECLEN (x, i); j++)
12093 update_table_tick (XVECEXP (x, i, j));
12094 }
12095
12096 /* Record that REG is set to VALUE in insn INSN. If VALUE is zero, we
12097 are saying that the register is clobbered and we no longer know its
12098 value. If INSN is zero, don't update reg_stat[].last_set; this is
12099 only permitted with VALUE also zero and is used to invalidate the
12100 register. */
12101
12102 static void
12103 record_value_for_reg (rtx reg, rtx insn, rtx value)
12104 {
12105 unsigned int regno = REGNO (reg);
12106 unsigned int endregno = END_REGNO (reg);
12107 unsigned int i;
12108 reg_stat_type *rsp;
12109
12110 /* If VALUE contains REG and we have a previous value for REG, substitute
12111 the previous value. */
12112 if (value && insn && reg_overlap_mentioned_p (reg, value))
12113 {
12114 rtx tem;
12115
12116 /* Set things up so get_last_value is allowed to see anything set up to
12117 our insn. */
12118 subst_low_luid = DF_INSN_LUID (insn);
12119 tem = get_last_value (reg);
12120
12121 /* If TEM is simply a binary operation with two CLOBBERs as operands,
12122 it isn't going to be useful and will take a lot of time to process,
12123 so just use the CLOBBER. */
12124
12125 if (tem)
12126 {
12127 if (ARITHMETIC_P (tem)
12128 && GET_CODE (XEXP (tem, 0)) == CLOBBER
12129 && GET_CODE (XEXP (tem, 1)) == CLOBBER)
12130 tem = XEXP (tem, 0);
12131 else if (count_occurrences (value, reg, 1) >= 2)
12132 {
12133 /* If there are two or more occurrences of REG in VALUE,
12134 prevent the value from growing too much. */
12135 if (count_rtxs (tem) > MAX_LAST_VALUE_RTL)
12136 tem = gen_rtx_CLOBBER (GET_MODE (tem), const0_rtx);
12137 }
12138
12139 value = replace_rtx (copy_rtx (value), reg, tem);
12140 }
12141 }
12142
12143 /* For each register modified, show we don't know its value, that
12144 we don't know about its bitwise content, that its value has been
12145 updated, and that we don't know the location of the death of the
12146 register. */
12147 for (i = regno; i < endregno; i++)
12148 {
12149 rsp = VEC_index (reg_stat_type, reg_stat, i);
12150
12151 if (insn)
12152 rsp->last_set = insn;
12153
12154 rsp->last_set_value = 0;
12155 rsp->last_set_mode = VOIDmode;
12156 rsp->last_set_nonzero_bits = 0;
12157 rsp->last_set_sign_bit_copies = 0;
12158 rsp->last_death = 0;
12159 rsp->truncated_to_mode = VOIDmode;
12160 }
12161
12162 /* Mark registers that are being referenced in this value. */
12163 if (value)
12164 update_table_tick (value);
12165
12166 /* Now update the status of each register being set.
12167 If someone is using this register in this block, set this register
12168 to invalid since we will get confused between the two lives in this
12169 basic block. This makes using this register always invalid. In cse, we
12170 scan the table to invalidate all entries using this register, but this
12171 is too much work for us. */
12172
12173 for (i = regno; i < endregno; i++)
12174 {
12175 rsp = VEC_index (reg_stat_type, reg_stat, i);
12176 rsp->last_set_label = label_tick;
12177 if (!insn
12178 || (value && rsp->last_set_table_tick >= label_tick_ebb_start))
12179 rsp->last_set_invalid = 1;
12180 else
12181 rsp->last_set_invalid = 0;
12182 }
12183
12184 /* The value being assigned might refer to X (like in "x++;"). In that
12185 case, we must replace it with (clobber (const_int 0)) to prevent
12186 infinite loops. */
12187 rsp = VEC_index (reg_stat_type, reg_stat, regno);
12188 if (value && !get_last_value_validate (&value, insn, label_tick, 0))
12189 {
12190 value = copy_rtx (value);
12191 if (!get_last_value_validate (&value, insn, label_tick, 1))
12192 value = 0;
12193 }
12194
12195 /* For the main register being modified, update the value, the mode, the
12196 nonzero bits, and the number of sign bit copies. */
12197
12198 rsp->last_set_value = value;
12199
12200 if (value)
12201 {
12202 enum machine_mode mode = GET_MODE (reg);
12203 subst_low_luid = DF_INSN_LUID (insn);
12204 rsp->last_set_mode = mode;
12205 if (GET_MODE_CLASS (mode) == MODE_INT
12206 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
12207 mode = nonzero_bits_mode;
12208 rsp->last_set_nonzero_bits = nonzero_bits (value, mode);
12209 rsp->last_set_sign_bit_copies
12210 = num_sign_bit_copies (value, GET_MODE (reg));
12211 }
12212 }
12213
12214 /* Called via note_stores from record_dead_and_set_regs to handle one
12215 SET or CLOBBER in an insn. DATA is the instruction in which the
12216 set is occurring. */
12217
12218 static void
12219 record_dead_and_set_regs_1 (rtx dest, const_rtx setter, void *data)
12220 {
12221 rtx record_dead_insn = (rtx) data;
12222
12223 if (GET_CODE (dest) == SUBREG)
12224 dest = SUBREG_REG (dest);
12225
12226 if (!record_dead_insn)
12227 {
12228 if (REG_P (dest))
12229 record_value_for_reg (dest, NULL_RTX, NULL_RTX);
12230 return;
12231 }
12232
12233 if (REG_P (dest))
12234 {
12235 /* If we are setting the whole register, we know its value. Otherwise
12236 show that we don't know the value. We can handle SUBREG in
12237 some cases. */
12238 if (GET_CODE (setter) == SET && dest == SET_DEST (setter))
12239 record_value_for_reg (dest, record_dead_insn, SET_SRC (setter));
12240 else if (GET_CODE (setter) == SET
12241 && GET_CODE (SET_DEST (setter)) == SUBREG
12242 && SUBREG_REG (SET_DEST (setter)) == dest
12243 && GET_MODE_BITSIZE (GET_MODE (dest)) <= BITS_PER_WORD
12244 && subreg_lowpart_p (SET_DEST (setter)))
12245 record_value_for_reg (dest, record_dead_insn,
12246 gen_lowpart (GET_MODE (dest),
12247 SET_SRC (setter)));
12248 else
12249 record_value_for_reg (dest, record_dead_insn, NULL_RTX);
12250 }
12251 else if (MEM_P (dest)
12252 /* Ignore pushes, they clobber nothing. */
12253 && ! push_operand (dest, GET_MODE (dest)))
12254 mem_last_set = DF_INSN_LUID (record_dead_insn);
12255 }
12256
12257 /* Update the records of when each REG was most recently set or killed
12258 for the things done by INSN. This is the last thing done in processing
12259 INSN in the combiner loop.
12260
12261 We update reg_stat[], in particular fields last_set, last_set_value,
12262 last_set_mode, last_set_nonzero_bits, last_set_sign_bit_copies,
12263 last_death, and also the similar information mem_last_set (which insn
12264 most recently modified memory) and last_call_luid (which insn was the
12265 most recent subroutine call). */
12266
12267 static void
12268 record_dead_and_set_regs (rtx insn)
12269 {
12270 rtx link;
12271 unsigned int i;
12272
12273 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
12274 {
12275 if (REG_NOTE_KIND (link) == REG_DEAD
12276 && REG_P (XEXP (link, 0)))
12277 {
12278 unsigned int regno = REGNO (XEXP (link, 0));
12279 unsigned int endregno = END_REGNO (XEXP (link, 0));
12280
12281 for (i = regno; i < endregno; i++)
12282 {
12283 reg_stat_type *rsp;
12284
12285 rsp = VEC_index (reg_stat_type, reg_stat, i);
12286 rsp->last_death = insn;
12287 }
12288 }
12289 else if (REG_NOTE_KIND (link) == REG_INC)
12290 record_value_for_reg (XEXP (link, 0), insn, NULL_RTX);
12291 }
12292
12293 if (CALL_P (insn))
12294 {
12295 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
12296 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
12297 {
12298 reg_stat_type *rsp;
12299
12300 rsp = VEC_index (reg_stat_type, reg_stat, i);
12301 rsp->last_set_invalid = 1;
12302 rsp->last_set = insn;
12303 rsp->last_set_value = 0;
12304 rsp->last_set_mode = VOIDmode;
12305 rsp->last_set_nonzero_bits = 0;
12306 rsp->last_set_sign_bit_copies = 0;
12307 rsp->last_death = 0;
12308 rsp->truncated_to_mode = VOIDmode;
12309 }
12310
12311 last_call_luid = mem_last_set = DF_INSN_LUID (insn);
12312
12313 /* We can't combine into a call pattern. Remember, though, that
12314 the return value register is set at this LUID. We could
12315 still replace a register with the return value from the
12316 wrong subroutine call! */
12317 note_stores (PATTERN (insn), record_dead_and_set_regs_1, NULL_RTX);
12318 }
12319 else
12320 note_stores (PATTERN (insn), record_dead_and_set_regs_1, insn);
12321 }
12322
12323 /* If a SUBREG has the promoted bit set, it is in fact a property of the
12324 register present in the SUBREG, so for each such SUBREG go back and
12325 adjust nonzero and sign bit information of the registers that are
12326 known to have some zero/sign bits set.
12327
12328 This is needed because when combine blows the SUBREGs away, the
12329 information on zero/sign bits is lost and further combines can be
12330 missed because of that. */
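/* For example (a sketch): a (subreg:SI (reg:DI R) 0) whose
   SUBREG_PROMOTED_UNSIGNED_P bit is set says that R holds a
   zero-extended SImode value, so the nonzero-bits information recorded
   for R can be narrowed to its low 32 bits.  */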
12331
12332 static void
12333 record_promoted_value (rtx insn, rtx subreg)
12334 {
12335 rtx links, set;
12336 unsigned int regno = REGNO (SUBREG_REG (subreg));
12337 enum machine_mode mode = GET_MODE (subreg);
12338
12339 if (GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT)
12340 return;
12341
12342 for (links = LOG_LINKS (insn); links;)
12343 {
12344 reg_stat_type *rsp;
12345
12346 insn = XEXP (links, 0);
12347 set = single_set (insn);
12348
12349 if (! set || !REG_P (SET_DEST (set))
12350 || REGNO (SET_DEST (set)) != regno
12351 || GET_MODE (SET_DEST (set)) != GET_MODE (SUBREG_REG (subreg)))
12352 {
12353 links = XEXP (links, 1);
12354 continue;
12355 }
12356
12357 rsp = VEC_index (reg_stat_type, reg_stat, regno);
12358 if (rsp->last_set == insn)
12359 {
12360 if (SUBREG_PROMOTED_UNSIGNED_P (subreg) > 0)
12361 rsp->last_set_nonzero_bits &= GET_MODE_MASK (mode);
12362 }
12363
12364 if (REG_P (SET_SRC (set)))
12365 {
12366 regno = REGNO (SET_SRC (set));
12367 links = LOG_LINKS (insn);
12368 }
12369 else
12370 break;
12371 }
12372 }
12373
12374 /* Check if X, a register, is known to contain a value already
12375 truncated to MODE. In this case we can use a subreg to refer to
12376 the truncated value even though in the generic case we would need
12377 an explicit truncation. */
12378
12379 static bool
12380 reg_truncated_to_mode (enum machine_mode mode, const_rtx x)
12381 {
12382 reg_stat_type *rsp = VEC_index (reg_stat_type, reg_stat, REGNO (x));
12383 enum machine_mode truncated = rsp->truncated_to_mode;
12384
12385 if (truncated == 0
12386 || rsp->truncation_label < label_tick_ebb_start)
12387 return false;
12388 if (GET_MODE_SIZE (truncated) <= GET_MODE_SIZE (mode))
12389 return true;
12390 if (TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
12391 GET_MODE_BITSIZE (truncated)))
12392 return true;
12393 return false;
12394 }
12395
12396 /* Callback for for_each_rtx. If *P is a hard reg or a subreg, record the mode
12397 that the register is accessed in. For non-TRULY_NOOP_TRUNCATION targets we
12398 might be able to turn a truncate into a subreg using this information.
12399 Return -1 if traversing *P is complete or 0 otherwise. */
12400
12401 static int
12402 record_truncated_value (rtx *p, void *data ATTRIBUTE_UNUSED)
12403 {
12404 rtx x = *p;
12405 enum machine_mode truncated_mode;
12406 reg_stat_type *rsp;
12407
12408 if (GET_CODE (x) == SUBREG && REG_P (SUBREG_REG (x)))
12409 {
12410 enum machine_mode original_mode = GET_MODE (SUBREG_REG (x));
12411 truncated_mode = GET_MODE (x);
12412
12413 if (GET_MODE_SIZE (original_mode) <= GET_MODE_SIZE (truncated_mode))
12414 return -1;
12415
12416 if (TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (truncated_mode),
12417 GET_MODE_BITSIZE (original_mode)))
12418 return -1;
12419
12420 x = SUBREG_REG (x);
12421 }
12422 /* ??? For hard-regs we now record everything. We might be able to
12423 optimize this using last_set_mode. */
12424 else if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
12425 truncated_mode = GET_MODE (x);
12426 else
12427 return 0;
12428
12429 rsp = VEC_index (reg_stat_type, reg_stat, REGNO (x));
12430 if (rsp->truncated_to_mode == 0
12431 || rsp->truncation_label < label_tick_ebb_start
12432 || (GET_MODE_SIZE (truncated_mode)
12433 < GET_MODE_SIZE (rsp->truncated_to_mode)))
12434 {
12435 rsp->truncated_to_mode = truncated_mode;
12436 rsp->truncation_label = label_tick;
12437 }
12438
12439 return -1;
12440 }
12441
12442 /* Callback for note_uses. Find hardregs and subregs of pseudos and
12443 the modes they are used in. This can help turn TRUNCATEs into
12444 SUBREGs. */
12445
12446 static void
12447 record_truncated_values (rtx *x, void *data ATTRIBUTE_UNUSED)
12448 {
12449 for_each_rtx (x, record_truncated_value, NULL);
12450 }
12451
12452 /* Scan X for promoted SUBREGs. For each one found,
12453 note what it implies to the registers used in it. */
12454
12455 static void
12456 check_promoted_subreg (rtx insn, rtx x)
12457 {
12458 if (GET_CODE (x) == SUBREG
12459 && SUBREG_PROMOTED_VAR_P (x)
12460 && REG_P (SUBREG_REG (x)))
12461 record_promoted_value (insn, x);
12462 else
12463 {
12464 const char *format = GET_RTX_FORMAT (GET_CODE (x));
12465 int i, j;
12466
12467 for (i = 0; i < GET_RTX_LENGTH (GET_CODE (x)); i++)
12468 switch (format[i])
12469 {
12470 case 'e':
12471 check_promoted_subreg (insn, XEXP (x, i));
12472 break;
12473 case 'V':
12474 case 'E':
12475 if (XVEC (x, i) != 0)
12476 for (j = 0; j < XVECLEN (x, i); j++)
12477 check_promoted_subreg (insn, XVECEXP (x, i, j));
12478 break;
12479 }
12480 }
12481 }
12482 \f
12483 /* Verify that all the registers and memory references mentioned in *LOC are
12484 still valid. *LOC was part of a value set in INSN when label_tick was
12485 equal to TICK. Return 0 if some are not. If REPLACE is nonzero, replace
12486 the invalid references with (clobber (const_int 0)) and return 1. This
12487 replacement is useful because we often can get useful information about
12488 the form of a value (e.g., if it was produced by a shift that always
12489 produces -1 or 0) even though we don't know exactly what registers it
12490 was produced from. */
12491
12492 static int
12493 get_last_value_validate (rtx *loc, rtx insn, int tick, int replace)
12494 {
12495 rtx x = *loc;
12496 const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
12497 int len = GET_RTX_LENGTH (GET_CODE (x));
12498 int i, j;
12499
12500 if (REG_P (x))
12501 {
12502 unsigned int regno = REGNO (x);
12503 unsigned int endregno = END_REGNO (x);
12504 unsigned int j;
12505
12506 for (j = regno; j < endregno; j++)
12507 {
12508 reg_stat_type *rsp = VEC_index (reg_stat_type, reg_stat, j);
12509 if (rsp->last_set_invalid
12510 /* If this is a pseudo-register that was only set once and not
12511 live at the beginning of the function, it is always valid. */
12512 || (! (regno >= FIRST_PSEUDO_REGISTER
12513 && REG_N_SETS (regno) == 1
12514 && (!REGNO_REG_SET_P
12515 (DF_LR_IN (ENTRY_BLOCK_PTR->next_bb), regno)))
12516 && rsp->last_set_label > tick))
12517 {
12518 if (replace)
12519 *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
12520 return replace;
12521 }
12522 }
12523
12524 return 1;
12525 }
12526 /* If this is a memory reference, make sure that there were no stores after
12527 it that might have clobbered the value. We don't have alias info, so we
12528 assume any store invalidates it. Moreover, we only have local UIDs, so
12529 we also assume that there were stores in the intervening basic blocks. */
12530 else if (MEM_P (x) && !MEM_READONLY_P (x)
12531 && (tick != label_tick || DF_INSN_LUID (insn) <= mem_last_set))
12532 {
12533 if (replace)
12534 *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
12535 return replace;
12536 }
12537
12538 for (i = 0; i < len; i++)
12539 {
12540 if (fmt[i] == 'e')
12541 {
12542 /* Check for identical subexpressions. If x contains
12543 identical subexpression we only have to traverse one of
12544 them. */
12545 if (i == 1 && ARITHMETIC_P (x))
12546 {
12547 /* Note that at this point x0 has already been checked
12548 and found valid. */
12549 rtx x0 = XEXP (x, 0);
12550 rtx x1 = XEXP (x, 1);
12551
12552 /* If x0 and x1 are identical then x is also valid. */
12553 if (x0 == x1)
12554 return 1;
12555
12556 /* If x1 is identical to a subexpression of x0 then
12557 while checking x0, x1 has already been checked. Thus
12558 it is valid and so is x. */
12559 if (ARITHMETIC_P (x0)
12560 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
12561 return 1;
12562
12563 /* If x0 is identical to a subexpression of x1 then x is
12564 valid iff the rest of x1 is valid. */
12565 if (ARITHMETIC_P (x1)
12566 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
12567 return
12568 get_last_value_validate (&XEXP (x1,
12569 x0 == XEXP (x1, 0) ? 1 : 0),
12570 insn, tick, replace);
12571 }
12572
12573 if (get_last_value_validate (&XEXP (x, i), insn, tick,
12574 replace) == 0)
12575 return 0;
12576 }
12577 else if (fmt[i] == 'E')
12578 for (j = 0; j < XVECLEN (x, i); j++)
12579 if (get_last_value_validate (&XVECEXP (x, i, j),
12580 insn, tick, replace) == 0)
12581 return 0;
12582 }
12583
12584 /* If we haven't found a reason for it to be invalid, it is valid. */
12585 return 1;
12586 }
12587
12588 /* Get the last value assigned to X, if known. Some registers
12589 in the value may be replaced with (clobber (const_int 0)) if their value
12590 is no longer known reliably. */
12591
12592 static rtx
12593 get_last_value (const_rtx x)
12594 {
12595 unsigned int regno;
12596 rtx value;
12597 reg_stat_type *rsp;
12598
12599 /* If this is a non-paradoxical SUBREG, get the value of its operand and
12600 then convert it to the desired mode. If this is a paradoxical SUBREG,
12601 we cannot predict what values the "extra" bits might have. */
12602 if (GET_CODE (x) == SUBREG
12603 && subreg_lowpart_p (x)
12604 && (GET_MODE_SIZE (GET_MODE (x))
12605 <= GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
12606 && (value = get_last_value (SUBREG_REG (x))) != 0)
12607 return gen_lowpart (GET_MODE (x), value);
12608
12609 if (!REG_P (x))
12610 return 0;
12611
12612 regno = REGNO (x);
12613 rsp = VEC_index (reg_stat_type, reg_stat, regno);
12614 value = rsp->last_set_value;
12615
12616 /* If we don't have a value, or if it isn't for this basic block and
12617 it's either a hard register, set more than once, or it's live
12618 at the beginning of the function, return 0.
12619
12620 Because if it's not live at the beginning of the function then the reg
12621 is always set before being used (is never used without being set).
12622 And, if it's set only once, and it's always set before use, then all
12623 uses must have the same last value, even if it's not from this basic
12624 block. */
12625
12626 if (value == 0
12627 || (rsp->last_set_label < label_tick_ebb_start
12628 && (regno < FIRST_PSEUDO_REGISTER
12629 || REG_N_SETS (regno) != 1
12630 || REGNO_REG_SET_P
12631 (DF_LR_IN (ENTRY_BLOCK_PTR->next_bb), regno))))
12632 return 0;
12633
12634 /* If the value was set in a later insn than the ones we are processing,
12635 we can't use it even if the register was only set once. */
12636 if (rsp->last_set_label == label_tick
12637 && DF_INSN_LUID (rsp->last_set) >= subst_low_luid)
12638 return 0;
12639
12640 /* If the value has all its registers valid, return it. */
12641 if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 0))
12642 return value;
12643
12644 /* Otherwise, make a copy and replace any invalid register with
12645 (clobber (const_int 0)). If that fails for some reason, return 0. */
12646
12647 value = copy_rtx (value);
12648 if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 1))
12649 return value;
12650
12651 return 0;
12652 }
12653 \f
12654 /* Return nonzero if expression X refers to a REG or to memory
12655 that is set in an instruction more recent than FROM_LUID. */
12656
12657 static int
12658 use_crosses_set_p (const_rtx x, int from_luid)
12659 {
12660 const char *fmt;
12661 int i;
12662 enum rtx_code code = GET_CODE (x);
12663
12664 if (code == REG)
12665 {
12666 unsigned int regno = REGNO (x);
12667 unsigned endreg = END_REGNO (x);
12668
12669 #ifdef PUSH_ROUNDING
12670 /* Don't allow uses of the stack pointer to be moved,
12671 because we don't know whether the move crosses a push insn. */
12672 if (regno == STACK_POINTER_REGNUM && PUSH_ARGS)
12673 return 1;
12674 #endif
12675 for (; regno < endreg; regno++)
12676 {
12677 reg_stat_type *rsp = VEC_index (reg_stat_type, reg_stat, regno);
12678 if (rsp->last_set
12679 && rsp->last_set_label == label_tick
12680 && DF_INSN_LUID (rsp->last_set) > from_luid)
12681 return 1;
12682 }
12683 return 0;
12684 }
12685
12686 if (code == MEM && mem_last_set > from_luid)
12687 return 1;
12688
12689 fmt = GET_RTX_FORMAT (code);
12690
12691 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
12692 {
12693 if (fmt[i] == 'E')
12694 {
12695 int j;
12696 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
12697 if (use_crosses_set_p (XVECEXP (x, i, j), from_luid))
12698 return 1;
12699 }
12700 else if (fmt[i] == 'e'
12701 && use_crosses_set_p (XEXP (x, i), from_luid))
12702 return 1;
12703 }
12704 return 0;
12705 }
12706 \f
12707 /* Define three variables used for communication between the following
12708 routines. */
12709
12710 static unsigned int reg_dead_regno, reg_dead_endregno;
12711 static int reg_dead_flag;
12712
12713 /* Function called via note_stores from reg_dead_at_p.
12714
12715 If DEST is within [reg_dead_regno, reg_dead_endregno), set
12716 reg_dead_flag to 1 if X is a CLOBBER and to -1 if it is a SET. */
12717
12718 static void
12719 reg_dead_at_p_1 (rtx dest, const_rtx x, void *data ATTRIBUTE_UNUSED)
12720 {
12721 unsigned int regno, endregno;
12722
12723 if (!REG_P (dest))
12724 return;
12725
12726 regno = REGNO (dest);
12727 endregno = END_REGNO (dest);
12728 if (reg_dead_endregno > regno && reg_dead_regno < endregno)
12729 reg_dead_flag = (GET_CODE (x) == CLOBBER) ? 1 : -1;
12730 }
12731
12732 /* Return nonzero if REG is known to be dead at INSN.
12733
12734 We scan backwards from INSN. If we hit a REG_DEAD note or a CLOBBER
12735 referencing REG, it is dead. If we hit a SET referencing REG, it is
12736 live. Otherwise, see if it is live or dead at the start of the basic
12737 block we are in. Hard regs marked as being live in NEWPAT_USED_REGS
12738 must be assumed to be always live. */
12739
12740 static int
12741 reg_dead_at_p (rtx reg, rtx insn)
12742 {
12743 basic_block block;
12744 unsigned int i;
12745
12746 /* Set variables for reg_dead_at_p_1. */
12747 reg_dead_regno = REGNO (reg);
12748 reg_dead_endregno = END_REGNO (reg);
12749
12750 reg_dead_flag = 0;
12751
12752 /* Check that reg isn't mentioned in NEWPAT_USED_REGS. For fixed registers
12753 we allow the machine description to decide whether use-and-clobber
12754 patterns are OK. */
12755 if (reg_dead_regno < FIRST_PSEUDO_REGISTER)
12756 {
12757 for (i = reg_dead_regno; i < reg_dead_endregno; i++)
12758 if (!fixed_regs[i] && TEST_HARD_REG_BIT (newpat_used_regs, i))
12759 return 0;
12760 }
12761
12762 /* Scan backwards until we find a REG_DEAD note, SET, CLOBBER, or
12763 beginning of basic block. */
12764 block = BLOCK_FOR_INSN (insn);
12765 for (;;)
12766 {
12767 if (INSN_P (insn))
12768 {
12769 note_stores (PATTERN (insn), reg_dead_at_p_1, NULL);
12770 if (reg_dead_flag)
12771 return reg_dead_flag == 1 ? 1 : 0;
12772
12773 if (find_regno_note (insn, REG_DEAD, reg_dead_regno))
12774 return 1;
12775 }
12776
12777 if (insn == BB_HEAD (block))
12778 break;
12779
12780 insn = PREV_INSN (insn);
12781 }
12782
12783 /* Look at live-in sets for the basic block that we were in. */
12784 for (i = reg_dead_regno; i < reg_dead_endregno; i++)
12785 if (REGNO_REG_SET_P (df_get_live_in (block), i))
12786 return 0;
12787
12788 return 1;
12789 }
12790 \f
12791 /* Note hard registers in X that are used. */
12792
12793 static void
12794 mark_used_regs_combine (rtx x)
12795 {
12796 RTX_CODE code = GET_CODE (x);
12797 unsigned int regno;
12798 int i;
12799
12800 switch (code)
12801 {
12802 case LABEL_REF:
12803 case SYMBOL_REF:
12804 case CONST_INT:
12805 case CONST:
12806 case CONST_DOUBLE:
12807 case CONST_VECTOR:
12808 case PC:
12809 case ADDR_VEC:
12810 case ADDR_DIFF_VEC:
12811 case ASM_INPUT:
12812 #ifdef HAVE_cc0
12813 /* CC0 must die in the insn after it is set, so we don't need to take
12814 special note of it here. */
12815 case CC0:
12816 #endif
12817 return;
12818
12819 case CLOBBER:
12820 /* If we are clobbering a MEM, mark any hard registers inside the
12821 address as used. */
12822 if (MEM_P (XEXP (x, 0)))
12823 mark_used_regs_combine (XEXP (XEXP (x, 0), 0));
12824 return;
12825
12826 case REG:
12827 regno = REGNO (x);
12828 /* A hard reg in a wide mode may really be multiple registers.
12829 If so, mark all of them just like the first. */
12830 if (regno < FIRST_PSEUDO_REGISTER)
12831 {
12832 /* None of this applies to the stack, frame or arg pointers. */
12833 if (regno == STACK_POINTER_REGNUM
12834 #if !HARD_FRAME_POINTER_IS_FRAME_POINTER
12835 || regno == HARD_FRAME_POINTER_REGNUM
12836 #endif
12837 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
12838 || (regno == ARG_POINTER_REGNUM && fixed_regs[regno])
12839 #endif
12840 || regno == FRAME_POINTER_REGNUM)
12841 return;
12842
12843 add_to_hard_reg_set (&newpat_used_regs, GET_MODE (x), regno);
12844 }
12845 return;
12846
12847 case SET:
12848 {
12849 /* If setting a MEM, or a SUBREG of a MEM, then note any hard regs in
12850 the address. */
12851 rtx testreg = SET_DEST (x);
12852
12853 while (GET_CODE (testreg) == SUBREG
12854 || GET_CODE (testreg) == ZERO_EXTRACT
12855 || GET_CODE (testreg) == STRICT_LOW_PART)
12856 testreg = XEXP (testreg, 0);
12857
12858 if (MEM_P (testreg))
12859 mark_used_regs_combine (XEXP (testreg, 0));
12860
12861 mark_used_regs_combine (SET_SRC (x));
12862 }
12863 return;
12864
12865 default:
12866 break;
12867 }
12868
12869 /* Recursively scan the operands of this expression. */
12870
12871 {
12872 const char *fmt = GET_RTX_FORMAT (code);
12873
12874 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
12875 {
12876 if (fmt[i] == 'e')
12877 mark_used_regs_combine (XEXP (x, i));
12878 else if (fmt[i] == 'E')
12879 {
12880 int j;
12881
12882 for (j = 0; j < XVECLEN (x, i); j++)
12883 mark_used_regs_combine (XVECEXP (x, i, j));
12884 }
12885 }
12886 }
12887 }
12888 \f
12889 /* Remove register number REGNO from the dead registers list of INSN.
12890
12891 Return the note used to record the death, if there was one. */
12892
12893 rtx
12894 remove_death (unsigned int regno, rtx insn)
12895 {
12896 rtx note = find_regno_note (insn, REG_DEAD, regno);
12897
12898 if (note)
12899 remove_note (insn, note);
12900
12901 return note;
12902 }
12903
12904 /* For each register (hardware or pseudo) used within expression X, if its
12905 death is in an instruction with luid between FROM_LUID (inclusive) and
12906 TO_INSN (exclusive), put a REG_DEAD note for that register in the
12907 list headed by PNOTES.
12908
12909 That said, don't move registers killed by maybe_kill_insn.
12910
12911 This is done when X is being merged by combination into TO_INSN. These
12912 notes will then be distributed as needed. */
12913
12914 static void
12915 move_deaths (rtx x, rtx maybe_kill_insn, int from_luid, rtx to_insn,
12916 rtx *pnotes)
12917 {
12918 const char *fmt;
12919 int len, i;
12920 enum rtx_code code = GET_CODE (x);
12921
12922 if (code == REG)
12923 {
12924 unsigned int regno = REGNO (x);
12925 rtx where_dead = VEC_index (reg_stat_type, reg_stat, regno)->last_death;
12926
12927 /* Don't move the register if it gets killed in between from and to. */
12928 if (maybe_kill_insn && reg_set_p (x, maybe_kill_insn)
12929 && ! reg_referenced_p (x, maybe_kill_insn))
12930 return;
12931
12932 if (where_dead
12933 && BLOCK_FOR_INSN (where_dead) == BLOCK_FOR_INSN (to_insn)
12934 && DF_INSN_LUID (where_dead) >= from_luid
12935 && DF_INSN_LUID (where_dead) < DF_INSN_LUID (to_insn))
12936 {
12937 rtx note = remove_death (regno, where_dead);
12938
12939 /* It is possible for the call above to return 0. This can occur
12940 when last_death points to I2 or I1 that we combined with.
12941 In that case make a new note.
12942
12943 We must also check for the case where X is a hard register
12944 and NOTE is a death note for a range of hard registers
12945 including X. In that case, we must put REG_DEAD notes for
12946 the remaining registers in place of NOTE. */
12947
12948 if (note != 0 && regno < FIRST_PSEUDO_REGISTER
12949 && (GET_MODE_SIZE (GET_MODE (XEXP (note, 0)))
12950 > GET_MODE_SIZE (GET_MODE (x))))
12951 {
12952 unsigned int deadregno = REGNO (XEXP (note, 0));
12953 unsigned int deadend = END_HARD_REGNO (XEXP (note, 0));
12954 unsigned int ourend = END_HARD_REGNO (x);
12955 unsigned int i;
12956
12957 for (i = deadregno; i < deadend; i++)
12958 if (i < regno || i >= ourend)
12959 add_reg_note (where_dead, REG_DEAD, regno_reg_rtx[i]);
12960 }
12961
12962 /* If we didn't find any note, or if we found a REG_DEAD note that
12963 covers only part of the given reg, and we have a multi-reg hard
12964 register, then to be safe we must check for REG_DEAD notes
12965 for each register other than the first. They could have
12966 their own REG_DEAD notes lying around. */
12967 else if ((note == 0
12968 || (note != 0
12969 && (GET_MODE_SIZE (GET_MODE (XEXP (note, 0)))
12970 < GET_MODE_SIZE (GET_MODE (x)))))
12971 && regno < FIRST_PSEUDO_REGISTER
12972 && hard_regno_nregs[regno][GET_MODE (x)] > 1)
12973 {
12974 unsigned int ourend = END_HARD_REGNO (x);
12975 unsigned int i, offset;
12976 rtx oldnotes = 0;
12977
12978 if (note)
12979 offset = hard_regno_nregs[regno][GET_MODE (XEXP (note, 0))];
12980 else
12981 offset = 1;
12982
12983 for (i = regno + offset; i < ourend; i++)
12984 move_deaths (regno_reg_rtx[i],
12985 maybe_kill_insn, from_luid, to_insn, &oldnotes);
12986 }
12987
12988 if (note != 0 && GET_MODE (XEXP (note, 0)) == GET_MODE (x))
12989 {
12990 XEXP (note, 1) = *pnotes;
12991 *pnotes = note;
12992 }
12993 else
12994 *pnotes = alloc_reg_note (REG_DEAD, x, *pnotes);
12995 }
12996
12997 return;
12998 }
12999
13000 else if (GET_CODE (x) == SET)
13001 {
13002 rtx dest = SET_DEST (x);
13003
13004 move_deaths (SET_SRC (x), maybe_kill_insn, from_luid, to_insn, pnotes);
13005
13006 /* In the case of a ZERO_EXTRACT, a STRICT_LOW_PART, or a SUBREG
13007 that accesses one word of a multi-word item, some
13008 piece of every register in the expression is used by
13009 this insn, so remove any old death. */
13010 /* ??? So why do we test for equality of the sizes? */
13011
13012 if (GET_CODE (dest) == ZERO_EXTRACT
13013 || GET_CODE (dest) == STRICT_LOW_PART
13014 || (GET_CODE (dest) == SUBREG
13015 && (((GET_MODE_SIZE (GET_MODE (dest))
13016 + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
13017 == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
13018 + UNITS_PER_WORD - 1) / UNITS_PER_WORD))))
13019 {
13020 move_deaths (dest, maybe_kill_insn, from_luid, to_insn, pnotes);
13021 return;
13022 }
13023
13024 /* If this is some other SUBREG, we know it replaces the entire
13025 value, so use that as the destination. */
13026 if (GET_CODE (dest) == SUBREG)
13027 dest = SUBREG_REG (dest);
13028
13029 /* If this is a MEM, adjust deaths of anything used in the address.
13030 For a REG (the only other possibility), the entire value is
13031 being replaced so the old value is not used in this insn. */
13032
13033 if (MEM_P (dest))
13034 move_deaths (XEXP (dest, 0), maybe_kill_insn, from_luid,
13035 to_insn, pnotes);
13036 return;
13037 }
13038
13039 else if (GET_CODE (x) == CLOBBER)
13040 return;
13041
13042 len = GET_RTX_LENGTH (code);
13043 fmt = GET_RTX_FORMAT (code);
13044
13045 for (i = 0; i < len; i++)
13046 {
13047 if (fmt[i] == 'E')
13048 {
13049 int j;
13050 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
13051 move_deaths (XVECEXP (x, i, j), maybe_kill_insn, from_luid,
13052 to_insn, pnotes);
13053 }
13054 else if (fmt[i] == 'e')
13055 move_deaths (XEXP (x, i), maybe_kill_insn, from_luid, to_insn, pnotes);
13056 }
13057 }
13058 \f
13059 /* Return 1 if X is the target of a bit-field assignment in BODY, the
13060 pattern of an insn. X must be a REG. */
13061
13062 static int
13063 reg_bitfield_target_p (rtx x, rtx body)
13064 {
13065 int i;
13066
13067 if (GET_CODE (body) == SET)
13068 {
13069 rtx dest = SET_DEST (body);
13070 rtx target;
13071 unsigned int regno, tregno, endregno, endtregno;
13072
13073 if (GET_CODE (dest) == ZERO_EXTRACT)
13074 target = XEXP (dest, 0);
13075 else if (GET_CODE (dest) == STRICT_LOW_PART)
13076 target = SUBREG_REG (XEXP (dest, 0));
13077 else
13078 return 0;
13079
13080 if (GET_CODE (target) == SUBREG)
13081 target = SUBREG_REG (target);
13082
13083 if (!REG_P (target))
13084 return 0;
13085
13086 tregno = REGNO (target), regno = REGNO (x);
13087 if (tregno >= FIRST_PSEUDO_REGISTER || regno >= FIRST_PSEUDO_REGISTER)
13088 return target == x;
13089
13090 endtregno = end_hard_regno (GET_MODE (target), tregno);
13091 endregno = end_hard_regno (GET_MODE (x), regno);
13092
13093 return endregno > tregno && regno < endtregno;
13094 }
13095
13096 else if (GET_CODE (body) == PARALLEL)
13097 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
13098 if (reg_bitfield_target_p (x, XVECEXP (body, 0, i)))
13099 return 1;
13100
13101 return 0;
13102 }
13103 \f
13104 /* Given a chain of REG_NOTES originally from FROM_INSN, try to place them
13105 as appropriate. I3 and I2 are the insns resulting from the combination
13106 insns including FROM (I2 may be zero).
13107
13108 ELIM_I2 and ELIM_I1 are either zero or registers that we know will
13109 not need REG_DEAD notes because they are being substituted for. This
13110 saves searching in the most common cases.
13111
13112 Each note in the list is either ignored or placed on some insns, depending
13113 on the type of note. */
13114
13115 static void
13116 distribute_notes (rtx notes, rtx from_insn, rtx i3, rtx i2, rtx elim_i2,
13117 rtx elim_i1, rtx elim_i0)
13118 {
13119 rtx note, next_note;
13120 rtx tem;
13121
13122 for (note = notes; note; note = next_note)
13123 {
13124 rtx place = 0, place2 = 0;
13125
13126 next_note = XEXP (note, 1);
13127 switch (REG_NOTE_KIND (note))
13128 {
13129 case REG_BR_PROB:
13130 case REG_BR_PRED:
13131 /* Doesn't matter much where we put this, as long as it's somewhere.
13132 It is preferable to keep these notes on branches, which is most
13133 likely to be i3. */
13134 place = i3;
13135 break;
13136
13137 case REG_VALUE_PROFILE:
13138 /* Just get rid of this note, as it is unused later anyway. */
13139 break;
13140
13141 case REG_NON_LOCAL_GOTO:
13142 if (JUMP_P (i3))
13143 place = i3;
13144 else
13145 {
13146 gcc_assert (i2 && JUMP_P (i2));
13147 place = i2;
13148 }
13149 break;
13150
13151 case REG_EH_REGION:
13152 /* These notes must remain with the call or trapping instruction. */
13153 if (CALL_P (i3))
13154 place = i3;
13155 else if (i2 && CALL_P (i2))
13156 place = i2;
13157 else
13158 {
13159 gcc_assert (cfun->can_throw_non_call_exceptions);
13160 if (may_trap_p (i3))
13161 place = i3;
13162 else if (i2 && may_trap_p (i2))
13163 place = i2;
13164 /* ??? Otherwise assume we've combined things such that we
13165 can now prove that the instructions can't trap. Drop the
13166 note in this case. */
13167 }
13168 break;
13169
13170 case REG_NORETURN:
13171 case REG_SETJMP:
13172 /* These notes must remain with the call. It should not be
13173 possible for both I2 and I3 to be a call. */
13174 if (CALL_P (i3))
13175 place = i3;
13176 else
13177 {
13178 gcc_assert (i2 && CALL_P (i2));
13179 place = i2;
13180 }
13181 break;
13182
13183 case REG_UNUSED:
13184 /* Any clobbers for i3 may still exist, and so we must process
13185 REG_UNUSED notes from that insn.
13186
13187 Any clobbers from i2 or i1 can only exist if they were added by
13188 recog_for_combine. In that case, recog_for_combine created the
13189 necessary REG_UNUSED notes. Trying to keep any original
13190 REG_UNUSED notes from these insns can cause incorrect output
13191 if it is for the same register as the original i3 dest.
13192 In that case, we will notice that the register is set in i3,
13193 and then add a REG_UNUSED note for the destination of i3, which
13194 is wrong. However, it is possible to have REG_UNUSED notes from
13195 i2 or i1 for registers which were both used and clobbered, so
13196 we keep notes from i2 or i1 if they will turn into REG_DEAD
13197 notes. */
13198
13199 /* If this register is set or clobbered in I3, put the note there
13200 unless there is one already. */
13201 if (reg_set_p (XEXP (note, 0), PATTERN (i3)))
13202 {
13203 if (from_insn != i3)
13204 break;
13205
13206 if (! (REG_P (XEXP (note, 0))
13207 ? find_regno_note (i3, REG_UNUSED, REGNO (XEXP (note, 0)))
13208 : find_reg_note (i3, REG_UNUSED, XEXP (note, 0))))
13209 place = i3;
13210 }
13211 /* Otherwise, if this register is used by I3, then this register
13212 now dies here, so we must put a REG_DEAD note here unless there
13213 is one already. */
13214 else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3))
13215 && ! (REG_P (XEXP (note, 0))
13216 ? find_regno_note (i3, REG_DEAD,
13217 REGNO (XEXP (note, 0)))
13218 : find_reg_note (i3, REG_DEAD, XEXP (note, 0))))
13219 {
13220 PUT_REG_NOTE_KIND (note, REG_DEAD);
13221 place = i3;
13222 }
13223 break;
13224
13225 case REG_EQUAL:
13226 case REG_EQUIV:
13227 case REG_NOALIAS:
13228 /* These notes say something about results of an insn. We can
13229 only support them if they used to be on I3 in which case they
13230 remain on I3. Otherwise they are ignored.
13231
13232 If the note refers to an expression that is not a constant, we
13233 must also ignore the note since we cannot tell whether the
13234 equivalence is still true. It might be possible to do
13235 slightly better than this (we only have a problem if I2DEST
13236 or I1DEST is present in the expression), but it doesn't
13237 seem worth the trouble. */
13238
13239 if (from_insn == i3
13240 && (XEXP (note, 0) == 0 || CONSTANT_P (XEXP (note, 0))))
13241 place = i3;
13242 break;
13243
13244 case REG_INC:
13245 /* These notes say something about how a register is used. They must
13246 be present on any use of the register in I2 or I3. */
13247 if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3)))
13248 place = i3;
13249
13250 if (i2 && reg_mentioned_p (XEXP (note, 0), PATTERN (i2)))
13251 {
13252 if (place)
13253 place2 = i2;
13254 else
13255 place = i2;
13256 }
13257 break;
13258
13259 case REG_LABEL_TARGET:
13260 case REG_LABEL_OPERAND:
13261 /* This can show up in several ways -- either directly in the
13262 pattern, or hidden off in the constant pool with (or without?)
13263 a REG_EQUAL note. */
13264 /* ??? Ignore the without-reg_equal-note problem for now. */
13265 if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3))
13266 || ((tem = find_reg_note (i3, REG_EQUAL, NULL_RTX))
13267 && GET_CODE (XEXP (tem, 0)) == LABEL_REF
13268 && XEXP (XEXP (tem, 0), 0) == XEXP (note, 0)))
13269 place = i3;
13270
13271 if (i2
13272 && (reg_mentioned_p (XEXP (note, 0), PATTERN (i2))
13273 || ((tem = find_reg_note (i2, REG_EQUAL, NULL_RTX))
13274 && GET_CODE (XEXP (tem, 0)) == LABEL_REF
13275 && XEXP (XEXP (tem, 0), 0) == XEXP (note, 0))))
13276 {
13277 if (place)
13278 place2 = i2;
13279 else
13280 place = i2;
13281 }
13282
13283 /* For REG_LABEL_TARGET on a JUMP_P, we prefer to put the note
13284 as a JUMP_LABEL or decrement LABEL_NUSES if it's already
13285 there. */
13286 if (place && JUMP_P (place)
13287 && REG_NOTE_KIND (note) == REG_LABEL_TARGET
13288 && (JUMP_LABEL (place) == NULL
13289 || JUMP_LABEL (place) == XEXP (note, 0)))
13290 {
13291 rtx label = JUMP_LABEL (place);
13292
13293 if (!label)
13294 JUMP_LABEL (place) = XEXP (note, 0);
13295 else if (LABEL_P (label))
13296 LABEL_NUSES (label)--;
13297 }
13298
13299 if (place2 && JUMP_P (place2)
13300 && REG_NOTE_KIND (note) == REG_LABEL_TARGET
13301 && (JUMP_LABEL (place2) == NULL
13302 || JUMP_LABEL (place2) == XEXP (note, 0)))
13303 {
13304 rtx label = JUMP_LABEL (place2);
13305
13306 if (!label)
13307 JUMP_LABEL (place2) = XEXP (note, 0);
13308 else if (LABEL_P (label))
13309 LABEL_NUSES (label)--;
13310 place2 = 0;
13311 }
13312 break;
13313
13314 case REG_NONNEG:
13315 /* This note says something about the value of a register prior
13316 to the execution of an insn. It is too much trouble to see
13317 if the note is still correct in all situations. It is better
13318 to simply delete it. */
13319 break;
13320
13321 case REG_DEAD:
13322 /* If we replaced the right hand side of FROM_INSN with a
13323 REG_EQUAL note, the original use of the dying register
13324 will not have been combined into I3 and I2. In such cases,
13325 FROM_INSN is guaranteed to be the first of the combined
13326 instructions, so we simply need to search back before
13327 FROM_INSN for the previous use or set of this register,
13328 then alter the notes there appropriately.
13329
13330 If the register is used as an input in I3, it dies there.
13331 Similarly for I2, if it is nonzero and adjacent to I3.
13332
13333 If the register is not used as an input in either I3 or I2
13334 and it is not one of the registers we were supposed to eliminate,
13335 there are two possibilities. We might have a non-adjacent I2
13336 or we might have somehow eliminated an additional register
13337 from a computation. For example, we might have had A & B where
13338 we discover that B will always be zero. In this case we will
13339 eliminate the reference to A.
13340
13341 In both cases, we must search to see if we can find a previous
13342 use of A and put the death note there. */
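/* In particular, if the register is not referenced in I3 or I2 and is
   one of the destinations the combination eliminated (the ELIM_I2,
   ELIM_I1 and ELIM_I0 checks below), the death note is simply
   dropped.  */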
13343
13344 if (from_insn
13345 && from_insn == i2mod
13346 && !reg_overlap_mentioned_p (XEXP (note, 0), i2mod_new_rhs))
13347 tem = from_insn;
13348 else
13349 {
13350 if (from_insn
13351 && CALL_P (from_insn)
13352 && find_reg_fusage (from_insn, USE, XEXP (note, 0)))
13353 place = from_insn;
13354 else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3)))
13355 place = i3;
13356 else if (i2 != 0 && next_nonnote_nondebug_insn (i2) == i3
13357 && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
13358 place = i2;
13359 else if ((rtx_equal_p (XEXP (note, 0), elim_i2)
13360 && !(i2mod
13361 && reg_overlap_mentioned_p (XEXP (note, 0),
13362 i2mod_old_rhs)))
13363 || rtx_equal_p (XEXP (note, 0), elim_i1)
13364 || rtx_equal_p (XEXP (note, 0), elim_i0))
13365 break;
13366 tem = i3;
13367 }
13368
13369 if (place == 0)
13370 {
13371 basic_block bb = this_basic_block;
13372
13373 for (tem = PREV_INSN (tem); place == 0; tem = PREV_INSN (tem))
13374 {
13375 if (!NONDEBUG_INSN_P (tem))
13376 {
13377 if (tem == BB_HEAD (bb))
13378 break;
13379 continue;
13380 }
13381
13382 /* If the register is being set at TEM, see if that is all
13383 TEM is doing. If so, delete TEM. Otherwise, make this
13384 into a REG_UNUSED note instead. Don't delete sets to
13385 global register vars. */
13386 if ((REGNO (XEXP (note, 0)) >= FIRST_PSEUDO_REGISTER
13387 || !global_regs[REGNO (XEXP (note, 0))])
13388 && reg_set_p (XEXP (note, 0), PATTERN (tem)))
13389 {
13390 rtx set = single_set (tem);
13391 rtx inner_dest = 0;
13392 #ifdef HAVE_cc0
13393 rtx cc0_setter = NULL_RTX;
13394 #endif
13395
13396 if (set != 0)
13397 for (inner_dest = SET_DEST (set);
13398 (GET_CODE (inner_dest) == STRICT_LOW_PART
13399 || GET_CODE (inner_dest) == SUBREG
13400 || GET_CODE (inner_dest) == ZERO_EXTRACT);
13401 inner_dest = XEXP (inner_dest, 0))
13402 ;
13403
13404 /* Verify that it was the set, and not a clobber that
13405 modified the register.
13406
13407 CC0 targets must be careful to maintain setter/user
13408 pairs. If we cannot delete the setter due to side
13409 effects, mark the user with an UNUSED note instead
13410 of deleting it. */
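/* For instance, if SET_SRC of TEM uses (cc0), the insn that set it,
   e.g. (set (cc0) (reg 65)), is located with prev_cc0_setter and
   deleted together with TEM below.  */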
13411
13412 if (set != 0 && ! side_effects_p (SET_SRC (set))
13413 && rtx_equal_p (XEXP (note, 0), inner_dest)
13414 #ifdef HAVE_cc0
13415 && (! reg_mentioned_p (cc0_rtx, SET_SRC (set))
13416 || ((cc0_setter = prev_cc0_setter (tem)) != NULL
13417 && sets_cc0_p (PATTERN (cc0_setter)) > 0))
13418 #endif
13419 )
13420 {
13421 /* Move the notes and links of TEM elsewhere.
13422 This might delete other dead insns recursively.
13423 First set the pattern to something that won't use
13424 any register. */
13425 rtx old_notes = REG_NOTES (tem);
13426
13427 PATTERN (tem) = pc_rtx;
13428 REG_NOTES (tem) = NULL;
13429
13430 distribute_notes (old_notes, tem, tem, NULL_RTX,
13431 NULL_RTX, NULL_RTX, NULL_RTX);
13432 distribute_links (LOG_LINKS (tem));
13433
13434 SET_INSN_DELETED (tem);
13435 if (tem == i2)
13436 i2 = NULL_RTX;
13437
13438 #ifdef HAVE_cc0
13439 /* Delete the setter too. */
13440 if (cc0_setter)
13441 {
13442 PATTERN (cc0_setter) = pc_rtx;
13443 old_notes = REG_NOTES (cc0_setter);
13444 REG_NOTES (cc0_setter) = NULL;
13445
13446 distribute_notes (old_notes, cc0_setter,
13447 cc0_setter, NULL_RTX,
13448 NULL_RTX, NULL_RTX, NULL_RTX);
13449 distribute_links (LOG_LINKS (cc0_setter));
13450
13451 SET_INSN_DELETED (cc0_setter);
13452 if (cc0_setter == i2)
13453 i2 = NULL_RTX;
13454 }
13455 #endif
13456 }
13457 else
13458 {
13459 PUT_REG_NOTE_KIND (note, REG_UNUSED);
13460
13461 /* If there isn't already a REG_UNUSED note, put one
13462 here. Do not place a REG_DEAD note, even if
13463 the register is also used here; that would not
13464 match the algorithm used in lifetime analysis
13465 and can cause the consistency check in the
13466 scheduler to fail. */
13467 if (! find_regno_note (tem, REG_UNUSED,
13468 REGNO (XEXP (note, 0))))
13469 place = tem;
13470 break;
13471 }
13472 }
13473 else if (reg_referenced_p (XEXP (note, 0), PATTERN (tem))
13474 || (CALL_P (tem)
13475 && find_reg_fusage (tem, USE, XEXP (note, 0))))
13476 {
13477 place = tem;
13478
13479 /* If we are doing a 3->2 combination, and we have a
13480 register which formerly died in i3 and was not used
13481 by i2, which now no longer dies in i3 and is used in
13482 i2 but does not die in i2, and place is between i2
13483 and i3, then we may need to move a link from place to
13484 i2. */
13485 if (i2 && DF_INSN_LUID (place) > DF_INSN_LUID (i2)
13486 && from_insn
13487 && DF_INSN_LUID (from_insn) > DF_INSN_LUID (i2)
13488 && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
13489 {
13490 rtx links = LOG_LINKS (place);
13491 LOG_LINKS (place) = 0;
13492 distribute_links (links);
13493 }
13494 break;
13495 }
13496
13497 if (tem == BB_HEAD (bb))
13498 break;
13499 }
13500
13501 }
13502
13503 /* If the register is set or already dead at PLACE, we needn't do
13504 anything with this note if it is still a REG_DEAD note.
13505 We check here if it is set at all, not if it is totally replaced,
13506 which is what `dead_or_set_p' checks, so also check for it being
13507 set partially. */
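/* For instance, a destination such as (zero_extract (reg 65) ...) or a
   STRICT_LOW_PART of (reg 65) modifies only part of the register;
   dead_or_set_p does not report that, so reg_bitfield_target_p is
   checked as well.  */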
13508
13509 if (place && REG_NOTE_KIND (note) == REG_DEAD)
13510 {
13511 unsigned int regno = REGNO (XEXP (note, 0));
13512 reg_stat_type *rsp = VEC_index (reg_stat_type, reg_stat, regno);
13513
13514 if (dead_or_set_p (place, XEXP (note, 0))
13515 || reg_bitfield_target_p (XEXP (note, 0), PATTERN (place)))
13516 {
13517 /* Unless the register previously died in PLACE, clear
13518 last_death. [I no longer understand why this is
13519 being done.] */
13520 if (rsp->last_death != place)
13521 rsp->last_death = 0;
13522 place = 0;
13523 }
13524 else
13525 rsp->last_death = place;
13526
13527 /* If this is a death note for a hard reg that is occupying
13528 multiple registers, ensure that we are still using all
13529 parts of the object. If we find a piece of the object
13530 that is unused, we must arrange for an appropriate REG_DEAD
13531 note to be added for it. However, we can't just emit a USE
13532 and tag the note to it, since the register might actually
13533 be dead; so we recurse, and the recursive call then finds
13534 the previous insn that used this register. */
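/* Illustrative example: for a REG_DEAD note on a multi-word value in
   hard regs 0 and 1, PLACE may mention only reg 0; the code below
   then arranges a separate REG_DEAD (or REG_UNUSED) note for
   reg 1.  */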
13535
13536 if (place && regno < FIRST_PSEUDO_REGISTER
13537 && hard_regno_nregs[regno][GET_MODE (XEXP (note, 0))] > 1)
13538 {
13539 unsigned int endregno = END_HARD_REGNO (XEXP (note, 0));
13540 int all_used = 1;
13541 unsigned int i;
13542
13543 for (i = regno; i < endregno; i++)
13544 if ((! refers_to_regno_p (i, i + 1, PATTERN (place), 0)
13545 && ! find_regno_fusage (place, USE, i))
13546 || dead_or_set_regno_p (place, i))
13547 all_used = 0;
13548
13549 if (! all_used)
13550 {
13551 /* Put only REG_DEAD notes for pieces that are
13552 not already dead or set. */
13553
13554 for (i = regno; i < endregno;
13555 i += hard_regno_nregs[i][reg_raw_mode[i]])
13556 {
13557 rtx piece = regno_reg_rtx[i];
13558 basic_block bb = this_basic_block;
13559
13560 if (! dead_or_set_p (place, piece)
13561 && ! reg_bitfield_target_p (piece,
13562 PATTERN (place)))
13563 {
13564 rtx new_note = alloc_reg_note (REG_DEAD, piece,
13565 NULL_RTX);
13566
13567 distribute_notes (new_note, place, place,
13568 NULL_RTX, NULL_RTX, NULL_RTX,
13569 NULL_RTX);
13570 }
13571 else if (! refers_to_regno_p (i, i + 1,
13572 PATTERN (place), 0)
13573 && ! find_regno_fusage (place, USE, i))
13574 for (tem = PREV_INSN (place); ;
13575 tem = PREV_INSN (tem))
13576 {
13577 if (!NONDEBUG_INSN_P (tem))
13578 {
13579 if (tem == BB_HEAD (bb))
13580 break;
13581 continue;
13582 }
13583 if (dead_or_set_p (tem, piece)
13584 || reg_bitfield_target_p (piece,
13585 PATTERN (tem)))
13586 {
13587 add_reg_note (tem, REG_UNUSED, piece);
13588 break;
13589 }
13590 }
13591
13592 }
13593
13594 place = 0;
13595 }
13596 }
13597 }
13598 break;
13599
13600 default:
13601 /* Any other notes should not be present at this point in the
13602 compilation. */
13603 gcc_unreachable ();
13604 }
13605
13606 if (place)
13607 {
13608 XEXP (note, 1) = REG_NOTES (place);
13609 REG_NOTES (place) = note;
13610 }
13611
13612 if (place2)
13613 add_reg_note (place2, REG_NOTE_KIND (note), XEXP (note, 0));
13614 }
13615 }
13616 \f
13617 /* Similarly to above, distribute the LOG_LINKS that used to be present on
13618 I3, I2, and I1 to new locations. This is also called to add a link
13619 pointing at I3 when I3's destination is changed. */
13620
13621 static void
13622 distribute_links (rtx links)
13623 {
13624 rtx link, next_link;
13625
13626 for (link = links; link; link = next_link)
13627 {
13628 rtx place = 0;
13629 rtx insn;
13630 rtx set, reg;
13631
13632 next_link = XEXP (link, 1);
13633
13634 /* If the insn that this link points to is a NOTE or isn't a single
13635 set, ignore it. In the latter case, it isn't clear what we
13636 can do other than ignore the link, since we can't tell which
13637 register it was for. Such links wouldn't be used by combine
13638 anyway.
13639
13640 It is not possible for the destination of the target of the link to
13641 have been changed by combine. The only way that could happen is
13642 if we replace I3, I2, and I1 by I3 and I2. But in that case the
13643 destination of I2 also remains unchanged. */
13644
13645 if (NOTE_P (XEXP (link, 0))
13646 || (set = single_set (XEXP (link, 0))) == 0)
13647 continue;
13648
13649 reg = SET_DEST (set);
13650 while (GET_CODE (reg) == SUBREG || GET_CODE (reg) == ZERO_EXTRACT
13651 || GET_CODE (reg) == STRICT_LOW_PART)
13652 reg = XEXP (reg, 0);
13653
13654 /* A LOG_LINK is defined as being placed on the first insn that uses
13655 a register and points to the insn that sets the register. Start
13656 searching at the next insn after the target of the link and stop
13657 when we reach a set of the register or the end of the basic block.
13658
13659 Note that this correctly handles the link that used to point from
13660 I3 to I2. Also note that not much searching is typically done here
13661 since most links don't point very far away. */
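/* Illustrative example: if the link points at an insn
   (set (reg 65) (plus (reg 66) (const_int 4))), the loop below moves
   the link to the first later insn in this block that uses (reg 65),
   or drops it if (reg 65) is set again or the block ends before any
   such use.  */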
13662
13663 for (insn = NEXT_INSN (XEXP (link, 0));
13664 (insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR
13665 || BB_HEAD (this_basic_block->next_bb) != insn));
13666 insn = NEXT_INSN (insn))
13667 if (DEBUG_INSN_P (insn))
13668 continue;
13669 else if (INSN_P (insn) && reg_overlap_mentioned_p (reg, PATTERN (insn)))
13670 {
13671 if (reg_referenced_p (reg, PATTERN (insn)))
13672 place = insn;
13673 break;
13674 }
13675 else if (CALL_P (insn)
13676 && find_reg_fusage (insn, USE, reg))
13677 {
13678 place = insn;
13679 break;
13680 }
13681 else if (INSN_P (insn) && reg_set_p (reg, insn))
13682 break;
13683
13684 /* If we found a place to put the link, place it there unless there
13685 is already a link to the same insn as LINK at that point. */
13686
13687 if (place)
13688 {
13689 rtx link2;
13690
13691 for (link2 = LOG_LINKS (place); link2; link2 = XEXP (link2, 1))
13692 if (XEXP (link2, 0) == XEXP (link, 0))
13693 break;
13694
13695 if (link2 == 0)
13696 {
13697 XEXP (link, 1) = LOG_LINKS (place);
13698 LOG_LINKS (place) = link;
13699
13700 /* Set added_links_insn to the earliest insn we added a
13701 link to. */
13702 if (added_links_insn == 0
13703 || DF_INSN_LUID (added_links_insn) > DF_INSN_LUID (place))
13704 added_links_insn = place;
13705 }
13706 }
13707 }
13708 }
13709 \f
13710 /* Subroutine of unmentioned_reg_p and callback from for_each_rtx.
13711 Check whether the expression pointed to by LOC is a register or
13712 memory, and if so return 1 if it isn't mentioned in the rtx EXPR.
13713 Otherwise return zero. */
13714
13715 static int
13716 unmentioned_reg_p_1 (rtx *loc, void *expr)
13717 {
13718 rtx x = *loc;
13719
13720 if (x != NULL_RTX
13721 && (REG_P (x) || MEM_P (x))
13722 && ! reg_mentioned_p (x, (rtx) expr))
13723 return 1;
13724 return 0;
13725 }
13726
13727 /* Check for any register or memory mentioned in EQUIV that is not
13728 mentioned in EXPR. This is used to restrict EQUIV to "specializations"
13729 of EXPR where some registers may have been replaced by constants. */
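/* For example, with EQUIV = (plus (reg 65) (reg 66)) and
   EXPR = (mult (reg 65) (const_int 4)) the result is true, because
   (reg 66) is not mentioned in EXPR.  */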
13730
13731 static bool
13732 unmentioned_reg_p (rtx equiv, rtx expr)
13733 {
13734 return for_each_rtx (&equiv, unmentioned_reg_p_1, expr);
13735 }
13736 \f
13737 void
13738 dump_combine_stats (FILE *file)
13739 {
13740 fprintf
13741 (file,
13742 ";; Combiner statistics: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n\n",
13743 combine_attempts, combine_merges, combine_extras, combine_successes);
13744 }
13745
13746 void
13747 dump_combine_total_stats (FILE *file)
13748 {
13749 fprintf
13750 (file,
13751 "\n;; Combiner totals: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n",
13752 total_attempts, total_merges, total_extras, total_successes);
13753 }
13754 \f
13755 static bool
13756 gate_handle_combine (void)
13757 {
13758 return (optimize > 0);
13759 }
13760
13761 /* Try combining insns through substitution. */
13762 static unsigned int
13763 rest_of_handle_combine (void)
13764 {
13765 int rebuild_jump_labels_after_combine;
13766
13767 df_set_flags (DF_LR_RUN_DCE + DF_DEFER_INSN_RESCAN);
13768 df_note_add_problem ();
13769 df_analyze ();
13770
13771 regstat_init_n_sets_and_refs ();
13772
13773 rebuild_jump_labels_after_combine
13774 = combine_instructions (get_insns (), max_reg_num ());
13775
13776 /* Combining insns may have turned an indirect jump into a
13777 direct jump. Rebuild the JUMP_LABEL fields of jumping
13778 instructions. */
13779 if (rebuild_jump_labels_after_combine)
13780 {
13781 timevar_push (TV_JUMP);
13782 rebuild_jump_labels (get_insns ());
13783 cleanup_cfg (0);
13784 timevar_pop (TV_JUMP);
13785 }
13786
13787 regstat_free_n_sets_and_refs ();
13788 return 0;
13789 }
13790
13791 struct rtl_opt_pass pass_combine =
13792 {
13793 {
13794 RTL_PASS,
13795 "combine", /* name */
13796 gate_handle_combine, /* gate */
13797 rest_of_handle_combine, /* execute */
13798 NULL, /* sub */
13799 NULL, /* next */
13800 0, /* static_pass_number */
13801 TV_COMBINE, /* tv_id */
13802 PROP_cfglayout, /* properties_required */
13803 0, /* properties_provided */
13804 0, /* properties_destroyed */
13805 0, /* todo_flags_start */
13806 TODO_dump_func |
13807 TODO_df_finish | TODO_verify_rtl_sharing |
13808 TODO_ggc_collect, /* todo_flags_finish */
13809 }
13810 };