1 /* Optimize by combining instructions for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 /* This module is essentially the "combiner" phase of the U. of Arizona
23 Portable Optimizer, but redone to work on our list-structured
24 representation for RTL instead of their string representation.
25
26 The LOG_LINKS of each insn identify the most recent assignment
27 to each REG used in the insn. It is a list of previous insns,
28 each of which contains a SET for a REG that is used in this insn
29 and not used or set in between. LOG_LINKs never cross basic blocks.
30 They were set up by the preceding pass (lifetime analysis).
31
32 We try to combine each pair of insns joined by a logical link.
33 We also try to combine triples of insns A, B and C when
34 C has a link back to B and B has a link back to A.
35
36 LOG_LINKS does not have links for uses of CC0. They are not
37 needed, because the insn that sets CC0 is always immediately
38 before the insn that tests it. So we always regard a branch
39 insn as having a logical link to the preceding insn. The same is true
40 for an insn explicitly using CC0.
41
42 We check (with use_crosses_set_p) to avoid combining in such a way
43 as to move a computation to a place where its value would be different.
44
45 Combination is done by mathematically substituting the values of the
46 previous insn(s) for the regs they set into the expressions in
47 the later insns that refer to these regs. If the result is a valid insn
48 for our target machine, according to the machine description,
49 we install it, delete the earlier insns, and update the data flow
50 information (LOG_LINKS and REG_NOTES) for what we did.
51
52 There are a few exceptions where the dataflow information isn't
53 completely updated (however this is only a local issue since it is
54 regenerated before the next pass that uses it):
55
56 - reg_live_length is not updated
57 - reg_n_refs is not adjusted in the rare case when a register is
58 no longer required in a computation
59 - there are extremely rare cases (see distribute_notes) when a
60 REG_DEAD note is lost
61 - a LOG_LINKS entry that refers to an insn with multiple SETs may be
62 removed because there is no way to know which register it was
63 linking
64
65 To simplify substitution, we combine only when the earlier insn(s)
66 consist of only a single assignment. To simplify updating afterward,
67 we never combine when a subroutine call appears in the middle.
68
69 Since we do not represent assignments to CC0 explicitly except when that
70 is all an insn does, there is no LOG_LINKS entry in an insn that uses
71 the condition code for the insn that set the condition code.
72 Fortunately, these two insns must be consecutive.
73 Therefore, every JUMP_INSN is taken to have an implicit logical link
74 to the preceding insn. This is not quite right, since non-jumps can
75 also use the condition code; but in practice such insns would not
76 combine anyway. */
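
/* Illustrative example (added for exposition; it is not part of the
   original sources and the register numbers are made up).  A minimal
   two-insn combination of the kind described above: given

	(insn A)  (set (reg:SI 100) (plus:SI (reg:SI 101) (const_int 4)))
	(insn B)  (set (reg:SI 102) (mult:SI (reg:SI 100) (reg:SI 103)))

   where B has a LOG_LINK back to A and reg 100 dies in B, try_combine
   substitutes A's SET_SRC into B, giving

	(set (reg:SI 102) (mult:SI (plus:SI (reg:SI 101) (const_int 4))
				   (reg:SI 103)))

   If the machine description recognizes that pattern, B is replaced by
   it and A is deleted; otherwise everything is undone.  */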
77
78 #include "config.h"
79 #include "system.h"
80 #include "coretypes.h"
81 #include "tm.h"
82 #include "rtl.h"
83 #include "tree.h"
84 #include "tm_p.h"
85 #include "flags.h"
86 #include "regs.h"
87 #include "hard-reg-set.h"
88 #include "basic-block.h"
89 #include "insn-config.h"
90 #include "function.h"
91 /* Include expr.h after insn-config.h so we get HAVE_conditional_move. */
92 #include "expr.h"
93 #include "insn-attr.h"
94 #include "recog.h"
95 #include "diagnostic-core.h"
96 #include "toplev.h"
97 #include "target.h"
98 #include "optabs.h"
99 #include "insn-codes.h"
100 #include "rtlhooks-def.h"
101 /* Include output.h for dump_file. */
102 #include "output.h"
103 #include "params.h"
104 #include "timevar.h"
105 #include "tree-pass.h"
106 #include "df.h"
107 #include "cgraph.h"
108
109 /* Number of attempts to combine instructions in this function. */
110
111 static int combine_attempts;
112
113 /* Number of attempts that got as far as substitution in this function. */
114
115 static int combine_merges;
116
117 /* Number of instructions combined with added SETs in this function. */
118
119 static int combine_extras;
120
121 /* Number of instructions combined in this function. */
122
123 static int combine_successes;
124
125 /* Totals over entire compilation. */
126
127 static int total_attempts, total_merges, total_extras, total_successes;
128
129 /* combine_instructions may try to replace the right hand side of the
130 second instruction with the value of an associated REG_EQUAL note
131 before throwing it at try_combine. That is problematic when there
132 is a REG_DEAD note for a register used in the old right hand side
133 and can cause distribute_notes to do wrong things. This is the
134 second instruction if it has been so modified, null otherwise. */
135
136 static rtx i2mod;
137
138 /* When I2MOD is nonnull, this is a copy of the old right hand side. */
139
140 static rtx i2mod_old_rhs;
141
142 /* When I2MOD is nonnull, this is a copy of the new right hand side. */
143
144 static rtx i2mod_new_rhs;
145 \f
146 typedef struct reg_stat_struct {
147 /* Record last point of death of (hard or pseudo) register n. */
148 rtx last_death;
149
150 /* Record last point of modification of (hard or pseudo) register n. */
151 rtx last_set;
152
153 /* The next group of fields allows the recording of the last value assigned
154 to (hard or pseudo) register n. We use this information to see if an
155 operation being processed is redundant given a prior operation performed
156 on the register. For example, an `and' with a constant is redundant if
157 all the zero bits are already known to be turned off.
158
159 We use an approach similar to that used by cse, but change it in the
160 following ways:
161
162 (1) We do not want to reinitialize at each label.
163 (2) It is useful, but not critical, to know the actual value assigned
164 to a register. Often just its form is helpful.
165
166 Therefore, we maintain the following fields:
167
168 last_set_value          the last value assigned
169 last_set_label          records the value of label_tick when the
170                         register was assigned
171 last_set_table_tick     records the value of label_tick when a
172                         value using the register is assigned
173 last_set_invalid        set to nonzero when it is not valid
174                         to use the value of this register in some
175                         register's value
176
177 To understand the usage of these tables, it is important to understand
178 the distinction between the value in last_set_value being valid and
179 the register being validly contained in some other expression in the
180 table.
181
182 (The next two parameters are out of date).
183
184 reg_stat[i].last_set_value is valid if it is nonzero, and either
185 reg_n_sets[i] is 1 or reg_stat[i].last_set_label == label_tick.
186
187 Register I may validly appear in any expression returned for the value
188 of another register if reg_n_sets[i] is 1. It may also appear in the
189 value for register J if reg_stat[j].last_set_invalid is zero, or
190 reg_stat[i].last_set_label < reg_stat[j].last_set_label.
191
192 If an expression is found in the table containing a register which may
193 not validly appear in an expression, the register is replaced by
194 something that won't match, (clobber (const_int 0)). */
195
196 /* Record last value assigned to (hard or pseudo) register n. */
197
198 rtx last_set_value;
199
200 /* Record the value of label_tick when an expression involving register n
201 is placed in last_set_value. */
202
203 int last_set_table_tick;
204
205 /* Record the value of label_tick when the value for register n is placed in
206 last_set_value. */
207
208 int last_set_label;
209
210 /* These fields are maintained in parallel with last_set_value and are
211 used to store the mode in which the register was last set, the bits
212 that were known to be zero when it was last set, and the number of
213 sign bits copies it was known to have when it was last set. */
214
215 unsigned HOST_WIDE_INT last_set_nonzero_bits;
216 char last_set_sign_bit_copies;
217 ENUM_BITFIELD(machine_mode) last_set_mode : 8;
218
219 /* Set nonzero if references to register n in expressions should not be
220 used. last_set_invalid is set nonzero when this register is being
221 assigned to and last_set_table_tick == label_tick. */
222
223 char last_set_invalid;
224
225 /* Some registers that are set more than once and used in more than one
226 basic block are nevertheless always set in similar ways. For example,
227 a QImode register may be loaded from memory in two places on a machine
228 where byte loads zero extend.
229
230 We record in the following fields if a register has some leading bits
231 that are always equal to the sign bit, and what we know about the
232 nonzero bits of a register, specifically which bits are known to be
233 zero.
234
235 If an entry is zero, it means that we don't know anything special. */
236
237 unsigned char sign_bit_copies;
238
239 unsigned HOST_WIDE_INT nonzero_bits;
240
241 /* Record the value of the label_tick when the last truncation
242 happened. The field truncated_to_mode is only valid if
243 truncation_label == label_tick. */
244
245 int truncation_label;
246
247 /* Record the last truncation seen for this register. If truncation
248 is not a nop to this mode we might be able to save an explicit
249 truncation if we know that value already contains a truncated
250 value. */
251
252 ENUM_BITFIELD(machine_mode) truncated_to_mode : 8;
253 } reg_stat_type;
254
255 DEF_VEC_O(reg_stat_type);
256 DEF_VEC_ALLOC_O(reg_stat_type,heap);
257
258 static VEC(reg_stat_type,heap) *reg_stat;
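
/* Illustrative example (added for exposition; not part of the original
   sources).  On a 32-bit target where byte loads zero extend, a pseudo
   that is only ever set by such loads ends up with
   reg_stat[REGNO].nonzero_bits == 0xff and sign_bit_copies == 24: the
   upper 24 bits are known to be zero, and hence equal to the (zero)
   sign bit.  A subsequent (and:SI (reg) (const_int 255)) on that pseudo
   can then be proven redundant, as described in the comment above.  */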
259
260 /* Record the luid of the last insn that invalidated memory
261 (anything that writes memory, and subroutine calls, but not pushes). */
262
263 static int mem_last_set;
264
265 /* Record the luid of the last CALL_INSN
266 so we can tell whether a potential combination crosses any calls. */
267
268 static int last_call_luid;
269
270 /* When `subst' is called, this is the insn that is being modified
271 (by combining in a previous insn). The PATTERN of this insn
272 is still the old pattern partially modified and it should not be
273 looked at, but this may be used to examine the successors of the insn
274 to judge whether a simplification is valid. */
275
276 static rtx subst_insn;
277
278 /* This is the lowest LUID that `subst' is currently dealing with.
279 get_last_value will not return a value if the register was set at or
280 after this LUID. If not for this mechanism, we could get confused if
281 I2 or I1 in try_combine were an insn that used the old value of a register
282 to obtain a new value. In that case, we might erroneously get the
283 new value of the register when we wanted the old one. */
284
285 static int subst_low_luid;
286
287 /* This contains any hard registers that are used in newpat; reg_dead_at_p
288 must consider all these registers to be always live. */
289
290 static HARD_REG_SET newpat_used_regs;
291
292 /* This is an insn to which a LOG_LINKS entry has been added. If this
293 insn is earlier than I2 or I3, combine should rescan starting at
294 that location. */
295
296 static rtx added_links_insn;
297
298 /* Basic block in which we are performing combines. */
299 static basic_block this_basic_block;
300 static bool optimize_this_for_speed_p;
301
302 \f
303 /* Length of the currently allocated uid_insn_cost array. */
304
305 static int max_uid_known;
306
307 /* The following array records the insn_rtx_cost for every insn
308 in the instruction stream. */
309
310 static int *uid_insn_cost;
311
312 /* The following array records the LOG_LINKS for every insn in the
313 instruction stream as an INSN_LIST rtx. */
314
315 static rtx *uid_log_links;
316
317 #define INSN_COST(INSN) (uid_insn_cost[INSN_UID (INSN)])
318 #define LOG_LINKS(INSN) (uid_log_links[INSN_UID (INSN)])
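
/* Illustrative usage (added for exposition; not part of the original
   sources; `process' is a hypothetical placeholder).  LOG_LINKS is an
   INSN_LIST, so the insns feeding INSN are visited like this: */
#if 0
  rtx links;
  for (links = LOG_LINKS (insn); links; links = XEXP (links, 1))
    process (XEXP (links, 0));
#endif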
319
320 /* Incremented for each basic block. */
321
322 static int label_tick;
323
324 /* Reset to label_tick for each extended basic block in scanning order. */
325
326 static int label_tick_ebb_start;
327
328 /* Mode used to compute significance in reg_stat[].nonzero_bits. It is the
329 largest integer mode that can fit in HOST_BITS_PER_WIDE_INT. */
330
331 static enum machine_mode nonzero_bits_mode;
332
333 /* Nonzero when reg_stat[].nonzero_bits and reg_stat[].sign_bit_copies can
334 be safely used. It is zero while computing them and after combine has
335 completed. Keeping it zero while they are being computed prevents
336 propagating values based on previously set values, which can be incorrect if a variable is modified
337 in a loop. */
338
339 static int nonzero_sign_valid;
340
341 \f
342 /* Record one modification to rtl structure
343 to be undone by storing old_contents into *where. */
344
345 enum undo_kind { UNDO_RTX, UNDO_INT, UNDO_MODE };
346
347 struct undo
348 {
349 struct undo *next;
350 enum undo_kind kind;
351 union { rtx r; int i; enum machine_mode m; } old_contents;
352 union { rtx *r; int *i; } where;
353 };
354
355 /* Record a bunch of changes to be undone, chained through the undos
356 list; the frees list holds undo records available for reuse.
357
358 other_insn is nonzero if we have modified some other insn in the process
359 of working on subst_insn. It must be verified too. */
360
361 struct undobuf
362 {
363 struct undo *undos;
364 struct undo *frees;
365 rtx other_insn;
366 };
367
368 static struct undobuf undobuf;
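
/* Illustrative sketch (added for exposition; not part of the original
   sources; the function name is hypothetical).  Undoing one recorded
   change means writing the saved old contents back through the saved
   location, dispatching on the record's kind: */
#if 0
static void
undo_one_change (struct undo *buf)
{
  switch (buf->kind)
    {
    case UNDO_RTX:
      *buf->where.r = buf->old_contents.r;
      break;
    case UNDO_INT:
      *buf->where.i = buf->old_contents.i;
      break;
    case UNDO_MODE:
      /* Put the saved mode back on the register that *where.r refers to.  */
      adjust_reg_mode (*buf->where.r, buf->old_contents.m);
      break;
    }
}
#endif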
369
370 /* Number of times the pseudo being substituted for
371 was found and replaced. */
372
373 static int n_occurrences;
374
375 static rtx reg_nonzero_bits_for_combine (const_rtx, enum machine_mode, const_rtx,
376 enum machine_mode,
377 unsigned HOST_WIDE_INT,
378 unsigned HOST_WIDE_INT *);
379 static rtx reg_num_sign_bit_copies_for_combine (const_rtx, enum machine_mode, const_rtx,
380 enum machine_mode,
381 unsigned int, unsigned int *);
382 static void do_SUBST (rtx *, rtx);
383 static void do_SUBST_INT (int *, int);
384 static void init_reg_last (void);
385 static void setup_incoming_promotions (rtx);
386 static void set_nonzero_bits_and_sign_copies (rtx, const_rtx, void *);
387 static int cant_combine_insn_p (rtx);
388 static int can_combine_p (rtx, rtx, rtx, rtx, rtx, rtx, rtx *, rtx *);
389 static int combinable_i3pat (rtx, rtx *, rtx, rtx, rtx, int, int, rtx *);
390 static int contains_muldiv (rtx);
391 static rtx try_combine (rtx, rtx, rtx, rtx, int *);
392 static void undo_all (void);
393 static void undo_commit (void);
394 static rtx *find_split_point (rtx *, rtx, bool);
395 static rtx subst (rtx, rtx, rtx, int, int);
396 static rtx combine_simplify_rtx (rtx, enum machine_mode, int);
397 static rtx simplify_if_then_else (rtx);
398 static rtx simplify_set (rtx);
399 static rtx simplify_logical (rtx);
400 static rtx expand_compound_operation (rtx);
401 static const_rtx expand_field_assignment (const_rtx);
402 static rtx make_extraction (enum machine_mode, rtx, HOST_WIDE_INT,
403 rtx, unsigned HOST_WIDE_INT, int, int, int);
404 static rtx extract_left_shift (rtx, int);
405 static rtx make_compound_operation (rtx, enum rtx_code);
406 static int get_pos_from_mask (unsigned HOST_WIDE_INT,
407 unsigned HOST_WIDE_INT *);
408 static rtx canon_reg_for_combine (rtx, rtx);
409 static rtx force_to_mode (rtx, enum machine_mode,
410 unsigned HOST_WIDE_INT, int);
411 static rtx if_then_else_cond (rtx, rtx *, rtx *);
412 static rtx known_cond (rtx, enum rtx_code, rtx, rtx);
413 static int rtx_equal_for_field_assignment_p (rtx, rtx);
414 static rtx make_field_assignment (rtx);
415 static rtx apply_distributive_law (rtx);
416 static rtx distribute_and_simplify_rtx (rtx, int);
417 static rtx simplify_and_const_int_1 (enum machine_mode, rtx,
418 unsigned HOST_WIDE_INT);
419 static rtx simplify_and_const_int (rtx, enum machine_mode, rtx,
420 unsigned HOST_WIDE_INT);
421 static int merge_outer_ops (enum rtx_code *, HOST_WIDE_INT *, enum rtx_code,
422 HOST_WIDE_INT, enum machine_mode, int *);
423 static rtx simplify_shift_const_1 (enum rtx_code, enum machine_mode, rtx, int);
424 static rtx simplify_shift_const (rtx, enum rtx_code, enum machine_mode, rtx,
425 int);
426 static int recog_for_combine (rtx *, rtx, rtx *);
427 static rtx gen_lowpart_for_combine (enum machine_mode, rtx);
428 static enum rtx_code simplify_comparison (enum rtx_code, rtx *, rtx *);
429 static void update_table_tick (rtx);
430 static void record_value_for_reg (rtx, rtx, rtx);
431 static void check_promoted_subreg (rtx, rtx);
432 static void record_dead_and_set_regs_1 (rtx, const_rtx, void *);
433 static void record_dead_and_set_regs (rtx);
434 static int get_last_value_validate (rtx *, rtx, int, int);
435 static rtx get_last_value (const_rtx);
436 static int use_crosses_set_p (const_rtx, int);
437 static void reg_dead_at_p_1 (rtx, const_rtx, void *);
438 static int reg_dead_at_p (rtx, rtx);
439 static void move_deaths (rtx, rtx, int, rtx, rtx *);
440 static int reg_bitfield_target_p (rtx, rtx);
441 static void distribute_notes (rtx, rtx, rtx, rtx, rtx, rtx, rtx);
442 static void distribute_links (rtx);
443 static void mark_used_regs_combine (rtx);
444 static void record_promoted_value (rtx, rtx);
445 static int unmentioned_reg_p_1 (rtx *, void *);
446 static bool unmentioned_reg_p (rtx, rtx);
447 static int record_truncated_value (rtx *, void *);
448 static void record_truncated_values (rtx *, void *);
449 static bool reg_truncated_to_mode (enum machine_mode, const_rtx);
450 static rtx gen_lowpart_or_truncate (enum machine_mode, rtx);
451 \f
452
453 /* It is not safe to use ordinary gen_lowpart in combine.
454 See comments in gen_lowpart_for_combine. */
455 #undef RTL_HOOKS_GEN_LOWPART
456 #define RTL_HOOKS_GEN_LOWPART gen_lowpart_for_combine
457
458 /* Our implementation of gen_lowpart never emits a new pseudo. */
459 #undef RTL_HOOKS_GEN_LOWPART_NO_EMIT
460 #define RTL_HOOKS_GEN_LOWPART_NO_EMIT gen_lowpart_for_combine
461
462 #undef RTL_HOOKS_REG_NONZERO_REG_BITS
463 #define RTL_HOOKS_REG_NONZERO_REG_BITS reg_nonzero_bits_for_combine
464
465 #undef RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES
466 #define RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES reg_num_sign_bit_copies_for_combine
467
468 #undef RTL_HOOKS_REG_TRUNCATED_TO_MODE
469 #define RTL_HOOKS_REG_TRUNCATED_TO_MODE reg_truncated_to_mode
470
471 static const struct rtl_hooks combine_rtl_hooks = RTL_HOOKS_INITIALIZER;
472
473 \f
474 /* Try to split PATTERN found in INSN. This returns NULL_RTX if
475 PATTERN cannot be split. Otherwise, it returns an insn sequence.
476 This is a wrapper around split_insns which ensures that the
477 reg_stat vector is made larger if the splitter creates a new
478 register. */
479
480 static rtx
481 combine_split_insns (rtx pattern, rtx insn)
482 {
483 rtx ret;
484 unsigned int nregs;
485
486 ret = split_insns (pattern, insn);
487 nregs = max_reg_num ();
488 if (nregs > VEC_length (reg_stat_type, reg_stat))
489 VEC_safe_grow_cleared (reg_stat_type, heap, reg_stat, nregs);
490 return ret;
491 }
492
493 /* This is used by find_single_use to locate an rtx in LOC that
494 contains exactly one use of DEST, which is typically either a REG
495 or CC0. It returns a pointer to the innermost rtx expression
496 containing DEST. Appearances of DEST that are being used to
497 totally replace it are not counted. */
498
499 static rtx *
500 find_single_use_1 (rtx dest, rtx *loc)
501 {
502 rtx x = *loc;
503 enum rtx_code code = GET_CODE (x);
504 rtx *result = NULL;
505 rtx *this_result;
506 int i;
507 const char *fmt;
508
509 switch (code)
510 {
511 case CONST_INT:
512 case CONST:
513 case LABEL_REF:
514 case SYMBOL_REF:
515 case CONST_DOUBLE:
516 case CONST_VECTOR:
517 case CLOBBER:
518 return 0;
519
520 case SET:
521 /* If the destination is anything other than CC0, PC, a REG or a SUBREG
522 of a REG that occupies all of the REG, the insn uses DEST if
523 it is mentioned in the destination or the source. Otherwise, we
524 need only check the source. */
525 if (GET_CODE (SET_DEST (x)) != CC0
526 && GET_CODE (SET_DEST (x)) != PC
527 && !REG_P (SET_DEST (x))
528 && ! (GET_CODE (SET_DEST (x)) == SUBREG
529 && REG_P (SUBREG_REG (SET_DEST (x)))
530 && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (x))))
531 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
532 == ((GET_MODE_SIZE (GET_MODE (SET_DEST (x)))
533 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))))
534 break;
535
536 return find_single_use_1 (dest, &SET_SRC (x));
537
538 case MEM:
539 case SUBREG:
540 return find_single_use_1 (dest, &XEXP (x, 0));
541
542 default:
543 break;
544 }
545
546 /* If it wasn't one of the common cases above, check each expression and
547 vector of this code. Look for a unique usage of DEST. */
548
549 fmt = GET_RTX_FORMAT (code);
550 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
551 {
552 if (fmt[i] == 'e')
553 {
554 if (dest == XEXP (x, i)
555 || (REG_P (dest) && REG_P (XEXP (x, i))
556 && REGNO (dest) == REGNO (XEXP (x, i))))
557 this_result = loc;
558 else
559 this_result = find_single_use_1 (dest, &XEXP (x, i));
560
561 if (result == NULL)
562 result = this_result;
563 else if (this_result)
564 /* Duplicate usage. */
565 return NULL;
566 }
567 else if (fmt[i] == 'E')
568 {
569 int j;
570
571 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
572 {
573 if (XVECEXP (x, i, j) == dest
574 || (REG_P (dest)
575 && REG_P (XVECEXP (x, i, j))
576 && REGNO (XVECEXP (x, i, j)) == REGNO (dest)))
577 this_result = loc;
578 else
579 this_result = find_single_use_1 (dest, &XVECEXP (x, i, j));
580
581 if (result == NULL)
582 result = this_result;
583 else if (this_result)
584 return NULL;
585 }
586 }
587 }
588
589 return result;
590 }
591
592
593 /* See if DEST, produced in INSN, is used only a single time in the
594 sequel. If so, return a pointer to the innermost rtx expression in which
595 it is used.
596
597 If PLOC is nonzero, *PLOC is set to the insn containing the single use.
598
599 If DEST is cc0_rtx, we look only at the next insn. In that case, we don't
600 care about REG_DEAD notes or LOG_LINKS.
601
602 Otherwise, we find the single use by finding an insn that has a
603 LOG_LINKS pointing at INSN and has a REG_DEAD note for DEST. If DEST is
604 only referenced once in that insn, we know that it must be the first
605 and last insn referencing DEST. */
606
607 static rtx *
608 find_single_use (rtx dest, rtx insn, rtx *ploc)
609 {
610 basic_block bb;
611 rtx next;
612 rtx *result;
613 rtx link;
614
615 #ifdef HAVE_cc0
616 if (dest == cc0_rtx)
617 {
618 next = NEXT_INSN (insn);
619 if (next == 0
620 || (!NONJUMP_INSN_P (next) && !JUMP_P (next)))
621 return 0;
622
623 result = find_single_use_1 (dest, &PATTERN (next));
624 if (result && ploc)
625 *ploc = next;
626 return result;
627 }
628 #endif
629
630 if (!REG_P (dest))
631 return 0;
632
633 bb = BLOCK_FOR_INSN (insn);
634 for (next = NEXT_INSN (insn);
635 next && BLOCK_FOR_INSN (next) == bb;
636 next = NEXT_INSN (next))
637 if (INSN_P (next) && dead_or_set_p (next, dest))
638 {
639 for (link = LOG_LINKS (next); link; link = XEXP (link, 1))
640 if (XEXP (link, 0) == insn)
641 break;
642
643 if (link)
644 {
645 result = find_single_use_1 (dest, &PATTERN (next));
646 if (ploc)
647 *ploc = next;
648 return result;
649 }
650 }
651
652 return 0;
653 }
654 \f
655 /* Substitute NEWVAL, an rtx expression, into INTO, a place in some
656 insn. The substitution can be undone by undo_all. If INTO is already
657 set to NEWVAL, do not record this change. Because computing NEWVAL might
658 also call SUBST, we have to compute it before we put anything into
659 the undo table. */
660
661 static void
662 do_SUBST (rtx *into, rtx newval)
663 {
664 struct undo *buf;
665 rtx oldval = *into;
666
667 if (oldval == newval)
668 return;
669
670 /* We'd like to catch as many invalid transformations here as
671 possible. Unfortunately, there are way too many mode changes
672 that are perfectly valid, so we'd waste too much effort for
673 little gain doing the checks here. Focus on catching invalid
674 transformations involving integer constants. */
675 if (GET_MODE_CLASS (GET_MODE (oldval)) == MODE_INT
676 && CONST_INT_P (newval))
677 {
678 /* Sanity check that we're replacing oldval with a CONST_INT
679 that is a valid sign-extension for the original mode. */
680 gcc_assert (INTVAL (newval)
681 == trunc_int_for_mode (INTVAL (newval), GET_MODE (oldval)));
682
683 /* Replacing the operand of a SUBREG or a ZERO_EXTEND with a
684 CONST_INT is not valid, because after the replacement, the
685 original mode would be gone. Unfortunately, we can't tell
686 when do_SUBST is called to replace the operand thereof, so we
687 perform this test on oldval instead, checking whether an
688 invalid replacement took place before we got here. */
689 gcc_assert (!(GET_CODE (oldval) == SUBREG
690 && CONST_INT_P (SUBREG_REG (oldval))));
691 gcc_assert (!(GET_CODE (oldval) == ZERO_EXTEND
692 && CONST_INT_P (XEXP (oldval, 0))));
693 }
694
695 if (undobuf.frees)
696 buf = undobuf.frees, undobuf.frees = buf->next;
697 else
698 buf = XNEW (struct undo);
699
700 buf->kind = UNDO_RTX;
701 buf->where.r = into;
702 buf->old_contents.r = oldval;
703 *into = newval;
704
705 buf->next = undobuf.undos, undobuf.undos = buf;
706 }
707
708 #define SUBST(INTO, NEWVAL) do_SUBST(&(INTO), (NEWVAL))
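
/* Illustrative usage (added for exposition; not part of the original
   sources; `x' and `simplified_op0' are hypothetical).  A typical call
   records the old operand so undo_all can revert the change if the
   combination is later abandoned: */
#if 0
  SUBST (XEXP (x, 0), simplified_op0);
#endif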
709
710 /* Similar to SUBST, but NEWVAL is an int expression. Note that using this
711 to substitute a HOST_WIDE_INT value (including a CONST_INT) is
712 not safe. */
713
714 static void
715 do_SUBST_INT (int *into, int newval)
716 {
717 struct undo *buf;
718 int oldval = *into;
719
720 if (oldval == newval)
721 return;
722
723 if (undobuf.frees)
724 buf = undobuf.frees, undobuf.frees = buf->next;
725 else
726 buf = XNEW (struct undo);
727
728 buf->kind = UNDO_INT;
729 buf->where.i = into;
730 buf->old_contents.i = oldval;
731 *into = newval;
732
733 buf->next = undobuf.undos, undobuf.undos = buf;
734 }
735
736 #define SUBST_INT(INTO, NEWVAL) do_SUBST_INT(&(INTO), (NEWVAL))
737
738 /* Similar to SUBST, but just substitute the mode. This is used when
739 changing the mode of a pseudo-register, so that any other
740 references to the entry in the regno_reg_rtx array will change as
741 well. */
742
743 static void
744 do_SUBST_MODE (rtx *into, enum machine_mode newval)
745 {
746 struct undo *buf;
747 enum machine_mode oldval = GET_MODE (*into);
748
749 if (oldval == newval)
750 return;
751
752 if (undobuf.frees)
753 buf = undobuf.frees, undobuf.frees = buf->next;
754 else
755 buf = XNEW (struct undo);
756
757 buf->kind = UNDO_MODE;
758 buf->where.r = into;
759 buf->old_contents.m = oldval;
760 adjust_reg_mode (*into, newval);
761
762 buf->next = undobuf.undos, undobuf.undos = buf;
763 }
764
765 #define SUBST_MODE(INTO, NEWVAL) do_SUBST_MODE(&(INTO), (NEWVAL))
766 \f
767 /* Subroutine of try_combine. Determine whether the combine replacement
768 patterns NEWPAT, NEWI2PAT and NEWOTHERPAT are cheaper according to
769 insn_rtx_cost than the original instruction sequence I0, I1, I2, I3 and
770 undobuf.other_insn. Note that I1 and/or NEWI2PAT may be NULL_RTX.
771 NEWOTHERPAT and undobuf.other_insn may also both be NULL_RTX. This
772 function returns false if the costs of all instructions can be
773 estimated and the replacements are more expensive than the original
774 sequence. */
775
776 static bool
777 combine_validate_cost (rtx i0, rtx i1, rtx i2, rtx i3, rtx newpat,
778 rtx newi2pat, rtx newotherpat)
779 {
780 int i0_cost, i1_cost, i2_cost, i3_cost;
781 int new_i2_cost, new_i3_cost;
782 int old_cost, new_cost;
783
784 /* Lookup the original insn_rtx_costs. */
785 i2_cost = INSN_COST (i2);
786 i3_cost = INSN_COST (i3);
787
788 if (i1)
789 {
790 i1_cost = INSN_COST (i1);
791 if (i0)
792 {
793 i0_cost = INSN_COST (i0);
794 old_cost = (i0_cost > 0 && i1_cost > 0 && i2_cost > 0 && i3_cost > 0
795 ? i0_cost + i1_cost + i2_cost + i3_cost : 0);
796 }
797 else
798 {
799 old_cost = (i1_cost > 0 && i2_cost > 0 && i3_cost > 0
800 ? i1_cost + i2_cost + i3_cost : 0);
801 i0_cost = 0;
802 }
803 }
804 else
805 {
806 old_cost = (i2_cost > 0 && i3_cost > 0) ? i2_cost + i3_cost : 0;
807 i1_cost = i0_cost = 0;
808 }
809
810 /* Calculate the replacement insn_rtx_costs. */
811 new_i3_cost = insn_rtx_cost (newpat, optimize_this_for_speed_p);
812 if (newi2pat)
813 {
814 new_i2_cost = insn_rtx_cost (newi2pat, optimize_this_for_speed_p);
815 new_cost = (new_i2_cost > 0 && new_i3_cost > 0)
816 ? new_i2_cost + new_i3_cost : 0;
817 }
818 else
819 {
820 new_cost = new_i3_cost;
821 new_i2_cost = 0;
822 }
823
824 if (undobuf.other_insn)
825 {
826 int old_other_cost, new_other_cost;
827
828 old_other_cost = INSN_COST (undobuf.other_insn);
829 new_other_cost = insn_rtx_cost (newotherpat, optimize_this_for_speed_p);
830 if (old_other_cost > 0 && new_other_cost > 0)
831 {
832 old_cost += old_other_cost;
833 new_cost += new_other_cost;
834 }
835 else
836 old_cost = 0;
837 }
838
839 /* Disallow this recombination if both new_cost and old_cost are
840 greater than zero, and new_cost is greater than old_cost. */
841 if (old_cost > 0
842 && new_cost > old_cost)
843 {
844 if (dump_file)
845 {
846 if (i0)
847 {
848 fprintf (dump_file,
849 "rejecting combination of insns %d, %d, %d and %d\n",
850 INSN_UID (i0), INSN_UID (i1), INSN_UID (i2),
851 INSN_UID (i3));
852 fprintf (dump_file, "original costs %d + %d + %d + %d = %d\n",
853 i0_cost, i1_cost, i2_cost, i3_cost, old_cost);
854 }
855 else if (i1)
856 {
857 fprintf (dump_file,
858 "rejecting combination of insns %d, %d and %d\n",
859 INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
860 fprintf (dump_file, "original costs %d + %d + %d = %d\n",
861 i1_cost, i2_cost, i3_cost, old_cost);
862 }
863 else
864 {
865 fprintf (dump_file,
866 "rejecting combination of insns %d and %d\n",
867 INSN_UID (i2), INSN_UID (i3));
868 fprintf (dump_file, "original costs %d + %d = %d\n",
869 i2_cost, i3_cost, old_cost);
870 }
871
872 if (newi2pat)
873 {
874 fprintf (dump_file, "replacement costs %d + %d = %d\n",
875 new_i2_cost, new_i3_cost, new_cost);
876 }
877 else
878 fprintf (dump_file, "replacement cost %d\n", new_cost);
879 }
880
881 return false;
882 }
883
884 /* Update the uid_insn_cost array with the replacement costs. */
885 INSN_COST (i2) = new_i2_cost;
886 INSN_COST (i3) = new_i3_cost;
887 if (i1)
888 INSN_COST (i1) = 0;
889
890 return true;
891 }
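
/* Illustrative example (added for exposition; not part of the original
   sources).  If I2 and I3 each have an insn_rtx_cost of 4 (old_cost = 8)
   and the combination yields a single replacement pattern costing 12,
   the combination is rejected; a replacement costing 8 or less is
   accepted.  A cost of 0 means "unknown", in which case no comparison
   is made and the combination is allowed.  */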
892
893
894 /* Delete any insns that copy a register to itself. */
895
896 static void
897 delete_noop_moves (void)
898 {
899 rtx insn, next;
900 basic_block bb;
901
902 FOR_EACH_BB (bb)
903 {
904 for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = next)
905 {
906 next = NEXT_INSN (insn);
907 if (INSN_P (insn) && noop_move_p (insn))
908 {
909 if (dump_file)
910 fprintf (dump_file, "deleting noop move %d\n", INSN_UID (insn));
911
912 delete_insn_and_edges (insn);
913 }
914 }
915 }
916 }
917
918 \f
919 /* Fill in log links field for all insns. */
920
921 static void
922 create_log_links (void)
923 {
924 basic_block bb;
925 rtx *next_use, insn;
926 df_ref *def_vec, *use_vec;
927
928 next_use = XCNEWVEC (rtx, max_reg_num ());
929
930 /* Pass through each block from the end, recording the uses of each
931 register and establishing log links when def is encountered.
932 Note that we do not clear next_use array in order to save time,
933 so we have to test whether the use is in the same basic block as def.
934
935 There are a few cases below when we do not consider the definition or
936 usage -- these are carried over from what the original flow.c did. Don't ask me why it is
937 done this way; I don't know and if it works, I don't want to know. */
938
939 FOR_EACH_BB (bb)
940 {
941 FOR_BB_INSNS_REVERSE (bb, insn)
942 {
943 if (!NONDEBUG_INSN_P (insn))
944 continue;
945
946 /* Log links are created only once. */
947 gcc_assert (!LOG_LINKS (insn));
948
949 for (def_vec = DF_INSN_DEFS (insn); *def_vec; def_vec++)
950 {
951 df_ref def = *def_vec;
952 int regno = DF_REF_REGNO (def);
953 rtx use_insn;
954
955 if (!next_use[regno])
956 continue;
957
958 /* Do not consider if it is pre/post modification in MEM. */
959 if (DF_REF_FLAGS (def) & DF_REF_PRE_POST_MODIFY)
960 continue;
961
962 /* Do not make the log link for frame pointer. */
963 if ((regno == FRAME_POINTER_REGNUM
964 && (! reload_completed || frame_pointer_needed))
965 #if !HARD_FRAME_POINTER_IS_FRAME_POINTER
966 || (regno == HARD_FRAME_POINTER_REGNUM
967 && (! reload_completed || frame_pointer_needed))
968 #endif
969 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
970 || (regno == ARG_POINTER_REGNUM && fixed_regs[regno])
971 #endif
972 )
973 continue;
974
975 use_insn = next_use[regno];
976 if (BLOCK_FOR_INSN (use_insn) == bb)
977 {
978 /* flow.c claimed:
979
980 We don't build a LOG_LINK for hard registers contained
981 in ASM_OPERANDs. If these registers get replaced,
982 we might wind up changing the semantics of the insn,
983 even if reload can make what appear to be valid
984 assignments later. */
985 if (regno >= FIRST_PSEUDO_REGISTER
986 || asm_noperands (PATTERN (use_insn)) < 0)
987 {
988 /* Don't add duplicate links between instructions. */
989 rtx links;
990 for (links = LOG_LINKS (use_insn); links;
991 links = XEXP (links, 1))
992 if (insn == XEXP (links, 0))
993 break;
994
995 if (!links)
996 LOG_LINKS (use_insn) =
997 alloc_INSN_LIST (insn, LOG_LINKS (use_insn));
998 }
999 }
1000 next_use[regno] = NULL_RTX;
1001 }
1002
1003 for (use_vec = DF_INSN_USES (insn); *use_vec; use_vec++)
1004 {
1005 df_ref use = *use_vec;
1006 int regno = DF_REF_REGNO (use);
1007
1008 /* Do not consider the usage of the stack pointer
1009 by function call. */
1010 if (DF_REF_FLAGS (use) & DF_REF_CALL_STACK_USAGE)
1011 continue;
1012
1013 next_use[regno] = insn;
1014 }
1015 }
1016 }
1017
1018 free (next_use);
1019 }
1020
1021 /* Clear LOG_LINKS fields of insns. */
1022
1023 static void
1024 clear_log_links (void)
1025 {
1026 rtx insn;
1027
1028 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
1029 if (INSN_P (insn))
1030 free_INSN_LIST_list (&LOG_LINKS (insn));
1031 }
1032
1033 /* Walk the LOG_LINKS of insn B to see if we find a reference to A. Return
1034 true if we found a LOG_LINK that proves that A feeds B. This only works
1035 if there are no instructions between A and B which could have a link
1036 depending on A, since in that case we would not record a link for B. */
1037
1038 static bool
1039 insn_a_feeds_b (rtx a, rtx b)
1040 {
1041 rtx links;
1042 for (links = LOG_LINKS (b); links; links = XEXP (links, 1))
1043 if (XEXP (links, 0) == a)
1044 return true;
1045 return false;
1046 }
1047 \f
1048 /* Main entry point for combiner. F is the first insn of the function.
1049 NREGS is the first unused pseudo-reg number.
1050
1051 Return nonzero if the combiner has turned an indirect jump
1052 instruction into a direct jump. */
1053 static int
1054 combine_instructions (rtx f, unsigned int nregs)
1055 {
1056 rtx insn, next;
1057 #ifdef HAVE_cc0
1058 rtx prev;
1059 #endif
1060 rtx links, nextlinks;
1061 rtx first;
1062 basic_block last_bb;
1063
1064 int new_direct_jump_p = 0;
1065
1066 for (first = f; first && !INSN_P (first); )
1067 first = NEXT_INSN (first);
1068 if (!first)
1069 return 0;
1070
1071 combine_attempts = 0;
1072 combine_merges = 0;
1073 combine_extras = 0;
1074 combine_successes = 0;
1075
1076 rtl_hooks = combine_rtl_hooks;
1077
1078 VEC_safe_grow_cleared (reg_stat_type, heap, reg_stat, nregs);
1079
1080 init_recog_no_volatile ();
1081
1082 /* Allocate array for insn info. */
1083 max_uid_known = get_max_uid ();
1084 uid_log_links = XCNEWVEC (rtx, max_uid_known + 1);
1085 uid_insn_cost = XCNEWVEC (int, max_uid_known + 1);
1086
1087 nonzero_bits_mode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);
1088
1089 /* Don't use reg_stat[].nonzero_bits when computing it. This can cause
1090 problems when, for example, we have j <<= 1 in a loop. */
1091
1092 nonzero_sign_valid = 0;
1093 label_tick = label_tick_ebb_start = 1;
1094
1095 /* Scan all SETs and see if we can deduce anything about what
1096 bits are known to be zero for some registers and how many copies
1097 of the sign bit are known to exist for those registers.
1098
1099 Also set any known values so that we can use it while searching
1100 for what bits are known to be set. */
1101
1102 setup_incoming_promotions (first);
1103 /* Allow the entry block and the first block to fall into the same EBB.
1104 Conceptually the incoming promotions are assigned to the entry block. */
1105 last_bb = ENTRY_BLOCK_PTR;
1106
1107 create_log_links ();
1108 FOR_EACH_BB (this_basic_block)
1109 {
1110 optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
1111 last_call_luid = 0;
1112 mem_last_set = -1;
1113
1114 label_tick++;
1115 if (!single_pred_p (this_basic_block)
1116 || single_pred (this_basic_block) != last_bb)
1117 label_tick_ebb_start = label_tick;
1118 last_bb = this_basic_block;
1119
1120 FOR_BB_INSNS (this_basic_block, insn)
1121 if (INSN_P (insn) && BLOCK_FOR_INSN (insn))
1122 {
1123 subst_low_luid = DF_INSN_LUID (insn);
1124 subst_insn = insn;
1125
1126 note_stores (PATTERN (insn), set_nonzero_bits_and_sign_copies,
1127 insn);
1128 record_dead_and_set_regs (insn);
1129
1130 #ifdef AUTO_INC_DEC
1131 for (links = REG_NOTES (insn); links; links = XEXP (links, 1))
1132 if (REG_NOTE_KIND (links) == REG_INC)
1133 set_nonzero_bits_and_sign_copies (XEXP (links, 0), NULL_RTX,
1134 insn);
1135 #endif
1136
1137 /* Record the current insn_rtx_cost of this instruction. */
1138 if (NONJUMP_INSN_P (insn))
1139 INSN_COST (insn) = insn_rtx_cost (PATTERN (insn),
1140 optimize_this_for_speed_p);
1141 if (dump_file)
1142 fprintf(dump_file, "insn_cost %d: %d\n",
1143 INSN_UID (insn), INSN_COST (insn));
1144 }
1145 }
1146
1147 nonzero_sign_valid = 1;
1148
1149 /* Now scan all the insns in forward order. */
1150 label_tick = label_tick_ebb_start = 1;
1151 init_reg_last ();
1152 setup_incoming_promotions (first);
1153 last_bb = ENTRY_BLOCK_PTR;
1154
1155 FOR_EACH_BB (this_basic_block)
1156 {
1157 optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
1158 last_call_luid = 0;
1159 mem_last_set = -1;
1160
1161 label_tick++;
1162 if (!single_pred_p (this_basic_block)
1163 || single_pred (this_basic_block) != last_bb)
1164 label_tick_ebb_start = label_tick;
1165 last_bb = this_basic_block;
1166
1167 rtl_profile_for_bb (this_basic_block);
1168 for (insn = BB_HEAD (this_basic_block);
1169 insn != NEXT_INSN (BB_END (this_basic_block));
1170 insn = next ? next : NEXT_INSN (insn))
1171 {
1172 next = 0;
1173 if (NONDEBUG_INSN_P (insn))
1174 {
1175 /* See if we know about function return values before this
1176 insn based upon SUBREG flags. */
1177 check_promoted_subreg (insn, PATTERN (insn));
1178
1179 /* See if we can find hard regs or subregs of pseudos in
1180 narrower modes. This could help turn TRUNCATEs
1181 into SUBREGs. */
1182 note_uses (&PATTERN (insn), record_truncated_values, NULL);
1183
1184 /* Try this insn with each insn it links back to. */
1185
1186 for (links = LOG_LINKS (insn); links; links = XEXP (links, 1))
1187 if ((next = try_combine (insn, XEXP (links, 0), NULL_RTX,
1188 NULL_RTX, &new_direct_jump_p)) != 0)
1189 goto retry;
1190
1191 /* Try each sequence of three linked insns ending with this one. */
1192
1193 for (links = LOG_LINKS (insn); links; links = XEXP (links, 1))
1194 {
1195 rtx link = XEXP (links, 0);
1196
1197 /* If the linked insn has been replaced by a note, then there
1198 is no point in pursuing this chain any further. */
1199 if (NOTE_P (link))
1200 continue;
1201
1202 for (nextlinks = LOG_LINKS (link);
1203 nextlinks;
1204 nextlinks = XEXP (nextlinks, 1))
1205 if ((next = try_combine (insn, link, XEXP (nextlinks, 0),
1206 NULL_RTX,
1207 &new_direct_jump_p)) != 0)
1208 goto retry;
1209 }
1210
1211 #ifdef HAVE_cc0
1212 /* Try to combine a jump insn that uses CC0
1213 with a preceding insn that sets CC0, and maybe with its
1214 logical predecessor as well.
1215 This is how we make decrement-and-branch insns.
1216 We need this special code because data flow connections
1217 via CC0 do not get entered in LOG_LINKS. */
1218
1219 if (JUMP_P (insn)
1220 && (prev = prev_nonnote_insn (insn)) != 0
1221 && NONJUMP_INSN_P (prev)
1222 && sets_cc0_p (PATTERN (prev)))
1223 {
1224 if ((next = try_combine (insn, prev, NULL_RTX, NULL_RTX,
1225 &new_direct_jump_p)) != 0)
1226 goto retry;
1227
1228 for (nextlinks = LOG_LINKS (prev); nextlinks;
1229 nextlinks = XEXP (nextlinks, 1))
1230 if ((next = try_combine (insn, prev, XEXP (nextlinks, 0),
1231 NULL_RTX,
1232 &new_direct_jump_p)) != 0)
1233 goto retry;
1234 }
1235
1236 /* Do the same for an insn that explicitly references CC0. */
1237 if (NONJUMP_INSN_P (insn)
1238 && (prev = prev_nonnote_insn (insn)) != 0
1239 && NONJUMP_INSN_P (prev)
1240 && sets_cc0_p (PATTERN (prev))
1241 && GET_CODE (PATTERN (insn)) == SET
1242 && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (insn))))
1243 {
1244 if ((next = try_combine (insn, prev, NULL_RTX, NULL_RTX,
1245 &new_direct_jump_p)) != 0)
1246 goto retry;
1247
1248 for (nextlinks = LOG_LINKS (prev); nextlinks;
1249 nextlinks = XEXP (nextlinks, 1))
1250 if ((next = try_combine (insn, prev, XEXP (nextlinks, 0),
1251 NULL_RTX,
1252 &new_direct_jump_p)) != 0)
1253 goto retry;
1254 }
1255
1256 /* Finally, see if any of the insns that this insn links to
1257 explicitly references CC0. If so, try this insn, that insn,
1258 and its predecessor if it sets CC0. */
1259 for (links = LOG_LINKS (insn); links; links = XEXP (links, 1))
1260 if (NONJUMP_INSN_P (XEXP (links, 0))
1261 && GET_CODE (PATTERN (XEXP (links, 0))) == SET
1262 && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (XEXP (links, 0))))
1263 && (prev = prev_nonnote_insn (XEXP (links, 0))) != 0
1264 && NONJUMP_INSN_P (prev)
1265 && sets_cc0_p (PATTERN (prev))
1266 && (next = try_combine (insn, XEXP (links, 0),
1267 prev, NULL_RTX,
1268 &new_direct_jump_p)) != 0)
1269 goto retry;
1270 #endif
1271
1272 /* Try combining an insn with two different insns whose results it
1273 uses. */
1274 for (links = LOG_LINKS (insn); links; links = XEXP (links, 1))
1275 for (nextlinks = XEXP (links, 1); nextlinks;
1276 nextlinks = XEXP (nextlinks, 1))
1277 if ((next = try_combine (insn, XEXP (links, 0),
1278 XEXP (nextlinks, 0), NULL_RTX,
1279 &new_direct_jump_p)) != 0)
1280 goto retry;
1281
1282 /* Try four-instruction combinations. */
1283 for (links = LOG_LINKS (insn); links; links = XEXP (links, 1))
1284 {
1285 rtx next1;
1286 rtx link = XEXP (links, 0);
1287
1288 /* If the linked insn has been replaced by a note, then there
1289 is no point in pursuing this chain any further. */
1290 if (NOTE_P (link))
1291 continue;
1292
1293 for (next1 = LOG_LINKS (link); next1; next1 = XEXP (next1, 1))
1294 {
1295 rtx link1 = XEXP (next1, 0);
1296 if (NOTE_P (link1))
1297 continue;
1298 /* I0 -> I1 -> I2 -> I3. */
1299 for (nextlinks = LOG_LINKS (link1); nextlinks;
1300 nextlinks = XEXP (nextlinks, 1))
1301 if ((next = try_combine (insn, link, link1,
1302 XEXP (nextlinks, 0),
1303 &new_direct_jump_p)) != 0)
1304 goto retry;
1305 /* I0, I1 -> I2, I2 -> I3. */
1306 for (nextlinks = XEXP (next1, 1); nextlinks;
1307 nextlinks = XEXP (nextlinks, 1))
1308 if ((next = try_combine (insn, link, link1,
1309 XEXP (nextlinks, 0),
1310 &new_direct_jump_p)) != 0)
1311 goto retry;
1312 }
1313
1314 for (next1 = XEXP (links, 1); next1; next1 = XEXP (next1, 1))
1315 {
1316 rtx link1 = XEXP (next1, 0);
1317 if (NOTE_P (link1))
1318 continue;
1319 /* I0 -> I2; I1, I2 -> I3. */
1320 for (nextlinks = LOG_LINKS (link); nextlinks;
1321 nextlinks = XEXP (nextlinks, 1))
1322 if ((next = try_combine (insn, link, link1,
1323 XEXP (nextlinks, 0),
1324 &new_direct_jump_p)) != 0)
1325 goto retry;
1326 /* I0 -> I1; I1, I2 -> I3. */
1327 for (nextlinks = LOG_LINKS (link1); nextlinks;
1328 nextlinks = XEXP (nextlinks, 1))
1329 if ((next = try_combine (insn, link, link1,
1330 XEXP (nextlinks, 0),
1331 &new_direct_jump_p)) != 0)
1332 goto retry;
1333 }
1334 }
1335
1336 /* Try this insn with each REG_EQUAL note it links back to. */
1337 for (links = LOG_LINKS (insn); links; links = XEXP (links, 1))
1338 {
1339 rtx set, note;
1340 rtx temp = XEXP (links, 0);
1341 if ((set = single_set (temp)) != 0
1342 && (note = find_reg_equal_equiv_note (temp)) != 0
1343 && (note = XEXP (note, 0), GET_CODE (note)) != EXPR_LIST
1344 /* Avoid using a register that may already have been marked
1345 dead by an earlier instruction. */
1346 && ! unmentioned_reg_p (note, SET_SRC (set))
1347 && (GET_MODE (note) == VOIDmode
1348 ? SCALAR_INT_MODE_P (GET_MODE (SET_DEST (set)))
1349 : GET_MODE (SET_DEST (set)) == GET_MODE (note)))
1350 {
1351 /* Temporarily replace the set's source with the
1352 contents of the REG_EQUAL note. The insn will
1353 be deleted or recognized by try_combine. */
1354 rtx orig = SET_SRC (set);
1355 SET_SRC (set) = note;
1356 i2mod = temp;
1357 i2mod_old_rhs = copy_rtx (orig);
1358 i2mod_new_rhs = copy_rtx (note);
1359 next = try_combine (insn, i2mod, NULL_RTX, NULL_RTX,
1360 &new_direct_jump_p);
1361 i2mod = NULL_RTX;
1362 if (next)
1363 goto retry;
1364 SET_SRC (set) = orig;
1365 }
1366 }
1367
1368 if (!NOTE_P (insn))
1369 record_dead_and_set_regs (insn);
1370
1371 retry:
1372 ;
1373 }
1374 }
1375 }
1376
1377 default_rtl_profile ();
1378 clear_log_links ();
1379 clear_bb_flags ();
1380 new_direct_jump_p |= purge_all_dead_edges ();
1381 delete_noop_moves ();
1382
1383 /* Clean up. */
1384 free (uid_log_links);
1385 free (uid_insn_cost);
1386 VEC_free (reg_stat_type, heap, reg_stat);
1387
1388 {
1389 struct undo *undo, *next;
1390 for (undo = undobuf.frees; undo; undo = next)
1391 {
1392 next = undo->next;
1393 free (undo);
1394 }
1395 undobuf.frees = 0;
1396 }
1397
1398 total_attempts += combine_attempts;
1399 total_merges += combine_merges;
1400 total_extras += combine_extras;
1401 total_successes += combine_successes;
1402
1403 nonzero_sign_valid = 0;
1404 rtl_hooks = general_rtl_hooks;
1405
1406 /* Make recognizer allow volatile MEMs again. */
1407 init_recog ();
1408
1409 return new_direct_jump_p;
1410 }
1411
1412 /* Wipe the last_xxx fields of reg_stat in preparation for another pass. */
1413
1414 static void
1415 init_reg_last (void)
1416 {
1417 unsigned int i;
1418 reg_stat_type *p;
1419
1420 FOR_EACH_VEC_ELT (reg_stat_type, reg_stat, i, p)
1421 memset (p, 0, offsetof (reg_stat_type, sign_bit_copies));
1422 }
1423 \f
1424 /* Set up any promoted values for incoming argument registers. */
1425
1426 static void
1427 setup_incoming_promotions (rtx first)
1428 {
1429 tree arg;
1430 bool strictly_local = false;
1431
1432 for (arg = DECL_ARGUMENTS (current_function_decl); arg;
1433 arg = DECL_CHAIN (arg))
1434 {
1435 rtx x, reg = DECL_INCOMING_RTL (arg);
1436 int uns1, uns3;
1437 enum machine_mode mode1, mode2, mode3, mode4;
1438
1439 /* Only continue if the incoming argument is in a register. */
1440 if (!REG_P (reg))
1441 continue;
1442
1443 /* Determine, if possible, whether all call sites of the current
1444 function lie within the current compilation unit. (This does
1445 take into account the exporting of a function via taking its
1446 address, and so forth.) */
1447 strictly_local = cgraph_local_info (current_function_decl)->local;
1448
1449 /* The mode and signedness of the argument before any promotions happen
1450 (equal to the mode of the pseudo holding it at that stage). */
1451 mode1 = TYPE_MODE (TREE_TYPE (arg));
1452 uns1 = TYPE_UNSIGNED (TREE_TYPE (arg));
1453
1454 /* The mode and signedness of the argument after any source language and
1455 TARGET_PROMOTE_PROTOTYPES-driven promotions. */
1456 mode2 = TYPE_MODE (DECL_ARG_TYPE (arg));
1457 uns3 = TYPE_UNSIGNED (DECL_ARG_TYPE (arg));
1458
1459 /* The mode and signedness of the argument as it is actually passed,
1460 after any TARGET_PROMOTE_FUNCTION_ARGS-driven ABI promotions. */
1461 mode3 = promote_function_mode (DECL_ARG_TYPE (arg), mode2, &uns3,
1462 TREE_TYPE (cfun->decl), 0);
1463
1464 /* The mode of the register in which the argument is being passed. */
1465 mode4 = GET_MODE (reg);
1466
1467 /* Eliminate sign extensions in the callee when:
1468 (a) A mode promotion has occurred; */
1469 if (mode1 == mode3)
1470 continue;
1471 /* (b) The mode of the register is the same as the mode of
1472 the argument as it is passed; */
1473 if (mode3 != mode4)
1474 continue;
1475 /* (c) There's no language level extension; */
1476 if (mode1 == mode2)
1477 ;
1478 /* (c.1) All callers are from the current compilation unit. If that's
1479 the case we don't have to rely on an ABI, we only have to know
1480 what we're generating right now, and we know that we will do the
1481 mode1 to mode2 promotion with the given sign. */
1482 else if (!strictly_local)
1483 continue;
1484 /* (c.2) The combination of the two promotions is useful. This is
1485 true when the signs match, or if the first promotion is unsigned.
1486 In the latter case, (sign_extend (zero_extend x)) is the same as
1487 (zero_extend (zero_extend x)), so make sure to force UNS3 true. */
1488 else if (uns1)
1489 uns3 = true;
1490 else if (uns3)
1491 continue;
1492
1493 /* Record that the value was promoted from mode1 to mode3,
1494 so that any sign extension at the head of the current
1495 function may be eliminated. */
1496 x = gen_rtx_CLOBBER (mode1, const0_rtx);
1497 x = gen_rtx_fmt_e ((uns3 ? ZERO_EXTEND : SIGN_EXTEND), mode3, x);
1498 record_value_for_reg (reg, first, x);
1499 }
1500 }
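
/* Illustrative example (added for exposition; not part of the original
   sources; QImode/SImode are just an assumed promotion).  For a signed
   char argument that the checks above accept and that the ABI passes
   sign extended in an SImode register, the recorded incoming value is

	(sign_extend:SI (clobber:QI (const_int 0)))

   so a later sign extension of that argument at the head of the function
   can be recognized as redundant.  */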
1501
1502 /* Called via note_stores. If X is a pseudo that is narrower than
1503 HOST_BITS_PER_WIDE_INT and is being set, record what bits are known zero.
1504
1505 If we are setting only a portion of X and we can't figure out what
1506 portion, assume all bits will be used since we don't know what will
1507 be happening.
1508
1509 Similarly, set how many bits of X are known to be copies of the sign bit
1510 at all locations in the function. This is the smallest number implied
1511 by any set of X. */
1512
1513 static void
1514 set_nonzero_bits_and_sign_copies (rtx x, const_rtx set, void *data)
1515 {
1516 rtx insn = (rtx) data;
1517 unsigned int num;
1518
1519 if (REG_P (x)
1520 && REGNO (x) >= FIRST_PSEUDO_REGISTER
1521 /* If this register is undefined at the start of the function, we can't
1522 say what its contents were. */
1523 && ! REGNO_REG_SET_P
1524 (DF_LR_IN (ENTRY_BLOCK_PTR->next_bb), REGNO (x))
1525 && GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT)
1526 {
1527 reg_stat_type *rsp = VEC_index (reg_stat_type, reg_stat, REGNO (x));
1528
1529 if (set == 0 || GET_CODE (set) == CLOBBER)
1530 {
1531 rsp->nonzero_bits = GET_MODE_MASK (GET_MODE (x));
1532 rsp->sign_bit_copies = 1;
1533 return;
1534 }
1535
1536 /* If this register is being initialized using itself, and the
1537 register is uninitialized in this basic block, and there are
1538 no LOG_LINKS which set the register, then part of the
1539 register is uninitialized. In that case we can't assume
1540 anything about the number of nonzero bits.
1541
1542 ??? We could do better if we checked this in
1543 reg_{nonzero_bits,num_sign_bit_copies}_for_combine. Then we
1544 could avoid making assumptions about the insn which initially
1545 sets the register, while still using the information in other
1546 insns. We would have to be careful to check every insn
1547 involved in the combination. */
1548
1549 if (insn
1550 && reg_referenced_p (x, PATTERN (insn))
1551 && !REGNO_REG_SET_P (DF_LR_IN (BLOCK_FOR_INSN (insn)),
1552 REGNO (x)))
1553 {
1554 rtx link;
1555
1556 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
1557 {
1558 if (dead_or_set_p (XEXP (link, 0), x))
1559 break;
1560 }
1561 if (!link)
1562 {
1563 rsp->nonzero_bits = GET_MODE_MASK (GET_MODE (x));
1564 rsp->sign_bit_copies = 1;
1565 return;
1566 }
1567 }
1568
1569 /* If this is a complex assignment, see if we can convert it into a
1570 simple assignment. */
1571 set = expand_field_assignment (set);
1572
1573 /* If this is a simple assignment, or we have a paradoxical SUBREG,
1574 set what we know about X. */
1575
1576 if (SET_DEST (set) == x
1577 || (GET_CODE (SET_DEST (set)) == SUBREG
1578 && (GET_MODE_SIZE (GET_MODE (SET_DEST (set)))
1579 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (set)))))
1580 && SUBREG_REG (SET_DEST (set)) == x))
1581 {
1582 rtx src = SET_SRC (set);
1583
1584 #ifdef SHORT_IMMEDIATES_SIGN_EXTEND
1585 /* If X is narrower than a word and SRC is a non-negative
1586 constant that would appear negative in the mode of X,
1587 sign-extend it for use in reg_stat[].nonzero_bits because some
1588 machines (maybe most) will actually do the sign-extension
1589 and this is the conservative approach.
1590
1591 ??? For 2.5, try to tighten up the MD files in this regard
1592 instead of this kludge. */
1593
1594 if (GET_MODE_BITSIZE (GET_MODE (x)) < BITS_PER_WORD
1595 && CONST_INT_P (src)
1596 && INTVAL (src) > 0
1597 && 0 != (UINTVAL (src)
1598 & ((unsigned HOST_WIDE_INT) 1
1599 << (GET_MODE_BITSIZE (GET_MODE (x)) - 1))))
1600 src = GEN_INT (UINTVAL (src)
1601 | ((unsigned HOST_WIDE_INT) (-1)
1602 << GET_MODE_BITSIZE (GET_MODE (x))));
1603 #endif
1604
1605 /* Don't call nonzero_bits if it cannot change anything. */
1606 if (rsp->nonzero_bits != ~(unsigned HOST_WIDE_INT) 0)
1607 rsp->nonzero_bits |= nonzero_bits (src, nonzero_bits_mode);
1608 num = num_sign_bit_copies (SET_SRC (set), GET_MODE (x));
1609 if (rsp->sign_bit_copies == 0
1610 || rsp->sign_bit_copies > num)
1611 rsp->sign_bit_copies = num;
1612 }
1613 else
1614 {
1615 rsp->nonzero_bits = GET_MODE_MASK (GET_MODE (x));
1616 rsp->sign_bit_copies = 1;
1617 }
1618 }
1619 }
1620 \f
1621 /* See if INSN can be combined into I3. PRED, PRED2, SUCC and SUCC2 are
1622 optionally insns that were previously combined into I3 or that will be
1623 combined into the merger of INSN and I3. The order is PRED, PRED2,
1624 INSN, SUCC, SUCC2, I3.
1625
1626 Return 0 if the combination is not allowed for any reason.
1627
1628 If the combination is allowed, *PDEST will be set to the single
1629 destination of INSN and *PSRC to the single source, and this function
1630 will return 1. */
1631
1632 static int
1633 can_combine_p (rtx insn, rtx i3, rtx pred ATTRIBUTE_UNUSED,
1634 rtx pred2 ATTRIBUTE_UNUSED, rtx succ, rtx succ2,
1635 rtx *pdest, rtx *psrc)
1636 {
1637 int i;
1638 const_rtx set = 0;
1639 rtx src, dest;
1640 rtx p;
1641 #ifdef AUTO_INC_DEC
1642 rtx link;
1643 #endif
1644 bool all_adjacent = true;
1645
1646 if (succ)
1647 {
1648 if (succ2)
1649 {
1650 if (next_active_insn (succ2) != i3)
1651 all_adjacent = false;
1652 if (next_active_insn (succ) != succ2)
1653 all_adjacent = false;
1654 }
1655 else if (next_active_insn (succ) != i3)
1656 all_adjacent = false;
1657 if (next_active_insn (insn) != succ)
1658 all_adjacent = false;
1659 }
1660 else if (next_active_insn (insn) != i3)
1661 all_adjacent = false;
1662
1663 /* Can combine only if previous insn is a SET of a REG, a SUBREG or CC0,
1664 or a PARALLEL consisting of such a SET and CLOBBERs.
1665
1666 If INSN has CLOBBER parallel parts, ignore them for our processing.
1667 By definition, these happen during the execution of the insn. When it
1668 is merged with another insn, all bets are off. If they are, in fact,
1669 needed and aren't also supplied in I3, they may be added by
1670 recog_for_combine. Otherwise, it won't match.
1671
1672 We can also ignore a SET whose SET_DEST is mentioned in a REG_UNUSED
1673 note.
1674
1675 Get the source and destination of INSN. If more than one, can't
1676 combine. */
1677
1678 if (GET_CODE (PATTERN (insn)) == SET)
1679 set = PATTERN (insn);
1680 else if (GET_CODE (PATTERN (insn)) == PARALLEL
1681 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET)
1682 {
1683 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
1684 {
1685 rtx elt = XVECEXP (PATTERN (insn), 0, i);
1686
1687 switch (GET_CODE (elt))
1688 {
1689 /* This is important to combine floating point insns
1690 for the SH4 port. */
1691 case USE:
1692 /* Combining an isolated USE doesn't make sense.
1693 We depend here on combinable_i3pat to reject them. */
1694 /* The code below this loop only verifies that the inputs of
1695 the SET in INSN do not change. We call reg_set_between_p
1696 to verify that the REG in the USE does not change between
1697 I3 and INSN.
1698 If the USE in INSN was for a pseudo register, the matching
1699 insn pattern will likely match any register; combining this
1700 with any other USE would only be safe if we knew that the
1701 used registers have identical values, or if there was
1702 something to tell them apart, e.g. different modes. For
1703 now, we forgo such complicated tests and simply disallow
1704 combining of USES of pseudo registers with any other USE. */
1705 if (REG_P (XEXP (elt, 0))
1706 && GET_CODE (PATTERN (i3)) == PARALLEL)
1707 {
1708 rtx i3pat = PATTERN (i3);
1709 int i = XVECLEN (i3pat, 0) - 1;
1710 unsigned int regno = REGNO (XEXP (elt, 0));
1711
1712 do
1713 {
1714 rtx i3elt = XVECEXP (i3pat, 0, i);
1715
1716 if (GET_CODE (i3elt) == USE
1717 && REG_P (XEXP (i3elt, 0))
1718 && (REGNO (XEXP (i3elt, 0)) == regno
1719 ? reg_set_between_p (XEXP (elt, 0),
1720 PREV_INSN (insn), i3)
1721 : regno >= FIRST_PSEUDO_REGISTER))
1722 return 0;
1723 }
1724 while (--i >= 0);
1725 }
1726 break;
1727
1728 /* We can ignore CLOBBERs. */
1729 case CLOBBER:
1730 break;
1731
1732 case SET:
1733 /* Ignore SETs whose result isn't used but not those that
1734 have side-effects. */
1735 if (find_reg_note (insn, REG_UNUSED, SET_DEST (elt))
1736 && insn_nothrow_p (insn)
1737 && !side_effects_p (elt))
1738 break;
1739
1740 /* If we have already found a SET, this is a second one and
1741 so we cannot combine with this insn. */
1742 if (set)
1743 return 0;
1744
1745 set = elt;
1746 break;
1747
1748 default:
1749 /* Anything else means we can't combine. */
1750 return 0;
1751 }
1752 }
1753
1754 if (set == 0
1755 /* If SET_SRC is an ASM_OPERANDS we can't throw away these CLOBBERs,
1756 so don't do anything with it. */
1757 || GET_CODE (SET_SRC (set)) == ASM_OPERANDS)
1758 return 0;
1759 }
1760 else
1761 return 0;
1762
1763 if (set == 0)
1764 return 0;
1765
1766 set = expand_field_assignment (set);
1767 src = SET_SRC (set), dest = SET_DEST (set);
1768
1769 /* Don't eliminate a store to the stack pointer. */
1770 if (dest == stack_pointer_rtx
1771 /* Don't combine with an insn that sets a register to itself if it has
1772 a REG_EQUAL note. This may be part of a LIBCALL sequence. */
1773 || (rtx_equal_p (src, dest) && find_reg_note (insn, REG_EQUAL, NULL_RTX))
1774 /* Can't merge an ASM_OPERANDS. */
1775 || GET_CODE (src) == ASM_OPERANDS
1776 /* Can't merge a function call. */
1777 || GET_CODE (src) == CALL
1778 /* Don't eliminate a function call argument. */
1779 || (CALL_P (i3)
1780 && (find_reg_fusage (i3, USE, dest)
1781 || (REG_P (dest)
1782 && REGNO (dest) < FIRST_PSEUDO_REGISTER
1783 && global_regs[REGNO (dest)])))
1784 /* Don't substitute into an incremented register. */
1785 || FIND_REG_INC_NOTE (i3, dest)
1786 || (succ && FIND_REG_INC_NOTE (succ, dest))
1787 || (succ2 && FIND_REG_INC_NOTE (succ2, dest))
1788 /* Don't substitute into a non-local goto, this confuses CFG. */
1789 || (JUMP_P (i3) && find_reg_note (i3, REG_NON_LOCAL_GOTO, NULL_RTX))
1790 /* Make sure that DEST is not used after SUCC but before I3. */
1791 || (!all_adjacent
1792 && ((succ2
1793 && (reg_used_between_p (dest, succ2, i3)
1794 || reg_used_between_p (dest, succ, succ2)))
1795 || (!succ2 && succ && reg_used_between_p (dest, succ, i3))))
1796 /* Make sure that the value that is to be substituted for the register
1797 does not use any registers whose values alter in between. However,
1798 if the insns are adjacent, a use can't cross a set even though we
1799 think it might (this can happen for a sequence of insns each setting
1800 the same destination; last_set of that register might point to
1801 a NOTE). If INSN has a REG_EQUIV note, the register is always
1802 equivalent to the memory so the substitution is valid even if there
1803 are intervening stores. Also, don't move a volatile asm or
1804 UNSPEC_VOLATILE across any other insns. */
1805 || (! all_adjacent
1806 && (((!MEM_P (src)
1807 || ! find_reg_note (insn, REG_EQUIV, src))
1808 && use_crosses_set_p (src, DF_INSN_LUID (insn)))
1809 || (GET_CODE (src) == ASM_OPERANDS && MEM_VOLATILE_P (src))
1810 || GET_CODE (src) == UNSPEC_VOLATILE))
1811 /* Don't combine across a CALL_INSN, because that would possibly
1812 change whether the life span of some REGs crosses calls or not,
1813 and it is a pain to update that information.
1814 Exception: if source is a constant, moving it later can't hurt.
1815 Accept that as a special case. */
1816 || (DF_INSN_LUID (insn) < last_call_luid && ! CONSTANT_P (src)))
1817 return 0;
1818
1819 /* DEST must either be a REG or CC0. */
1820 if (REG_P (dest))
1821 {
1822 /* If register alignment is being enforced for multi-word items in all
1823 cases except for parameters, it is possible to have a register copy
1824 insn referencing a hard register that is not allowed to contain the
1825 mode being copied and which would not be valid as an operand of most
1826 insns. Eliminate this problem by not combining with such an insn.
1827
1828 Also, on some machines we don't want to extend the life of a hard
1829 register. */
1830
1831 if (REG_P (src)
1832 && ((REGNO (dest) < FIRST_PSEUDO_REGISTER
1833 && ! HARD_REGNO_MODE_OK (REGNO (dest), GET_MODE (dest)))
1834 /* Don't extend the life of a hard register unless it is
1835 a user variable (if we have few registers) or it can't
1836 fit into the desired register (meaning something special
1837 is going on).
1838 Also avoid substituting a return register into I3, because
1839 reload can't handle a conflict with constraints of other
1840 inputs. */
1841 || (REGNO (src) < FIRST_PSEUDO_REGISTER
1842 && ! HARD_REGNO_MODE_OK (REGNO (src), GET_MODE (src)))))
1843 return 0;
1844 }
1845 else if (GET_CODE (dest) != CC0)
1846 return 0;
1847
1848
1849 if (GET_CODE (PATTERN (i3)) == PARALLEL)
1850 for (i = XVECLEN (PATTERN (i3), 0) - 1; i >= 0; i--)
1851 if (GET_CODE (XVECEXP (PATTERN (i3), 0, i)) == CLOBBER)
1852 {
1853 /* Don't substitute for a register intended as a clobberable
1854 operand. */
1855 rtx reg = XEXP (XVECEXP (PATTERN (i3), 0, i), 0);
1856 if (rtx_equal_p (reg, dest))
1857 return 0;
1858
1859 /* If the clobber represents an earlyclobber operand, we must not
1860 substitute an expression containing the clobbered register.
1861 As we do not analyze the constraint strings here, we have to
1862 make the conservative assumption. However, if the register is
1863 a fixed hard reg, the clobber cannot represent any operand;
1864 we leave it up to the machine description to either accept or
1865 reject use-and-clobber patterns. */
1866 if (!REG_P (reg)
1867 || REGNO (reg) >= FIRST_PSEUDO_REGISTER
1868 || !fixed_regs[REGNO (reg)])
1869 if (reg_overlap_mentioned_p (reg, src))
1870 return 0;
1871 }
1872
1873 /* If INSN contains anything volatile, or is an `asm' (whether volatile
1874 or not), reject, unless nothing volatile comes between it and I3. */
1875
1876 if (GET_CODE (src) == ASM_OPERANDS || volatile_refs_p (src))
1877 {
1878 /* Make sure neither succ nor succ2 contains a volatile reference. */
1879 if (succ2 != 0 && volatile_refs_p (PATTERN (succ2)))
1880 return 0;
1881 if (succ != 0 && volatile_refs_p (PATTERN (succ)))
1882 return 0;
1883 /* We'll check insns between INSN and I3 below. */
1884 }
1885
1886 /* If INSN is an asm, and DEST is a hard register, reject, since it has
1887 to be an explicit register variable, and was chosen for a reason. */
1888
1889 if (GET_CODE (src) == ASM_OPERANDS
1890 && REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER)
1891 return 0;
1892
1893 /* If there are any volatile insns between INSN and I3, reject, because
1894 they might affect machine state. */
1895
1896 for (p = NEXT_INSN (insn); p != i3; p = NEXT_INSN (p))
1897 if (INSN_P (p) && p != succ && p != succ2 && volatile_insn_p (PATTERN (p)))
1898 return 0;
1899
1900 /* If INSN contains an autoincrement or autodecrement, make sure that
1901 register is not used between there and I3, and not already used in
1902 I3 either. Neither must it be used in PRED or SUCC, if they exist.
1903 Also insist that I3 not be a jump; if it were one
1904 and the incremented register were spilled, we would lose. */
1905
1906 #ifdef AUTO_INC_DEC
1907 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1908 if (REG_NOTE_KIND (link) == REG_INC
1909 && (JUMP_P (i3)
1910 || reg_used_between_p (XEXP (link, 0), insn, i3)
1911 || (pred != NULL_RTX
1912 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred)))
1913 || (pred2 != NULL_RTX
1914 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred2)))
1915 || (succ != NULL_RTX
1916 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ)))
1917 || (succ2 != NULL_RTX
1918 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ2)))
1919 || reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i3))))
1920 return 0;
1921 #endif
1922
1923 #ifdef HAVE_cc0
1924 /* Don't combine an insn that follows a CC0-setting insn.
1925 An insn that uses CC0 must not be separated from the one that sets it.
1926 We do, however, allow I2 to follow a CC0-setting insn if that insn
1927 is passed as I1; in that case it will be deleted also.
1928 We also allow combining in this case if all the insns are adjacent
1929 because that would leave the two CC0 insns adjacent as well.
1930 It would be more logical to test whether CC0 occurs inside I1 or I2,
1931 but that would be much slower, and this ought to be equivalent. */
1932
1933 p = prev_nonnote_insn (insn);
1934 if (p && p != pred && NONJUMP_INSN_P (p) && sets_cc0_p (PATTERN (p))
1935 && ! all_adjacent)
1936 return 0;
1937 #endif
1938
1939 /* If we get here, we have passed all the tests and the combination is
1940 to be allowed. */
1941
1942 *pdest = dest;
1943 *psrc = src;
1944
1945 return 1;
1946 }
1947 \f
1948 /* LOC is the location within I3 that contains its pattern or the component
1949 of a PARALLEL of the pattern. We validate that it is valid for combining.
1950
1951 One problem is that if I3 modifies its output, as opposed to replacing it
1952 entirely, we can't allow the output to contain I2DEST, I1DEST or I0DEST,
1953 as doing so would produce an insn that is not equivalent to the original insns.
1954
1955 Consider:
1956
1957 (set (reg:DI 101) (reg:DI 100))
1958 (set (subreg:SI (reg:DI 101) 0) <foo>)
1959
1960 This is NOT equivalent to:
1961
1962 (parallel [(set (subreg:SI (reg:DI 100) 0) <foo>)
1963 (set (reg:DI 101) (reg:DI 100))])
1964
1965 Not only does this modify 100 (in which case it might still be valid
1966 if 100 were dead in I2), it sets 101 to the ORIGINAL value of 100.
1967
1968 We can also run into a problem if I2 sets a register that I1
1969 uses and I1 gets directly substituted into I3 (not via I2). In that
1970 case, we would be getting the wrong value of I2DEST into I3, so we
1971 must reject the combination. This case occurs when I2 and I1 both
1972 feed into I3, rather than when I1 feeds into I2, which feeds into I3.
1973 If I1_NOT_IN_SRC is nonzero, it means that finding I1 in the source
1974 of a SET must prevent combination from occurring. The same situation
1975 can occur for I0, in which case I0_NOT_IN_SRC is set.
1976
1977 Before doing the above check, we first try to expand a field assignment
1978 into a set of logical operations.
1979
1980 If PI3_DEST_KILLED is nonzero, it is a pointer to a location in which
1981 we place a register that is both set and used within I3. If more than one
1982 such register is detected, we fail.
1983
1984 Return 1 if the combination is valid, zero otherwise. */
1985
1986 static int
1987 combinable_i3pat (rtx i3, rtx *loc, rtx i2dest, rtx i1dest, rtx i0dest,
1988 int i1_not_in_src, int i0_not_in_src, rtx *pi3dest_killed)
1989 {
1990 rtx x = *loc;
1991
1992 if (GET_CODE (x) == SET)
1993 {
1994 rtx set = x;
1995 rtx dest = SET_DEST (set);
1996 rtx src = SET_SRC (set);
1997 rtx inner_dest = dest;
1998 rtx subdest;
1999
2000 while (GET_CODE (inner_dest) == STRICT_LOW_PART
2001 || GET_CODE (inner_dest) == SUBREG
2002 || GET_CODE (inner_dest) == ZERO_EXTRACT)
2003 inner_dest = XEXP (inner_dest, 0);
2004
2005 /* Check for the case where I3 modifies its output, as discussed
2006 above. We don't want to prevent pseudos from being combined
2007 into the address of a MEM, so only prevent the combination if
2008 i1 or i2 set the same MEM. */
2009 if ((inner_dest != dest
2010 && (!MEM_P (inner_dest)
2011 || rtx_equal_p (i2dest, inner_dest)
2012 || (i1dest && rtx_equal_p (i1dest, inner_dest))
2013 || (i0dest && rtx_equal_p (i0dest, inner_dest)))
2014 && (reg_overlap_mentioned_p (i2dest, inner_dest)
2015 || (i1dest && reg_overlap_mentioned_p (i1dest, inner_dest))
2016 || (i0dest && reg_overlap_mentioned_p (i0dest, inner_dest))))
2017
2018 /* This is the same test done in can_combine_p except we can't test
2019 all_adjacent; we don't have to, since this instruction will stay
2020 in place, thus we are not considering increasing the lifetime of
2021 INNER_DEST.
2022
2023 Also, if this insn sets a function argument, combining it with
2024 something that might need a spill could clobber a previous
2025 function argument; the all_adjacent test in can_combine_p also
2026 checks this; here, we do a more specific test for this case. */
2027
2028 || (REG_P (inner_dest)
2029 && REGNO (inner_dest) < FIRST_PSEUDO_REGISTER
2030 && (! HARD_REGNO_MODE_OK (REGNO (inner_dest),
2031 GET_MODE (inner_dest))))
2032 || (i1_not_in_src && reg_overlap_mentioned_p (i1dest, src))
2033 || (i0_not_in_src && reg_overlap_mentioned_p (i0dest, src)))
2034 return 0;
2035
2036 /* If DEST is used in I3, it is being killed in this insn, so
2037 record that for later. We have to consider paradoxical
2038 subregs here, since they kill the whole register, but we
2039 ignore partial subregs, STRICT_LOW_PART, etc.
2040 Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
2041 STACK_POINTER_REGNUM, since these are always considered to be
2042 live. Similarly for ARG_POINTER_REGNUM if it is fixed. */
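      /* Two illustrative cases (not from the original sources), assuming a
	 32-bit target: (subreg:DI (reg:SI 100) 0) is paradoxical, so SUBDEST
	 becomes (reg:SI 100) and may be recorded in *PI3DEST_KILLED below,
	 whereas (subreg:SI (reg:DI 100) 0) names only part of the register,
	 is left as a SUBREG, and therefore fails the REG_P test.  */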
2043 subdest = dest;
2044 if (GET_CODE (subdest) == SUBREG
2045 && (GET_MODE_SIZE (GET_MODE (subdest))
2046 >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (subdest)))))
2047 subdest = SUBREG_REG (subdest);
2048 if (pi3dest_killed
2049 && REG_P (subdest)
2050 && reg_referenced_p (subdest, PATTERN (i3))
2051 && REGNO (subdest) != FRAME_POINTER_REGNUM
2052 #if !HARD_FRAME_POINTER_IS_FRAME_POINTER
2053 && REGNO (subdest) != HARD_FRAME_POINTER_REGNUM
2054 #endif
2055 #if ARG_POINTER_REGNUM != FRAME_POINTER_REGNUM
2056 && (REGNO (subdest) != ARG_POINTER_REGNUM
2057 || ! fixed_regs [REGNO (subdest)])
2058 #endif
2059 && REGNO (subdest) != STACK_POINTER_REGNUM)
2060 {
2061 if (*pi3dest_killed)
2062 return 0;
2063
2064 *pi3dest_killed = subdest;
2065 }
2066 }
2067
2068 else if (GET_CODE (x) == PARALLEL)
2069 {
2070 int i;
2071
2072 for (i = 0; i < XVECLEN (x, 0); i++)
2073 if (! combinable_i3pat (i3, &XVECEXP (x, 0, i), i2dest, i1dest, i0dest,
2074 i1_not_in_src, i0_not_in_src, pi3dest_killed))
2075 return 0;
2076 }
2077
2078 return 1;
2079 }
2080 \f
2081 /* Return 1 if X is an arithmetic expression that contains a multiplication
2082 or division. We don't count multiplications by powers of two here. */
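/* For instance (illustrative only): (mult:SI (reg:SI 100) (const_int 6))
   counts, but (mult:SI (reg:SI 100) (const_int 8)) does not because 8 is a
   power of two, while DIV, MOD, UDIV and UMOD always count.  */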
2083
2084 static int
2085 contains_muldiv (rtx x)
2086 {
2087 switch (GET_CODE (x))
2088 {
2089 case MOD: case DIV: case UMOD: case UDIV:
2090 return 1;
2091
2092 case MULT:
2093 return ! (CONST_INT_P (XEXP (x, 1))
2094 && exact_log2 (UINTVAL (XEXP (x, 1))) >= 0);
2095 default:
2096 if (BINARY_P (x))
2097 return contains_muldiv (XEXP (x, 0))
2098 || contains_muldiv (XEXP (x, 1));
2099
2100 if (UNARY_P (x))
2101 return contains_muldiv (XEXP (x, 0));
2102
2103 return 0;
2104 }
2105 }
2106 \f
2107 /* Determine whether INSN can be used in a combination. Return nonzero
2108 if it cannot be. This is used in try_combine to detect early some cases
2109 where we can't perform combinations. */
2110
2111 static int
2112 cant_combine_insn_p (rtx insn)
2113 {
2114 rtx set;
2115 rtx src, dest;
2116
2117 /* If this isn't really an insn, we can't do anything.
2118 This can occur when flow deletes an insn that it has merged into an
2119 auto-increment address. */
2120 if (! INSN_P (insn))
2121 return 1;
2122
2123 /* Never combine loads and stores involving hard regs that are likely
2124 to be spilled. The register allocator can usually handle such
2125 reg-reg moves by tying. If we allow the combiner to make
2126 substitutions of likely-spilled regs, reload might die.
2127 As an exception, we allow combinations involving fixed regs; these are
2128 not available to the register allocator so there's no risk involved. */
2129
2130 set = single_set (insn);
2131 if (! set)
2132 return 0;
2133 src = SET_SRC (set);
2134 dest = SET_DEST (set);
2135 if (GET_CODE (src) == SUBREG)
2136 src = SUBREG_REG (src);
2137 if (GET_CODE (dest) == SUBREG)
2138 dest = SUBREG_REG (dest);
2139 if (REG_P (src) && REG_P (dest)
2140 && ((HARD_REGISTER_P (src)
2141 && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (src))
2142 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (src))))
2143 || (HARD_REGISTER_P (dest)
2144 && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (dest))
2145 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (dest))))))
2146 return 1;
2147
2148 return 0;
2149 }
2150
2151 struct likely_spilled_retval_info
2152 {
2153 unsigned regno, nregs;
2154 unsigned mask;
2155 };
2156
2157 /* Called via note_stores by likely_spilled_retval_p. Remove from info->mask
2158 hard registers that are known to be written to / clobbered in full. */
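/* A worked example, purely illustrative: bit I of INFO->mask stands for hard
   register INFO->regno + I.  If the return value occupies hard regs 0..3,
   the mask starts out as 0xf; a store that fully covers regs 1..2 yields
   new_mask == 0x6 below and leaves 0x9, so only regs 0 and 3 are still
   considered live parts of the return value.  */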
2159 static void
2160 likely_spilled_retval_1 (rtx x, const_rtx set, void *data)
2161 {
2162 struct likely_spilled_retval_info *const info =
2163 (struct likely_spilled_retval_info *) data;
2164 unsigned regno, nregs;
2165 unsigned new_mask;
2166
2167 if (!REG_P (XEXP (set, 0)))
2168 return;
2169 regno = REGNO (x);
2170 if (regno >= info->regno + info->nregs)
2171 return;
2172 nregs = hard_regno_nregs[regno][GET_MODE (x)];
2173 if (regno + nregs <= info->regno)
2174 return;
2175 new_mask = (2U << (nregs - 1)) - 1;
2176 if (regno < info->regno)
2177 new_mask >>= info->regno - regno;
2178 else
2179 new_mask <<= regno - info->regno;
2180 info->mask &= ~new_mask;
2181 }
2182
2183 /* Return nonzero iff part of the return value is live during INSN, and
2184 it is likely spilled. This can happen when more than one insn is needed
2185 to copy the return value, e.g. when we consider combining into the
2186 second copy insn for a complex value. */
2187
2188 static int
2189 likely_spilled_retval_p (rtx insn)
2190 {
2191 rtx use = BB_END (this_basic_block);
2192 rtx reg, p;
2193 unsigned regno, nregs;
2194 /* We assume here that no machine mode needs more than
2195 32 hard registers when the value overlaps with a register
2196 for which TARGET_FUNCTION_VALUE_REGNO_P is true. */
2197 unsigned mask;
2198 struct likely_spilled_retval_info info;
2199
2200 if (!NONJUMP_INSN_P (use) || GET_CODE (PATTERN (use)) != USE || insn == use)
2201 return 0;
2202 reg = XEXP (PATTERN (use), 0);
2203 if (!REG_P (reg) || !targetm.calls.function_value_regno_p (REGNO (reg)))
2204 return 0;
2205 regno = REGNO (reg);
2206 nregs = hard_regno_nregs[regno][GET_MODE (reg)];
2207 if (nregs == 1)
2208 return 0;
2209 mask = (2U << (nregs - 1)) - 1;
2210
2211 /* Disregard parts of the return value that are set later. */
2212 info.regno = regno;
2213 info.nregs = nregs;
2214 info.mask = mask;
2215 for (p = PREV_INSN (use); info.mask && p != insn; p = PREV_INSN (p))
2216 if (INSN_P (p))
2217 note_stores (PATTERN (p), likely_spilled_retval_1, &info);
2218 mask = info.mask;
2219
2220 /* Check if any of the (probably) live return value registers is
2221 likely spilled. */
2222 nregs --;
2223 do
2224 {
2225 if ((mask & 1 << nregs)
2226 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (regno + nregs)))
2227 return 1;
2228 } while (nregs--);
2229 return 0;
2230 }
2231
2232 /* Adjust INSN after we made a change to its destination.
2233
2234 Changing the destination can invalidate notes that say something about
2235 the results of the insn and a LOG_LINK pointing to the insn. */
2236
2237 static void
2238 adjust_for_new_dest (rtx insn)
2239 {
2240 /* For notes, be conservative and simply remove them. */
2241 remove_reg_equal_equiv_notes (insn);
2242
2243 /* The new insn will have a destination that was previously the destination
2244 of an insn just above it. Call distribute_links to make a LOG_LINK from
2245 the next use of that destination. */
2246 distribute_links (gen_rtx_INSN_LIST (VOIDmode, insn, NULL_RTX));
2247
2248 df_insn_rescan (insn);
2249 }
2250
2251 /* Return TRUE if combine can reuse reg X in mode MODE.
2252 ADDED_SETS is nonzero if the original set is still required. */
2253 static bool
2254 can_change_dest_mode (rtx x, int added_sets, enum machine_mode mode)
2255 {
2256 unsigned int regno;
2257
2258 if (!REG_P(x))
2259 return false;
2260
2261 regno = REGNO (x);
2262 /* Allow hard registers if the new mode is legal, and occupies no more
2263 registers than the old mode. */
2264 if (regno < FIRST_PSEUDO_REGISTER)
2265 return (HARD_REGNO_MODE_OK (regno, mode)
2266 && (hard_regno_nregs[regno][GET_MODE (x)]
2267 >= hard_regno_nregs[regno][mode]));
2268
2269 /* Or a pseudo that is only set once. */
2270 return (REG_N_SETS (regno) == 1 && !added_sets
2271 && !REG_USERVAR_P (x));
2272 }
2273
2274
2275 /* Check whether X, the destination of a set, refers to part of
2276 the register specified by REG. */
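/* For example (illustrative only): with REG = (reg:DI 100), both
   X = (subreg:SI (reg:DI 100) 0) and
   X = (strict_low_part (subreg:SI (reg:DI 100) 0)) refer to part of REG,
   while (reg:DI 100) itself, or a SUBREG of some other register, does
   not.  */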
2277
2278 static bool
2279 reg_subword_p (rtx x, rtx reg)
2280 {
2281 /* Check that reg is an integer mode register. */
2282 if (!REG_P (reg) || GET_MODE_CLASS (GET_MODE (reg)) != MODE_INT)
2283 return false;
2284
2285 if (GET_CODE (x) == STRICT_LOW_PART
2286 || GET_CODE (x) == ZERO_EXTRACT)
2287 x = XEXP (x, 0);
2288
2289 return GET_CODE (x) == SUBREG
2290 && SUBREG_REG (x) == reg
2291 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT;
2292 }
2293
2294 #ifdef AUTO_INC_DEC
2295 /* Replace auto-increment addressing modes with explicit operations to access
2296 the same addresses without modifying the corresponding registers. */
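/* For instance (illustrative only), for a 4-byte SImode access:
   (mem:SI (post_inc:SI (reg:SI 100))) is rewritten to use the address
   (reg:SI 100), and (mem:SI (pre_inc:SI (reg:SI 100))) to use
   (plus:SI (reg:SI 100) (const_int 4)), so the accessed location is kept
   while the register update side effect is dropped.  */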
2297
2298 static rtx
2299 cleanup_auto_inc_dec (rtx src, enum machine_mode mem_mode)
2300 {
2301 rtx x = src;
2302 const RTX_CODE code = GET_CODE (x);
2303 int i;
2304 const char *fmt;
2305
2306 switch (code)
2307 {
2308 case REG:
2309 case CONST_INT:
2310 case CONST_DOUBLE:
2311 case CONST_FIXED:
2312 case CONST_VECTOR:
2313 case SYMBOL_REF:
2314 case CODE_LABEL:
2315 case PC:
2316 case CC0:
2317 case SCRATCH:
2318 /* A SCRATCH must be shared because each one represents a distinct value. */
2319 return x;
2320 case CLOBBER:
2321 if (REG_P (XEXP (x, 0)) && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER)
2322 return x;
2323 break;
2324
2325 case CONST:
2326 if (shared_const_p (x))
2327 return x;
2328 break;
2329
2330 case MEM:
2331 mem_mode = GET_MODE (x);
2332 break;
2333
2334 case PRE_INC:
2335 case PRE_DEC:
2336 gcc_assert (mem_mode != VOIDmode && mem_mode != BLKmode);
2337 return gen_rtx_PLUS (GET_MODE (x),
2338 cleanup_auto_inc_dec (XEXP (x, 0), mem_mode),
2339 GEN_INT (code == PRE_INC
2340 ? GET_MODE_SIZE (mem_mode)
2341 : -GET_MODE_SIZE (mem_mode)));
2342
2343 case POST_INC:
2344 case POST_DEC:
2345 case PRE_MODIFY:
2346 case POST_MODIFY:
2347 return cleanup_auto_inc_dec (code == PRE_MODIFY
2348 ? XEXP (x, 1) : XEXP (x, 0),
2349 mem_mode);
2350
2351 default:
2352 break;
2353 }
2354
2355 /* Copy the various flags, fields, and other information. We assume
2356 that all fields need copying, and then clear the fields that should
2357 not be copied. That is the sensible default behavior, and forces
2358 us to explicitly document why we are *not* copying a flag. */
2359 x = shallow_copy_rtx (x);
2360
2361 /* We do not copy the USED flag, which is used as a mark bit during
2362 walks over the RTL. */
2363 RTX_FLAG (x, used) = 0;
2364
2365 /* We do not copy FRAME_RELATED for INSNs. */
2366 if (INSN_P (x))
2367 RTX_FLAG (x, frame_related) = 0;
2368
2369 fmt = GET_RTX_FORMAT (code);
2370 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2371 if (fmt[i] == 'e')
2372 XEXP (x, i) = cleanup_auto_inc_dec (XEXP (x, i), mem_mode);
2373 else if (fmt[i] == 'E' || fmt[i] == 'V')
2374 {
2375 int j;
2376 XVEC (x, i) = rtvec_alloc (XVECLEN (x, i));
2377 for (j = 0; j < XVECLEN (x, i); j++)
2378 XVECEXP (x, i, j)
2379 = cleanup_auto_inc_dec (XVECEXP (src, i, j), mem_mode);
2380 }
2381
2382 return x;
2383 }
2384 #endif
2385
2386 /* Auxiliary data structure for propagate_for_debug_stmt. */
2387
2388 struct rtx_subst_pair
2389 {
2390 rtx to;
2391 bool adjusted;
2392 };
2393
2394 /* DATA points to an rtx_subst_pair. Return the value that should be
2395 substituted. */
2396
2397 static rtx
2398 propagate_for_debug_subst (rtx from, const_rtx old_rtx, void *data)
2399 {
2400 struct rtx_subst_pair *pair = (struct rtx_subst_pair *)data;
2401
2402 if (!rtx_equal_p (from, old_rtx))
2403 return NULL_RTX;
2404 if (!pair->adjusted)
2405 {
2406 pair->adjusted = true;
2407 #ifdef AUTO_INC_DEC
2408 pair->to = cleanup_auto_inc_dec (pair->to, VOIDmode);
2409 #else
2410 pair->to = copy_rtx (pair->to);
2411 #endif
2412 pair->to = make_compound_operation (pair->to, SET);
2413 return pair->to;
2414 }
2415 return copy_rtx (pair->to);
2416 }
2417
2418 /* Replace all the occurrences of DEST with SRC in DEBUG_INSNs between INSN
2419 and LAST. */
2420
2421 static void
2422 propagate_for_debug (rtx insn, rtx last, rtx dest, rtx src)
2423 {
2424 rtx next, loc;
2425
2426 struct rtx_subst_pair p;
2427 p.to = src;
2428 p.adjusted = false;
2429
2430 next = NEXT_INSN (insn);
2431 while (next != last)
2432 {
2433 insn = next;
2434 next = NEXT_INSN (insn);
2435 if (DEBUG_INSN_P (insn))
2436 {
2437 loc = simplify_replace_fn_rtx (INSN_VAR_LOCATION_LOC (insn),
2438 dest, propagate_for_debug_subst, &p);
2439 if (loc == INSN_VAR_LOCATION_LOC (insn))
2440 continue;
2441 INSN_VAR_LOCATION_LOC (insn) = loc;
2442 df_insn_rescan (insn);
2443 }
2444 }
2445 }
2446
2447 /* Delete the unconditional jump INSN and adjust the CFG correspondingly.
2448 Note that the INSN should be deleted *after* removing dead edges, so
2449 that the kept edge is the fallthrough edge for a (set (pc) (pc))
2450 but not for a (set (pc) (label_ref FOO)). */
2451
2452 static void
2453 update_cfg_for_uncondjump (rtx insn)
2454 {
2455 basic_block bb = BLOCK_FOR_INSN (insn);
2456 bool at_end = (BB_END (bb) == insn);
2457
2458 if (at_end)
2459 purge_dead_edges (bb);
2460
2461 delete_insn (insn);
2462 if (at_end && EDGE_COUNT (bb->succs) == 1)
2463 single_succ_edge (bb)->flags |= EDGE_FALLTHRU;
2464 }
2465
2466 /* Try to combine the insns I0, I1 and I2 into I3.
2467 Here I0, I1 and I2 appear earlier than I3.
2468 I0 and I1 can be zero; then we combine just I2 into I3, or I1 and I2 into
2469 I3.
2470
2471 If we are combining more than two insns and the resulting insn is not
2472 recognized, try splitting it into two insns. If that happens, I2 and I3
2473 are retained and I1/I0 are pseudo-deleted by turning them into a NOTE.
2474 Otherwise, I0, I1 and I2 are pseudo-deleted.
2475
2476 Return 0 if the combination does not work. Then nothing is changed.
2477 If we did the combination, return the insn at which combine should
2478 resume scanning.
2479
2480 Set NEW_DIRECT_JUMP_P to a nonzero value if try_combine creates a
2481 new direct jump instruction. */
2482
2483 static rtx
2484 try_combine (rtx i3, rtx i2, rtx i1, rtx i0, int *new_direct_jump_p)
2485 {
2486 /* New patterns for I3 and I2, respectively. */
2487 rtx newpat, newi2pat = 0;
2488 rtvec newpat_vec_with_clobbers = 0;
2489 int substed_i2 = 0, substed_i1 = 0, substed_i0 = 0;
2490 /* Indicates need to preserve SET in I0, I1 or I2 in I3 if it is not
2491 dead. */
2492 int added_sets_0, added_sets_1, added_sets_2;
2493 /* Total number of SETs to put into I3. */
2494 int total_sets;
2495 /* Nonzero if I2's or I1's body now appears in I3. */
2496 int i2_is_used = 0, i1_is_used = 0;
2497 /* INSN_CODEs for new I3, new I2, and user of condition code. */
2498 int insn_code_number, i2_code_number = 0, other_code_number = 0;
2499 /* Contains the destination of I3 if that destination is used in I3's source,
2500 which means that the old life of I3's destination is being killed. If that
2501 usage is placed into I2 and not in I3, a REG_DEAD note must be made. */
2502 rtx i3dest_killed = 0;
2503 /* SET_DEST and SET_SRC of I2, I1 and I0. */
2504 rtx i2dest = 0, i2src = 0, i1dest = 0, i1src = 0, i0dest = 0, i0src = 0;
2505 rtx i1src_copy = 0;
2506 /* Set if I2DEST was reused as a scratch register. */
2507 bool i2scratch = false;
2508 /* The PATTERNs of I0, I1, and I2, or a copy of them in certain cases. */
2509 rtx i0pat = 0, i1pat = 0, i2pat = 0;
2510 /* Indicates if I2DEST or I1DEST is in I2SRC or I1_SRC. */
2511 int i2dest_in_i2src = 0, i1dest_in_i1src = 0, i2dest_in_i1src = 0;
2512 int i0dest_in_i0src = 0, i1dest_in_i0src = 0, i2dest_in_i0src = 0;
2513 int i2dest_killed = 0, i1dest_killed = 0, i0dest_killed = 0;
2514 int i1_feeds_i2_n = 0, i0_feeds_i2_n = 0, i0_feeds_i1_n = 0;
2515 /* Notes that must be added to REG_NOTES in I3 and I2. */
2516 rtx new_i3_notes, new_i2_notes;
2517 /* Notes that we substituted I3 into I2 instead of the normal case. */
2518 int i3_subst_into_i2 = 0;
2519 /* Notes that I1, I2 or I3 is a MULT operation. */
2520 int have_mult = 0;
2521 int swap_i2i3 = 0;
2522 int changed_i3_dest = 0;
2523
2524 int maxreg;
2525 rtx temp;
2526 rtx link;
2527 rtx other_pat = 0;
2528 rtx new_other_notes;
2529 int i;
2530
2531 /* Only try four-insn combinations when there's high likelihood of
2532 success. Look for simple insns, such as loads of constants or
2533 binary operations involving a constant. */
2534 if (i0)
2535 {
2536 int i;
2537 int ngood = 0;
2538 int nshift = 0;
2539
2540 if (!flag_expensive_optimizations)
2541 return 0;
2542
2543 for (i = 0; i < 4; i++)
2544 {
2545 rtx insn = i == 0 ? i0 : i == 1 ? i1 : i == 2 ? i2 : i3;
2546 rtx set = single_set (insn);
2547 rtx src;
2548 if (!set)
2549 continue;
2550 src = SET_SRC (set);
2551 if (CONSTANT_P (src))
2552 {
2553 ngood += 2;
2554 break;
2555 }
2556 else if (BINARY_P (src) && CONSTANT_P (XEXP (src, 1)))
2557 ngood++;
2558 else if (GET_CODE (src) == ASHIFT || GET_CODE (src) == ASHIFTRT
2559 || GET_CODE (src) == LSHIFTRT)
2560 nshift++;
2561 }
2562 if (ngood < 2 && nshift < 2)
2563 return 0;
2564 }
2565
2566 /* Exit early if one of the insns involved can't be used for
2567 combinations. */
2568 if (cant_combine_insn_p (i3)
2569 || cant_combine_insn_p (i2)
2570 || (i1 && cant_combine_insn_p (i1))
2571 || (i0 && cant_combine_insn_p (i0))
2572 || likely_spilled_retval_p (i3))
2573 return 0;
2574
2575 combine_attempts++;
2576 undobuf.other_insn = 0;
2577
2578 /* Reset the hard register usage information. */
2579 CLEAR_HARD_REG_SET (newpat_used_regs);
2580
2581 if (dump_file && (dump_flags & TDF_DETAILS))
2582 {
2583 if (i0)
2584 fprintf (dump_file, "\nTrying %d, %d, %d -> %d:\n",
2585 INSN_UID (i0), INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
2586 else if (i1)
2587 fprintf (dump_file, "\nTrying %d, %d -> %d:\n",
2588 INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
2589 else
2590 fprintf (dump_file, "\nTrying %d -> %d:\n",
2591 INSN_UID (i2), INSN_UID (i3));
2592 }
2593
2594 /* If multiple insns feed into one of I2 or I3, they can be in any
2595 order. To simplify the code below, reorder them in sequence. */
2596 if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i2))
2597 temp = i2, i2 = i0, i0 = temp;
2598 if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i1))
2599 temp = i1, i1 = i0, i0 = temp;
2600 if (i1 && DF_INSN_LUID (i1) > DF_INSN_LUID (i2))
2601 temp = i1, i1 = i2, i2 = temp;
2602
2603 added_links_insn = 0;
2604
2605 /* First check for one important special case that the code below will
2606 not handle. Namely, the case where I1 is zero, I2 is a PARALLEL
2607 and I3 is a SET whose SET_SRC is a SET_DEST in I2. In that case,
2608 we may be able to replace that destination with the destination of I3.
2609 This occurs in the common code where we compute both a quotient and
2610 remainder into a structure, in which case we want to do the computation
2611 directly into the structure to avoid register-register copies.
2612
2613 Note that this case handles both multiple sets in I2 and also cases
2614 where I2 has a number of CLOBBERs inside the PARALLEL.
2615
2616 We make very conservative checks below and only try to handle the
2617 most common cases of this. For example, we only handle the case
2618 where I2 and I3 are adjacent to avoid making difficult register
2619 usage tests. */
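  /* As a hypothetical illustration (not taken from any port), I2 and I3
     might look like:

	 (parallel [(set (reg:SI 100) (div:SI (reg:SI 98) (reg:SI 99)))
		    (set (reg:SI 101) (mod:SI (reg:SI 98) (reg:SI 99)))])
	 (set (mem:SI (reg:SI 102)) (reg:SI 101))

     in which case the (reg:SI 101) destination inside I2 is replaced by
     (mem:SI (reg:SI 102)) and the modified PARALLEL becomes the new
     pattern for I3.  */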
2620
2621 if (i1 == 0 && NONJUMP_INSN_P (i3) && GET_CODE (PATTERN (i3)) == SET
2622 && REG_P (SET_SRC (PATTERN (i3)))
2623 && REGNO (SET_SRC (PATTERN (i3))) >= FIRST_PSEUDO_REGISTER
2624 && find_reg_note (i3, REG_DEAD, SET_SRC (PATTERN (i3)))
2625 && GET_CODE (PATTERN (i2)) == PARALLEL
2626 && ! side_effects_p (SET_DEST (PATTERN (i3)))
2627 /* If the dest of I3 is a ZERO_EXTRACT or STRICT_LOW_PART, the code
2628 below would need to check what is inside (and reg_overlap_mentioned_p
2629 doesn't support those codes anyway). Don't allow those destinations;
2630 the resulting insn isn't likely to be recognized anyway. */
2631 && GET_CODE (SET_DEST (PATTERN (i3))) != ZERO_EXTRACT
2632 && GET_CODE (SET_DEST (PATTERN (i3))) != STRICT_LOW_PART
2633 && ! reg_overlap_mentioned_p (SET_SRC (PATTERN (i3)),
2634 SET_DEST (PATTERN (i3)))
2635 && next_active_insn (i2) == i3)
2636 {
2637 rtx p2 = PATTERN (i2);
2638
2639 /* Make sure that the destination of I3,
2640 which we are going to substitute into one output of I2,
2641 is not used within another output of I2. We must avoid making this:
2642 (parallel [(set (mem (reg 69)) ...)
2643 (set (reg 69) ...)])
2644 which is not well-defined as to order of actions.
2645 (Besides, reload can't handle output reloads for this.)
2646
2647 The problem can also happen if the dest of I3 is a memory ref,
2648 if another dest in I2 is an indirect memory ref. */
2649 for (i = 0; i < XVECLEN (p2, 0); i++)
2650 if ((GET_CODE (XVECEXP (p2, 0, i)) == SET
2651 || GET_CODE (XVECEXP (p2, 0, i)) == CLOBBER)
2652 && reg_overlap_mentioned_p (SET_DEST (PATTERN (i3)),
2653 SET_DEST (XVECEXP (p2, 0, i))))
2654 break;
2655
2656 if (i == XVECLEN (p2, 0))
2657 for (i = 0; i < XVECLEN (p2, 0); i++)
2658 if (GET_CODE (XVECEXP (p2, 0, i)) == SET
2659 && SET_DEST (XVECEXP (p2, 0, i)) == SET_SRC (PATTERN (i3)))
2660 {
2661 combine_merges++;
2662
2663 subst_insn = i3;
2664 subst_low_luid = DF_INSN_LUID (i2);
2665
2666 added_sets_2 = added_sets_1 = added_sets_0 = 0;
2667 i2src = SET_SRC (XVECEXP (p2, 0, i));
2668 i2dest = SET_DEST (XVECEXP (p2, 0, i));
2669 i2dest_killed = dead_or_set_p (i2, i2dest);
2670
2671 /* Replace the dest in I2 with our dest and make the resulting
2672 insn the new pattern for I3. Then skip to where we validate
2673 the pattern. Everything was set up above. */
2674 SUBST (SET_DEST (XVECEXP (p2, 0, i)), SET_DEST (PATTERN (i3)));
2675 newpat = p2;
2676 i3_subst_into_i2 = 1;
2677 goto validate_replacement;
2678 }
2679 }
2680
2681 /* If I2 is setting a pseudo to a constant and I3 is setting some
2682 sub-part of it to another constant, merge them by making a new
2683 constant. */
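  /* A small worked example, for illustration only: if I2 is
     (set (reg:DI 100) (const_int 0)) and I3 is
     (set (subreg:SI (reg:DI 100) 0) (const_int 0x1234)) on a little-endian
     target, then WIDTH is 32 and OFFSET is 0, and the code below produces
     (set (reg:DI 100) (const_int 0x1234)) as the merged pattern.  */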
2684 if (i1 == 0
2685 && (temp = single_set (i2)) != 0
2686 && (CONST_INT_P (SET_SRC (temp))
2687 || GET_CODE (SET_SRC (temp)) == CONST_DOUBLE)
2688 && GET_CODE (PATTERN (i3)) == SET
2689 && (CONST_INT_P (SET_SRC (PATTERN (i3)))
2690 || GET_CODE (SET_SRC (PATTERN (i3))) == CONST_DOUBLE)
2691 && reg_subword_p (SET_DEST (PATTERN (i3)), SET_DEST (temp)))
2692 {
2693 rtx dest = SET_DEST (PATTERN (i3));
2694 int offset = -1;
2695 int width = 0;
2696
2697 if (GET_CODE (dest) == ZERO_EXTRACT)
2698 {
2699 if (CONST_INT_P (XEXP (dest, 1))
2700 && CONST_INT_P (XEXP (dest, 2)))
2701 {
2702 width = INTVAL (XEXP (dest, 1));
2703 offset = INTVAL (XEXP (dest, 2));
2704 dest = XEXP (dest, 0);
2705 if (BITS_BIG_ENDIAN)
2706 offset = GET_MODE_BITSIZE (GET_MODE (dest)) - width - offset;
2707 }
2708 }
2709 else
2710 {
2711 if (GET_CODE (dest) == STRICT_LOW_PART)
2712 dest = XEXP (dest, 0);
2713 width = GET_MODE_BITSIZE (GET_MODE (dest));
2714 offset = 0;
2715 }
2716
2717 if (offset >= 0)
2718 {
2719 /* If this is the low part, we're done. */
2720 if (subreg_lowpart_p (dest))
2721 ;
2722 /* Handle the case where inner is twice the size of outer. */
2723 else if (GET_MODE_BITSIZE (GET_MODE (SET_DEST (temp)))
2724 == 2 * GET_MODE_BITSIZE (GET_MODE (dest)))
2725 offset += GET_MODE_BITSIZE (GET_MODE (dest));
2726 /* Otherwise give up for now. */
2727 else
2728 offset = -1;
2729 }
2730
2731 if (offset >= 0
2732 && (GET_MODE_BITSIZE (GET_MODE (SET_DEST (temp)))
2733 <= HOST_BITS_PER_DOUBLE_INT))
2734 {
2735 double_int m, o, i;
2736 rtx inner = SET_SRC (PATTERN (i3));
2737 rtx outer = SET_SRC (temp);
2738
2739 o = rtx_to_double_int (outer);
2740 i = rtx_to_double_int (inner);
2741
2742 m = double_int_mask (width);
2743 i = double_int_and (i, m);
2744 m = double_int_lshift (m, offset, HOST_BITS_PER_DOUBLE_INT, false);
2745 i = double_int_lshift (i, offset, HOST_BITS_PER_DOUBLE_INT, false);
2746 o = double_int_ior (double_int_and_not (o, m), i);
2747
2748 combine_merges++;
2749 subst_insn = i3;
2750 subst_low_luid = DF_INSN_LUID (i2);
2751 added_sets_2 = added_sets_1 = added_sets_0 = 0;
2752 i2dest = SET_DEST (temp);
2753 i2dest_killed = dead_or_set_p (i2, i2dest);
2754
2755 /* Replace the source in I2 with the new constant and make the
2756 resulting insn the new pattern for I3. Then skip to where we
2757 validate the pattern. Everything was set up above. */
2758 SUBST (SET_SRC (temp),
2759 immed_double_int_const (o, GET_MODE (SET_DEST (temp))));
2760
2761 newpat = PATTERN (i2);
2762
2763 /* The dest of I3 has been replaced with the dest of I2. */
2764 changed_i3_dest = 1;
2765 goto validate_replacement;
2766 }
2767 }
2768
2769 #ifndef HAVE_cc0
2770 /* If we have no I1 and I2 looks like:
2771 (parallel [(set (reg:CC X) (compare:CC OP (const_int 0)))
2772 (set Y OP)])
2773 make up a dummy I1 that is
2774 (set Y OP)
2775 and change I2 to be
2776 (set (reg:CC X) (compare:CC Y (const_int 0)))
2777
2778 (We can ignore any trailing CLOBBERs.)
2779
2780 This undoes a previous combination and allows us to match a branch-and-
2781 decrement insn. */
2782
2783 if (i1 == 0 && GET_CODE (PATTERN (i2)) == PARALLEL
2784 && XVECLEN (PATTERN (i2), 0) >= 2
2785 && GET_CODE (XVECEXP (PATTERN (i2), 0, 0)) == SET
2786 && (GET_MODE_CLASS (GET_MODE (SET_DEST (XVECEXP (PATTERN (i2), 0, 0))))
2787 == MODE_CC)
2788 && GET_CODE (SET_SRC (XVECEXP (PATTERN (i2), 0, 0))) == COMPARE
2789 && XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 1) == const0_rtx
2790 && GET_CODE (XVECEXP (PATTERN (i2), 0, 1)) == SET
2791 && REG_P (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)))
2792 && rtx_equal_p (XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 0),
2793 SET_SRC (XVECEXP (PATTERN (i2), 0, 1))))
2794 {
2795 for (i = XVECLEN (PATTERN (i2), 0) - 1; i >= 2; i--)
2796 if (GET_CODE (XVECEXP (PATTERN (i2), 0, i)) != CLOBBER)
2797 break;
2798
2799 if (i == 1)
2800 {
2801 /* We make I1 with the same INSN_UID as I2. This gives it
2802 the same DF_INSN_LUID for value tracking. Our fake I1 will
2803 never appear in the insn stream so giving it the same INSN_UID
2804 as I2 will not cause a problem. */
2805
2806 i1 = gen_rtx_INSN (VOIDmode, INSN_UID (i2), NULL_RTX, i2,
2807 BLOCK_FOR_INSN (i2), XVECEXP (PATTERN (i2), 0, 1),
2808 INSN_LOCATOR (i2), -1, NULL_RTX);
2809
2810 SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 0));
2811 SUBST (XEXP (SET_SRC (PATTERN (i2)), 0),
2812 SET_DEST (PATTERN (i1)));
2813 }
2814 }
2815 #endif
2816
2817 /* Verify that I2 and I1 are valid for combining. */
2818 if (! can_combine_p (i2, i3, i0, i1, NULL_RTX, NULL_RTX, &i2dest, &i2src)
2819 || (i1 && ! can_combine_p (i1, i3, i0, NULL_RTX, i2, NULL_RTX,
2820 &i1dest, &i1src))
2821 || (i0 && ! can_combine_p (i0, i3, NULL_RTX, NULL_RTX, i1, i2,
2822 &i0dest, &i0src)))
2823 {
2824 undo_all ();
2825 return 0;
2826 }
2827
2828 /* Record whether I2DEST is used in I2SRC and similarly for the other
2829 cases. Knowing this will help in register status updating below. */
2830 i2dest_in_i2src = reg_overlap_mentioned_p (i2dest, i2src);
2831 i1dest_in_i1src = i1 && reg_overlap_mentioned_p (i1dest, i1src);
2832 i2dest_in_i1src = i1 && reg_overlap_mentioned_p (i2dest, i1src);
2833 i0dest_in_i0src = i0 && reg_overlap_mentioned_p (i0dest, i0src);
2834 i1dest_in_i0src = i0 && reg_overlap_mentioned_p (i1dest, i0src);
2835 i2dest_in_i0src = i0 && reg_overlap_mentioned_p (i2dest, i0src);
2836 i2dest_killed = dead_or_set_p (i2, i2dest);
2837 i1dest_killed = i1 && dead_or_set_p (i1, i1dest);
2838 i0dest_killed = i0 && dead_or_set_p (i0, i0dest);
2839
2840 /* For the earlier insns, determine which of the subsequent ones they
2841 feed. */
2842 i1_feeds_i2_n = i1 && insn_a_feeds_b (i1, i2);
2843 i0_feeds_i1_n = i0 && insn_a_feeds_b (i0, i1);
2844 i0_feeds_i2_n = (i0 && (!i0_feeds_i1_n ? insn_a_feeds_b (i0, i2)
2845 : (!reg_overlap_mentioned_p (i1dest, i0dest)
2846 && reg_overlap_mentioned_p (i0dest, i2src))));
2847
2848 /* Ensure that I3's pattern can be the destination of combines. */
2849 if (! combinable_i3pat (i3, &PATTERN (i3), i2dest, i1dest, i0dest,
2850 i1 && i2dest_in_i1src && !i1_feeds_i2_n,
2851 i0 && ((i2dest_in_i0src && !i0_feeds_i2_n)
2852 || (i1dest_in_i0src && !i0_feeds_i1_n)),
2853 &i3dest_killed))
2854 {
2855 undo_all ();
2856 return 0;
2857 }
2858
2859 /* See if any of the insns is a MULT operation. Unless one is, we will
2860 reject a combination whose result is a MULT, since it must be slower.
2861 Be conservative here. */
2862 if (GET_CODE (i2src) == MULT
2863 || (i1 != 0 && GET_CODE (i1src) == MULT)
2864 || (i0 != 0 && GET_CODE (i0src) == MULT)
2865 || (GET_CODE (PATTERN (i3)) == SET
2866 && GET_CODE (SET_SRC (PATTERN (i3))) == MULT))
2867 have_mult = 1;
2868
2869 /* If I3 has an inc, then give up if I1 or I2 uses the reg that is inc'd.
2870 We used to do this EXCEPT in one case: I3 has a post-inc in an
2871 output operand. However, that exception can give rise to insns like
2872 mov r3,(r3)+
2873 which is a famous insn on the PDP-11 where the value of r3 used as the
2874 source was model-dependent. Avoid this sort of thing. */
2875
2876 #if 0
2877 if (!(GET_CODE (PATTERN (i3)) == SET
2878 && REG_P (SET_SRC (PATTERN (i3)))
2879 && MEM_P (SET_DEST (PATTERN (i3)))
2880 && (GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_INC
2881 || GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_DEC)))
2882 /* It's not the exception. */
2883 #endif
2884 #ifdef AUTO_INC_DEC
2885 for (link = REG_NOTES (i3); link; link = XEXP (link, 1))
2886 if (REG_NOTE_KIND (link) == REG_INC
2887 && (reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i2))
2888 || (i1 != 0
2889 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i1)))))
2890 {
2891 undo_all ();
2892 return 0;
2893 }
2894 #endif
2895
2896 /* See if the SETs in I1 or I2 need to be kept around in the merged
2897 instruction: whenever the value set there is still needed past I3.
2898 For the SETs in I2, this is easy: we see if I2DEST dies or is set in I3.
2899
2900 For the SET in I1, we have two cases: If I1 and I2 independently
2901 feed into I3, the set in I1 needs to be kept around if I1DEST dies
2902 or is set in I3. Otherwise (if I1 feeds I2 which feeds I3), the set
2903 in I1 needs to be kept around unless I1DEST dies or is set in either
2904 I2 or I3. The same consideration applies to I0. */
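  /* For example (illustrative only): if I2 is
     (set (reg:SI 101) (plus:SI (reg:SI 100) (const_int 4))) and
     (reg:SI 101) is still live after I3, ADDED_SETS_2 is set below and a
     copy of I2's SET is retained in the PARALLEL built for the combined
     insn.  */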
2905
2906 added_sets_2 = !dead_or_set_p (i3, i2dest);
2907
2908 if (i1)
2909 added_sets_1 = !(dead_or_set_p (i3, i1dest)
2910 || (i1_feeds_i2_n && dead_or_set_p (i2, i1dest)));
2911 else
2912 added_sets_1 = 0;
2913
2914 if (i0)
2915 added_sets_0 = !(dead_or_set_p (i3, i0dest)
2916 || (i0_feeds_i2_n && dead_or_set_p (i2, i0dest))
2917 || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest)));
2918 else
2919 added_sets_0 = 0;
2920
2921 /* We are about to copy insns for the case where they need to be kept
2922 around. Check that they can be copied in the merged instruction. */
2923
2924 if (targetm.cannot_copy_insn_p
2925 && ((added_sets_2 && targetm.cannot_copy_insn_p (i2))
2926 || (i1 && added_sets_1 && targetm.cannot_copy_insn_p (i1))
2927 || (i0 && added_sets_0 && targetm.cannot_copy_insn_p (i0))))
2928 {
2929 undo_all ();
2930 return 0;
2931 }
2932
2933 /* If the set in I2 needs to be kept around, we must make a copy of
2934 PATTERN (I2), so that when we substitute I1SRC for I1DEST in
2935 PATTERN (I2), we are only substituting for the original I1DEST, not into
2936 an already-substituted copy. This also prevents making self-referential
2937 rtx. If I2 is a PARALLEL, we just need the piece that assigns I2SRC to
2938 I2DEST. */
2939
2940 if (added_sets_2)
2941 {
2942 if (GET_CODE (PATTERN (i2)) == PARALLEL)
2943 i2pat = gen_rtx_SET (VOIDmode, i2dest, copy_rtx (i2src));
2944 else
2945 i2pat = copy_rtx (PATTERN (i2));
2946 }
2947
2948 if (added_sets_1)
2949 {
2950 if (GET_CODE (PATTERN (i1)) == PARALLEL)
2951 i1pat = gen_rtx_SET (VOIDmode, i1dest, copy_rtx (i1src));
2952 else
2953 i1pat = copy_rtx (PATTERN (i1));
2954 }
2955
2956 if (added_sets_0)
2957 {
2958 if (GET_CODE (PATTERN (i0)) == PARALLEL)
2959 i0pat = gen_rtx_SET (VOIDmode, i0dest, copy_rtx (i0src));
2960 else
2961 i0pat = copy_rtx (PATTERN (i0));
2962 }
2963
2964 combine_merges++;
2965
2966 /* Substitute in the latest insn for the regs set by the earlier ones. */
2967
2968 maxreg = max_reg_num ();
2969
2970 subst_insn = i3;
2971
2972 #ifndef HAVE_cc0
2973 /* Many machines that don't use CC0 have insns that can both perform an
2974 arithmetic operation and set the condition code. These operations will
2975 be represented as a PARALLEL with the first element of the vector
2976 being a COMPARE of an arithmetic operation with the constant zero.
2977 The second element of the vector will set some pseudo to the result
2978 of the same arithmetic operation. If we simplify the COMPARE, we won't
2979 match such a pattern and so will generate an extra insn. Here we test
2980 for this case, where both the comparison and the operation result are
2981 needed, and make the PARALLEL by just replacing I2DEST in I3SRC with
2982 I2SRC. Later we will make the PARALLEL that contains I2. */
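     /* Such a compare-and-arithmetic insn might look like this, purely as an
	illustration and not taken from any machine description:

	    (parallel [(set (reg:CC 17)
			    (compare:CC (plus:SI (reg:SI 100) (reg:SI 101))
					(const_int 0)))
		       (set (reg:SI 102)
			    (plus:SI (reg:SI 100) (reg:SI 101)))])  */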
2983
2984 if (i1 == 0 && added_sets_2 && GET_CODE (PATTERN (i3)) == SET
2985 && GET_CODE (SET_SRC (PATTERN (i3))) == COMPARE
2986 && XEXP (SET_SRC (PATTERN (i3)), 1) == const0_rtx
2987 && rtx_equal_p (XEXP (SET_SRC (PATTERN (i3)), 0), i2dest))
2988 {
2989 #ifdef SELECT_CC_MODE
2990 rtx *cc_use;
2991 enum machine_mode compare_mode;
2992 #endif
2993
2994 newpat = PATTERN (i3);
2995 SUBST (XEXP (SET_SRC (newpat), 0), i2src);
2996
2997 i2_is_used = 1;
2998
2999 #ifdef SELECT_CC_MODE
3000 /* See if a COMPARE with the operand we substituted in should be done
3001 with the mode that is currently being used. If not, do the same
3002 processing we do in `subst' for a SET; namely, if the destination
3003 is used only once, try to replace it with a register of the proper
3004 mode and also replace the COMPARE. */
3005 if (undobuf.other_insn == 0
3006 && (cc_use = find_single_use (SET_DEST (newpat), i3,
3007 &undobuf.other_insn))
3008 && ((compare_mode = SELECT_CC_MODE (GET_CODE (*cc_use),
3009 i2src, const0_rtx))
3010 != GET_MODE (SET_DEST (newpat))))
3011 {
3012 if (can_change_dest_mode (SET_DEST (newpat), added_sets_2,
3013 compare_mode))
3014 {
3015 unsigned int regno = REGNO (SET_DEST (newpat));
3016 rtx new_dest;
3017
3018 if (regno < FIRST_PSEUDO_REGISTER)
3019 new_dest = gen_rtx_REG (compare_mode, regno);
3020 else
3021 {
3022 SUBST_MODE (regno_reg_rtx[regno], compare_mode);
3023 new_dest = regno_reg_rtx[regno];
3024 }
3025
3026 SUBST (SET_DEST (newpat), new_dest);
3027 SUBST (XEXP (*cc_use, 0), new_dest);
3028 SUBST (SET_SRC (newpat),
3029 gen_rtx_COMPARE (compare_mode, i2src, const0_rtx));
3030 }
3031 else
3032 undobuf.other_insn = 0;
3033 }
3034 #endif
3035 }
3036 else
3037 #endif
3038 {
3039 /* It is possible that the source of I2 or I1 may be performing
3040 an unneeded operation, such as a ZERO_EXTEND of something
3041 that is known to have the high part zero. Handle that case
3042 by letting subst look at the innermost one of them.
3043
3044 Another way to do this would be to have a function that tries
3045 to simplify a single insn instead of merging two or more
3046 insns. We don't do this because of the potential of infinite
3047 loops and because of the potential extra memory required.
3048 However, doing it the way we are is a bit of a kludge and
3049 doesn't catch all cases.
3050
3051 But only do this if -fexpensive-optimizations since it slows
3052 things down and doesn't usually win.
3053
3054 This is not done in the COMPARE case above because the
3055 unmodified I2PAT is used in the PARALLEL and so a pattern
3056 with a modified I2SRC would not match. */
3057
3058 if (flag_expensive_optimizations)
3059 {
3060 /* Pass pc_rtx so no substitutions are done, just
3061 simplifications. */
3062 if (i1)
3063 {
3064 subst_low_luid = DF_INSN_LUID (i1);
3065 i1src = subst (i1src, pc_rtx, pc_rtx, 0, 0);
3066 }
3067 else
3068 {
3069 subst_low_luid = DF_INSN_LUID (i2);
3070 i2src = subst (i2src, pc_rtx, pc_rtx, 0, 0);
3071 }
3072 }
3073
3074 n_occurrences = 0; /* `subst' counts here */
3075
3076 /* If I1 feeds into I2 and I1DEST is in I1SRC, we need to make a
3077 unique copy of I2SRC each time we substitute it to avoid
3078 self-referential rtl. */
3079
3080 subst_low_luid = DF_INSN_LUID (i2);
3081 newpat = subst (PATTERN (i3), i2dest, i2src, 0,
3082 ((i1_feeds_i2_n && i1dest_in_i1src)
3083 || (i0_feeds_i2_n && i0dest_in_i0src)));
3084 substed_i2 = 1;
3085
3086 /* Record whether i2's body now appears within i3's body. */
3087 i2_is_used = n_occurrences;
3088 }
3089
3090 /* If we already got a failure, don't try to do more. Otherwise,
3091 try to substitute in I1 if we have it. */
3092
3093 if (i1 && GET_CODE (newpat) != CLOBBER)
3094 {
3095 /* Check that an autoincrement side-effect on I1 has not been lost.
3096 This happens if I1DEST is mentioned in I2 and dies there, and
3097 has disappeared from the new pattern. */
3098 if ((FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
3099 && i1_feeds_i2_n
3100 && dead_or_set_p (i2, i1dest)
3101 && !reg_overlap_mentioned_p (i1dest, newpat))
3102 /* Before we can do this substitution, we must redo the test done
3103 above (see detailed comments there) that ensures that I1DEST
3104 isn't mentioned in any SETs in NEWPAT that are field assignments. */
3105 || !combinable_i3pat (NULL_RTX, &newpat, i1dest, NULL_RTX, NULL_RTX,
3106 0, 0, 0))
3107 {
3108 undo_all ();
3109 return 0;
3110 }
3111
3112 n_occurrences = 0;
3113 subst_low_luid = DF_INSN_LUID (i1);
3114 newpat = subst (newpat, i1dest, i1src, 0,
3115 i0_feeds_i1_n && i0dest_in_i0src);
3116 substed_i1 = 1;
3117 i1_is_used = n_occurrences;
3118 }
3119 if (i0 && GET_CODE (newpat) != CLOBBER)
3120 {
3121 if ((FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
3122 && ((i0_feeds_i2_n && dead_or_set_p (i2, i0dest))
3123 || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest)))
3124 && !reg_overlap_mentioned_p (i0dest, newpat))
3125 || !combinable_i3pat (NULL_RTX, &newpat, i0dest, NULL_RTX, NULL_RTX,
3126 0, 0, 0))
3127 {
3128 undo_all ();
3129 return 0;
3130 }
3131
3132 /* The following subst may modify i1src; make a copy of it
3133 beforehand in case it is needed for added_sets_2 handling. */
3134 if (i0_feeds_i1_n && added_sets_2 && i1_feeds_i2_n)
3135 i1src_copy = copy_rtx (i1src);
3136
3137 n_occurrences = 0;
3138 subst_low_luid = DF_INSN_LUID (i0);
3139 newpat = subst (newpat, i0dest, i0src, 0,
3140 i0_feeds_i1_n && i0dest_in_i0src);
3141 substed_i0 = 1;
3142 }
3143
3144 /* Fail if an autoincrement side-effect has been duplicated. Be careful
3145 to count all the ways that I2SRC and I1SRC can be used. */
3146 if ((FIND_REG_INC_NOTE (i2, NULL_RTX) != 0
3147 && i2_is_used + added_sets_2 > 1)
3148 || (i1 != 0 && FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
3149 && (i1_is_used + added_sets_1 + (added_sets_2 && i1_feeds_i2_n)
3150 > 1))
3151 || (i0 != 0 && FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
3152 && (n_occurrences + added_sets_0
3153 + (added_sets_1 && i0_feeds_i1_n)
3154 + (added_sets_2 && i0_feeds_i2_n)
3155 > 1))
3156 /* Fail if we tried to make a new register. */
3157 || max_reg_num () != maxreg
3158 /* Fail if we couldn't do something and have a CLOBBER. */
3159 || GET_CODE (newpat) == CLOBBER
3160 /* Fail if this new pattern is a MULT and we didn't have one before
3161 at the outer level. */
3162 || (GET_CODE (newpat) == SET && GET_CODE (SET_SRC (newpat)) == MULT
3163 && ! have_mult))
3164 {
3165 undo_all ();
3166 return 0;
3167 }
3168
3169 /* If the actions of the earlier insns must be kept
3170 in addition to substituting them into the latest one,
3171 we must make a new PARALLEL for the latest insn
3172 to hold the additional SETs. */
3173
3174 if (added_sets_0 || added_sets_1 || added_sets_2)
3175 {
3176 int extra_sets = added_sets_0 + added_sets_1 + added_sets_2;
3177 combine_extras++;
3178
3179 if (GET_CODE (newpat) == PARALLEL)
3180 {
3181 rtvec old = XVEC (newpat, 0);
3182 total_sets = XVECLEN (newpat, 0) + extra_sets;
3183 newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
3184 memcpy (XVEC (newpat, 0)->elem, &old->elem[0],
3185 sizeof (old->elem[0]) * old->num_elem);
3186 }
3187 else
3188 {
3189 rtx old = newpat;
3190 total_sets = 1 + extra_sets;
3191 newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
3192 XVECEXP (newpat, 0, 0) = old;
3193 }
3194
3195 if (added_sets_0)
3196 XVECEXP (newpat, 0, --total_sets) = i0pat;
3197
3198 if (added_sets_1)
3199 {
3200 rtx t = i1pat;
3201 if (i0_feeds_i1_n)
3202 t = subst (t, i0dest, i0src, 0, 0);
3203
3204 XVECEXP (newpat, 0, --total_sets) = t;
3205 }
3206 if (added_sets_2)
3207 {
3208 rtx t = i2pat;
3209 if (i1_feeds_i2_n)
3210 t = subst (t, i1dest, i1src_copy ? i1src_copy : i1src, 0,
3211 i0_feeds_i1_n && i0dest_in_i0src);
3212 if ((i0_feeds_i1_n && i1_feeds_i2_n) || i0_feeds_i2_n)
3213 t = subst (t, i0dest, i0src, 0, 0);
3214
3215 XVECEXP (newpat, 0, --total_sets) = t;
3216 }
3217 }
3218
3219 validate_replacement:
3220
3221 /* Note which hard regs this insn has as inputs. */
3222 mark_used_regs_combine (newpat);
3223
3224 /* If recog_for_combine fails, it strips existing clobbers. If we'll
3225 consider splitting this pattern, we might need these clobbers. */
3226 if (i1 && GET_CODE (newpat) == PARALLEL
3227 && GET_CODE (XVECEXP (newpat, 0, XVECLEN (newpat, 0) - 1)) == CLOBBER)
3228 {
3229 int len = XVECLEN (newpat, 0);
3230
3231 newpat_vec_with_clobbers = rtvec_alloc (len);
3232 for (i = 0; i < len; i++)
3233 RTVEC_ELT (newpat_vec_with_clobbers, i) = XVECEXP (newpat, 0, i);
3234 }
3235
3236 /* Is the result of combination a valid instruction? */
3237 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3238
3239 /* If the result isn't valid, see if it is a PARALLEL of two SETs where
3240 the second SET's destination is a register that is unused and isn't
3241 marked as an instruction that might trap in an EH region. In that case,
3242 we just need the first SET. This can occur when simplifying a divmod
3243 insn. We *must* test for this case here because the code below that
3244 splits two independent SETs doesn't handle this case correctly when it
3245 updates the register status.
3246
3247 It's pointless doing this if we originally had two sets, one from
3248 i3, and one from i2. Combining then splitting the parallel results
3249 in the original i2 again plus an invalid insn (which we delete).
3250 The net effect is only to move instructions around, which makes
3251 debug info less accurate.
3252
3253 Also check the case where the first SET's destination is unused.
3254 That would not cause incorrect code, but does cause an unneeded
3255 insn to remain. */
3256
3257 if (insn_code_number < 0
3258 && !(added_sets_2 && i1 == 0)
3259 && GET_CODE (newpat) == PARALLEL
3260 && XVECLEN (newpat, 0) == 2
3261 && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
3262 && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
3263 && asm_noperands (newpat) < 0)
3264 {
3265 rtx set0 = XVECEXP (newpat, 0, 0);
3266 rtx set1 = XVECEXP (newpat, 0, 1);
3267
3268 if (((REG_P (SET_DEST (set1))
3269 && find_reg_note (i3, REG_UNUSED, SET_DEST (set1)))
3270 || (GET_CODE (SET_DEST (set1)) == SUBREG
3271 && find_reg_note (i3, REG_UNUSED, SUBREG_REG (SET_DEST (set1)))))
3272 && insn_nothrow_p (i3)
3273 && !side_effects_p (SET_SRC (set1)))
3274 {
3275 newpat = set0;
3276 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3277 }
3278
3279 else if (((REG_P (SET_DEST (set0))
3280 && find_reg_note (i3, REG_UNUSED, SET_DEST (set0)))
3281 || (GET_CODE (SET_DEST (set0)) == SUBREG
3282 && find_reg_note (i3, REG_UNUSED,
3283 SUBREG_REG (SET_DEST (set0)))))
3284 && insn_nothrow_p (i3)
3285 && !side_effects_p (SET_SRC (set0)))
3286 {
3287 newpat = set1;
3288 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3289
3290 if (insn_code_number >= 0)
3291 changed_i3_dest = 1;
3292 }
3293 }
3294
3295 /* If we were combining three insns and the result is a simple SET
3296 with no ASM_OPERANDS that wasn't recognized, try to split it into two
3297 insns. There are two ways to do this. It can be split using a
3298 machine-specific method (like when you have an addition of a large
3299 constant) or by combine in the function find_split_point. */
3300
3301 if (i1 && insn_code_number < 0 && GET_CODE (newpat) == SET
3302 && asm_noperands (newpat) < 0)
3303 {
3304 rtx parallel, m_split, *split;
3305
3306 /* See if the MD file can split NEWPAT. If it can't, see if letting it
3307 use I2DEST as a scratch register will help. In the latter case,
3308 convert I2DEST to the mode of the source of NEWPAT if we can. */
3309
3310 m_split = combine_split_insns (newpat, i3);
3311
3312 /* We can only use I2DEST as a scratch reg if it doesn't overlap any
3313 inputs of NEWPAT. */
3314
3315 /* ??? If I2DEST is not safe, and I1DEST exists, then it would be
3316 possible to try that as a scratch reg. This would require adding
3317 more code to make it work though. */
3318
3319 if (m_split == 0 && ! reg_overlap_mentioned_p (i2dest, newpat))
3320 {
3321 enum machine_mode new_mode = GET_MODE (SET_DEST (newpat));
3322
3323 /* First try to split using the original register as a
3324 scratch register. */
3325 parallel = gen_rtx_PARALLEL (VOIDmode,
3326 gen_rtvec (2, newpat,
3327 gen_rtx_CLOBBER (VOIDmode,
3328 i2dest)));
3329 m_split = combine_split_insns (parallel, i3);
3330
3331 /* If that didn't work, try changing the mode of I2DEST if
3332 we can. */
3333 if (m_split == 0
3334 && new_mode != GET_MODE (i2dest)
3335 && new_mode != VOIDmode
3336 && can_change_dest_mode (i2dest, added_sets_2, new_mode))
3337 {
3338 enum machine_mode old_mode = GET_MODE (i2dest);
3339 rtx ni2dest;
3340
3341 if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
3342 ni2dest = gen_rtx_REG (new_mode, REGNO (i2dest));
3343 else
3344 {
3345 SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], new_mode);
3346 ni2dest = regno_reg_rtx[REGNO (i2dest)];
3347 }
3348
3349 parallel = (gen_rtx_PARALLEL
3350 (VOIDmode,
3351 gen_rtvec (2, newpat,
3352 gen_rtx_CLOBBER (VOIDmode,
3353 ni2dest))));
3354 m_split = combine_split_insns (parallel, i3);
3355
3356 if (m_split == 0
3357 && REGNO (i2dest) >= FIRST_PSEUDO_REGISTER)
3358 {
3359 struct undo *buf;
3360
3361 adjust_reg_mode (regno_reg_rtx[REGNO (i2dest)], old_mode);
3362 buf = undobuf.undos;
3363 undobuf.undos = buf->next;
3364 buf->next = undobuf.frees;
3365 undobuf.frees = buf;
3366 }
3367 }
3368
3369 i2scratch = m_split != 0;
3370 }
3371
3372 /* If recog_for_combine has discarded clobbers, try to use them
3373 again for the split. */
3374 if (m_split == 0 && newpat_vec_with_clobbers)
3375 {
3376 parallel = gen_rtx_PARALLEL (VOIDmode, newpat_vec_with_clobbers);
3377 m_split = combine_split_insns (parallel, i3);
3378 }
3379
3380 if (m_split && NEXT_INSN (m_split) == NULL_RTX)
3381 {
3382 m_split = PATTERN (m_split);
3383 insn_code_number = recog_for_combine (&m_split, i3, &new_i3_notes);
3384 if (insn_code_number >= 0)
3385 newpat = m_split;
3386 }
3387 else if (m_split && NEXT_INSN (NEXT_INSN (m_split)) == NULL_RTX
3388 && (next_real_insn (i2) == i3
3389 || ! use_crosses_set_p (PATTERN (m_split), DF_INSN_LUID (i2))))
3390 {
3391 rtx i2set, i3set;
3392 rtx newi3pat = PATTERN (NEXT_INSN (m_split));
3393 newi2pat = PATTERN (m_split);
3394
3395 i3set = single_set (NEXT_INSN (m_split));
3396 i2set = single_set (m_split);
3397
3398 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3399
3400 /* If I2 or I3 has multiple SETs, we won't know how to track
3401 register status, so don't use these insns. If I2's destination
3402 is used between I2 and I3, we also can't use these insns. */
3403
3404 if (i2_code_number >= 0 && i2set && i3set
3405 && (next_real_insn (i2) == i3
3406 || ! reg_used_between_p (SET_DEST (i2set), i2, i3)))
3407 insn_code_number = recog_for_combine (&newi3pat, i3,
3408 &new_i3_notes);
3409 if (insn_code_number >= 0)
3410 newpat = newi3pat;
3411
3412 /* It is possible that both insns now set the destination of I3.
3413 If so, we must show an extra use of it. */
3414
3415 if (insn_code_number >= 0)
3416 {
3417 rtx new_i3_dest = SET_DEST (i3set);
3418 rtx new_i2_dest = SET_DEST (i2set);
3419
3420 while (GET_CODE (new_i3_dest) == ZERO_EXTRACT
3421 || GET_CODE (new_i3_dest) == STRICT_LOW_PART
3422 || GET_CODE (new_i3_dest) == SUBREG)
3423 new_i3_dest = XEXP (new_i3_dest, 0);
3424
3425 while (GET_CODE (new_i2_dest) == ZERO_EXTRACT
3426 || GET_CODE (new_i2_dest) == STRICT_LOW_PART
3427 || GET_CODE (new_i2_dest) == SUBREG)
3428 new_i2_dest = XEXP (new_i2_dest, 0);
3429
3430 if (REG_P (new_i3_dest)
3431 && REG_P (new_i2_dest)
3432 && REGNO (new_i3_dest) == REGNO (new_i2_dest))
3433 INC_REG_N_SETS (REGNO (new_i2_dest), 1);
3434 }
3435 }
3436
3437 /* If we can split it and use I2DEST, go ahead and see if that
3438 helps things be recognized. Verify that none of the registers
3439 are set between I2 and I3. */
3440 if (insn_code_number < 0
3441 && (split = find_split_point (&newpat, i3, false)) != 0
3442 #ifdef HAVE_cc0
3443 && REG_P (i2dest)
3444 #endif
3445 /* We need I2DEST in the proper mode. If it is a hard register
3446 or the only use of a pseudo, we can change its mode.
3447 Make sure we don't change a hard register to have a mode that
3448 isn't valid for it, or change the number of registers. */
3449 && (GET_MODE (*split) == GET_MODE (i2dest)
3450 || GET_MODE (*split) == VOIDmode
3451 || can_change_dest_mode (i2dest, added_sets_2,
3452 GET_MODE (*split)))
3453 && (next_real_insn (i2) == i3
3454 || ! use_crosses_set_p (*split, DF_INSN_LUID (i2)))
3455 /* We can't overwrite I2DEST if its value is still used by
3456 NEWPAT. */
3457 && ! reg_referenced_p (i2dest, newpat))
3458 {
3459 rtx newdest = i2dest;
3460 enum rtx_code split_code = GET_CODE (*split);
3461 enum machine_mode split_mode = GET_MODE (*split);
3462 bool subst_done = false;
3463 newi2pat = NULL_RTX;
3464
3465 i2scratch = true;
3466
3467 /* *SPLIT may be part of I2SRC, so make sure we have the
3468 original expression around for later debug processing.
3469 We should not need I2SRC any more in other cases. */
3470 if (MAY_HAVE_DEBUG_INSNS)
3471 i2src = copy_rtx (i2src);
3472 else
3473 i2src = NULL;
3474
3475 /* Get NEWDEST as a register in the proper mode. We have already
3476 validated that we can do this. */
3477 if (GET_MODE (i2dest) != split_mode && split_mode != VOIDmode)
3478 {
3479 if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
3480 newdest = gen_rtx_REG (split_mode, REGNO (i2dest));
3481 else
3482 {
3483 SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], split_mode);
3484 newdest = regno_reg_rtx[REGNO (i2dest)];
3485 }
3486 }
3487
3488 /* If *SPLIT is a (mult FOO (const_int pow2)), convert it to
3489 an ASHIFT. This can occur if it was inside a PLUS and hence
3490 appeared to be a memory address. This is a kludge. */
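/* For instance, (mult X (const_int 8)) is rewritten here as
(ashift X (const_int 3)), since exact_log2 (8) == 3. */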
3491 if (split_code == MULT
3492 && CONST_INT_P (XEXP (*split, 1))
3493 && INTVAL (XEXP (*split, 1)) > 0
3494 && (i = exact_log2 (UINTVAL (XEXP (*split, 1)))) >= 0)
3495 {
3496 SUBST (*split, gen_rtx_ASHIFT (split_mode,
3497 XEXP (*split, 0), GEN_INT (i)));
3498 /* Update split_code because we may not have a multiply
3499 anymore. */
3500 split_code = GET_CODE (*split);
3501 }
3502
3503 #ifdef INSN_SCHEDULING
3504 /* If *SPLIT is a paradoxical SUBREG, when we split it, it should
3505 be written as a ZERO_EXTEND. */
3506 if (split_code == SUBREG && MEM_P (SUBREG_REG (*split)))
3507 {
3508 #ifdef LOAD_EXTEND_OP
3509 /* Or as a SIGN_EXTEND if LOAD_EXTEND_OP says that that's
3510 what it really is. */
3511 if (LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (*split)))
3512 == SIGN_EXTEND)
3513 SUBST (*split, gen_rtx_SIGN_EXTEND (split_mode,
3514 SUBREG_REG (*split)));
3515 else
3516 #endif
3517 SUBST (*split, gen_rtx_ZERO_EXTEND (split_mode,
3518 SUBREG_REG (*split)));
3519 }
3520 #endif
3521
3522 /* Attempt to split binary operators using arithmetic identities. */
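/* For example, X = ((A + B) + A) + B can be rewritten as the pair
T = A + B; X = T + T, which is what the cases below produce. */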
3523 if (BINARY_P (SET_SRC (newpat))
3524 && split_mode == GET_MODE (SET_SRC (newpat))
3525 && ! side_effects_p (SET_SRC (newpat)))
3526 {
3527 rtx setsrc = SET_SRC (newpat);
3528 enum machine_mode mode = GET_MODE (setsrc);
3529 enum rtx_code code = GET_CODE (setsrc);
3530 rtx src_op0 = XEXP (setsrc, 0);
3531 rtx src_op1 = XEXP (setsrc, 1);
3532
3533 /* Split "X = Y op Y" as "Z = Y; X = Z op Z". */
3534 if (rtx_equal_p (src_op0, src_op1))
3535 {
3536 newi2pat = gen_rtx_SET (VOIDmode, newdest, src_op0);
3537 SUBST (XEXP (setsrc, 0), newdest);
3538 SUBST (XEXP (setsrc, 1), newdest);
3539 subst_done = true;
3540 }
3541 /* Split "((P op Q) op R) op S" where op is PLUS or MULT. */
3542 else if ((code == PLUS || code == MULT)
3543 && GET_CODE (src_op0) == code
3544 && GET_CODE (XEXP (src_op0, 0)) == code
3545 && (INTEGRAL_MODE_P (mode)
3546 || (FLOAT_MODE_P (mode)
3547 && flag_unsafe_math_optimizations)))
3548 {
3549 rtx p = XEXP (XEXP (src_op0, 0), 0);
3550 rtx q = XEXP (XEXP (src_op0, 0), 1);
3551 rtx r = XEXP (src_op0, 1);
3552 rtx s = src_op1;
3553
3554 /* Split both "((X op Y) op X) op Y" and
3555 "((X op Y) op Y) op X" as "T op T" where T is
3556 "X op Y". */
3557 if ((rtx_equal_p (p,r) && rtx_equal_p (q,s))
3558 || (rtx_equal_p (p,s) && rtx_equal_p (q,r)))
3559 {
3560 newi2pat = gen_rtx_SET (VOIDmode, newdest,
3561 XEXP (src_op0, 0));
3562 SUBST (XEXP (setsrc, 0), newdest);
3563 SUBST (XEXP (setsrc, 1), newdest);
3564 subst_done = true;
3565 }
3566 /* Split "((X op X) op Y) op Y" as "T op T" where
3567 T is "X op Y". */
3568 else if (rtx_equal_p (p,q) && rtx_equal_p (r,s))
3569 {
3570 rtx tmp = simplify_gen_binary (code, mode, p, r);
3571 newi2pat = gen_rtx_SET (VOIDmode, newdest, tmp);
3572 SUBST (XEXP (setsrc, 0), newdest);
3573 SUBST (XEXP (setsrc, 1), newdest);
3574 subst_done = true;
3575 }
3576 }
3577 }
3578
3579 if (!subst_done)
3580 {
3581 newi2pat = gen_rtx_SET (VOIDmode, newdest, *split);
3582 SUBST (*split, newdest);
3583 }
3584
3585 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3586
3587 /* recog_for_combine might have added CLOBBERs to newi2pat.
3588 Make sure NEWPAT does not depend on the clobbered regs. */
3589 if (GET_CODE (newi2pat) == PARALLEL)
3590 for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
3591 if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
3592 {
3593 rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
3594 if (reg_overlap_mentioned_p (reg, newpat))
3595 {
3596 undo_all ();
3597 return 0;
3598 }
3599 }
3600
3601 /* If the split point was a MULT and we didn't have one before,
3602 don't use one now. */
3603 if (i2_code_number >= 0 && ! (split_code == MULT && ! have_mult))
3604 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3605 }
3606 }
3607
3608 /* Check for a case where we loaded from memory in a narrow mode and
3609 then sign extended it, but we need both registers. In that case,
3610 we have a PARALLEL with both loads from the same memory location.
3611 We can split this into a load from memory followed by a register-register
3612 copy. This saves at least one insn, more if register allocation can
3613 eliminate the copy.
3614
3615 We cannot do this if the destination of the first assignment is a
3616 condition code register or cc0. We eliminate this case by making sure
3617 the SET_DEST and SET_SRC have the same mode.
3618
3619 We cannot do this if the destination of the second assignment is
3620 a register that we have already assumed is zero-extended. Similarly
3621 for a SUBREG of such a register. */
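/* Illustrative example:
(parallel [(set (reg:SI R1) (sign_extend:SI (mem:HI M)))
(set (reg:HI R2) (mem:HI M))])
becomes the extending load into R1 followed by a copy of the
low part of R1 into R2. */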
3622
3623 else if (i1 && insn_code_number < 0 && asm_noperands (newpat) < 0
3624 && GET_CODE (newpat) == PARALLEL
3625 && XVECLEN (newpat, 0) == 2
3626 && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
3627 && GET_CODE (SET_SRC (XVECEXP (newpat, 0, 0))) == SIGN_EXTEND
3628 && (GET_MODE (SET_DEST (XVECEXP (newpat, 0, 0)))
3629 == GET_MODE (SET_SRC (XVECEXP (newpat, 0, 0))))
3630 && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
3631 && rtx_equal_p (SET_SRC (XVECEXP (newpat, 0, 1)),
3632 XEXP (SET_SRC (XVECEXP (newpat, 0, 0)), 0))
3633 && ! use_crosses_set_p (SET_SRC (XVECEXP (newpat, 0, 1)),
3634 DF_INSN_LUID (i2))
3635 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
3636 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
3637 && ! (temp = SET_DEST (XVECEXP (newpat, 0, 1)),
3638 (REG_P (temp)
3639 && VEC_index (reg_stat_type, reg_stat,
3640 REGNO (temp))->nonzero_bits != 0
3641 && GET_MODE_BITSIZE (GET_MODE (temp)) < BITS_PER_WORD
3642 && GET_MODE_BITSIZE (GET_MODE (temp)) < HOST_BITS_PER_INT
3643 && (VEC_index (reg_stat_type, reg_stat,
3644 REGNO (temp))->nonzero_bits
3645 != GET_MODE_MASK (word_mode))))
3646 && ! (GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) == SUBREG
3647 && (temp = SUBREG_REG (SET_DEST (XVECEXP (newpat, 0, 1))),
3648 (REG_P (temp)
3649 && VEC_index (reg_stat_type, reg_stat,
3650 REGNO (temp))->nonzero_bits != 0
3651 && GET_MODE_BITSIZE (GET_MODE (temp)) < BITS_PER_WORD
3652 && GET_MODE_BITSIZE (GET_MODE (temp)) < HOST_BITS_PER_INT
3653 && (VEC_index (reg_stat_type, reg_stat,
3654 REGNO (temp))->nonzero_bits
3655 != GET_MODE_MASK (word_mode)))))
3656 && ! reg_overlap_mentioned_p (SET_DEST (XVECEXP (newpat, 0, 1)),
3657 SET_SRC (XVECEXP (newpat, 0, 1)))
3658 && ! find_reg_note (i3, REG_UNUSED,
3659 SET_DEST (XVECEXP (newpat, 0, 0))))
3660 {
3661 rtx ni2dest;
3662
3663 newi2pat = XVECEXP (newpat, 0, 0);
3664 ni2dest = SET_DEST (XVECEXP (newpat, 0, 0));
3665 newpat = XVECEXP (newpat, 0, 1);
3666 SUBST (SET_SRC (newpat),
3667 gen_lowpart (GET_MODE (SET_SRC (newpat)), ni2dest));
3668 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3669
3670 if (i2_code_number >= 0)
3671 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3672
3673 if (insn_code_number >= 0)
3674 swap_i2i3 = 1;
3675 }
3676
3677 /* Similarly, check for a case where we have a PARALLEL of two independent
3678 SETs but we started with three insns. In this case, we can do the sets
3679 as two separate insns. This case occurs when some SET allows two
3680 other insns to combine, but the destination of that SET is still live. */
3681
3682 else if (i1 && insn_code_number < 0 && asm_noperands (newpat) < 0
3683 && GET_CODE (newpat) == PARALLEL
3684 && XVECLEN (newpat, 0) == 2
3685 && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
3686 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != ZERO_EXTRACT
3687 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != STRICT_LOW_PART
3688 && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
3689 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
3690 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
3691 && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 1)),
3692 XVECEXP (newpat, 0, 0))
3693 && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 0)),
3694 XVECEXP (newpat, 0, 1))
3695 && ! (contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 0)))
3696 && contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 1)))))
3697 {
3698 /* Normally, it doesn't matter which of the two is done first,
3699 but the one that references cc0 can't be the second, and
3700 one which uses any regs/memory set in between i2 and i3 can't
3701 be first. */
3702 if (!use_crosses_set_p (SET_SRC (XVECEXP (newpat, 0, 1)),
3703 DF_INSN_LUID (i2))
3704 #ifdef HAVE_cc0
3705 && !reg_referenced_p (cc0_rtx, XVECEXP (newpat, 0, 0))
3706 #endif
3707 )
3708 {
3709 newi2pat = XVECEXP (newpat, 0, 1);
3710 newpat = XVECEXP (newpat, 0, 0);
3711 }
3712 else if (!use_crosses_set_p (SET_SRC (XVECEXP (newpat, 0, 0)),
3713 DF_INSN_LUID (i2))
3714 #ifdef HAVE_cc0
3715 && !reg_referenced_p (cc0_rtx, XVECEXP (newpat, 0, 1))
3716 #endif
3717 )
3718 {
3719 newi2pat = XVECEXP (newpat, 0, 0);
3720 newpat = XVECEXP (newpat, 0, 1);
3721 }
3722 else
3723 {
3724 undo_all ();
3725 return 0;
3726 }
3727
3728 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3729
3730 if (i2_code_number >= 0)
3731 {
3732 /* recog_for_combine might have added CLOBBERs to newi2pat.
3733 Make sure NEWPAT does not depend on the clobbered regs. */
3734 if (GET_CODE (newi2pat) == PARALLEL)
3735 {
3736 for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
3737 if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
3738 {
3739 rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
3740 if (reg_overlap_mentioned_p (reg, newpat))
3741 {
3742 undo_all ();
3743 return 0;
3744 }
3745 }
3746 }
3747
3748 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3749 }
3750 }
3751
3752 /* If it still isn't recognized, fail and change things back the way they
3753 were. */
3754 if ((insn_code_number < 0
3755 /* Is the result a reasonable ASM_OPERANDS? */
3756 && (! check_asm_operands (newpat) || added_sets_1 || added_sets_2)))
3757 {
3758 undo_all ();
3759 return 0;
3760 }
3761
3762 /* If we had to change another insn, make sure it is valid also. */
3763 if (undobuf.other_insn)
3764 {
3765 CLEAR_HARD_REG_SET (newpat_used_regs);
3766
3767 other_pat = PATTERN (undobuf.other_insn);
3768 other_code_number = recog_for_combine (&other_pat, undobuf.other_insn,
3769 &new_other_notes);
3770
3771 if (other_code_number < 0 && ! check_asm_operands (other_pat))
3772 {
3773 undo_all ();
3774 return 0;
3775 }
3776 }
3777
3778 #ifdef HAVE_cc0
3779 /* If I2 is the CC0 setter and I3 is the CC0 user then check whether
3780 they are adjacent to each other or not. */
3781 {
3782 rtx p = prev_nonnote_insn (i3);
3783 if (p && p != i2 && NONJUMP_INSN_P (p) && newi2pat
3784 && sets_cc0_p (newi2pat))
3785 {
3786 undo_all ();
3787 return 0;
3788 }
3789 }
3790 #endif
3791
3792 /* Only allow this combination if insn_rtx_costs reports that the
3793 replacement instructions are cheaper than the originals. */
3794 if (!combine_validate_cost (i0, i1, i2, i3, newpat, newi2pat, other_pat))
3795 {
3796 undo_all ();
3797 return 0;
3798 }
3799
3800 if (MAY_HAVE_DEBUG_INSNS)
3801 {
3802 struct undo *undo;
3803
3804 for (undo = undobuf.undos; undo; undo = undo->next)
3805 if (undo->kind == UNDO_MODE)
3806 {
3807 rtx reg = *undo->where.r;
3808 enum machine_mode new_mode = GET_MODE (reg);
3809 enum machine_mode old_mode = undo->old_contents.m;
3810
3811 /* Temporarily revert mode back. */
3812 adjust_reg_mode (reg, old_mode);
3813
3814 if (reg == i2dest && i2scratch)
3815 {
3816 /* If we used i2dest as a scratch register with a
3817 different mode, substitute it for the original
3818 i2src while its original mode is temporarily
3819 restored, and then clear i2scratch so that we don't
3820 do it again later. */
3821 propagate_for_debug (i2, i3, reg, i2src);
3822 i2scratch = false;
3823 /* Put back the new mode. */
3824 adjust_reg_mode (reg, new_mode);
3825 }
3826 else
3827 {
3828 rtx tempreg = gen_raw_REG (old_mode, REGNO (reg));
3829 rtx first, last;
3830
3831 if (reg == i2dest)
3832 {
3833 first = i2;
3834 last = i3;
3835 }
3836 else
3837 {
3838 first = i3;
3839 last = undobuf.other_insn;
3840 gcc_assert (last);
3841 }
3842
3843 /* We're dealing with a reg that changed mode but not
3844 meaning, so we want to turn it into a subreg for
3845 the new mode. However, because of REG sharing and
3846 because its mode had already changed, we have to do
3847 it in two steps. First, replace any debug uses of
3848 reg, with its original mode temporarily restored,
3849 with this copy we have created; then, replace the
3850 copy with the SUBREG of the original shared reg,
3851 once again changed to the new mode. */
3852 propagate_for_debug (first, last, reg, tempreg);
3853 adjust_reg_mode (reg, new_mode);
3854 propagate_for_debug (first, last, tempreg,
3855 lowpart_subreg (old_mode, reg, new_mode));
3856 }
3857 }
3858 }
3859
3860 /* If we will be able to accept this, we have made a
3861 change to the destination of I3. This requires us to
3862 do a few adjustments. */
3863
3864 if (changed_i3_dest)
3865 {
3866 PATTERN (i3) = newpat;
3867 adjust_for_new_dest (i3);
3868 }
3869
3870 /* We now know that we can do this combination. Merge the insns and
3871 update the status of registers and LOG_LINKS. */
3872
3873 if (undobuf.other_insn)
3874 {
3875 rtx note, next;
3876
3877 PATTERN (undobuf.other_insn) = other_pat;
3878
3879 /* If any of the notes in OTHER_INSN were REG_UNUSED, ensure that they
3880 are still valid. Then add any non-duplicate notes added by
3881 recog_for_combine. */
3882 for (note = REG_NOTES (undobuf.other_insn); note; note = next)
3883 {
3884 next = XEXP (note, 1);
3885
3886 if (REG_NOTE_KIND (note) == REG_UNUSED
3887 && ! reg_set_p (XEXP (note, 0), PATTERN (undobuf.other_insn)))
3888 remove_note (undobuf.other_insn, note);
3889 }
3890
3891 distribute_notes (new_other_notes, undobuf.other_insn,
3892 undobuf.other_insn, NULL_RTX, NULL_RTX, NULL_RTX,
3893 NULL_RTX);
3894 }
3895
3896 if (swap_i2i3)
3897 {
3898 rtx insn;
3899 rtx link;
3900 rtx ni2dest;
3901
3902 /* I3 now uses what used to be its destination and which is now
3903 I2's destination. This requires us to do a few adjustments. */
3904 PATTERN (i3) = newpat;
3905 adjust_for_new_dest (i3);
3906
3907 /* We need a LOG_LINK from I3 to I2. But we used to have one,
3908 so we still will.
3909
3910 However, some later insn might be using I2's dest and have
3911 a LOG_LINK pointing at I3. We must remove this link.
3912 The simplest way to remove the link is to point it at I1,
3913 which we know will be a NOTE. */
3914
3915 /* newi2pat is usually a SET here; however, recog_for_combine might
3916 have added some clobbers. */
3917 if (GET_CODE (newi2pat) == PARALLEL)
3918 ni2dest = SET_DEST (XVECEXP (newi2pat, 0, 0));
3919 else
3920 ni2dest = SET_DEST (newi2pat);
3921
3922 for (insn = NEXT_INSN (i3);
3923 insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR
3924 || insn != BB_HEAD (this_basic_block->next_bb));
3925 insn = NEXT_INSN (insn))
3926 {
3927 if (INSN_P (insn) && reg_referenced_p (ni2dest, PATTERN (insn)))
3928 {
3929 for (link = LOG_LINKS (insn); link;
3930 link = XEXP (link, 1))
3931 if (XEXP (link, 0) == i3)
3932 XEXP (link, 0) = i1;
3933
3934 break;
3935 }
3936 }
3937 }
3938
3939 {
3940 rtx i3notes, i2notes, i1notes = 0, i0notes = 0;
3941 rtx i3links, i2links, i1links = 0, i0links = 0;
3942 rtx midnotes = 0;
3943 int from_luid;
3944 unsigned int regno;
3945 /* Compute which registers we expect to eliminate. newi2pat may be setting
3946 either i3dest or i2dest, so we must check it. Also, i1dest may be the
3947 same as i3dest, in which case newi2pat may be setting i1dest. */
3948 rtx elim_i2 = ((newi2pat && reg_set_p (i2dest, newi2pat))
3949 || i2dest_in_i2src || i2dest_in_i1src || i2dest_in_i0src
3950 || !i2dest_killed
3951 ? 0 : i2dest);
3952 rtx elim_i1 = (i1 == 0 || i1dest_in_i1src || i1dest_in_i0src
3953 || (newi2pat && reg_set_p (i1dest, newi2pat))
3954 || !i1dest_killed
3955 ? 0 : i1dest);
3956 rtx elim_i0 = (i0 == 0 || i0dest_in_i0src
3957 || (newi2pat && reg_set_p (i0dest, newi2pat))
3958 || !i0dest_killed
3959 ? 0 : i0dest);
3960
3961 /* Get the old REG_NOTES and LOG_LINKS from all our insns and
3962 clear them. */
3963 i3notes = REG_NOTES (i3), i3links = LOG_LINKS (i3);
3964 i2notes = REG_NOTES (i2), i2links = LOG_LINKS (i2);
3965 if (i1)
3966 i1notes = REG_NOTES (i1), i1links = LOG_LINKS (i1);
3967 if (i0)
3968 i0notes = REG_NOTES (i0), i0links = LOG_LINKS (i0);
3969
3970 /* Ensure that we do not have something that should not be shared but
3971 occurs multiple times in the new insns. Check this by first
3972 resetting all the `used' flags and then copying anything that is shared. */
3973
3974 reset_used_flags (i3notes);
3975 reset_used_flags (i2notes);
3976 reset_used_flags (i1notes);
3977 reset_used_flags (i0notes);
3978 reset_used_flags (newpat);
3979 reset_used_flags (newi2pat);
3980 if (undobuf.other_insn)
3981 reset_used_flags (PATTERN (undobuf.other_insn));
3982
3983 i3notes = copy_rtx_if_shared (i3notes);
3984 i2notes = copy_rtx_if_shared (i2notes);
3985 i1notes = copy_rtx_if_shared (i1notes);
3986 i0notes = copy_rtx_if_shared (i0notes);
3987 newpat = copy_rtx_if_shared (newpat);
3988 newi2pat = copy_rtx_if_shared (newi2pat);
3989 if (undobuf.other_insn)
3990 reset_used_flags (PATTERN (undobuf.other_insn));
3991
3992 INSN_CODE (i3) = insn_code_number;
3993 PATTERN (i3) = newpat;
3994
3995 if (CALL_P (i3) && CALL_INSN_FUNCTION_USAGE (i3))
3996 {
3997 rtx call_usage = CALL_INSN_FUNCTION_USAGE (i3);
3998
3999 reset_used_flags (call_usage);
4000 call_usage = copy_rtx (call_usage);
4001
4002 if (substed_i2)
4003 {
4004 /* I2SRC must still be meaningful at this point. Some splitting
4005 operations can invalidate I2SRC, but those operations do not
4006 apply to calls. */
4007 gcc_assert (i2src);
4008 replace_rtx (call_usage, i2dest, i2src);
4009 }
4010
4011 if (substed_i1)
4012 replace_rtx (call_usage, i1dest, i1src);
4013 if (substed_i0)
4014 replace_rtx (call_usage, i0dest, i0src);
4015
4016 CALL_INSN_FUNCTION_USAGE (i3) = call_usage;
4017 }
4018
4019 if (undobuf.other_insn)
4020 INSN_CODE (undobuf.other_insn) = other_code_number;
4021
4022 /* We had one special case above where I2 had more than one set and
4023 we replaced a destination of one of those sets with the destination
4024 of I3. In that case, we have to update LOG_LINKS of insns later
4025 in this basic block. Note that this (expensive) case is rare.
4026
4027 Also, in this case, we must pretend that all REG_NOTEs for I2
4028 actually came from I3, so that REG_UNUSED notes from I2 will be
4029 properly handled. */
4030
4031 if (i3_subst_into_i2)
4032 {
4033 for (i = 0; i < XVECLEN (PATTERN (i2), 0); i++)
4034 if ((GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == SET
4035 || GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == CLOBBER)
4036 && REG_P (SET_DEST (XVECEXP (PATTERN (i2), 0, i)))
4037 && SET_DEST (XVECEXP (PATTERN (i2), 0, i)) != i2dest
4038 && ! find_reg_note (i2, REG_UNUSED,
4039 SET_DEST (XVECEXP (PATTERN (i2), 0, i))))
4040 for (temp = NEXT_INSN (i2);
4041 temp && (this_basic_block->next_bb == EXIT_BLOCK_PTR
4042 || BB_HEAD (this_basic_block) != temp);
4043 temp = NEXT_INSN (temp))
4044 if (temp != i3 && INSN_P (temp))
4045 for (link = LOG_LINKS (temp); link; link = XEXP (link, 1))
4046 if (XEXP (link, 0) == i2)
4047 XEXP (link, 0) = i3;
4048
4049 if (i3notes)
4050 {
4051 rtx link = i3notes;
4052 while (XEXP (link, 1))
4053 link = XEXP (link, 1);
4054 XEXP (link, 1) = i2notes;
4055 }
4056 else
4057 i3notes = i2notes;
4058 i2notes = 0;
4059 }
4060
4061 LOG_LINKS (i3) = 0;
4062 REG_NOTES (i3) = 0;
4063 LOG_LINKS (i2) = 0;
4064 REG_NOTES (i2) = 0;
4065
4066 if (newi2pat)
4067 {
4068 if (MAY_HAVE_DEBUG_INSNS && i2scratch)
4069 propagate_for_debug (i2, i3, i2dest, i2src);
4070 INSN_CODE (i2) = i2_code_number;
4071 PATTERN (i2) = newi2pat;
4072 }
4073 else
4074 {
4075 if (MAY_HAVE_DEBUG_INSNS && i2src)
4076 propagate_for_debug (i2, i3, i2dest, i2src);
4077 SET_INSN_DELETED (i2);
4078 }
4079
4080 if (i1)
4081 {
4082 LOG_LINKS (i1) = 0;
4083 REG_NOTES (i1) = 0;
4084 if (MAY_HAVE_DEBUG_INSNS)
4085 propagate_for_debug (i1, i3, i1dest, i1src);
4086 SET_INSN_DELETED (i1);
4087 }
4088
4089 if (i0)
4090 {
4091 LOG_LINKS (i0) = 0;
4092 REG_NOTES (i0) = 0;
4093 if (MAY_HAVE_DEBUG_INSNS)
4094 propagate_for_debug (i0, i3, i0dest, i0src);
4095 SET_INSN_DELETED (i0);
4096 }
4097
4098 /* Get death notes for everything that is now used in either I3 or
4099 I2 and used to die in a previous insn. If we built two new
4100 patterns, move from I1 to I2 then I2 to I3 so that we get the
4101 proper movement on registers that I2 modifies. */
4102
4103 if (i0)
4104 from_luid = DF_INSN_LUID (i0);
4105 else if (i1)
4106 from_luid = DF_INSN_LUID (i1);
4107 else
4108 from_luid = DF_INSN_LUID (i2);
4109 if (newi2pat)
4110 move_deaths (newi2pat, NULL_RTX, from_luid, i2, &midnotes);
4111 move_deaths (newpat, newi2pat, from_luid, i3, &midnotes);
4112
4113 /* Distribute all the LOG_LINKS and REG_NOTES from I1, I2, and I3. */
4114 if (i3notes)
4115 distribute_notes (i3notes, i3, i3, newi2pat ? i2 : NULL_RTX,
4116 elim_i2, elim_i1, elim_i0);
4117 if (i2notes)
4118 distribute_notes (i2notes, i2, i3, newi2pat ? i2 : NULL_RTX,
4119 elim_i2, elim_i1, elim_i0);
4120 if (i1notes)
4121 distribute_notes (i1notes, i1, i3, newi2pat ? i2 : NULL_RTX,
4122 elim_i2, elim_i1, elim_i0);
4123 if (i0notes)
4124 distribute_notes (i0notes, i0, i3, newi2pat ? i2 : NULL_RTX,
4125 elim_i2, elim_i1, elim_i0);
4126 if (midnotes)
4127 distribute_notes (midnotes, NULL_RTX, i3, newi2pat ? i2 : NULL_RTX,
4128 elim_i2, elim_i1, elim_i0);
4129
4130 /* Distribute any notes added to I2 or I3 by recog_for_combine. We
4131 know these are REG_UNUSED and want them to go to the desired insn,
4132 so we always pass it as i3. */
4133
4134 if (newi2pat && new_i2_notes)
4135 distribute_notes (new_i2_notes, i2, i2, NULL_RTX, NULL_RTX, NULL_RTX,
4136 NULL_RTX);
4137
4138 if (new_i3_notes)
4139 distribute_notes (new_i3_notes, i3, i3, NULL_RTX, NULL_RTX, NULL_RTX,
4140 NULL_RTX);
4141
4142 /* If I3DEST was used in I3SRC, it really died in I3. We may need to
4143 put a REG_DEAD note for it somewhere. If NEWI2PAT exists and sets
4144 I3DEST, the death must be somewhere before I2, not I3. If we passed I3
4145 in that case, it might delete I2. Similarly for I2 and I1.
4146 Show an additional death due to the REG_DEAD note we make here. If
4147 we discard it in distribute_notes, we will decrement it again. */
4148
4149 if (i3dest_killed)
4150 {
4151 if (newi2pat && reg_set_p (i3dest_killed, newi2pat))
4152 distribute_notes (alloc_reg_note (REG_DEAD, i3dest_killed,
4153 NULL_RTX),
4154 NULL_RTX, i2, NULL_RTX, elim_i2, elim_i1, elim_i0);
4155 else
4156 distribute_notes (alloc_reg_note (REG_DEAD, i3dest_killed,
4157 NULL_RTX),
4158 NULL_RTX, i3, newi2pat ? i2 : NULL_RTX,
4159 elim_i2, elim_i1, elim_i0);
4160 }
4161
4162 if (i2dest_in_i2src)
4163 {
4164 rtx new_note = alloc_reg_note (REG_DEAD, i2dest, NULL_RTX);
4165 if (newi2pat && reg_set_p (i2dest, newi2pat))
4166 distribute_notes (new_note, NULL_RTX, i2, NULL_RTX, NULL_RTX,
4167 NULL_RTX, NULL_RTX);
4168 else
4169 distribute_notes (new_note, NULL_RTX, i3, newi2pat ? i2 : NULL_RTX,
4170 NULL_RTX, NULL_RTX, NULL_RTX);
4171 }
4172
4173 if (i1dest_in_i1src)
4174 {
4175 rtx new_note = alloc_reg_note (REG_DEAD, i1dest, NULL_RTX);
4176 if (newi2pat && reg_set_p (i1dest, newi2pat))
4177 distribute_notes (new_note, NULL_RTX, i2, NULL_RTX, NULL_RTX,
4178 NULL_RTX, NULL_RTX);
4179 else
4180 distribute_notes (new_note, NULL_RTX, i3, newi2pat ? i2 : NULL_RTX,
4181 NULL_RTX, NULL_RTX, NULL_RTX);
4182 }
4183
4184 if (i0dest_in_i0src)
4185 {
4186 rtx new_note = alloc_reg_note (REG_DEAD, i0dest, NULL_RTX);
4187 if (newi2pat && reg_set_p (i0dest, newi2pat))
4188 distribute_notes (new_note, NULL_RTX, i2, NULL_RTX, NULL_RTX,
4189 NULL_RTX, NULL_RTX);
4190 else
4191 distribute_notes (new_note, NULL_RTX, i3, newi2pat ? i2 : NULL_RTX,
4192 NULL_RTX, NULL_RTX, NULL_RTX);
4193 }
4194
4195 distribute_links (i3links);
4196 distribute_links (i2links);
4197 distribute_links (i1links);
4198 distribute_links (i0links);
4199
4200 if (REG_P (i2dest))
4201 {
4202 rtx link;
4203 rtx i2_insn = 0, i2_val = 0, set;
4204
4205 /* The insn that used to set this register doesn't exist, and
4206 this life of the register may not exist either. See if one of
4207 I3's links points to an insn that sets I2DEST. If it does,
4208 that is now the last known value for I2DEST. If we don't update
4209 this and I2 set the register to a value that depended on its old
4210 contents, we will get confused. If this insn is used, things
4211 will be set correctly in combine_instructions. */
4212
4213 for (link = LOG_LINKS (i3); link; link = XEXP (link, 1))
4214 if ((set = single_set (XEXP (link, 0))) != 0
4215 && rtx_equal_p (i2dest, SET_DEST (set)))
4216 i2_insn = XEXP (link, 0), i2_val = SET_SRC (set);
4217
4218 record_value_for_reg (i2dest, i2_insn, i2_val);
4219
4220 /* If the reg formerly set in I2 died only once and that was in I3,
4221 zero its use count so it won't make `reload' do any work. */
4222 if (! added_sets_2
4223 && (newi2pat == 0 || ! reg_mentioned_p (i2dest, newi2pat))
4224 && ! i2dest_in_i2src)
4225 {
4226 regno = REGNO (i2dest);
4227 INC_REG_N_SETS (regno, -1);
4228 }
4229 }
4230
4231 if (i1 && REG_P (i1dest))
4232 {
4233 rtx link;
4234 rtx i1_insn = 0, i1_val = 0, set;
4235
4236 for (link = LOG_LINKS (i3); link; link = XEXP (link, 1))
4237 if ((set = single_set (XEXP (link, 0))) != 0
4238 && rtx_equal_p (i1dest, SET_DEST (set)))
4239 i1_insn = XEXP (link, 0), i1_val = SET_SRC (set);
4240
4241 record_value_for_reg (i1dest, i1_insn, i1_val);
4242
4243 regno = REGNO (i1dest);
4244 if (! added_sets_1 && ! i1dest_in_i1src)
4245 INC_REG_N_SETS (regno, -1);
4246 }
4247
4248 if (i0 && REG_P (i0dest))
4249 {
4250 rtx link;
4251 rtx i0_insn = 0, i0_val = 0, set;
4252
4253 for (link = LOG_LINKS (i3); link; link = XEXP (link, 1))
4254 if ((set = single_set (XEXP (link, 0))) != 0
4255 && rtx_equal_p (i0dest, SET_DEST (set)))
4256 i0_insn = XEXP (link, 0), i0_val = SET_SRC (set);
4257
4258 record_value_for_reg (i0dest, i0_insn, i0_val);
4259
4260 regno = REGNO (i0dest);
4261 if (! added_sets_0 && ! i0dest_in_i0src)
4262 INC_REG_N_SETS (regno, -1);
4263 }
4264
4265 /* Update reg_stat[].nonzero_bits et al for any changes that may have
4266 been made to this insn. The order of the calls to
4267 set_nonzero_bits_and_sign_copies () matters, because newi2pat
4268 can affect the nonzero_bits of newpat. */
4269 if (newi2pat)
4270 note_stores (newi2pat, set_nonzero_bits_and_sign_copies, NULL);
4271 note_stores (newpat, set_nonzero_bits_and_sign_copies, NULL);
4272 }
4273
4274 if (undobuf.other_insn != NULL_RTX)
4275 {
4276 if (dump_file)
4277 {
4278 fprintf (dump_file, "modifying other_insn ");
4279 dump_insn_slim (dump_file, undobuf.other_insn);
4280 }
4281 df_insn_rescan (undobuf.other_insn);
4282 }
4283
4284 if (i0 && !(NOTE_P (i0) && (NOTE_KIND (i0) == NOTE_INSN_DELETED)))
4285 {
4286 if (dump_file)
4287 {
4288 fprintf (dump_file, "modifying insn i0 ");
4289 dump_insn_slim (dump_file, i0);
4290 }
4291 df_insn_rescan (i0);
4292 }
4293
4294 if (i1 && !(NOTE_P (i1) && (NOTE_KIND (i1) == NOTE_INSN_DELETED)))
4295 {
4296 if (dump_file)
4297 {
4298 fprintf (dump_file, "modifying insn i1 ");
4299 dump_insn_slim (dump_file, i1);
4300 }
4301 df_insn_rescan (i1);
4302 }
4303
4304 if (i2 && !(NOTE_P (i2) && (NOTE_KIND (i2) == NOTE_INSN_DELETED)))
4305 {
4306 if (dump_file)
4307 {
4308 fprintf (dump_file, "modifying insn i2 ");
4309 dump_insn_slim (dump_file, i2);
4310 }
4311 df_insn_rescan (i2);
4312 }
4313
4314 if (i3 && !(NOTE_P (i3) && (NOTE_KIND (i3) == NOTE_INSN_DELETED)))
4315 {
4316 if (dump_file)
4317 {
4318 fprintf (dump_file, "modifying insn i3 ");
4319 dump_insn_slim (dump_file, i3);
4320 }
4321 df_insn_rescan (i3);
4322 }
4323
4324 /* Set new_direct_jump_p if a new return or simple jump instruction
4325 has been created. Adjust the CFG accordingly. */
4326
4327 if (returnjump_p (i3) || any_uncondjump_p (i3))
4328 {
4329 *new_direct_jump_p = 1;
4330 mark_jump_label (PATTERN (i3), i3, 0);
4331 update_cfg_for_uncondjump (i3);
4332 }
4333
4334 if (undobuf.other_insn != NULL_RTX
4335 && (returnjump_p (undobuf.other_insn)
4336 || any_uncondjump_p (undobuf.other_insn)))
4337 {
4338 *new_direct_jump_p = 1;
4339 update_cfg_for_uncondjump (undobuf.other_insn);
4340 }
4341
4342 /* A noop might also need the CFG cleaned up, if it comes from the
4343 simplification of a jump. */
4344 if (GET_CODE (newpat) == SET
4345 && SET_SRC (newpat) == pc_rtx
4346 && SET_DEST (newpat) == pc_rtx)
4347 {
4348 *new_direct_jump_p = 1;
4349 update_cfg_for_uncondjump (i3);
4350 }
4351
4352 combine_successes++;
4353 undo_commit ();
4354
4355 if (added_links_insn
4356 && (newi2pat == 0 || DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (i2))
4357 && DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (i3))
4358 return added_links_insn;
4359 else
4360 return newi2pat ? i2 : i3;
4361 }
4362 \f
4363 /* Undo all the modifications recorded in undobuf. */
4364
4365 static void
4366 undo_all (void)
4367 {
4368 struct undo *undo, *next;
4369
4370 for (undo = undobuf.undos; undo; undo = next)
4371 {
4372 next = undo->next;
4373 switch (undo->kind)
4374 {
4375 case UNDO_RTX:
4376 *undo->where.r = undo->old_contents.r;
4377 break;
4378 case UNDO_INT:
4379 *undo->where.i = undo->old_contents.i;
4380 break;
4381 case UNDO_MODE:
4382 adjust_reg_mode (*undo->where.r, undo->old_contents.m);
4383 break;
4384 default:
4385 gcc_unreachable ();
4386 }
4387
4388 undo->next = undobuf.frees;
4389 undobuf.frees = undo;
4390 }
4391
4392 undobuf.undos = 0;
4393 }
4394
4395 /* We've committed to accepting the changes we made. Move all
4396 of the undos to the free list. */
4397
4398 static void
4399 undo_commit (void)
4400 {
4401 struct undo *undo, *next;
4402
4403 for (undo = undobuf.undos; undo; undo = next)
4404 {
4405 next = undo->next;
4406 undo->next = undobuf.frees;
4407 undobuf.frees = undo;
4408 }
4409 undobuf.undos = 0;
4410 }
4411 \f
4412 /* Find the innermost point within the rtx at LOC, possibly LOC itself,
4413 where we have an arithmetic expression and return that point. LOC will
4414 be inside INSN.
4415
4416 try_combine will call this function to see if an insn can be split into
4417 two insns. */
4418
4419 static rtx *
4420 find_split_point (rtx *loc, rtx insn, bool set_src)
4421 {
4422 rtx x = *loc;
4423 enum rtx_code code = GET_CODE (x);
4424 rtx *split;
4425 unsigned HOST_WIDE_INT len = 0;
4426 HOST_WIDE_INT pos = 0;
4427 int unsignedp = 0;
4428 rtx inner = NULL_RTX;
4429
4430 /* First special-case some codes. */
4431 switch (code)
4432 {
4433 case SUBREG:
4434 #ifdef INSN_SCHEDULING
4435 /* If we are making a paradoxical SUBREG invalid, it becomes a split
4436 point. */
4437 if (MEM_P (SUBREG_REG (x)))
4438 return loc;
4439 #endif
4440 return find_split_point (&SUBREG_REG (x), insn, false);
4441
4442 case MEM:
4443 #ifdef HAVE_lo_sum
4444 /* If we have (mem (const ..)) or (mem (symbol_ref ...)), split it
4445 using LO_SUM and HIGH. */
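/* For example, (mem (symbol_ref "x")) becomes
(mem (lo_sum (high (symbol_ref "x")) (symbol_ref "x"))),
and the HIGH part is returned as the split point. */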
4446 if (GET_CODE (XEXP (x, 0)) == CONST
4447 || GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
4448 {
4449 enum machine_mode address_mode
4450 = targetm.addr_space.address_mode (MEM_ADDR_SPACE (x));
4451
4452 SUBST (XEXP (x, 0),
4453 gen_rtx_LO_SUM (address_mode,
4454 gen_rtx_HIGH (address_mode, XEXP (x, 0)),
4455 XEXP (x, 0)));
4456 return &XEXP (XEXP (x, 0), 0);
4457 }
4458 #endif
4459
4460 /* If we have a PLUS whose second operand is a constant and the
4461 address is not valid, perhaps we can split it up using
4462 the machine-specific way to split large constants. We use
4463 the first pseudo-reg (one of the virtual regs) as a placeholder;
4464 it will not remain in the result. */
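/* For example, on a target where (plus (reg) (const_int 0x12345)) is
not a valid address, the machine-specific splitter may build the
constant in two steps; the two placeholder SETs are then merged
below and the split point placed between them. The exact insns
produced depend on the target. */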
4465 if (GET_CODE (XEXP (x, 0)) == PLUS
4466 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
4467 && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
4468 MEM_ADDR_SPACE (x)))
4469 {
4470 rtx reg = regno_reg_rtx[FIRST_PSEUDO_REGISTER];
4471 rtx seq = combine_split_insns (gen_rtx_SET (VOIDmode, reg,
4472 XEXP (x, 0)),
4473 subst_insn);
4474
4475 /* This should have produced two insns, each of which sets our
4476 placeholder. If the source of the second is a valid address,
4477 we can put both sources together and make a split point
4478 in the middle. */
4479
4480 if (seq
4481 && NEXT_INSN (seq) != NULL_RTX
4482 && NEXT_INSN (NEXT_INSN (seq)) == NULL_RTX
4483 && NONJUMP_INSN_P (seq)
4484 && GET_CODE (PATTERN (seq)) == SET
4485 && SET_DEST (PATTERN (seq)) == reg
4486 && ! reg_mentioned_p (reg,
4487 SET_SRC (PATTERN (seq)))
4488 && NONJUMP_INSN_P (NEXT_INSN (seq))
4489 && GET_CODE (PATTERN (NEXT_INSN (seq))) == SET
4490 && SET_DEST (PATTERN (NEXT_INSN (seq))) == reg
4491 && memory_address_addr_space_p
4492 (GET_MODE (x), SET_SRC (PATTERN (NEXT_INSN (seq))),
4493 MEM_ADDR_SPACE (x)))
4494 {
4495 rtx src1 = SET_SRC (PATTERN (seq));
4496 rtx src2 = SET_SRC (PATTERN (NEXT_INSN (seq)));
4497
4498 /* Replace the placeholder in SRC2 with SRC1. If we can
4499 find where in SRC2 it was placed, that can become our
4500 split point and we can replace this address with SRC2.
4501 Just try two obvious places. */
4502
4503 src2 = replace_rtx (src2, reg, src1);
4504 split = 0;
4505 if (XEXP (src2, 0) == src1)
4506 split = &XEXP (src2, 0);
4507 else if (GET_RTX_FORMAT (GET_CODE (XEXP (src2, 0)))[0] == 'e'
4508 && XEXP (XEXP (src2, 0), 0) == src1)
4509 split = &XEXP (XEXP (src2, 0), 0);
4510
4511 if (split)
4512 {
4513 SUBST (XEXP (x, 0), src2);
4514 return split;
4515 }
4516 }
4517
4518 /* If that didn't work, perhaps the first operand is complex and
4519 needs to be computed separately, so make a split point there.
4520 This will occur on machines that just support REG + CONST
4521 and have a constant moved through some previous computation. */
4522
4523 else if (!OBJECT_P (XEXP (XEXP (x, 0), 0))
4524 && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
4525 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
4526 return &XEXP (XEXP (x, 0), 0);
4527 }
4528
4529 /* If we have a PLUS whose first operand is complex, try computing it
4530 separately by making a split there. */
4531 if (GET_CODE (XEXP (x, 0)) == PLUS
4532 && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
4533 MEM_ADDR_SPACE (x))
4534 && ! OBJECT_P (XEXP (XEXP (x, 0), 0))
4535 && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
4536 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
4537 return &XEXP (XEXP (x, 0), 0);
4538 break;
4539
4540 case SET:
4541 #ifdef HAVE_cc0
4542 /* If SET_DEST is CC0 and SET_SRC is not an operand, a COMPARE, or a
4543 ZERO_EXTRACT, the most likely reason why this doesn't match is that
4544 we need to put the operand into a register. So split at that
4545 point. */
4546
4547 if (SET_DEST (x) == cc0_rtx
4548 && GET_CODE (SET_SRC (x)) != COMPARE
4549 && GET_CODE (SET_SRC (x)) != ZERO_EXTRACT
4550 && !OBJECT_P (SET_SRC (x))
4551 && ! (GET_CODE (SET_SRC (x)) == SUBREG
4552 && OBJECT_P (SUBREG_REG (SET_SRC (x)))))
4553 return &SET_SRC (x);
4554 #endif
4555
4556 /* See if we can split SET_SRC as it stands. */
4557 split = find_split_point (&SET_SRC (x), insn, true);
4558 if (split && split != &SET_SRC (x))
4559 return split;
4560
4561 /* See if we can split SET_DEST as it stands. */
4562 split = find_split_point (&SET_DEST (x), insn, false);
4563 if (split && split != &SET_DEST (x))
4564 return split;
4565
4566 /* See if this is a bitfield assignment with everything constant. If
4567 so, this is an IOR of an AND, so split it into that. */
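/* Worked example (with little-endian bit numbering): storing the
constant 5 into a 3-bit field at bit 4 of register D becomes
D = (D & ~0x70) | 0x50, i.e. an IOR of an AND. */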
4568 if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
4569 && (GET_MODE_BITSIZE (GET_MODE (XEXP (SET_DEST (x), 0)))
4570 <= HOST_BITS_PER_WIDE_INT)
4571 && CONST_INT_P (XEXP (SET_DEST (x), 1))
4572 && CONST_INT_P (XEXP (SET_DEST (x), 2))
4573 && CONST_INT_P (SET_SRC (x))
4574 && ((INTVAL (XEXP (SET_DEST (x), 1))
4575 + INTVAL (XEXP (SET_DEST (x), 2)))
4576 <= GET_MODE_BITSIZE (GET_MODE (XEXP (SET_DEST (x), 0))))
4577 && ! side_effects_p (XEXP (SET_DEST (x), 0)))
4578 {
4579 HOST_WIDE_INT pos = INTVAL (XEXP (SET_DEST (x), 2));
4580 unsigned HOST_WIDE_INT len = INTVAL (XEXP (SET_DEST (x), 1));
4581 unsigned HOST_WIDE_INT src = INTVAL (SET_SRC (x));
4582 rtx dest = XEXP (SET_DEST (x), 0);
4583 enum machine_mode mode = GET_MODE (dest);
4584 unsigned HOST_WIDE_INT mask
4585 = ((unsigned HOST_WIDE_INT) 1 << len) - 1;
4586 rtx or_mask;
4587
4588 if (BITS_BIG_ENDIAN)
4589 pos = GET_MODE_BITSIZE (mode) - len - pos;
4590
4591 or_mask = gen_int_mode (src << pos, mode);
4592 if (src == mask)
4593 SUBST (SET_SRC (x),
4594 simplify_gen_binary (IOR, mode, dest, or_mask));
4595 else
4596 {
4597 rtx negmask = gen_int_mode (~(mask << pos), mode);
4598 SUBST (SET_SRC (x),
4599 simplify_gen_binary (IOR, mode,
4600 simplify_gen_binary (AND, mode,
4601 dest, negmask),
4602 or_mask));
4603 }
4604
4605 SUBST (SET_DEST (x), dest);
4606
4607 split = find_split_point (&SET_SRC (x), insn, true);
4608 if (split && split != &SET_SRC (x))
4609 return split;
4610 }
4611
4612 /* Otherwise, see if this is an operation that we can split into two.
4613 If so, try to split that. */
4614 code = GET_CODE (SET_SRC (x));
4615
4616 switch (code)
4617 {
4618 case AND:
4619 /* If we are AND'ing with a large constant that is only a single
4620 bit and the result is only being used in a context where we
4621 need to know if it is zero or nonzero, replace it with a bit
4622 extraction. This will avoid the large constant, which might
4623 have taken more than one insn to make. If the constant were
4624 not a valid argument to the AND but took only one insn to make,
4625 this is no worse, but if it took more than one insn, it will
4626 be better. */
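/* For example, if the insn is (set (reg T) (and:SI (reg X)
(const_int 0x1000))) and T is only tested against zero, the AND
is replaced by a one-bit extraction of X at bit 12. */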
4627
4628 if (CONST_INT_P (XEXP (SET_SRC (x), 1))
4629 && REG_P (XEXP (SET_SRC (x), 0))
4630 && (pos = exact_log2 (UINTVAL (XEXP (SET_SRC (x), 1)))) >= 7
4631 && REG_P (SET_DEST (x))
4632 && (split = find_single_use (SET_DEST (x), insn, (rtx*) 0)) != 0
4633 && (GET_CODE (*split) == EQ || GET_CODE (*split) == NE)
4634 && XEXP (*split, 0) == SET_DEST (x)
4635 && XEXP (*split, 1) == const0_rtx)
4636 {
4637 rtx extraction = make_extraction (GET_MODE (SET_DEST (x)),
4638 XEXP (SET_SRC (x), 0),
4639 pos, NULL_RTX, 1, 1, 0, 0);
4640 if (extraction != 0)
4641 {
4642 SUBST (SET_SRC (x), extraction);
4643 return find_split_point (loc, insn, false);
4644 }
4645 }
4646 break;
4647
4648 case NE:
4649 /* If STORE_FLAG_VALUE is -1, this is (NE X 0) and only one bit of X
4650 is known to be on, this can be converted into a NEG of a shift. */
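/* For example, if only bit 3 of X can be nonzero, then with
STORE_FLAG_VALUE == -1, (ne X (const_int 0)) becomes
(neg (lshiftrt X (const_int 3))), which is 0 or -1. */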
4651 if (STORE_FLAG_VALUE == -1 && XEXP (SET_SRC (x), 1) == const0_rtx
4652 && GET_MODE (SET_SRC (x)) == GET_MODE (XEXP (SET_SRC (x), 0))
4653 && 1 <= (pos = exact_log2
4654 (nonzero_bits (XEXP (SET_SRC (x), 0),
4655 GET_MODE (XEXP (SET_SRC (x), 0))))))
4656 {
4657 enum machine_mode mode = GET_MODE (XEXP (SET_SRC (x), 0));
4658
4659 SUBST (SET_SRC (x),
4660 gen_rtx_NEG (mode,
4661 gen_rtx_LSHIFTRT (mode,
4662 XEXP (SET_SRC (x), 0),
4663 GEN_INT (pos))));
4664
4665 split = find_split_point (&SET_SRC (x), insn, true);
4666 if (split && split != &SET_SRC (x))
4667 return split;
4668 }
4669 break;
4670
4671 case SIGN_EXTEND:
4672 inner = XEXP (SET_SRC (x), 0);
4673
4674 /* We can't optimize if either mode is a partial integer
4675 mode as we don't know how many bits are significant
4676 in those modes. */
4677 if (GET_MODE_CLASS (GET_MODE (inner)) == MODE_PARTIAL_INT
4678 || GET_MODE_CLASS (GET_MODE (SET_SRC (x))) == MODE_PARTIAL_INT)
4679 break;
4680
4681 pos = 0;
4682 len = GET_MODE_BITSIZE (GET_MODE (inner));
4683 unsignedp = 0;
4684 break;
4685
4686 case SIGN_EXTRACT:
4687 case ZERO_EXTRACT:
4688 if (CONST_INT_P (XEXP (SET_SRC (x), 1))
4689 && CONST_INT_P (XEXP (SET_SRC (x), 2)))
4690 {
4691 inner = XEXP (SET_SRC (x), 0);
4692 len = INTVAL (XEXP (SET_SRC (x), 1));
4693 pos = INTVAL (XEXP (SET_SRC (x), 2));
4694
4695 if (BITS_BIG_ENDIAN)
4696 pos = GET_MODE_BITSIZE (GET_MODE (inner)) - len - pos;
4697 unsignedp = (code == ZERO_EXTRACT);
4698 }
4699 break;
4700
4701 default:
4702 break;
4703 }
4704
4705 if (len && pos >= 0 && pos + len <= GET_MODE_BITSIZE (GET_MODE (inner)))
4706 {
4707 enum machine_mode mode = GET_MODE (SET_SRC (x));
4708
4709 /* For unsigned, we have a choice of a shift followed by an
4710 AND or two shifts. Use two shifts for field sizes where the
4711 constant might be too large. We assume here that we can
4712 always at least get 8-bit constants in an AND insn, which is
4713 true for every current RISC. */
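/* For example, an unsigned 8-bit field at bit 4 becomes
(and (lshiftrt X (const_int 4)) (const_int 255)), while in SImode
an unsigned 16-bit field at bit 4 uses two shifts:
(lshiftrt (ashift X (const_int 12)) (const_int 16)). */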
4714
4715 if (unsignedp && len <= 8)
4716 {
4717 SUBST (SET_SRC (x),
4718 gen_rtx_AND (mode,
4719 gen_rtx_LSHIFTRT
4720 (mode, gen_lowpart (mode, inner),
4721 GEN_INT (pos)),
4722 GEN_INT (((unsigned HOST_WIDE_INT) 1 << len)
4723 - 1)));
4724
4725 split = find_split_point (&SET_SRC (x), insn, true);
4726 if (split && split != &SET_SRC (x))
4727 return split;
4728 }
4729 else
4730 {
4731 SUBST (SET_SRC (x),
4732 gen_rtx_fmt_ee
4733 (unsignedp ? LSHIFTRT : ASHIFTRT, mode,
4734 gen_rtx_ASHIFT (mode,
4735 gen_lowpart (mode, inner),
4736 GEN_INT (GET_MODE_BITSIZE (mode)
4737 - len - pos)),
4738 GEN_INT (GET_MODE_BITSIZE (mode) - len)));
4739
4740 split = find_split_point (&SET_SRC (x), insn, true);
4741 if (split && split != &SET_SRC (x))
4742 return split;
4743 }
4744 }
4745
4746 /* See if this is a simple operation with a constant as the second
4747 operand. It might be that this constant is out of range and hence
4748 could be used as a split point. */
4749 if (BINARY_P (SET_SRC (x))
4750 && CONSTANT_P (XEXP (SET_SRC (x), 1))
4751 && (OBJECT_P (XEXP (SET_SRC (x), 0))
4752 || (GET_CODE (XEXP (SET_SRC (x), 0)) == SUBREG
4753 && OBJECT_P (SUBREG_REG (XEXP (SET_SRC (x), 0))))))
4754 return &XEXP (SET_SRC (x), 1);
4755
4756 /* Finally, see if this is a simple operation with its first operand
4757 not in a register. The operation might require this operand in a
4758 register, so return it as a split point. We can always do this
4759 because if the first operand were another operation, we would have
4760 already found it as a split point. */
4761 if ((BINARY_P (SET_SRC (x)) || UNARY_P (SET_SRC (x)))
4762 && ! register_operand (XEXP (SET_SRC (x), 0), VOIDmode))
4763 return &XEXP (SET_SRC (x), 0);
4764
4765 return 0;
4766
4767 case AND:
4768 case IOR:
4769 /* We write NOR as (and (not A) (not B)), but if we don't have a NOR,
4770 it is better to write this as (not (ior A B)) so we can split it.
4771 Similarly for IOR. */
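/* This is just De Morgan's law: (and (not A) (not B)) is
(not (ior A B)), and (ior (not A) (not B)) is (not (and A B)). */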
4772 if (GET_CODE (XEXP (x, 0)) == NOT && GET_CODE (XEXP (x, 1)) == NOT)
4773 {
4774 SUBST (*loc,
4775 gen_rtx_NOT (GET_MODE (x),
4776 gen_rtx_fmt_ee (code == IOR ? AND : IOR,
4777 GET_MODE (x),
4778 XEXP (XEXP (x, 0), 0),
4779 XEXP (XEXP (x, 1), 0))));
4780 return find_split_point (loc, insn, set_src);
4781 }
4782
4783 /* Many RISC machines have a large set of logical insns. If the
4784 second operand is a NOT, put it first so we will try to split the
4785 other operand first. */
4786 if (GET_CODE (XEXP (x, 1)) == NOT)
4787 {
4788 rtx tem = XEXP (x, 0);
4789 SUBST (XEXP (x, 0), XEXP (x, 1));
4790 SUBST (XEXP (x, 1), tem);
4791 }
4792 break;
4793
4794 case PLUS:
4795 case MINUS:
4796 /* Canonicalization can produce (minus A (mult B C)), where C is a
4797 constant. It may be better to try splitting (plus (mult B -C) A)
4798 instead if this isn't a multiply by a power of two. */
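/* For example, (minus A (mult B (const_int 5))) is rewritten as
(plus (mult B (const_int -5)) A) before looking for a split point. */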
4799 if (set_src && code == MINUS && GET_CODE (XEXP (x, 1)) == MULT
4800 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
4801 && exact_log2 (INTVAL (XEXP (XEXP (x, 1), 1))) < 0)
4802 {
4803 enum machine_mode mode = GET_MODE (x);
4804 unsigned HOST_WIDE_INT this_int = INTVAL (XEXP (XEXP (x, 1), 1));
4805 HOST_WIDE_INT other_int = trunc_int_for_mode (-this_int, mode);
4806 SUBST (*loc, gen_rtx_PLUS (mode, gen_rtx_MULT (mode,
4807 XEXP (XEXP (x, 1), 0),
4808 GEN_INT (other_int)),
4809 XEXP (x, 0)));
4810 return find_split_point (loc, insn, set_src);
4811 }
4812
4813 /* Split at a multiply-accumulate instruction. However if this is
4814 the SET_SRC, we likely do not have such an instruction and it's
4815 worthless to try this split. */
4816 if (!set_src && GET_CODE (XEXP (x, 0)) == MULT)
4817 return loc;
4818
4819 default:
4820 break;
4821 }
4822
4823 /* Otherwise, select our actions depending on our rtx class. */
4824 switch (GET_RTX_CLASS (code))
4825 {
4826 case RTX_BITFIELD_OPS: /* This is ZERO_EXTRACT and SIGN_EXTRACT. */
4827 case RTX_TERNARY:
4828 split = find_split_point (&XEXP (x, 2), insn, false);
4829 if (split)
4830 return split;
4831 /* ... fall through ... */
4832 case RTX_BIN_ARITH:
4833 case RTX_COMM_ARITH:
4834 case RTX_COMPARE:
4835 case RTX_COMM_COMPARE:
4836 split = find_split_point (&XEXP (x, 1), insn, false);
4837 if (split)
4838 return split;
4839 /* ... fall through ... */
4840 case RTX_UNARY:
4841 /* Some machines have (and (shift ...) ...) insns. If X is not
4842 an AND, but XEXP (X, 0) is, use it as our split point. */
4843 if (GET_CODE (x) != AND && GET_CODE (XEXP (x, 0)) == AND)
4844 return &XEXP (x, 0);
4845
4846 split = find_split_point (&XEXP (x, 0), insn, false);
4847 if (split)
4848 return split;
4849 return loc;
4850
4851 default:
4852 /* Otherwise, we don't have a split point. */
4853 return 0;
4854 }
4855 }
4856 \f
4857 /* Throughout X, replace FROM with TO, and return the result.
4858 The result is TO if X is FROM;
4859 otherwise the result is X, but its contents may have been modified.
4860 If they were modified, a record was made in undobuf so that
4861 undo_all will (among other things) return X to its original state.
4862
4863 If the number of changes necessary is too great to record for undoing,
4864 the excess changes are not made, so the result is invalid.
4865 The changes already made can still be undone.
4866 undobuf.num_undo is incremented for such changes, so by testing that
4867 the caller can tell whether the result is valid.
4868
4869 `n_occurrences' is incremented each time FROM is replaced.
4870
4871 IN_DEST is nonzero if we are processing the SET_DEST of a SET.
4872
4873 UNIQUE_COPY is nonzero if each substitution must be unique. We do this
4874 by copying if `n_occurrences' is nonzero. */
4875
4876 static rtx
4877 subst (rtx x, rtx from, rtx to, int in_dest, int unique_copy)
4878 {
4879 enum rtx_code code = GET_CODE (x);
4880 enum machine_mode op0_mode = VOIDmode;
4881 const char *fmt;
4882 int len, i;
4883 rtx new_rtx;
4884
4885 /* Two expressions are equal if they are identical copies of a shared
4886 RTX or if they are both registers with the same register number
4887 and mode. */
4888
4889 #define COMBINE_RTX_EQUAL_P(X,Y) \
4890 ((X) == (Y) \
4891 || (REG_P (X) && REG_P (Y) \
4892 && REGNO (X) == REGNO (Y) && GET_MODE (X) == GET_MODE (Y)))
4893
4894 if (! in_dest && COMBINE_RTX_EQUAL_P (x, from))
4895 {
4896 n_occurrences++;
4897 return (unique_copy && n_occurrences > 1 ? copy_rtx (to) : to);
4898 }
4899
4900 /* If X and FROM are the same register but different modes, they
4901 will not have been seen as equal above. However, the log links code
4902 will make a LOG_LINKS entry for that case. If we do nothing, we
4903 will try to rerecognize our original insn and, when it succeeds,
4904 we will delete the feeding insn, which is incorrect.
4905
4906 So force this insn not to match in this (rare) case. */
4907 if (! in_dest && code == REG && REG_P (from)
4908 && reg_overlap_mentioned_p (x, from))
4909 return gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
4910
4911 /* If this is an object, we are done unless it is a MEM or LO_SUM, both
4912 of which may contain things that can be combined. */
4913 if (code != MEM && code != LO_SUM && OBJECT_P (x))
4914 return x;
4915
4916 /* It is possible to have a subexpression appear twice in the insn.
4917 Suppose that FROM is a register that appears within TO.
4918 Then, after that subexpression has been scanned once by `subst',
4919 the second time it is scanned, TO may be found. If we were
4920 to scan TO here, we would find FROM within it and create a
4921 self-referential rtl structure which is completely wrong.
4922 if (COMBINE_RTX_EQUAL_P (x, to))
4923 return to;
4924
4925 /* Parallel asm_operands need special attention because all of the
4926 inputs are shared across the arms. Furthermore, unsharing the
4927 rtl results in recognition failures. Failure to handle this case
4928 specially can result in circular rtl.
4929
4930 Solve this by doing a normal pass across the first entry of the
4931 parallel, and only processing the SET_DESTs of the subsequent
4932 entries. Ug. */
4933
4934 if (code == PARALLEL
4935 && GET_CODE (XVECEXP (x, 0, 0)) == SET
4936 && GET_CODE (SET_SRC (XVECEXP (x, 0, 0))) == ASM_OPERANDS)
4937 {
4938 new_rtx = subst (XVECEXP (x, 0, 0), from, to, 0, unique_copy);
4939
4940 /* If this substitution failed, this whole thing fails. */
4941 if (GET_CODE (new_rtx) == CLOBBER
4942 && XEXP (new_rtx, 0) == const0_rtx)
4943 return new_rtx;
4944
4945 SUBST (XVECEXP (x, 0, 0), new_rtx);
4946
4947 for (i = XVECLEN (x, 0) - 1; i >= 1; i--)
4948 {
4949 rtx dest = SET_DEST (XVECEXP (x, 0, i));
4950
4951 if (!REG_P (dest)
4952 && GET_CODE (dest) != CC0
4953 && GET_CODE (dest) != PC)
4954 {
4955 new_rtx = subst (dest, from, to, 0, unique_copy);
4956
4957 /* If this substitution failed, this whole thing fails. */
4958 if (GET_CODE (new_rtx) == CLOBBER
4959 && XEXP (new_rtx, 0) == const0_rtx)
4960 return new_rtx;
4961
4962 SUBST (SET_DEST (XVECEXP (x, 0, i)), new_rtx);
4963 }
4964 }
4965 }
4966 else
4967 {
4968 len = GET_RTX_LENGTH (code);
4969 fmt = GET_RTX_FORMAT (code);
4970
4971 /* We don't need to process a SET_DEST that is a register, CC0,
4972 or PC, so set up to skip this common case. All other cases
4973 where we want to suppress replacing something inside a
4974 SET_SRC are handled via the IN_DEST operand. */
4975 if (code == SET
4976 && (REG_P (SET_DEST (x))
4977 || GET_CODE (SET_DEST (x)) == CC0
4978 || GET_CODE (SET_DEST (x)) == PC))
4979 fmt = "ie";
4980
4981 /* Get the mode of operand 0 in case X is now a SIGN_EXTEND of a
4982 constant. */
4983 if (fmt[0] == 'e')
4984 op0_mode = GET_MODE (XEXP (x, 0));
4985
4986 for (i = 0; i < len; i++)
4987 {
4988 if (fmt[i] == 'E')
4989 {
4990 int j;
4991 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
4992 {
4993 if (COMBINE_RTX_EQUAL_P (XVECEXP (x, i, j), from))
4994 {
4995 new_rtx = (unique_copy && n_occurrences
4996 ? copy_rtx (to) : to);
4997 n_occurrences++;
4998 }
4999 else
5000 {
5001 new_rtx = subst (XVECEXP (x, i, j), from, to, 0,
5002 unique_copy);
5003
5004 /* If this substitution failed, this whole thing
5005 fails. */
5006 if (GET_CODE (new_rtx) == CLOBBER
5007 && XEXP (new_rtx, 0) == const0_rtx)
5008 return new_rtx;
5009 }
5010
5011 SUBST (XVECEXP (x, i, j), new_rtx);
5012 }
5013 }
5014 else if (fmt[i] == 'e')
5015 {
5016 /* If this is a register being set, ignore it. */
5017 new_rtx = XEXP (x, i);
5018 if (in_dest
5019 && i == 0
5020 && (((code == SUBREG || code == ZERO_EXTRACT)
5021 && REG_P (new_rtx))
5022 || code == STRICT_LOW_PART))
5023 ;
5024
5025 else if (COMBINE_RTX_EQUAL_P (XEXP (x, i), from))
5026 {
5027 /* In general, don't install a subreg involving two
5028 modes that are not tieable. It can worsen register
5029 allocation, and can even create invalid reload
5030 insns, since the reg inside may need to be copied
5031 from in the outside mode, and that may be invalid
5032 if it is an fp reg copied in integer mode.
5033
5034 We allow two exceptions to this: It is valid if
5035 it is inside another SUBREG and the mode of that
5036 SUBREG and the mode of the inside of TO is
5037 tieable and it is valid if X is a SET that copies
5038 FROM to CC0. */
5039
5040 if (GET_CODE (to) == SUBREG
5041 && ! MODES_TIEABLE_P (GET_MODE (to),
5042 GET_MODE (SUBREG_REG (to)))
5043 && ! (code == SUBREG
5044 && MODES_TIEABLE_P (GET_MODE (x),
5045 GET_MODE (SUBREG_REG (to))))
5046 #ifdef HAVE_cc0
5047 && ! (code == SET && i == 1 && XEXP (x, 0) == cc0_rtx)
5048 #endif
5049 )
5050 return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5051
5052 #ifdef CANNOT_CHANGE_MODE_CLASS
5053 if (code == SUBREG
5054 && REG_P (to)
5055 && REGNO (to) < FIRST_PSEUDO_REGISTER
5056 && REG_CANNOT_CHANGE_MODE_P (REGNO (to),
5057 GET_MODE (to),
5058 GET_MODE (x)))
5059 return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5060 #endif
5061
5062 new_rtx = (unique_copy && n_occurrences ? copy_rtx (to) : to);
5063 n_occurrences++;
5064 }
5065 else
5066 /* If we are in a SET_DEST, suppress most cases unless we
5067 have gone inside a MEM, in which case we want to
5068 simplify the address. We assume here that things that
5069 are actually part of the destination have their inner
5070 parts in the first expression. This is true for SUBREG,
5071 STRICT_LOW_PART, and ZERO_EXTRACT, which are the only
5072 things aside from REG and MEM that should appear in a
5073 SET_DEST. */
5074 new_rtx = subst (XEXP (x, i), from, to,
5075 (((in_dest
5076 && (code == SUBREG || code == STRICT_LOW_PART
5077 || code == ZERO_EXTRACT))
5078 || code == SET)
5079 && i == 0), unique_copy);
5080
5081 /* If we found that we will have to reject this combination,
5082 indicate that by returning the CLOBBER ourselves, rather than
5083 an expression containing it. This will speed things up as
5084 well as prevent accidents where two CLOBBERs are considered
5085 to be equal, thus producing an incorrect simplification. */
5086
5087 if (GET_CODE (new_rtx) == CLOBBER && XEXP (new_rtx, 0) == const0_rtx)
5088 return new_rtx;
5089
5090 if (GET_CODE (x) == SUBREG
5091 && (CONST_INT_P (new_rtx)
5092 || GET_CODE (new_rtx) == CONST_DOUBLE))
5093 {
5094 enum machine_mode mode = GET_MODE (x);
5095
5096 x = simplify_subreg (GET_MODE (x), new_rtx,
5097 GET_MODE (SUBREG_REG (x)),
5098 SUBREG_BYTE (x));
5099 if (! x)
5100 x = gen_rtx_CLOBBER (mode, const0_rtx);
5101 }
5102 else if (CONST_INT_P (new_rtx)
5103 && GET_CODE (x) == ZERO_EXTEND)
5104 {
5105 x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
5106 new_rtx, GET_MODE (XEXP (x, 0)));
5107 gcc_assert (x);
5108 }
5109 else
5110 SUBST (XEXP (x, i), new_rtx);
5111 }
5112 }
5113 }
5114
5115 /* Check if we are loading something from the constant pool via float
5116 extension; in this case we would undo the compress_float_constant
5117 optimization and degenerate the constant load into an immediate value. */
5118 if (GET_CODE (x) == FLOAT_EXTEND
5119 && MEM_P (XEXP (x, 0))
5120 && MEM_READONLY_P (XEXP (x, 0)))
5121 {
5122 rtx tmp = avoid_constant_pool_reference (x);
5123 if (x != tmp)
5124 return x;
5125 }
5126
5127 /* Try to simplify X. If the simplification changed the code, it is likely
5128 that further simplification will help, so loop, but limit the number
5129 of repetitions that will be performed. */
5130
5131 for (i = 0; i < 4; i++)
5132 {
5133 /* If X is sufficiently simple, don't bother trying to do anything
5134 with it. */
5135 if (code != CONST_INT && code != REG && code != CLOBBER)
5136 x = combine_simplify_rtx (x, op0_mode, in_dest);
5137
5138 if (GET_CODE (x) == code)
5139 break;
5140
5141 code = GET_CODE (x);
5142
5143 /* We no longer know the original mode of operand 0 since we
5144 have changed the form of X. */
5145 op0_mode = VOIDmode;
5146 }
5147
5148 return x;
5149 }
5150 \f
5151 /* Simplify X, a piece of RTL. We just operate on the expression at the
5152 outer level; call `subst' to simplify recursively. Return the new
5153 expression.
5154
5155 OP0_MODE is the original mode of XEXP (x, 0). IN_DEST is nonzero
5156 if we are inside a SET_DEST. */
5157
5158 static rtx
5159 combine_simplify_rtx (rtx x, enum machine_mode op0_mode, int in_dest)
5160 {
5161 enum rtx_code code = GET_CODE (x);
5162 enum machine_mode mode = GET_MODE (x);
5163 rtx temp;
5164 int i;
5165
5166 /* If this is a commutative operation, put a constant last and a complex
5167 expression first. We don't need to do this for comparisons here. */
5168 if (COMMUTATIVE_ARITH_P (x)
5169 && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5170 {
5171 temp = XEXP (x, 0);
5172 SUBST (XEXP (x, 0), XEXP (x, 1));
5173 SUBST (XEXP (x, 1), temp);
5174 }
5175
5176 /* If this is a simple operation applied to an IF_THEN_ELSE, try
5177 applying it to the arms of the IF_THEN_ELSE. This often simplifies
5178 things. Check for cases where both arms are testing the same
5179 condition.
5180
5181 Don't do anything if all operands are very simple. */
5182
5183 if ((BINARY_P (x)
5184 && ((!OBJECT_P (XEXP (x, 0))
5185 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5186 && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))
5187 || (!OBJECT_P (XEXP (x, 1))
5188 && ! (GET_CODE (XEXP (x, 1)) == SUBREG
5189 && OBJECT_P (SUBREG_REG (XEXP (x, 1)))))))
5190 || (UNARY_P (x)
5191 && (!OBJECT_P (XEXP (x, 0))
5192 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5193 && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))))
5194 {
5195 rtx cond, true_rtx, false_rtx;
5196
5197 cond = if_then_else_cond (x, &true_rtx, &false_rtx);
5198 if (cond != 0
5199 /* If everything is a comparison, what we have is highly unlikely
5200 to be simpler, so don't use it. */
5201 && ! (COMPARISON_P (x)
5202 && (COMPARISON_P (true_rtx) || COMPARISON_P (false_rtx))))
5203 {
5204 rtx cop1 = const0_rtx;
5205 enum rtx_code cond_code = simplify_comparison (NE, &cond, &cop1);
5206
5207 if (cond_code == NE && COMPARISON_P (cond))
5208 return x;
5209
5210 /* Simplify the alternative arms; this may collapse the true and
5211 false arms to store-flag values. Be careful to use copy_rtx
5212 here since true_rtx or false_rtx might share RTL with x as a
5213 result of the if_then_else_cond call above. */
5214 true_rtx = subst (copy_rtx (true_rtx), pc_rtx, pc_rtx, 0, 0);
5215 false_rtx = subst (copy_rtx (false_rtx), pc_rtx, pc_rtx, 0, 0);
5216
5217 /* If true_rtx and false_rtx are not general_operands, an if_then_else
5218 is unlikely to be simpler. */
5219 if (general_operand (true_rtx, VOIDmode)
5220 && general_operand (false_rtx, VOIDmode))
5221 {
5222 enum rtx_code reversed;
5223
5224 /* Restarting if we generate a store-flag expression will cause
5225 us to loop. Just drop through in this case. */
5226
5227 /* If the result values are STORE_FLAG_VALUE and zero, we can
5228 just make the comparison operation. */
5229 if (true_rtx == const_true_rtx && false_rtx == const0_rtx)
5230 x = simplify_gen_relational (cond_code, mode, VOIDmode,
5231 cond, cop1);
5232 else if (true_rtx == const0_rtx && false_rtx == const_true_rtx
5233 && ((reversed = reversed_comparison_code_parts
5234 (cond_code, cond, cop1, NULL))
5235 != UNKNOWN))
5236 x = simplify_gen_relational (reversed, mode, VOIDmode,
5237 cond, cop1);
5238
5239 /* Likewise, we can make the negate of a comparison operation
5240 if the result values are - STORE_FLAG_VALUE and zero. */
5241 else if (CONST_INT_P (true_rtx)
5242 && INTVAL (true_rtx) == - STORE_FLAG_VALUE
5243 && false_rtx == const0_rtx)
5244 x = simplify_gen_unary (NEG, mode,
5245 simplify_gen_relational (cond_code,
5246 mode, VOIDmode,
5247 cond, cop1),
5248 mode);
5249 else if (CONST_INT_P (false_rtx)
5250 && INTVAL (false_rtx) == - STORE_FLAG_VALUE
5251 && true_rtx == const0_rtx
5252 && ((reversed = reversed_comparison_code_parts
5253 (cond_code, cond, cop1, NULL))
5254 != UNKNOWN))
5255 x = simplify_gen_unary (NEG, mode,
5256 simplify_gen_relational (reversed,
5257 mode, VOIDmode,
5258 cond, cop1),
5259 mode);
5260 else
5261 return gen_rtx_IF_THEN_ELSE (mode,
5262 simplify_gen_relational (cond_code,
5263 mode,
5264 VOIDmode,
5265 cond,
5266 cop1),
5267 true_rtx, false_rtx);
5268
5269 code = GET_CODE (x);
5270 op0_mode = VOIDmode;
5271 }
5272 }
5273 }
5274
5275 /* Try to fold this expression in case we have constants that weren't
5276 present before. */
5277 temp = 0;
5278 switch (GET_RTX_CLASS (code))
5279 {
5280 case RTX_UNARY:
5281 if (op0_mode == VOIDmode)
5282 op0_mode = GET_MODE (XEXP (x, 0));
5283 temp = simplify_unary_operation (code, mode, XEXP (x, 0), op0_mode);
5284 break;
5285 case RTX_COMPARE:
5286 case RTX_COMM_COMPARE:
5287 {
5288 enum machine_mode cmp_mode = GET_MODE (XEXP (x, 0));
5289 if (cmp_mode == VOIDmode)
5290 {
5291 cmp_mode = GET_MODE (XEXP (x, 1));
5292 if (cmp_mode == VOIDmode)
5293 cmp_mode = op0_mode;
5294 }
5295 temp = simplify_relational_operation (code, mode, cmp_mode,
5296 XEXP (x, 0), XEXP (x, 1));
5297 }
5298 break;
5299 case RTX_COMM_ARITH:
5300 case RTX_BIN_ARITH:
5301 temp = simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5302 break;
5303 case RTX_BITFIELD_OPS:
5304 case RTX_TERNARY:
5305 temp = simplify_ternary_operation (code, mode, op0_mode, XEXP (x, 0),
5306 XEXP (x, 1), XEXP (x, 2));
5307 break;
5308 default:
5309 break;
5310 }
5311
5312 if (temp)
5313 {
5314 x = temp;
5315 code = GET_CODE (temp);
5316 op0_mode = VOIDmode;
5317 mode = GET_MODE (temp);
5318 }
5319
5320 /* First see if we can apply the inverse distributive law. */
5321 if (code == PLUS || code == MINUS
5322 || code == AND || code == IOR || code == XOR)
5323 {
5324 x = apply_distributive_law (x);
5325 code = GET_CODE (x);
5326 op0_mode = VOIDmode;
5327 }
5328
5329 /* If CODE is an associative operation not otherwise handled, see if we
5330 can associate some operands. This can win if they are constants or
5331 if they are logically related (i.e. (a & b) & a). */
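/* For example, (plus (plus X (const_int 4)) (const_int 5)) associates
into (plus X (const_int 9)) because the two constants fold. */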
5332 if ((code == PLUS || code == MINUS || code == MULT || code == DIV
5333 || code == AND || code == IOR || code == XOR
5334 || code == SMAX || code == SMIN || code == UMAX || code == UMIN)
5335 && ((INTEGRAL_MODE_P (mode) && code != DIV)
5336 || (flag_associative_math && FLOAT_MODE_P (mode))))
5337 {
5338 if (GET_CODE (XEXP (x, 0)) == code)
5339 {
5340 rtx other = XEXP (XEXP (x, 0), 0);
5341 rtx inner_op0 = XEXP (XEXP (x, 0), 1);
5342 rtx inner_op1 = XEXP (x, 1);
5343 rtx inner;
5344
5345 /* Make sure we pass the constant operand if any as the second
5346 one if this is a commutative operation. */
5347 if (CONSTANT_P (inner_op0) && COMMUTATIVE_ARITH_P (x))
5348 {
5349 rtx tem = inner_op0;
5350 inner_op0 = inner_op1;
5351 inner_op1 = tem;
5352 }
5353 inner = simplify_binary_operation (code == MINUS ? PLUS
5354 : code == DIV ? MULT
5355 : code,
5356 mode, inner_op0, inner_op1);
5357
5358 /* For commutative operations, try the other pair if that one
5359 didn't simplify. */
5360 if (inner == 0 && COMMUTATIVE_ARITH_P (x))
5361 {
5362 other = XEXP (XEXP (x, 0), 1);
5363 inner = simplify_binary_operation (code, mode,
5364 XEXP (XEXP (x, 0), 0),
5365 XEXP (x, 1));
5366 }
5367
5368 if (inner)
5369 return simplify_gen_binary (code, mode, other, inner);
5370 }
5371 }
5372
5373 /* A little bit of algebraic simplification here. */
5374 switch (code)
5375 {
5376 case MEM:
5377 /* Ensure that our address has any ASHIFTs converted to MULT in case
5378 address-recognizing predicates are called later. */
5379 temp = make_compound_operation (XEXP (x, 0), MEM);
5380 SUBST (XEXP (x, 0), temp);
5381 break;
5382
5383 case SUBREG:
5384 if (op0_mode == VOIDmode)
5385 op0_mode = GET_MODE (SUBREG_REG (x));
5386
5387 /* See if this can be moved to simplify_subreg. */
5388 if (CONSTANT_P (SUBREG_REG (x))
5389 && subreg_lowpart_offset (mode, op0_mode) == SUBREG_BYTE (x)
5390 /* Don't call gen_lowpart if the inner mode
5391 is VOIDmode and we cannot simplify it, as SUBREG without
5392 inner mode is invalid. */
5393 && (GET_MODE (SUBREG_REG (x)) != VOIDmode
5394 || gen_lowpart_common (mode, SUBREG_REG (x))))
5395 return gen_lowpart (mode, SUBREG_REG (x));
5396
5397 if (GET_MODE_CLASS (GET_MODE (SUBREG_REG (x))) == MODE_CC)
5398 break;
5399 {
5400 rtx temp;
5401 temp = simplify_subreg (mode, SUBREG_REG (x), op0_mode,
5402 SUBREG_BYTE (x));
5403 if (temp)
5404 return temp;
5405 }
5406
5407 /* Don't change the mode of the MEM if that would change the meaning
5408 of the address. */
5409 if (MEM_P (SUBREG_REG (x))
5410 && (MEM_VOLATILE_P (SUBREG_REG (x))
5411 || mode_dependent_address_p (XEXP (SUBREG_REG (x), 0))))
5412 return gen_rtx_CLOBBER (mode, const0_rtx);
5413
5414 /* Note that we cannot do any narrowing for non-constants since
5415 we might have been counting on using the fact that some bits were
5416 zero. We now do this in the SET. */
5417
5418 break;
5419
5420 case NEG:
5421 temp = expand_compound_operation (XEXP (x, 0));
5422
5423 /* For C equal to the width of MODE minus 1, (neg (ashiftrt X C)) can be
5424 replaced by (lshiftrt X C). This will convert
5425 (neg (sign_extract X 1 Y)) to (zero_extract X 1 Y). */
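/* For instance, in SImode (neg (ashiftrt X 31)) is 0 or 1 according to
the sign bit of X, which is exactly (lshiftrt X 31). */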
5426
5427 if (GET_CODE (temp) == ASHIFTRT
5428 && CONST_INT_P (XEXP (temp, 1))
5429 && INTVAL (XEXP (temp, 1)) == GET_MODE_BITSIZE (mode) - 1)
5430 return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (temp, 0),
5431 INTVAL (XEXP (temp, 1)));
5432
5433 /* If X has only a single bit that might be nonzero, say, bit I, convert
5434 (neg X) to (ashiftrt (ashift X C-I) C-I) where C is the bitsize of
5435 MODE minus 1. This will convert (neg (zero_extract X 1 Y)) to
5436 (sign_extract X 1 Y). But only do this if TEMP isn't a register
5437 or a SUBREG of one since we'd be making the expression more
5438 complex if it was just a register. */
5439
5440 if (!REG_P (temp)
5441 && ! (GET_CODE (temp) == SUBREG
5442 && REG_P (SUBREG_REG (temp)))
5443 && (i = exact_log2 (nonzero_bits (temp, mode))) >= 0)
5444 {
5445 rtx temp1 = simplify_shift_const
5446 (NULL_RTX, ASHIFTRT, mode,
5447 simplify_shift_const (NULL_RTX, ASHIFT, mode, temp,
5448 GET_MODE_BITSIZE (mode) - 1 - i),
5449 GET_MODE_BITSIZE (mode) - 1 - i);
5450
5451 /* If all we did was surround TEMP with the two shifts, we
5452 haven't improved anything, so don't use it. Otherwise,
5453 we are better off with TEMP1. */
5454 if (GET_CODE (temp1) != ASHIFTRT
5455 || GET_CODE (XEXP (temp1, 0)) != ASHIFT
5456 || XEXP (XEXP (temp1, 0), 0) != temp)
5457 return temp1;
5458 }
5459 break;
5460
5461 case TRUNCATE:
5462 /* We can't handle truncation to a partial integer mode here
5463 because we don't know the real bitsize of the partial
5464 integer mode. */
5465 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
5466 break;
5467
5468 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
5469 SUBST (XEXP (x, 0),
5470 force_to_mode (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
5471 GET_MODE_MASK (mode), 0));
5472
5473 /* We can truncate a constant value and return it. */
5474 if (CONST_INT_P (XEXP (x, 0)))
5475 return gen_int_mode (INTVAL (XEXP (x, 0)), mode);
5476
5477 /* Similarly to what we do in simplify-rtx.c, a truncate of a register
5478 whose value is a comparison can be replaced with a subreg if
5479 STORE_FLAG_VALUE permits. */
5480 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
5481 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0
5482 && (temp = get_last_value (XEXP (x, 0)))
5483 && COMPARISON_P (temp))
5484 return gen_lowpart (mode, XEXP (x, 0));
5485 break;
5486
5487 case CONST:
5488 /* (const (const X)) can become (const X). Do it this way rather than
5489 returning the inner CONST since CONST can be shared with a
5490 REG_EQUAL note. */
5491 if (GET_CODE (XEXP (x, 0)) == CONST)
5492 SUBST (XEXP (x, 0), XEXP (XEXP (x, 0), 0));
5493 break;
5494
5495 #ifdef HAVE_lo_sum
5496 case LO_SUM:
5497 /* Convert (lo_sum (high FOO) FOO) to FOO. This is necessary so we
5498 can add in an offset. find_split_point will split this address up
5499 again if it doesn't match. */
5500 if (GET_CODE (XEXP (x, 0)) == HIGH
5501 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5502 return XEXP (x, 1);
5503 break;
5504 #endif
5505
5506 case PLUS:
5507 /* (plus (xor (and <foo> (const_int pow2 - 1)) <c>) <-c>)
5508 when c is (const_int (pow2 + 1) / 2) is a sign extension of a
5509 bit-field and can be replaced by either a sign_extend or a
5510 sign_extract. The `and' may be a zero_extend and the two
5511 <c>, -<c> constants may be reversed. */
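/* As a concrete instance, (plus (xor (and X 7) 4) -4) sign-extends the
low three bits of X: values 0..3 are unchanged and 4..7 become -4..-1. */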
5512 if (GET_CODE (XEXP (x, 0)) == XOR
5513 && CONST_INT_P (XEXP (x, 1))
5514 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
5515 && INTVAL (XEXP (x, 1)) == -INTVAL (XEXP (XEXP (x, 0), 1))
5516 && ((i = exact_log2 (UINTVAL (XEXP (XEXP (x, 0), 1)))) >= 0
5517 || (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0)
5518 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
5519 && ((GET_CODE (XEXP (XEXP (x, 0), 0)) == AND
5520 && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
5521 && (UINTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1))
5522 == ((unsigned HOST_WIDE_INT) 1 << (i + 1)) - 1))
5523 || (GET_CODE (XEXP (XEXP (x, 0), 0)) == ZERO_EXTEND
5524 && (GET_MODE_BITSIZE (GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0)))
5525 == (unsigned int) i + 1))))
5526 return simplify_shift_const
5527 (NULL_RTX, ASHIFTRT, mode,
5528 simplify_shift_const (NULL_RTX, ASHIFT, mode,
5529 XEXP (XEXP (XEXP (x, 0), 0), 0),
5530 GET_MODE_BITSIZE (mode) - (i + 1)),
5531 GET_MODE_BITSIZE (mode) - (i + 1));
5532
5533 /* If only the low-order bit of X is possibly nonzero, (plus x -1)
5534 can become (ashiftrt (ashift (xor x 1) C) C) where C is
5535 the bitsize of the mode - 1. This allows simplification of
5536 "a = (b & 8) == 0;" */
5537 if (XEXP (x, 1) == constm1_rtx
5538 && !REG_P (XEXP (x, 0))
5539 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5540 && REG_P (SUBREG_REG (XEXP (x, 0))))
5541 && nonzero_bits (XEXP (x, 0), mode) == 1)
5542 return simplify_shift_const (NULL_RTX, ASHIFTRT, mode,
5543 simplify_shift_const (NULL_RTX, ASHIFT, mode,
5544 gen_rtx_XOR (mode, XEXP (x, 0), const1_rtx),
5545 GET_MODE_BITSIZE (mode) - 1),
5546 GET_MODE_BITSIZE (mode) - 1);
5547
5548 /* If we are adding two things that have no bits in common, convert
5549 the addition into an IOR. This will often be further simplified,
5550 for example in cases like ((a & 1) + (a & 2)), which can
5551 become a & 3. */
5552
5553 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
5554 && (nonzero_bits (XEXP (x, 0), mode)
5555 & nonzero_bits (XEXP (x, 1), mode)) == 0)
5556 {
5557 /* Try to simplify the expression further. */
5558 rtx tor = simplify_gen_binary (IOR, mode, XEXP (x, 0), XEXP (x, 1));
5559 temp = combine_simplify_rtx (tor, mode, in_dest);
5560
5561 /* If we could, great. If not, do not go ahead with the IOR
5562 replacement, since PLUS appears in many special purpose
5563 address arithmetic instructions. */
5564 if (GET_CODE (temp) != CLOBBER && temp != tor)
5565 return temp;
5566 }
5567 break;
5568
5569 case MINUS:
5570 /* (minus <foo> (and <foo> (const_int -pow2))) becomes
5571 (and <foo> (const_int pow2-1)) */
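/* E.g. (minus X (and X (const_int -8))) keeps only the low three bits
of X, i.e. it is (and X (const_int 7)). */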
5572 if (GET_CODE (XEXP (x, 1)) == AND
5573 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
5574 && exact_log2 (-UINTVAL (XEXP (XEXP (x, 1), 1))) >= 0
5575 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
5576 return simplify_and_const_int (NULL_RTX, mode, XEXP (x, 0),
5577 -INTVAL (XEXP (XEXP (x, 1), 1)) - 1);
5578 break;
5579
5580 case MULT:
5581 /* If we have (mult (plus A B) C), apply the distributive law and then
5582 the inverse distributive law to see if things simplify. This
5583 occurs mostly in addresses, often when unrolling loops. */
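/* E.g. (mult (plus X (const_int 4)) (const_int 8)) may become
(plus (mult X (const_int 8)) (const_int 32)) if that form is cheaper. */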
5584
5585 if (GET_CODE (XEXP (x, 0)) == PLUS)
5586 {
5587 rtx result = distribute_and_simplify_rtx (x, 0);
5588 if (result)
5589 return result;
5590 }
5591
5592 /* Try to simplify a*(b/c) as (a*b)/c. */
5593 if (FLOAT_MODE_P (mode) && flag_associative_math
5594 && GET_CODE (XEXP (x, 0)) == DIV)
5595 {
5596 rtx tem = simplify_binary_operation (MULT, mode,
5597 XEXP (XEXP (x, 0), 0),
5598 XEXP (x, 1));
5599 if (tem)
5600 return simplify_gen_binary (DIV, mode, tem, XEXP (XEXP (x, 0), 1));
5601 }
5602 break;
5603
5604 case UDIV:
5605 /* If this is a divide by a power of two, treat it as a shift if
5606 its first operand is a shift. */
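/* E.g. (udiv (lshiftrt X 2) (const_int 4)) becomes a logical right shift
by 2 of the inner shift, which simplify_shift_const can then merge into
a single (lshiftrt X 4). */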
5607 if (CONST_INT_P (XEXP (x, 1))
5608 && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
5609 && (GET_CODE (XEXP (x, 0)) == ASHIFT
5610 || GET_CODE (XEXP (x, 0)) == LSHIFTRT
5611 || GET_CODE (XEXP (x, 0)) == ASHIFTRT
5612 || GET_CODE (XEXP (x, 0)) == ROTATE
5613 || GET_CODE (XEXP (x, 0)) == ROTATERT))
5614 return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (x, 0), i);
5615 break;
5616
5617 case EQ: case NE:
5618 case GT: case GTU: case GE: case GEU:
5619 case LT: case LTU: case LE: case LEU:
5620 case UNEQ: case LTGT:
5621 case UNGT: case UNGE:
5622 case UNLT: case UNLE:
5623 case UNORDERED: case ORDERED:
5624 /* If the first operand is a condition code, we can't do anything
5625 with it. */
5626 if (GET_CODE (XEXP (x, 0)) == COMPARE
5627 || (GET_MODE_CLASS (GET_MODE (XEXP (x, 0))) != MODE_CC
5628 && ! CC0_P (XEXP (x, 0))))
5629 {
5630 rtx op0 = XEXP (x, 0);
5631 rtx op1 = XEXP (x, 1);
5632 enum rtx_code new_code;
5633
5634 if (GET_CODE (op0) == COMPARE)
5635 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
5636
5637 /* Simplify our comparison, if possible. */
5638 new_code = simplify_comparison (code, &op0, &op1);
5639
5640 /* If STORE_FLAG_VALUE is 1, we can convert (ne x 0) to simply X
5641 if only the low-order bit is possibly nonzero in X (such as when
5642 X is a ZERO_EXTRACT of one bit). Similarly, we can convert EQ to
5643 (xor X 1) or (minus 1 X); we use the former. Finally, if X is
5644 known to be either 0 or -1, NE becomes a NEG and EQ becomes
5645 (plus X 1).
5646
5647 Remove any ZERO_EXTRACT we made when thinking this was a
5648 comparison. It may now be simpler to use, e.g., an AND. If a
5649 ZERO_EXTRACT is indeed appropriate, it will be placed back by
5650 the call to make_compound_operation in the SET case. */
5651
5652 if (STORE_FLAG_VALUE == 1
5653 && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
5654 && op1 == const0_rtx
5655 && mode == GET_MODE (op0)
5656 && nonzero_bits (op0, mode) == 1)
5657 return gen_lowpart (mode,
5658 expand_compound_operation (op0));
5659
5660 else if (STORE_FLAG_VALUE == 1
5661 && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
5662 && op1 == const0_rtx
5663 && mode == GET_MODE (op0)
5664 && (num_sign_bit_copies (op0, mode)
5665 == GET_MODE_BITSIZE (mode)))
5666 {
5667 op0 = expand_compound_operation (op0);
5668 return simplify_gen_unary (NEG, mode,
5669 gen_lowpart (mode, op0),
5670 mode);
5671 }
5672
5673 else if (STORE_FLAG_VALUE == 1
5674 && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
5675 && op1 == const0_rtx
5676 && mode == GET_MODE (op0)
5677 && nonzero_bits (op0, mode) == 1)
5678 {
5679 op0 = expand_compound_operation (op0);
5680 return simplify_gen_binary (XOR, mode,
5681 gen_lowpart (mode, op0),
5682 const1_rtx);
5683 }
5684
5685 else if (STORE_FLAG_VALUE == 1
5686 && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
5687 && op1 == const0_rtx
5688 && mode == GET_MODE (op0)
5689 && (num_sign_bit_copies (op0, mode)
5690 == GET_MODE_BITSIZE (mode)))
5691 {
5692 op0 = expand_compound_operation (op0);
5693 return plus_constant (gen_lowpart (mode, op0), 1);
5694 }
5695
5696 /* If STORE_FLAG_VALUE is -1, we have cases similar to
5697 those above. */
5698 if (STORE_FLAG_VALUE == -1
5699 && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
5700 && op1 == const0_rtx
5701 && (num_sign_bit_copies (op0, mode)
5702 == GET_MODE_BITSIZE (mode)))
5703 return gen_lowpart (mode,
5704 expand_compound_operation (op0));
5705
5706 else if (STORE_FLAG_VALUE == -1
5707 && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
5708 && op1 == const0_rtx
5709 && mode == GET_MODE (op0)
5710 && nonzero_bits (op0, mode) == 1)
5711 {
5712 op0 = expand_compound_operation (op0);
5713 return simplify_gen_unary (NEG, mode,
5714 gen_lowpart (mode, op0),
5715 mode);
5716 }
5717
5718 else if (STORE_FLAG_VALUE == -1
5719 && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
5720 && op1 == const0_rtx
5721 && mode == GET_MODE (op0)
5722 && (num_sign_bit_copies (op0, mode)
5723 == GET_MODE_BITSIZE (mode)))
5724 {
5725 op0 = expand_compound_operation (op0);
5726 return simplify_gen_unary (NOT, mode,
5727 gen_lowpart (mode, op0),
5728 mode);
5729 }
5730
5731 /* If X is 0/1, (eq X 0) is X-1. */
5732 else if (STORE_FLAG_VALUE == -1
5733 && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
5734 && op1 == const0_rtx
5735 && mode == GET_MODE (op0)
5736 && nonzero_bits (op0, mode) == 1)
5737 {
5738 op0 = expand_compound_operation (op0);
5739 return plus_constant (gen_lowpart (mode, op0), -1);
5740 }
5741
5742 /* If STORE_FLAG_VALUE says to just test the sign bit and X has just
5743 one bit that might be nonzero, we can convert (ne x 0) to
5744 (ashift x c) where C puts the bit in the sign bit. Remove any
5745 AND with STORE_FLAG_VALUE when we are done, since we are only
5746 going to test the sign bit. */
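/* For example, if STORE_FLAG_VALUE is 0x80000000 in SImode and only bit 3
of X can be nonzero, (ne X 0) becomes (ashift X 28), which moves that bit
into the sign bit. */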
5747 if (new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
5748 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
5749 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
5750 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
5751 && op1 == const0_rtx
5752 && mode == GET_MODE (op0)
5753 && (i = exact_log2 (nonzero_bits (op0, mode))) >= 0)
5754 {
5755 x = simplify_shift_const (NULL_RTX, ASHIFT, mode,
5756 expand_compound_operation (op0),
5757 GET_MODE_BITSIZE (mode) - 1 - i);
5758 if (GET_CODE (x) == AND && XEXP (x, 1) == const_true_rtx)
5759 return XEXP (x, 0);
5760 else
5761 return x;
5762 }
5763
5764 /* If the code changed, return a whole new comparison. */
5765 if (new_code != code)
5766 return gen_rtx_fmt_ee (new_code, mode, op0, op1);
5767
5768 /* Otherwise, keep this operation, but maybe change its operands.
5769 This also converts (ne (compare FOO BAR) 0) to (ne FOO BAR). */
5770 SUBST (XEXP (x, 0), op0);
5771 SUBST (XEXP (x, 1), op1);
5772 }
5773 break;
5774
5775 case IF_THEN_ELSE:
5776 return simplify_if_then_else (x);
5777
5778 case ZERO_EXTRACT:
5779 case SIGN_EXTRACT:
5780 case ZERO_EXTEND:
5781 case SIGN_EXTEND:
5782 /* If we are processing SET_DEST, we are done. */
5783 if (in_dest)
5784 return x;
5785
5786 return expand_compound_operation (x);
5787
5788 case SET:
5789 return simplify_set (x);
5790
5791 case AND:
5792 case IOR:
5793 return simplify_logical (x);
5794
5795 case ASHIFT:
5796 case LSHIFTRT:
5797 case ASHIFTRT:
5798 case ROTATE:
5799 case ROTATERT:
5800 /* If this is a shift by a constant amount, simplify it. */
5801 if (CONST_INT_P (XEXP (x, 1)))
5802 return simplify_shift_const (x, code, mode, XEXP (x, 0),
5803 INTVAL (XEXP (x, 1)));
5804
5805 else if (SHIFT_COUNT_TRUNCATED && !REG_P (XEXP (x, 1)))
5806 SUBST (XEXP (x, 1),
5807 force_to_mode (XEXP (x, 1), GET_MODE (XEXP (x, 1)),
5808 ((unsigned HOST_WIDE_INT) 1
5809 << exact_log2 (GET_MODE_BITSIZE (GET_MODE (x))))
5810 - 1,
5811 0));
5812 break;
5813
5814 default:
5815 break;
5816 }
5817
5818 return x;
5819 }
5820 \f
5821 /* Simplify X, an IF_THEN_ELSE expression. Return the new expression. */
5822
5823 static rtx
5824 simplify_if_then_else (rtx x)
5825 {
5826 enum machine_mode mode = GET_MODE (x);
5827 rtx cond = XEXP (x, 0);
5828 rtx true_rtx = XEXP (x, 1);
5829 rtx false_rtx = XEXP (x, 2);
5830 enum rtx_code true_code = GET_CODE (cond);
5831 int comparison_p = COMPARISON_P (cond);
5832 rtx temp;
5833 int i;
5834 enum rtx_code false_code;
5835 rtx reversed;
5836
5837 /* Simplify storing of the truth value. */
5838 if (comparison_p && true_rtx == const_true_rtx && false_rtx == const0_rtx)
5839 return simplify_gen_relational (true_code, mode, VOIDmode,
5840 XEXP (cond, 0), XEXP (cond, 1));
5841
5842 /* Also when the truth value has to be reversed. */
5843 if (comparison_p
5844 && true_rtx == const0_rtx && false_rtx == const_true_rtx
5845 && (reversed = reversed_comparison (cond, mode)))
5846 return reversed;
5847
5848 /* Sometimes we can simplify the arm of an IF_THEN_ELSE if a register used
5849 in it is being compared against certain values. Get the true and false
5850 comparisons and see if that says anything about the value of each arm. */
5851
5852 if (comparison_p
5853 && ((false_code = reversed_comparison_code (cond, NULL))
5854 != UNKNOWN)
5855 && REG_P (XEXP (cond, 0)))
5856 {
5857 HOST_WIDE_INT nzb;
5858 rtx from = XEXP (cond, 0);
5859 rtx true_val = XEXP (cond, 1);
5860 rtx false_val = true_val;
5861 int swapped = 0;
5862
5863 /* If FALSE_CODE is EQ, swap the codes and arms. */
5864
5865 if (false_code == EQ)
5866 {
5867 swapped = 1, true_code = EQ, false_code = NE;
5868 temp = true_rtx, true_rtx = false_rtx, false_rtx = temp;
5869 }
5870
5871 /* If we are comparing against zero and the expression being tested has
5872 only a single bit that might be nonzero, that is its value when it is
5873 not equal to zero. Similarly if it is known to be -1 or 0. */
5874
5875 if (true_code == EQ && true_val == const0_rtx
5876 && exact_log2 (nzb = nonzero_bits (from, GET_MODE (from))) >= 0)
5877 {
5878 false_code = EQ;
5879 false_val = GEN_INT (trunc_int_for_mode (nzb, GET_MODE (from)));
5880 }
5881 else if (true_code == EQ && true_val == const0_rtx
5882 && (num_sign_bit_copies (from, GET_MODE (from))
5883 == GET_MODE_BITSIZE (GET_MODE (from))))
5884 {
5885 false_code = EQ;
5886 false_val = constm1_rtx;
5887 }
5888
5889 /* Now simplify an arm if we know the value of the register in the
5890 branch and it is used in the arm. Be careful due to the potential
5891 of locally-shared RTL. */
5892
5893 if (reg_mentioned_p (from, true_rtx))
5894 true_rtx = subst (known_cond (copy_rtx (true_rtx), true_code,
5895 from, true_val),
5896 pc_rtx, pc_rtx, 0, 0);
5897 if (reg_mentioned_p (from, false_rtx))
5898 false_rtx = subst (known_cond (copy_rtx (false_rtx), false_code,
5899 from, false_val),
5900 pc_rtx, pc_rtx, 0, 0);
5901
5902 SUBST (XEXP (x, 1), swapped ? false_rtx : true_rtx);
5903 SUBST (XEXP (x, 2), swapped ? true_rtx : false_rtx);
5904
5905 true_rtx = XEXP (x, 1);
5906 false_rtx = XEXP (x, 2);
5907 true_code = GET_CODE (cond);
5908 }
5909
5910 /* If we have (if_then_else FOO (pc) (label_ref BAR)) and FOO can be
5911 reversed, do so to avoid needing two sets of patterns for
5912 subtract-and-branch insns. Similarly if we have a constant in the true
5913 arm, the false arm is the same as the first operand of the comparison, or
5914 the false arm is more complicated than the true arm. */
5915
5916 if (comparison_p
5917 && reversed_comparison_code (cond, NULL) != UNKNOWN
5918 && (true_rtx == pc_rtx
5919 || (CONSTANT_P (true_rtx)
5920 && !CONST_INT_P (false_rtx) && false_rtx != pc_rtx)
5921 || true_rtx == const0_rtx
5922 || (OBJECT_P (true_rtx) && !OBJECT_P (false_rtx))
5923 || (GET_CODE (true_rtx) == SUBREG && OBJECT_P (SUBREG_REG (true_rtx))
5924 && !OBJECT_P (false_rtx))
5925 || reg_mentioned_p (true_rtx, false_rtx)
5926 || rtx_equal_p (false_rtx, XEXP (cond, 0))))
5927 {
5928 true_code = reversed_comparison_code (cond, NULL);
5929 SUBST (XEXP (x, 0), reversed_comparison (cond, GET_MODE (cond)));
5930 SUBST (XEXP (x, 1), false_rtx);
5931 SUBST (XEXP (x, 2), true_rtx);
5932
5933 temp = true_rtx, true_rtx = false_rtx, false_rtx = temp;
5934 cond = XEXP (x, 0);
5935
5936 /* It is possible that the conditional has been simplified out. */
5937 true_code = GET_CODE (cond);
5938 comparison_p = COMPARISON_P (cond);
5939 }
5940
5941 /* If the two arms are identical, we don't need the comparison. */
5942
5943 if (rtx_equal_p (true_rtx, false_rtx) && ! side_effects_p (cond))
5944 return true_rtx;
5945
5946 /* Convert a == b ? b : a to "a". */
5947 if (true_code == EQ && ! side_effects_p (cond)
5948 && !HONOR_NANS (mode)
5949 && rtx_equal_p (XEXP (cond, 0), false_rtx)
5950 && rtx_equal_p (XEXP (cond, 1), true_rtx))
5951 return false_rtx;
5952 else if (true_code == NE && ! side_effects_p (cond)
5953 && !HONOR_NANS (mode)
5954 && rtx_equal_p (XEXP (cond, 0), true_rtx)
5955 && rtx_equal_p (XEXP (cond, 1), false_rtx))
5956 return true_rtx;
5957
5958 /* Look for cases where we have (abs x) or (neg (abs X)). */
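/* That is, (if_then_else (ge X 0) X (neg X)) is (abs X), while the
LT and LE forms yield (neg (abs X)). */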
5959
5960 if (GET_MODE_CLASS (mode) == MODE_INT
5961 && comparison_p
5962 && XEXP (cond, 1) == const0_rtx
5963 && GET_CODE (false_rtx) == NEG
5964 && rtx_equal_p (true_rtx, XEXP (false_rtx, 0))
5965 && rtx_equal_p (true_rtx, XEXP (cond, 0))
5966 && ! side_effects_p (true_rtx))
5967 switch (true_code)
5968 {
5969 case GT:
5970 case GE:
5971 return simplify_gen_unary (ABS, mode, true_rtx, mode);
5972 case LT:
5973 case LE:
5974 return
5975 simplify_gen_unary (NEG, mode,
5976 simplify_gen_unary (ABS, mode, true_rtx, mode),
5977 mode);
5978 default:
5979 break;
5980 }
5981
5982 /* Look for MIN or MAX. */
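/* For instance, (if_then_else (ge A B) A B) is (smax A B) and
(if_then_else (ltu A B) A B) is (umin A B). */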
5983
5984 if ((! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
5985 && comparison_p
5986 && rtx_equal_p (XEXP (cond, 0), true_rtx)
5987 && rtx_equal_p (XEXP (cond, 1), false_rtx)
5988 && ! side_effects_p (cond))
5989 switch (true_code)
5990 {
5991 case GE:
5992 case GT:
5993 return simplify_gen_binary (SMAX, mode, true_rtx, false_rtx);
5994 case LE:
5995 case LT:
5996 return simplify_gen_binary (SMIN, mode, true_rtx, false_rtx);
5997 case GEU:
5998 case GTU:
5999 return simplify_gen_binary (UMAX, mode, true_rtx, false_rtx);
6000 case LEU:
6001 case LTU:
6002 return simplify_gen_binary (UMIN, mode, true_rtx, false_rtx);
6003 default:
6004 break;
6005 }
6006
6007 /* If we have (if_then_else COND (OP Z C1) Z) and OP is an identity when its
6008 second operand is zero, this can be done as (OP Z (mult COND C2)) where
6009 C2 = C1 * STORE_FLAG_VALUE. Similarly if OP has an outer ZERO_EXTEND or
6010 SIGN_EXTEND as long as Z is already extended (so we don't destroy it).
6011 We can do this kind of thing in some cases when STORE_FLAG_VALUE is
6012 neither 1 nor -1, but it isn't worth checking for. */
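/* For example, with STORE_FLAG_VALUE == 1,
(if_then_else (ne A B) (plus Z (const_int 4)) Z) can be rewritten as
(plus Z (mult (ne A B) (const_int 4))). */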
6013
6014 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
6015 && comparison_p
6016 && GET_MODE_CLASS (mode) == MODE_INT
6017 && ! side_effects_p (x))
6018 {
6019 rtx t = make_compound_operation (true_rtx, SET);
6020 rtx f = make_compound_operation (false_rtx, SET);
6021 rtx cond_op0 = XEXP (cond, 0);
6022 rtx cond_op1 = XEXP (cond, 1);
6023 enum rtx_code op = UNKNOWN, extend_op = UNKNOWN;
6024 enum machine_mode m = mode;
6025 rtx z = 0, c1 = NULL_RTX;
6026
6027 if ((GET_CODE (t) == PLUS || GET_CODE (t) == MINUS
6028 || GET_CODE (t) == IOR || GET_CODE (t) == XOR
6029 || GET_CODE (t) == ASHIFT
6030 || GET_CODE (t) == LSHIFTRT || GET_CODE (t) == ASHIFTRT)
6031 && rtx_equal_p (XEXP (t, 0), f))
6032 c1 = XEXP (t, 1), op = GET_CODE (t), z = f;
6033
6034 /* If an identity-zero op is commutative, check whether there
6035 would be a match if we swapped the operands. */
6036 else if ((GET_CODE (t) == PLUS || GET_CODE (t) == IOR
6037 || GET_CODE (t) == XOR)
6038 && rtx_equal_p (XEXP (t, 1), f))
6039 c1 = XEXP (t, 0), op = GET_CODE (t), z = f;
6040 else if (GET_CODE (t) == SIGN_EXTEND
6041 && (GET_CODE (XEXP (t, 0)) == PLUS
6042 || GET_CODE (XEXP (t, 0)) == MINUS
6043 || GET_CODE (XEXP (t, 0)) == IOR
6044 || GET_CODE (XEXP (t, 0)) == XOR
6045 || GET_CODE (XEXP (t, 0)) == ASHIFT
6046 || GET_CODE (XEXP (t, 0)) == LSHIFTRT
6047 || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
6048 && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
6049 && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
6050 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
6051 && (num_sign_bit_copies (f, GET_MODE (f))
6052 > (unsigned int)
6053 (GET_MODE_BITSIZE (mode)
6054 - GET_MODE_BITSIZE (GET_MODE (XEXP (XEXP (t, 0), 0))))))
6055 {
6056 c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
6057 extend_op = SIGN_EXTEND;
6058 m = GET_MODE (XEXP (t, 0));
6059 }
6060 else if (GET_CODE (t) == SIGN_EXTEND
6061 && (GET_CODE (XEXP (t, 0)) == PLUS
6062 || GET_CODE (XEXP (t, 0)) == IOR
6063 || GET_CODE (XEXP (t, 0)) == XOR)
6064 && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
6065 && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
6066 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
6067 && (num_sign_bit_copies (f, GET_MODE (f))
6068 > (unsigned int)
6069 (GET_MODE_BITSIZE (mode)
6070 - GET_MODE_BITSIZE (GET_MODE (XEXP (XEXP (t, 0), 1))))))
6071 {
6072 c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
6073 extend_op = SIGN_EXTEND;
6074 m = GET_MODE (XEXP (t, 0));
6075 }
6076 else if (GET_CODE (t) == ZERO_EXTEND
6077 && (GET_CODE (XEXP (t, 0)) == PLUS
6078 || GET_CODE (XEXP (t, 0)) == MINUS
6079 || GET_CODE (XEXP (t, 0)) == IOR
6080 || GET_CODE (XEXP (t, 0)) == XOR
6081 || GET_CODE (XEXP (t, 0)) == ASHIFT
6082 || GET_CODE (XEXP (t, 0)) == LSHIFTRT
6083 || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
6084 && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
6085 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
6086 && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
6087 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
6088 && ((nonzero_bits (f, GET_MODE (f))
6089 & ~GET_MODE_MASK (GET_MODE (XEXP (XEXP (t, 0), 0))))
6090 == 0))
6091 {
6092 c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
6093 extend_op = ZERO_EXTEND;
6094 m = GET_MODE (XEXP (t, 0));
6095 }
6096 else if (GET_CODE (t) == ZERO_EXTEND
6097 && (GET_CODE (XEXP (t, 0)) == PLUS
6098 || GET_CODE (XEXP (t, 0)) == IOR
6099 || GET_CODE (XEXP (t, 0)) == XOR)
6100 && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
6101 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
6102 && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
6103 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
6104 && ((nonzero_bits (f, GET_MODE (f))
6105 & ~GET_MODE_MASK (GET_MODE (XEXP (XEXP (t, 0), 1))))
6106 == 0))
6107 {
6108 c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
6109 extend_op = ZERO_EXTEND;
6110 m = GET_MODE (XEXP (t, 0));
6111 }
6112
6113 if (z)
6114 {
6115 temp = subst (simplify_gen_relational (true_code, m, VOIDmode,
6116 cond_op0, cond_op1),
6117 pc_rtx, pc_rtx, 0, 0);
6118 temp = simplify_gen_binary (MULT, m, temp,
6119 simplify_gen_binary (MULT, m, c1,
6120 const_true_rtx));
6121 temp = subst (temp, pc_rtx, pc_rtx, 0, 0);
6122 temp = simplify_gen_binary (op, m, gen_lowpart (m, z), temp);
6123
6124 if (extend_op != UNKNOWN)
6125 temp = simplify_gen_unary (extend_op, mode, temp, m);
6126
6127 return temp;
6128 }
6129 }
6130
6131 /* If we have (if_then_else (ne A 0) C1 0) and either A is known to be 0 or
6132 1 and C1 is a single bit or A is known to be 0 or -1 and C1 is the
6133 negation of a single bit, we can convert this operation to a shift. We
6134 can actually do this more generally, but it doesn't seem worth it. */
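/* E.g. if A is known to be 0 or 1, (if_then_else (ne A 0) (const_int 4)
(const_int 0)) becomes (ashift A 2). */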
6135
6136 if (true_code == NE && XEXP (cond, 1) == const0_rtx
6137 && false_rtx == const0_rtx && CONST_INT_P (true_rtx)
6138 && ((1 == nonzero_bits (XEXP (cond, 0), mode)
6139 && (i = exact_log2 (UINTVAL (true_rtx))) >= 0)
6140 || ((num_sign_bit_copies (XEXP (cond, 0), mode)
6141 == GET_MODE_BITSIZE (mode))
6142 && (i = exact_log2 (-UINTVAL (true_rtx))) >= 0)))
6143 return
6144 simplify_shift_const (NULL_RTX, ASHIFT, mode,
6145 gen_lowpart (mode, XEXP (cond, 0)), i);
6146
6147 /* (IF_THEN_ELSE (NE REG 0) (8) (0)) is REG for nonzero_bits (REG) == 8. */
6148 if (true_code == NE && XEXP (cond, 1) == const0_rtx
6149 && false_rtx == const0_rtx && CONST_INT_P (true_rtx)
6150 && GET_MODE (XEXP (cond, 0)) == mode
6151 && (UINTVAL (true_rtx) & GET_MODE_MASK (mode))
6152 == nonzero_bits (XEXP (cond, 0), mode)
6153 && (i = exact_log2 (UINTVAL (true_rtx) & GET_MODE_MASK (mode))) >= 0)
6154 return XEXP (cond, 0);
6155
6156 return x;
6157 }
6158 \f
6159 /* Simplify X, a SET expression. Return the new expression. */
6160
6161 static rtx
6162 simplify_set (rtx x)
6163 {
6164 rtx src = SET_SRC (x);
6165 rtx dest = SET_DEST (x);
6166 enum machine_mode mode
6167 = GET_MODE (src) != VOIDmode ? GET_MODE (src) : GET_MODE (dest);
6168 rtx other_insn;
6169 rtx *cc_use;
6170
6171 /* (set (pc) (return)) gets written as (return). */
6172 if (GET_CODE (dest) == PC && GET_CODE (src) == RETURN)
6173 return src;
6174
6175 /* Now that we know for sure which bits of SRC we are using, see if we can
6176 simplify the expression for the object knowing that we only need the
6177 low-order bits. */
6178
6179 if (GET_MODE_CLASS (mode) == MODE_INT
6180 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
6181 {
6182 src = force_to_mode (src, mode, ~(unsigned HOST_WIDE_INT) 0, 0);
6183 SUBST (SET_SRC (x), src);
6184 }
6185
6186 /* If we are setting CC0 or if the source is a COMPARE, look for the use of
6187 the comparison result and try to simplify it unless we already have used
6188 undobuf.other_insn. */
6189 if ((GET_MODE_CLASS (mode) == MODE_CC
6190 || GET_CODE (src) == COMPARE
6191 || CC0_P (dest))
6192 && (cc_use = find_single_use (dest, subst_insn, &other_insn)) != 0
6193 && (undobuf.other_insn == 0 || other_insn == undobuf.other_insn)
6194 && COMPARISON_P (*cc_use)
6195 && rtx_equal_p (XEXP (*cc_use, 0), dest))
6196 {
6197 enum rtx_code old_code = GET_CODE (*cc_use);
6198 enum rtx_code new_code;
6199 rtx op0, op1, tmp;
6200 int other_changed = 0;
6201 enum machine_mode compare_mode = GET_MODE (dest);
6202
6203 if (GET_CODE (src) == COMPARE)
6204 op0 = XEXP (src, 0), op1 = XEXP (src, 1);
6205 else
6206 op0 = src, op1 = CONST0_RTX (GET_MODE (src));
6207
6208 tmp = simplify_relational_operation (old_code, compare_mode, VOIDmode,
6209 op0, op1);
6210 if (!tmp)
6211 new_code = old_code;
6212 else if (!CONSTANT_P (tmp))
6213 {
6214 new_code = GET_CODE (tmp);
6215 op0 = XEXP (tmp, 0);
6216 op1 = XEXP (tmp, 1);
6217 }
6218 else
6219 {
6220 rtx pat = PATTERN (other_insn);
6221 undobuf.other_insn = other_insn;
6222 SUBST (*cc_use, tmp);
6223
6224 /* Attempt to simplify CC user. */
6225 if (GET_CODE (pat) == SET)
6226 {
6227 rtx new_rtx = simplify_rtx (SET_SRC (pat));
6228 if (new_rtx != NULL_RTX)
6229 SUBST (SET_SRC (pat), new_rtx);
6230 }
6231
6232 /* Convert X into a no-op move. */
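/* Making both the source and the destination (pc) turns X into a jump
to the following insn, i.e. a no-op that later passes can delete. */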
6233 SUBST (SET_DEST (x), pc_rtx);
6234 SUBST (SET_SRC (x), pc_rtx);
6235 return x;
6236 }
6237
6238 /* Simplify our comparison, if possible. */
6239 new_code = simplify_comparison (new_code, &op0, &op1);
6240
6241 #ifdef SELECT_CC_MODE
6242 /* If this machine has CC modes other than CCmode, check to see if we
6243 need to use a different CC mode here. */
6244 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
6245 compare_mode = GET_MODE (op0);
6246 else
6247 compare_mode = SELECT_CC_MODE (new_code, op0, op1);
6248
6249 #ifndef HAVE_cc0
6250 /* If the mode changed, we have to change SET_DEST, the mode in the
6251 compare, and the mode in the place SET_DEST is used. If SET_DEST is
6252 a hard register, just build new versions with the proper mode. If it
6253 is a pseudo, we lose unless it is the only time we set the pseudo, in
6254 which case we can safely change its mode. */
6255 if (compare_mode != GET_MODE (dest))
6256 {
6257 if (can_change_dest_mode (dest, 0, compare_mode))
6258 {
6259 unsigned int regno = REGNO (dest);
6260 rtx new_dest;
6261
6262 if (regno < FIRST_PSEUDO_REGISTER)
6263 new_dest = gen_rtx_REG (compare_mode, regno);
6264 else
6265 {
6266 SUBST_MODE (regno_reg_rtx[regno], compare_mode);
6267 new_dest = regno_reg_rtx[regno];
6268 }
6269
6270 SUBST (SET_DEST (x), new_dest);
6271 SUBST (XEXP (*cc_use, 0), new_dest);
6272 other_changed = 1;
6273
6274 dest = new_dest;
6275 }
6276 }
6277 #endif /* cc0 */
6278 #endif /* SELECT_CC_MODE */
6279
6280 /* If the code changed, we have to build a new comparison in
6281 undobuf.other_insn. */
6282 if (new_code != old_code)
6283 {
6284 int other_changed_previously = other_changed;
6285 unsigned HOST_WIDE_INT mask;
6286 rtx old_cc_use = *cc_use;
6287
6288 SUBST (*cc_use, gen_rtx_fmt_ee (new_code, GET_MODE (*cc_use),
6289 dest, const0_rtx));
6290 other_changed = 1;
6291
6292 /* If the only change we made was to change an EQ into an NE or
6293 vice versa, OP0 has only one bit that might be nonzero, and OP1
6294 is zero, check if changing the user of the condition code will
6295 produce a valid insn. If it won't, we can keep the original code
6296 in that insn by surrounding our operation with an XOR. */
6297
6298 if (((old_code == NE && new_code == EQ)
6299 || (old_code == EQ && new_code == NE))
6300 && ! other_changed_previously && op1 == const0_rtx
6301 && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT
6302 && exact_log2 (mask = nonzero_bits (op0, GET_MODE (op0))) >= 0)
6303 {
6304 rtx pat = PATTERN (other_insn), note = 0;
6305
6306 if ((recog_for_combine (&pat, other_insn, &note) < 0
6307 && ! check_asm_operands (pat)))
6308 {
6309 *cc_use = old_cc_use;
6310 other_changed = 0;
6311
6312 op0 = simplify_gen_binary (XOR, GET_MODE (op0),
6313 op0, GEN_INT (mask));
6314 }
6315 }
6316 }
6317
6318 if (other_changed)
6319 undobuf.other_insn = other_insn;
6320
6321 /* Otherwise, if we didn't previously have a COMPARE in the
6322 correct mode, we need one. */
6323 if (GET_CODE (src) != COMPARE || GET_MODE (src) != compare_mode)
6324 {
6325 SUBST (SET_SRC (x), gen_rtx_COMPARE (compare_mode, op0, op1));
6326 src = SET_SRC (x);
6327 }
6328 else if (GET_MODE (op0) == compare_mode && op1 == const0_rtx)
6329 {
6330 SUBST (SET_SRC (x), op0);
6331 src = SET_SRC (x);
6332 }
6333 /* Otherwise, update the COMPARE if needed. */
6334 else if (XEXP (src, 0) != op0 || XEXP (src, 1) != op1)
6335 {
6336 SUBST (SET_SRC (x), gen_rtx_COMPARE (compare_mode, op0, op1));
6337 src = SET_SRC (x);
6338 }
6339 }
6340 else
6341 {
6342 /* Get SET_SRC in a form where we have placed back any
6343 compound expressions. Then do the checks below. */
6344 src = make_compound_operation (src, SET);
6345 SUBST (SET_SRC (x), src);
6346 }
6347
6348 /* If we have (set x (subreg:m1 (op:m2 ...) 0)) with OP being some operation,
6349 and X being a REG or (subreg (reg)), we may be able to convert this to
6350 (set (subreg:m2 x) (op)).
6351
6352 We can always do this if M1 is narrower than M2 because that means that
6353 we only care about the low bits of the result.
6354
6355 However, on machines without WORD_REGISTER_OPERATIONS defined, we cannot
6356 perform a narrower operation than requested since the high-order bits will
6357 be undefined. On machines where it is defined, this transformation is safe
6358 as long as M1 and M2 have the same number of words. */
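/* For example, on a 32-bit word target,
(set R:HI (subreg:HI (plus:SI A B) 0)) can be rewritten as
(set (subreg:SI R 0) (plus:SI A B)), since only the low-order
bits of the result are needed. */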
6359
6360 if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
6361 && !OBJECT_P (SUBREG_REG (src))
6362 && (((GET_MODE_SIZE (GET_MODE (src)) + (UNITS_PER_WORD - 1))
6363 / UNITS_PER_WORD)
6364 == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))
6365 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))
6366 #ifndef WORD_REGISTER_OPERATIONS
6367 && (GET_MODE_SIZE (GET_MODE (src))
6368 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (src))))
6369 #endif
6370 #ifdef CANNOT_CHANGE_MODE_CLASS
6371 && ! (REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER
6372 && REG_CANNOT_CHANGE_MODE_P (REGNO (dest),
6373 GET_MODE (SUBREG_REG (src)),
6374 GET_MODE (src)))
6375 #endif
6376 && (REG_P (dest)
6377 || (GET_CODE (dest) == SUBREG
6378 && REG_P (SUBREG_REG (dest)))))
6379 {
6380 SUBST (SET_DEST (x),
6381 gen_lowpart (GET_MODE (SUBREG_REG (src)),
6382 dest));
6383 SUBST (SET_SRC (x), SUBREG_REG (src));
6384
6385 src = SET_SRC (x), dest = SET_DEST (x);
6386 }
6387
6388 #ifdef HAVE_cc0
6389 /* If we have (set (cc0) (subreg ...)), we try to remove the subreg
6390 in SRC. */
6391 if (dest == cc0_rtx
6392 && GET_CODE (src) == SUBREG
6393 && subreg_lowpart_p (src)
6394 && (GET_MODE_BITSIZE (GET_MODE (src))
6395 < GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (src)))))
6396 {
6397 rtx inner = SUBREG_REG (src);
6398 enum machine_mode inner_mode = GET_MODE (inner);
6399
6400 /* Here we make sure that the sign bit is not set. */
6401 if (GET_MODE_BITSIZE (inner_mode) <= HOST_BITS_PER_WIDE_INT
6402 && (nonzero_bits (inner, inner_mode)
6403 < ((unsigned HOST_WIDE_INT) 1
6404 << (GET_MODE_BITSIZE (GET_MODE (src)) - 1))))
6405 {
6406 SUBST (SET_SRC (x), inner);
6407 src = SET_SRC (x);
6408 }
6409 }
6410 #endif
6411
6412 #ifdef LOAD_EXTEND_OP
6413 /* If we have (set FOO (subreg:M (mem:N BAR) 0)) with M wider than N, this
6414 would require a paradoxical subreg. Replace the subreg with a
6415 zero_extend to avoid the reload that would otherwise be required. */
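/* E.g. on a target whose LOAD_EXTEND_OP for QImode is ZERO_EXTEND,
(set FOO (subreg:SI (mem:QI ADDR) 0)) becomes
(set FOO (zero_extend:SI (mem:QI ADDR))). */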
6416
6417 if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
6418 && INTEGRAL_MODE_P (GET_MODE (SUBREG_REG (src)))
6419 && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (src))) != UNKNOWN
6420 && SUBREG_BYTE (src) == 0
6421 && (GET_MODE_SIZE (GET_MODE (src))
6422 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (src))))
6423 && MEM_P (SUBREG_REG (src)))
6424 {
6425 SUBST (SET_SRC (x),
6426 gen_rtx_fmt_e (LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (src))),
6427 GET_MODE (src), SUBREG_REG (src)));
6428
6429 src = SET_SRC (x);
6430 }
6431 #endif
6432
6433 /* If we don't have a conditional move, SET_SRC is an IF_THEN_ELSE, and we
6434 are comparing an item known to be 0 or -1 against 0, use a logical
6435 operation instead. Check for one of the arms being an IOR of the other
6436 arm with some value. We compute three terms to be IOR'ed together. In
6437 practice, at most two will be nonzero. Then we do the IOR's. */
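/* For example, if A is known to be 0 or -1, the source
(if_then_else (ne A 0) (ior B C) B) can become (ior B (and A C)):
the AND contributes C only when A is -1. */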
6438
6439 if (GET_CODE (dest) != PC
6440 && GET_CODE (src) == IF_THEN_ELSE
6441 && GET_MODE_CLASS (GET_MODE (src)) == MODE_INT
6442 && (GET_CODE (XEXP (src, 0)) == EQ || GET_CODE (XEXP (src, 0)) == NE)
6443 && XEXP (XEXP (src, 0), 1) == const0_rtx
6444 && GET_MODE (src) == GET_MODE (XEXP (XEXP (src, 0), 0))
6445 #ifdef HAVE_conditional_move
6446 && ! can_conditionally_move_p (GET_MODE (src))
6447 #endif
6448 && (num_sign_bit_copies (XEXP (XEXP (src, 0), 0),
6449 GET_MODE (XEXP (XEXP (src, 0), 0)))
6450 == GET_MODE_BITSIZE (GET_MODE (XEXP (XEXP (src, 0), 0))))
6451 && ! side_effects_p (src))
6452 {
6453 rtx true_rtx = (GET_CODE (XEXP (src, 0)) == NE
6454 ? XEXP (src, 1) : XEXP (src, 2));
6455 rtx false_rtx = (GET_CODE (XEXP (src, 0)) == NE
6456 ? XEXP (src, 2) : XEXP (src, 1));
6457 rtx term1 = const0_rtx, term2, term3;
6458
6459 if (GET_CODE (true_rtx) == IOR
6460 && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
6461 term1 = false_rtx, true_rtx = XEXP (true_rtx, 1), false_rtx = const0_rtx;
6462 else if (GET_CODE (true_rtx) == IOR
6463 && rtx_equal_p (XEXP (true_rtx, 1), false_rtx))
6464 term1 = false_rtx, true_rtx = XEXP (true_rtx, 0), false_rtx = const0_rtx;
6465 else if (GET_CODE (false_rtx) == IOR
6466 && rtx_equal_p (XEXP (false_rtx, 0), true_rtx))
6467 term1 = true_rtx, false_rtx = XEXP (false_rtx, 1), true_rtx = const0_rtx;
6468 else if (GET_CODE (false_rtx) == IOR
6469 && rtx_equal_p (XEXP (false_rtx, 1), true_rtx))
6470 term1 = true_rtx, false_rtx = XEXP (false_rtx, 0), true_rtx = const0_rtx;
6471
6472 term2 = simplify_gen_binary (AND, GET_MODE (src),
6473 XEXP (XEXP (src, 0), 0), true_rtx);
6474 term3 = simplify_gen_binary (AND, GET_MODE (src),
6475 simplify_gen_unary (NOT, GET_MODE (src),
6476 XEXP (XEXP (src, 0), 0),
6477 GET_MODE (src)),
6478 false_rtx);
6479
6480 SUBST (SET_SRC (x),
6481 simplify_gen_binary (IOR, GET_MODE (src),
6482 simplify_gen_binary (IOR, GET_MODE (src),
6483 term1, term2),
6484 term3));
6485
6486 src = SET_SRC (x);
6487 }
6488
6489 /* If either SRC or DEST is a CLOBBER of (const_int 0), make this
6490 whole thing fail. */
6491 if (GET_CODE (src) == CLOBBER && XEXP (src, 0) == const0_rtx)
6492 return src;
6493 else if (GET_CODE (dest) == CLOBBER && XEXP (dest, 0) == const0_rtx)
6494 return dest;
6495 else
6496 /* Convert this into a field assignment operation, if possible. */
6497 return make_field_assignment (x);
6498 }
6499 \f
6500 /* Simplify X, an AND, IOR, or XOR operation, and return the simplified
6501 result. */
6502
6503 static rtx
6504 simplify_logical (rtx x)
6505 {
6506 enum machine_mode mode = GET_MODE (x);
6507 rtx op0 = XEXP (x, 0);
6508 rtx op1 = XEXP (x, 1);
6509
6510 switch (GET_CODE (x))
6511 {
6512 case AND:
6513 /* We can call simplify_and_const_int only if we don't lose
6514 any (sign) bits when converting INTVAL (op1) to
6515 "unsigned HOST_WIDE_INT". */
6516 if (CONST_INT_P (op1)
6517 && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
6518 || INTVAL (op1) > 0))
6519 {
6520 x = simplify_and_const_int (x, mode, op0, INTVAL (op1));
6521 if (GET_CODE (x) != AND)
6522 return x;
6523
6524 op0 = XEXP (x, 0);
6525 op1 = XEXP (x, 1);
6526 }
6527
6528 /* If we have any of (and (ior A B) C) or (and (xor A B) C),
6529 apply the distributive law and then the inverse distributive
6530 law to see if things simplify. */
6531 if (GET_CODE (op0) == IOR || GET_CODE (op0) == XOR)
6532 {
6533 rtx result = distribute_and_simplify_rtx (x, 0);
6534 if (result)
6535 return result;
6536 }
6537 if (GET_CODE (op1) == IOR || GET_CODE (op1) == XOR)
6538 {
6539 rtx result = distribute_and_simplify_rtx (x, 1);
6540 if (result)
6541 return result;
6542 }
6543 break;
6544
6545 case IOR:
6546 /* If we have (ior (and A B) C), apply the distributive law and then
6547 the inverse distributive law to see if things simplify. */
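/* E.g. the inverse distributive law can collapse
(ior (and A C) (and B C)) into (and (ior A B) C) when the latter
is cheaper. */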
6548
6549 if (GET_CODE (op0) == AND)
6550 {
6551 rtx result = distribute_and_simplify_rtx (x, 0);
6552 if (result)
6553 return result;
6554 }
6555
6556 if (GET_CODE (op1) == AND)
6557 {
6558 rtx result = distribute_and_simplify_rtx (x, 1);
6559 if (result)
6560 return result;
6561 }
6562 break;
6563
6564 default:
6565 gcc_unreachable ();
6566 }
6567
6568 return x;
6569 }
6570 \f
6571 /* We consider ZERO_EXTRACT, SIGN_EXTRACT, and SIGN_EXTEND as "compound
6572 operations" because they can be replaced with two more basic operations.
6573 ZERO_EXTEND is also considered "compound" because it can be replaced with
6574 an AND operation, which is simpler, though only one operation.
6575
6576 The function expand_compound_operation is called with an rtx expression
6577 and will convert it to the appropriate shifts and AND operations,
6578 simplifying at each stage.
6579
6580 The function make_compound_operation is called to convert an expression
6581 consisting of shifts and ANDs into the equivalent compound expression.
6582 It is the inverse of this function, loosely speaking. */
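/* As a rough illustration (assuming BITS_BIG_ENDIAN is clear), in SImode
(sign_extract:SI X (const_int 8) (const_int 0)) expands to
(ashiftrt:SI (ashift:SI X (const_int 24)) (const_int 24)), and a
ZERO_EXTEND from QImode becomes an AND with (const_int 255). */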
6583
6584 static rtx
6585 expand_compound_operation (rtx x)
6586 {
6587 unsigned HOST_WIDE_INT pos = 0, len;
6588 int unsignedp = 0;
6589 unsigned int modewidth;
6590 rtx tem;
6591
6592 switch (GET_CODE (x))
6593 {
6594 case ZERO_EXTEND:
6595 unsignedp = 1;
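/* ... fall through ... */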
6596 case SIGN_EXTEND:
6597 /* We can't necessarily use a const_int for a multiword mode;
6598 it depends on implicitly extending the value.
6599 Since we don't know the right way to extend it,
6600 we can't tell whether the implicit way is right.
6601
6602 Even for a mode that is no wider than a const_int,
6603 we can't win, because we need to sign extend one of its bits through
6604 the rest of it, and we don't know which bit. */
6605 if (CONST_INT_P (XEXP (x, 0)))
6606 return x;
6607
6608 /* Return if (subreg:MODE FROM 0) is not a safe replacement for
6609 (zero_extend:MODE FROM) or (sign_extend:MODE FROM). It is safe for any MEM
6610 because (SUBREG (MEM...)) is guaranteed to cause the MEM to be
6611 reloaded. If not for that, MEMs would very rarely be safe.
6612
6613 Reject MODEs bigger than a word, because we might not be able
6614 to reference a two-register group starting with an arbitrary register
6615 (and currently gen_lowpart might crash for a SUBREG). */
6616
6617 if (GET_MODE_SIZE (GET_MODE (XEXP (x, 0))) > UNITS_PER_WORD)
6618 return x;
6619
6620 /* Reject MODEs that aren't scalar integers because turning vector
6621 or complex modes into shifts causes problems. */
6622
6623 if (! SCALAR_INT_MODE_P (GET_MODE (XEXP (x, 0))))
6624 return x;
6625
6626 len = GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)));
6627 /* If the inner object has VOIDmode (the only way this can happen
6628 is if it is an ASM_OPERANDS), we can't do anything since we don't
6629 know how much masking to do. */
6630 if (len == 0)
6631 return x;
6632
6633 break;
6634
6635 case ZERO_EXTRACT:
6636 unsignedp = 1;
6637
6638 /* ... fall through ... */
6639
6640 case SIGN_EXTRACT:
6641 /* If the operand is a CLOBBER, just return it. */
6642 if (GET_CODE (XEXP (x, 0)) == CLOBBER)
6643 return XEXP (x, 0);
6644
6645 if (!CONST_INT_P (XEXP (x, 1))
6646 || !CONST_INT_P (XEXP (x, 2))
6647 || GET_MODE (XEXP (x, 0)) == VOIDmode)
6648 return x;
6649
6650 /* Reject MODEs that aren't scalar integers because turning vector
6651 or complex modes into shifts causes problems. */
6652
6653 if (! SCALAR_INT_MODE_P (GET_MODE (XEXP (x, 0))))
6654 return x;
6655
6656 len = INTVAL (XEXP (x, 1));
6657 pos = INTVAL (XEXP (x, 2));
6658
6659 /* This should stay within the object being extracted, fail otherwise. */
6660 if (len + pos > GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0))))
6661 return x;
6662
6663 if (BITS_BIG_ENDIAN)
6664 pos = GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0))) - len - pos;
6665
6666 break;
6667
6668 default:
6669 return x;
6670 }
6671 /* Convert sign extension to zero extension, if we know that the high
6672 bit is not set, as this is easier to optimize. It will be converted
6673 back to a cheaper alternative in make_extraction. */
6674 if (GET_CODE (x) == SIGN_EXTEND
6675 && (GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
6676 && ((nonzero_bits (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
6677 & ~(((unsigned HOST_WIDE_INT)
6678 GET_MODE_MASK (GET_MODE (XEXP (x, 0))))
6679 >> 1))
6680 == 0)))
6681 {
6682 rtx temp = gen_rtx_ZERO_EXTEND (GET_MODE (x), XEXP (x, 0));
6683 rtx temp2 = expand_compound_operation (temp);
6684
6685 /* Make sure this is a profitable operation. */
6686 if (rtx_cost (x, SET, optimize_this_for_speed_p)
6687 > rtx_cost (temp2, SET, optimize_this_for_speed_p))
6688 return temp2;
6689 else if (rtx_cost (x, SET, optimize_this_for_speed_p)
6690 > rtx_cost (temp, SET, optimize_this_for_speed_p))
6691 return temp;
6692 else
6693 return x;
6694 }
6695
6696 /* We can optimize some special cases of ZERO_EXTEND. */
6697 if (GET_CODE (x) == ZERO_EXTEND)
6698 {
6699 /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI if we
6700 know that the last value didn't have any inappropriate bits
6701 set. */
6702 if (GET_CODE (XEXP (x, 0)) == TRUNCATE
6703 && GET_MODE (XEXP (XEXP (x, 0), 0)) == GET_MODE (x)
6704 && GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
6705 && (nonzero_bits (XEXP (XEXP (x, 0), 0), GET_MODE (x))
6706 & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
6707 return XEXP (XEXP (x, 0), 0);
6708
6709 /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)). */
6710 if (GET_CODE (XEXP (x, 0)) == SUBREG
6711 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == GET_MODE (x)
6712 && subreg_lowpart_p (XEXP (x, 0))
6713 && GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
6714 && (nonzero_bits (SUBREG_REG (XEXP (x, 0)), GET_MODE (x))
6715 & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
6716 return SUBREG_REG (XEXP (x, 0));
6717
6718 /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI when foo
6719 is a comparison and STORE_FLAG_VALUE permits. This is like
6720 the first case, but it works even when GET_MODE (x) is larger
6721 than HOST_WIDE_INT. */
6722 if (GET_CODE (XEXP (x, 0)) == TRUNCATE
6723 && GET_MODE (XEXP (XEXP (x, 0), 0)) == GET_MODE (x)
6724 && COMPARISON_P (XEXP (XEXP (x, 0), 0))
6725 && (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))
6726 <= HOST_BITS_PER_WIDE_INT)
6727 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
6728 return XEXP (XEXP (x, 0), 0);
6729
6730 /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)). */
6731 if (GET_CODE (XEXP (x, 0)) == SUBREG
6732 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == GET_MODE (x)
6733 && subreg_lowpart_p (XEXP (x, 0))
6734 && COMPARISON_P (SUBREG_REG (XEXP (x, 0)))
6735 && (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))
6736 <= HOST_BITS_PER_WIDE_INT)
6737 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
6738 return SUBREG_REG (XEXP (x, 0));
6739
6740 }
6741
6742 /* If we reach here, we want to return a pair of shifts. The inner
6743 shift is a left shift of BITSIZE - POS - LEN bits. The outer
6744 shift is a right shift of BITSIZE - LEN bits. It is arithmetic or
6745 logical depending on the value of UNSIGNEDP.
6746
6747 If this was a ZERO_EXTEND or ZERO_EXTRACT, this pair of shifts will be
6748 converted into an AND of a shift.
6749
6750 We must check for the case where the left shift would have a negative
6751 count. This can happen in a case like (x >> 31) & 255 on machines
6752 that can't shift by a constant. On those machines, we would first
6753 combine the shift with the AND to produce a variable-position
6754 extraction. Then the constant of 31 would be substituted in to produce
6755 such a position. */
6756
6757 modewidth = GET_MODE_BITSIZE (GET_MODE (x));
6758 if (modewidth + len >= pos)
6759 {
6760 enum machine_mode mode = GET_MODE (x);
6761 tem = gen_lowpart (mode, XEXP (x, 0));
6762 if (!tem || GET_CODE (tem) == CLOBBER)
6763 return x;
6764 tem = simplify_shift_const (NULL_RTX, ASHIFT, mode,
6765 tem, modewidth - pos - len);
6766 tem = simplify_shift_const (NULL_RTX, unsignedp ? LSHIFTRT : ASHIFTRT,
6767 mode, tem, modewidth - len);
6768 }
6769 else if (unsignedp && len < HOST_BITS_PER_WIDE_INT)
6770 tem = simplify_and_const_int (NULL_RTX, GET_MODE (x),
6771 simplify_shift_const (NULL_RTX, LSHIFTRT,
6772 GET_MODE (x),
6773 XEXP (x, 0), pos),
6774 ((unsigned HOST_WIDE_INT) 1 << len) - 1);
6775 else
6776 /* Any other cases we can't handle. */
6777 return x;
6778
6779 /* If we couldn't do this for some reason, return the original
6780 expression. */
6781 if (GET_CODE (tem) == CLOBBER)
6782 return x;
6783
6784 return tem;
6785 }
6786 \f
6787 /* X is a SET which contains an assignment of one object into
6788 a part of another (such as a bit-field assignment, STRICT_LOW_PART,
6789 or certain SUBREGS). If possible, convert it into a series of
6790 logical operations.
6791
6792 We half-heartedly support variable positions, but do not at all
6793 support variable lengths. */
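/* For example, assuming !BITS_BIG_ENDIAN, the bit-field store
   (set (zero_extract:SI (reg:SI X) (const_int 8) (const_int 4)) (reg:SI Y))
   is rewritten, modulo constant folding, along the lines of
   (set (reg:SI X)
        (ior:SI (and:SI (not:SI (ashift:SI (const_int 255) (const_int 4)))
                        (reg:SI X))
                (ashift:SI (and:SI (reg:SI Y) (const_int 255))
                           (const_int 4))))
   i.e. the destination bits are cleared and the masked, shifted source is
   IORed in.  */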
6794
6795 static const_rtx
6796 expand_field_assignment (const_rtx x)
6797 {
6798 rtx inner;
6799 rtx pos; /* Always counts from low bit. */
6800 int len;
6801 rtx mask, cleared, masked;
6802 enum machine_mode compute_mode;
6803
6804 /* Loop until we find something we can't simplify. */
6805 while (1)
6806 {
6807 if (GET_CODE (SET_DEST (x)) == STRICT_LOW_PART
6808 && GET_CODE (XEXP (SET_DEST (x), 0)) == SUBREG)
6809 {
6810 inner = SUBREG_REG (XEXP (SET_DEST (x), 0));
6811 len = GET_MODE_BITSIZE (GET_MODE (XEXP (SET_DEST (x), 0)));
6812 pos = GEN_INT (subreg_lsb (XEXP (SET_DEST (x), 0)));
6813 }
6814 else if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
6815 && CONST_INT_P (XEXP (SET_DEST (x), 1)))
6816 {
6817 inner = XEXP (SET_DEST (x), 0);
6818 len = INTVAL (XEXP (SET_DEST (x), 1));
6819 pos = XEXP (SET_DEST (x), 2);
6820
6821 /* A constant position should stay within the width of INNER. */
6822 if (CONST_INT_P (pos)
6823 && INTVAL (pos) + len > GET_MODE_BITSIZE (GET_MODE (inner)))
6824 break;
6825
6826 if (BITS_BIG_ENDIAN)
6827 {
6828 if (CONST_INT_P (pos))
6829 pos = GEN_INT (GET_MODE_BITSIZE (GET_MODE (inner)) - len
6830 - INTVAL (pos));
6831 else if (GET_CODE (pos) == MINUS
6832 && CONST_INT_P (XEXP (pos, 1))
6833 && (INTVAL (XEXP (pos, 1))
6834 == GET_MODE_BITSIZE (GET_MODE (inner)) - len))
6835 /* If position is ADJUST - X, new position is X. */
6836 pos = XEXP (pos, 0);
6837 else
6838 pos = simplify_gen_binary (MINUS, GET_MODE (pos),
6839 GEN_INT (GET_MODE_BITSIZE (
6840 GET_MODE (inner))
6841 - len),
6842 pos);
6843 }
6844 }
6845
6846 /* A SUBREG between two modes that occupy the same number of words
6847 can be done by moving the SUBREG to the source. */
6848 else if (GET_CODE (SET_DEST (x)) == SUBREG
6849 /* We need SUBREGs to compute nonzero_bits properly. */
6850 && nonzero_sign_valid
6851 && (((GET_MODE_SIZE (GET_MODE (SET_DEST (x)))
6852 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
6853 == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (x))))
6854 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
6855 {
6856 x = gen_rtx_SET (VOIDmode, SUBREG_REG (SET_DEST (x)),
6857 gen_lowpart
6858 (GET_MODE (SUBREG_REG (SET_DEST (x))),
6859 SET_SRC (x)));
6860 continue;
6861 }
6862 else
6863 break;
6864
6865 while (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner))
6866 inner = SUBREG_REG (inner);
6867
6868 compute_mode = GET_MODE (inner);
6869
6870 /* Don't attempt bitwise arithmetic on non-scalar integer modes. */
6871 if (! SCALAR_INT_MODE_P (compute_mode))
6872 {
6873 enum machine_mode imode;
6874
6875 /* Don't do anything for vector or complex integral types. */
6876 if (! FLOAT_MODE_P (compute_mode))
6877 break;
6878
6879 /* Try to find an integral mode to pun with. */
6880 imode = mode_for_size (GET_MODE_BITSIZE (compute_mode), MODE_INT, 0);
6881 if (imode == BLKmode)
6882 break;
6883
6884 compute_mode = imode;
6885 inner = gen_lowpart (imode, inner);
6886 }
6887
6888 /* Compute a mask of LEN bits, if we can do this on the host machine. */
6889 if (len >= HOST_BITS_PER_WIDE_INT)
6890 break;
6891
6892 /* Now compute the equivalent expression. Make a copy of INNER
6893 for the SET_DEST in case it is a MEM into which we will substitute;
6894 we don't want shared RTL in that case. */
6895 mask = GEN_INT (((unsigned HOST_WIDE_INT) 1 << len) - 1);
6896 cleared = simplify_gen_binary (AND, compute_mode,
6897 simplify_gen_unary (NOT, compute_mode,
6898 simplify_gen_binary (ASHIFT,
6899 compute_mode,
6900 mask, pos),
6901 compute_mode),
6902 inner);
6903 masked = simplify_gen_binary (ASHIFT, compute_mode,
6904 simplify_gen_binary (
6905 AND, compute_mode,
6906 gen_lowpart (compute_mode, SET_SRC (x)),
6907 mask),
6908 pos);
6909
6910 x = gen_rtx_SET (VOIDmode, copy_rtx (inner),
6911 simplify_gen_binary (IOR, compute_mode,
6912 cleared, masked));
6913 }
6914
6915 return x;
6916 }
6917 \f
6918 /* Return an RTX for a reference to LEN bits of INNER. If POS_RTX is nonzero,
6919 it is an RTX that represents a variable starting position; otherwise,
6920 POS is the (constant) starting bit position (counted from the LSB).
6921
6922 UNSIGNEDP is nonzero for an unsigned reference and zero for a
6923 signed reference.
6924
6925 IN_DEST is nonzero if this is a reference in the destination of a
6926 SET. This is used when a ZERO_ or SIGN_EXTRACT isn't needed. If nonzero,
6927 a STRICT_LOW_PART will be used, if zero, ZERO_EXTEND or SIGN_EXTEND will
6928 be used.
6929
6930 IN_COMPARE is nonzero if we are in a COMPARE. This means that a
6931 ZERO_EXTRACT should be built even for bits starting at bit 0.
6932
6933 MODE is the desired mode of the result (if IN_DEST == 0).
6934
6935 The result is an RTX for the extraction or NULL_RTX if the target
6936 can't handle it. */
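/* For instance, a request for 8 unsigned bits starting at bit 8 of
   (reg:SI R), outside a SET destination and outside a COMPARE, will
   typically come back as
   (zero_extract:SI (reg:SI R) (const_int 8) (const_int 8)) or a cheaper
   equivalent, whereas the same request starting at bit 0 simply returns 0
   so that the caller keeps the plain AND form.  */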
6937
6938 static rtx
6939 make_extraction (enum machine_mode mode, rtx inner, HOST_WIDE_INT pos,
6940 rtx pos_rtx, unsigned HOST_WIDE_INT len, int unsignedp,
6941 int in_dest, int in_compare)
6942 {
6943 /* This mode describes the size of the storage area
6944 to fetch the overall value from. Within that, we
6945 ignore the POS lowest bits, etc. */
6946 enum machine_mode is_mode = GET_MODE (inner);
6947 enum machine_mode inner_mode;
6948 enum machine_mode wanted_inner_mode;
6949 enum machine_mode wanted_inner_reg_mode = word_mode;
6950 enum machine_mode pos_mode = word_mode;
6951 enum machine_mode extraction_mode = word_mode;
6952 enum machine_mode tmode = mode_for_size (len, MODE_INT, 1);
6953 rtx new_rtx = 0;
6954 rtx orig_pos_rtx = pos_rtx;
6955 HOST_WIDE_INT orig_pos;
6956
6957 if (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner))
6958 {
6959 /* If going from (subreg:SI (mem:QI ...)) to (mem:QI ...),
6960 consider just the QI as the memory to extract from.
6961 The subreg adds or removes high bits; its mode is
6962 irrelevant to the meaning of this extraction,
6963 since POS and LEN count from the lsb. */
6964 if (MEM_P (SUBREG_REG (inner)))
6965 is_mode = GET_MODE (SUBREG_REG (inner));
6966 inner = SUBREG_REG (inner);
6967 }
6968 else if (GET_CODE (inner) == ASHIFT
6969 && CONST_INT_P (XEXP (inner, 1))
6970 && pos_rtx == 0 && pos == 0
6971 && len > UINTVAL (XEXP (inner, 1)))
6972 {
6973 /* We're extracting the least significant bits of an rtx
6974 (ashift X (const_int C)), where LEN > C. Extract the
6975 least significant (LEN - C) bits of X, giving an rtx
6976 whose mode is MODE, then shift it left C times. */
6977 new_rtx = make_extraction (mode, XEXP (inner, 0),
6978 0, 0, len - INTVAL (XEXP (inner, 1)),
6979 unsignedp, in_dest, in_compare);
6980 if (new_rtx != 0)
6981 return gen_rtx_ASHIFT (mode, new_rtx, XEXP (inner, 1));
6982 }
6983
6984 inner_mode = GET_MODE (inner);
6985
6986 if (pos_rtx && CONST_INT_P (pos_rtx))
6987 pos = INTVAL (pos_rtx), pos_rtx = 0;
6988
6989 /* See if this can be done without an extraction. We never can if the
6990 width of the field is not the same as that of some integer mode. For
6991 registers, we can only avoid the extraction if the position is at the
6992 low-order bit and this is either not in the destination or we have the
6993 appropriate STRICT_LOW_PART operation available.
6994
6995 For MEM, we can avoid an extract if the field starts on an appropriate
6996 boundary and we can change the mode of the memory reference. */
6997
6998 if (tmode != BLKmode
6999 && ((pos_rtx == 0 && (pos % BITS_PER_WORD) == 0
7000 && !MEM_P (inner)
7001 && (inner_mode == tmode
7002 || !REG_P (inner)
7003 || TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (tmode),
7004 GET_MODE_BITSIZE (inner_mode))
7005 || reg_truncated_to_mode (tmode, inner))
7006 && (! in_dest
7007 || (REG_P (inner)
7008 && have_insn_for (STRICT_LOW_PART, tmode))))
7009 || (MEM_P (inner) && pos_rtx == 0
7010 && (pos
7011 % (STRICT_ALIGNMENT ? GET_MODE_ALIGNMENT (tmode)
7012 : BITS_PER_UNIT)) == 0
7013 /* We can't do this if we are widening INNER_MODE (it
7014 may not be aligned, for one thing). */
7015 && GET_MODE_BITSIZE (inner_mode) >= GET_MODE_BITSIZE (tmode)
7016 && (inner_mode == tmode
7017 || (! mode_dependent_address_p (XEXP (inner, 0))
7018 && ! MEM_VOLATILE_P (inner))))))
7019 {
7020 /* If INNER is a MEM, make a new MEM that encompasses just the desired
7021 field. If the original and current mode are the same, we need not
7022 adjust the offset. Otherwise, we do so if bytes are big endian.
7023
7024 If INNER is not a MEM, get a piece consisting of just the field
7025 of interest (in this case POS % BITS_PER_WORD must be 0). */
7026
7027 if (MEM_P (inner))
7028 {
7029 HOST_WIDE_INT offset;
7030
7031 /* POS counts from lsb, but make OFFSET count in memory order. */
7032 if (BYTES_BIG_ENDIAN)
7033 offset = (GET_MODE_BITSIZE (is_mode) - len - pos) / BITS_PER_UNIT;
7034 else
7035 offset = pos / BITS_PER_UNIT;
7036
7037 new_rtx = adjust_address_nv (inner, tmode, offset);
7038 }
7039 else if (REG_P (inner))
7040 {
7041 if (tmode != inner_mode)
7042 {
7043 /* We can't call gen_lowpart in a DEST since we
7044 always want a SUBREG (see below) and it would sometimes
7045 return a new hard register. */
7046 if (pos || in_dest)
7047 {
7048 HOST_WIDE_INT final_word = pos / BITS_PER_WORD;
7049
7050 if (WORDS_BIG_ENDIAN
7051 && GET_MODE_SIZE (inner_mode) > UNITS_PER_WORD)
7052 final_word = ((GET_MODE_SIZE (inner_mode)
7053 - GET_MODE_SIZE (tmode))
7054 / UNITS_PER_WORD) - final_word;
7055
7056 final_word *= UNITS_PER_WORD;
7057 if (BYTES_BIG_ENDIAN
7058 && GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (tmode))
7059 final_word += (GET_MODE_SIZE (inner_mode)
7060 - GET_MODE_SIZE (tmode)) % UNITS_PER_WORD;
7061
7062 /* Avoid creating invalid subregs, for example when
7063 simplifying (x>>32)&255. */
7064 if (!validate_subreg (tmode, inner_mode, inner, final_word))
7065 return NULL_RTX;
7066
7067 new_rtx = gen_rtx_SUBREG (tmode, inner, final_word);
7068 }
7069 else
7070 new_rtx = gen_lowpart (tmode, inner);
7071 }
7072 else
7073 new_rtx = inner;
7074 }
7075 else
7076 new_rtx = force_to_mode (inner, tmode,
7077 len >= HOST_BITS_PER_WIDE_INT
7078 ? ~(unsigned HOST_WIDE_INT) 0
7079 : ((unsigned HOST_WIDE_INT) 1 << len) - 1,
7080 0);
7081
7082 /* If this extraction is going into the destination of a SET,
7083 make a STRICT_LOW_PART unless we made a MEM. */
7084
7085 if (in_dest)
7086 return (MEM_P (new_rtx) ? new_rtx
7087 : (GET_CODE (new_rtx) != SUBREG
7088 ? gen_rtx_CLOBBER (tmode, const0_rtx)
7089 : gen_rtx_STRICT_LOW_PART (VOIDmode, new_rtx)));
7090
7091 if (mode == tmode)
7092 return new_rtx;
7093
7094 if (CONST_INT_P (new_rtx)
7095 || GET_CODE (new_rtx) == CONST_DOUBLE)
7096 return simplify_unary_operation (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
7097 mode, new_rtx, tmode);
7098
7099 /* If we know that no extraneous bits are set, and that the high
7100 bit is not set, convert the extraction to the cheaper of
7101 sign and zero extension, which are equivalent in these cases. */
7102 if (flag_expensive_optimizations
7103 && (GET_MODE_BITSIZE (tmode) <= HOST_BITS_PER_WIDE_INT
7104 && ((nonzero_bits (new_rtx, tmode)
7105 & ~(((unsigned HOST_WIDE_INT)
7106 GET_MODE_MASK (tmode))
7107 >> 1))
7108 == 0)))
7109 {
7110 rtx temp = gen_rtx_ZERO_EXTEND (mode, new_rtx);
7111 rtx temp1 = gen_rtx_SIGN_EXTEND (mode, new_rtx);
7112
7113 /* Prefer ZERO_EXTENSION, since it gives more information to
7114 backends. */
7115 if (rtx_cost (temp, SET, optimize_this_for_speed_p)
7116 <= rtx_cost (temp1, SET, optimize_this_for_speed_p))
7117 return temp;
7118 return temp1;
7119 }
7120
7121 /* Otherwise, sign- or zero-extend unless we already are in the
7122 proper mode. */
7123
7124 return (gen_rtx_fmt_e (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
7125 mode, new_rtx));
7126 }
7127
7128 /* Unless this is a COMPARE or we have a funny memory reference,
7129 don't do anything with zero-extending field extracts starting at
7130 the low-order bit since they are simple AND operations. */
7131 if (pos_rtx == 0 && pos == 0 && ! in_dest
7132 && ! in_compare && unsignedp)
7133 return 0;
7134
7135 /* If INNER is a MEM, reject this if we would be spanning bytes or
7136 if the position is not a constant and the length is not 1. In all
7137 other cases, we would only be going outside our object in cases when
7138 an original shift would have been undefined. */
7139 if (MEM_P (inner)
7140 && ((pos_rtx == 0 && pos + len > GET_MODE_BITSIZE (is_mode))
7141 || (pos_rtx != 0 && len != 1)))
7142 return 0;
7143
7144 /* Get the mode to use should INNER not be a MEM, the mode for the position,
7145 and the mode for the result. */
7146 if (in_dest && mode_for_extraction (EP_insv, -1) != MAX_MACHINE_MODE)
7147 {
7148 wanted_inner_reg_mode = mode_for_extraction (EP_insv, 0);
7149 pos_mode = mode_for_extraction (EP_insv, 2);
7150 extraction_mode = mode_for_extraction (EP_insv, 3);
7151 }
7152
7153 if (! in_dest && unsignedp
7154 && mode_for_extraction (EP_extzv, -1) != MAX_MACHINE_MODE)
7155 {
7156 wanted_inner_reg_mode = mode_for_extraction (EP_extzv, 1);
7157 pos_mode = mode_for_extraction (EP_extzv, 3);
7158 extraction_mode = mode_for_extraction (EP_extzv, 0);
7159 }
7160
7161 if (! in_dest && ! unsignedp
7162 && mode_for_extraction (EP_extv, -1) != MAX_MACHINE_MODE)
7163 {
7164 wanted_inner_reg_mode = mode_for_extraction (EP_extv, 1);
7165 pos_mode = mode_for_extraction (EP_extv, 3);
7166 extraction_mode = mode_for_extraction (EP_extv, 0);
7167 }
7168
7169 /* Never narrow an object, since that might not be safe. */
7170
7171 if (mode != VOIDmode
7172 && GET_MODE_SIZE (extraction_mode) < GET_MODE_SIZE (mode))
7173 extraction_mode = mode;
7174
7175 if (pos_rtx && GET_MODE (pos_rtx) != VOIDmode
7176 && GET_MODE_SIZE (pos_mode) < GET_MODE_SIZE (GET_MODE (pos_rtx)))
7177 pos_mode = GET_MODE (pos_rtx);
7178
7179 /* If this is not from memory, the desired mode is the preferred mode
7180 for an extraction pattern's first input operand, or word_mode if there
7181 is none. */
7182 if (!MEM_P (inner))
7183 wanted_inner_mode = wanted_inner_reg_mode;
7184 else
7185 {
7186 /* Be careful not to go beyond the extracted object and maintain the
7187 natural alignment of the memory. */
7188 wanted_inner_mode = smallest_mode_for_size (len, MODE_INT);
7189 while (pos % GET_MODE_BITSIZE (wanted_inner_mode) + len
7190 > GET_MODE_BITSIZE (wanted_inner_mode))
7191 {
7192 wanted_inner_mode = GET_MODE_WIDER_MODE (wanted_inner_mode);
7193 gcc_assert (wanted_inner_mode != VOIDmode);
7194 }
7195
7196 /* If we have to change the mode of memory and cannot, the desired mode
7197 is EXTRACTION_MODE. */
7198 if (inner_mode != wanted_inner_mode
7199 && (mode_dependent_address_p (XEXP (inner, 0))
7200 || MEM_VOLATILE_P (inner)
7201 || pos_rtx))
7202 wanted_inner_mode = extraction_mode;
7203 }
7204
7205 orig_pos = pos;
7206
7207 if (BITS_BIG_ENDIAN)
7208 {
7209 /* POS is passed as if BITS_BIG_ENDIAN == 0, so we need to convert it to
7210 BITS_BIG_ENDIAN style. If position is constant, compute new
7211 position. Otherwise, build subtraction.
7212 Note that POS is relative to the mode of the original argument.
7213 If it's a MEM we need to recompute POS relative to that.
7214 However, if we're extracting from (or inserting into) a register,
7215 we want to recompute POS relative to wanted_inner_mode. */
7216 int width = (MEM_P (inner)
7217 ? GET_MODE_BITSIZE (is_mode)
7218 : GET_MODE_BITSIZE (wanted_inner_mode));
7219
7220 if (pos_rtx == 0)
7221 pos = width - len - pos;
7222 else
7223 pos_rtx
7224 = gen_rtx_MINUS (GET_MODE (pos_rtx), GEN_INT (width - len), pos_rtx);
7225 /* POS may be less than 0 now, but we check for that below.
7226 Note that it can only be less than 0 if !MEM_P (inner). */
7227 }
7228
7229 /* If INNER has a wider mode, and this is a constant extraction, try to
7230 make it smaller and adjust the byte to point to the byte containing
7231 the value. */
7232 if (wanted_inner_mode != VOIDmode
7233 && inner_mode != wanted_inner_mode
7234 && ! pos_rtx
7235 && GET_MODE_SIZE (wanted_inner_mode) < GET_MODE_SIZE (is_mode)
7236 && MEM_P (inner)
7237 && ! mode_dependent_address_p (XEXP (inner, 0))
7238 && ! MEM_VOLATILE_P (inner))
7239 {
7240 int offset = 0;
7241
7242 /* The computations below will be correct if the machine is big
7243 endian in both bits and bytes or little endian in bits and bytes.
7244 If it is mixed, we must adjust. */
7245
7246 /* If bytes are big endian and we had a paradoxical SUBREG, we must
7247 adjust OFFSET to compensate. */
7248 if (BYTES_BIG_ENDIAN
7249 && GET_MODE_SIZE (inner_mode) < GET_MODE_SIZE (is_mode))
7250 offset -= GET_MODE_SIZE (is_mode) - GET_MODE_SIZE (inner_mode);
7251
7252 /* We can now move to the desired byte. */
7253 offset += (pos / GET_MODE_BITSIZE (wanted_inner_mode))
7254 * GET_MODE_SIZE (wanted_inner_mode);
7255 pos %= GET_MODE_BITSIZE (wanted_inner_mode);
7256
7257 if (BYTES_BIG_ENDIAN != BITS_BIG_ENDIAN
7258 && is_mode != wanted_inner_mode)
7259 offset = (GET_MODE_SIZE (is_mode)
7260 - GET_MODE_SIZE (wanted_inner_mode) - offset);
7261
7262 inner = adjust_address_nv (inner, wanted_inner_mode, offset);
7263 }
7264
7265 /* If INNER is not memory, get it into the proper mode. If we are changing
7266 its mode, POS must be a constant and smaller than the size of the new
7267 mode. */
7268 else if (!MEM_P (inner))
7269 {
7270 /* On the LHS, don't create paradoxical subregs implicitly truncating
7271 the register unless TRULY_NOOP_TRUNCATION. */
7272 if (in_dest
7273 && !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (GET_MODE (inner)),
7274 GET_MODE_BITSIZE (wanted_inner_mode)))
7275 return NULL_RTX;
7276
7277 if (GET_MODE (inner) != wanted_inner_mode
7278 && (pos_rtx != 0
7279 || orig_pos + len > GET_MODE_BITSIZE (wanted_inner_mode)))
7280 return NULL_RTX;
7281
7282 if (orig_pos < 0)
7283 return NULL_RTX;
7284
7285 inner = force_to_mode (inner, wanted_inner_mode,
7286 pos_rtx
7287 || len + orig_pos >= HOST_BITS_PER_WIDE_INT
7288 ? ~(unsigned HOST_WIDE_INT) 0
7289 : ((((unsigned HOST_WIDE_INT) 1 << len) - 1)
7290 << orig_pos),
7291 0);
7292 }
7293
7294 /* Adjust mode of POS_RTX, if needed. If we want a wider mode, we
7295 have to zero extend. Otherwise, we can just use a SUBREG. */
7296 if (pos_rtx != 0
7297 && GET_MODE_SIZE (pos_mode) > GET_MODE_SIZE (GET_MODE (pos_rtx)))
7298 {
7299 rtx temp = gen_rtx_ZERO_EXTEND (pos_mode, pos_rtx);
7300
7301 /* If we know that no extraneous bits are set, and that the high
7302 bit is not set, convert the extraction to the cheaper one, either
7303 SIGN_EXTENSION or ZERO_EXTENSION, which are equivalent in these
7304 cases. */
7305 if (flag_expensive_optimizations
7306 && (GET_MODE_BITSIZE (GET_MODE (pos_rtx)) <= HOST_BITS_PER_WIDE_INT
7307 && ((nonzero_bits (pos_rtx, GET_MODE (pos_rtx))
7308 & ~(((unsigned HOST_WIDE_INT)
7309 GET_MODE_MASK (GET_MODE (pos_rtx)))
7310 >> 1))
7311 == 0)))
7312 {
7313 rtx temp1 = gen_rtx_SIGN_EXTEND (pos_mode, pos_rtx);
7314
7315 /* Prefer ZERO_EXTENSION, since it gives more information to
7316 backends. */
7317 if (rtx_cost (temp1, SET, optimize_this_for_speed_p)
7318 < rtx_cost (temp, SET, optimize_this_for_speed_p))
7319 temp = temp1;
7320 }
7321 pos_rtx = temp;
7322 }
7323 else if (pos_rtx != 0
7324 && GET_MODE_SIZE (pos_mode) < GET_MODE_SIZE (GET_MODE (pos_rtx)))
7325 pos_rtx = gen_lowpart (pos_mode, pos_rtx);
7326
7327 /* Make POS_RTX unless we already have it and it is correct. If we don't
7328 have a POS_RTX but we do have an ORIG_POS_RTX, the latter must
7329 be a CONST_INT. */
7330 if (pos_rtx == 0 && orig_pos_rtx != 0 && INTVAL (orig_pos_rtx) == pos)
7331 pos_rtx = orig_pos_rtx;
7332
7333 else if (pos_rtx == 0)
7334 pos_rtx = GEN_INT (pos);
7335
7336 /* Make the required operation. See if we can use existing rtx. */
7337 new_rtx = gen_rtx_fmt_eee (unsignedp ? ZERO_EXTRACT : SIGN_EXTRACT,
7338 extraction_mode, inner, GEN_INT (len), pos_rtx);
7339 if (! in_dest)
7340 new_rtx = gen_lowpart (mode, new_rtx);
7341
7342 return new_rtx;
7343 }
7344 \f
7345 /* See if X contains an ASHIFT of COUNT or more bits that can be commuted
7346 with any other operations in X. Return X without that shift if so. */
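/* For example, with COUNT == 3,
   (plus:SI (ashift:SI (reg:SI R) (const_int 3)) (const_int 24))
   becomes (plus:SI (reg:SI R) (const_int 3)), since shifting that result
   left by 3 recreates the original value.  */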
7347
7348 static rtx
7349 extract_left_shift (rtx x, int count)
7350 {
7351 enum rtx_code code = GET_CODE (x);
7352 enum machine_mode mode = GET_MODE (x);
7353 rtx tem;
7354
7355 switch (code)
7356 {
7357 case ASHIFT:
7358 /* This is the shift itself. If it is wide enough, we will return
7359 either the value being shifted if the shift count is equal to
7360 COUNT or a shift for the difference. */
7361 if (CONST_INT_P (XEXP (x, 1))
7362 && INTVAL (XEXP (x, 1)) >= count)
7363 return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (x, 0),
7364 INTVAL (XEXP (x, 1)) - count);
7365 break;
7366
7367 case NEG: case NOT:
7368 if ((tem = extract_left_shift (XEXP (x, 0), count)) != 0)
7369 return simplify_gen_unary (code, mode, tem, mode);
7370
7371 break;
7372
7373 case PLUS: case IOR: case XOR: case AND:
7374 /* If we can safely shift this constant and we find the inner shift,
7375 make a new operation. */
7376 if (CONST_INT_P (XEXP (x, 1))
7377 && (UINTVAL (XEXP (x, 1))
7378 & ((((unsigned HOST_WIDE_INT) 1 << count)) - 1)) == 0
7379 && (tem = extract_left_shift (XEXP (x, 0), count)) != 0)
7380 return simplify_gen_binary (code, mode, tem,
7381 GEN_INT (INTVAL (XEXP (x, 1)) >> count));
7382
7383 break;
7384
7385 default:
7386 break;
7387 }
7388
7389 return 0;
7390 }
7391 \f
7392 /* Look at the expression rooted at X. Look for expressions
7393 equivalent to ZERO_EXTRACT, SIGN_EXTRACT, ZERO_EXTEND, SIGN_EXTEND.
7394 Form these expressions.
7395
7396 Return the new rtx, usually just X.
7397
7398 Also, for machines like the VAX that don't have logical shift insns,
7399 try to convert logical to arithmetic shift operations in cases where
7400 they are equivalent. This undoes the canonicalizations to logical
7401 shifts done elsewhere.
7402
7403 We try, as much as possible, to re-use rtl expressions to save memory.
7404
7405 IN_CODE says what kind of expression we are processing. Normally, it is
7406 SET. In a memory address (inside a MEM, PLUS or MINUS, the latter two
7407 being kludges), it is MEM. When processing the arguments of a comparison
7408 or a COMPARE against zero, it is COMPARE. */
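/* Two typical rewrites, assuming SImode throughout:
   (and:SI (lshiftrt:SI (reg:SI R) (const_int 2)) (const_int 7))
   can become (zero_extract:SI (reg:SI R) (const_int 3) (const_int 2)),
   and, inside an address, (ashift:SI (reg:SI R) (const_int 2)) becomes
   (mult:SI (reg:SI R) (const_int 4)).  */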
7409
7410 static rtx
7411 make_compound_operation (rtx x, enum rtx_code in_code)
7412 {
7413 enum rtx_code code = GET_CODE (x);
7414 enum machine_mode mode = GET_MODE (x);
7415 int mode_width = GET_MODE_BITSIZE (mode);
7416 rtx rhs, lhs;
7417 enum rtx_code next_code;
7418 int i, j;
7419 rtx new_rtx = 0;
7420 rtx tem;
7421 const char *fmt;
7422
7423 /* Select the code to be used in recursive calls. Once we are inside an
7424 address, we stay there. If we have a comparison, set to COMPARE,
7425 but once inside, go back to our default of SET. */
7426
7427 next_code = (code == MEM ? MEM
7428 : ((code == PLUS || code == MINUS)
7429 && SCALAR_INT_MODE_P (mode)) ? MEM
7430 : ((code == COMPARE || COMPARISON_P (x))
7431 && XEXP (x, 1) == const0_rtx) ? COMPARE
7432 : in_code == COMPARE ? SET : in_code);
7433
7434 /* Process depending on the code of this operation. If NEW_RTX is set
7435 nonzero, it will be returned. */
7436
7437 switch (code)
7438 {
7439 case ASHIFT:
7440 /* Convert shifts by constants into multiplications if inside
7441 an address. */
7442 if (in_code == MEM && CONST_INT_P (XEXP (x, 1))
7443 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
7444 && INTVAL (XEXP (x, 1)) >= 0)
7445 {
7446 HOST_WIDE_INT count = INTVAL (XEXP (x, 1));
7447 HOST_WIDE_INT multval = (HOST_WIDE_INT) 1 << count;
7448
7449 new_rtx = make_compound_operation (XEXP (x, 0), next_code);
7450 if (GET_CODE (new_rtx) == NEG)
7451 {
7452 new_rtx = XEXP (new_rtx, 0);
7453 multval = -multval;
7454 }
7455 multval = trunc_int_for_mode (multval, mode);
7456 new_rtx = gen_rtx_MULT (mode, new_rtx, GEN_INT (multval));
7457 }
7458 break;
7459
7460 case PLUS:
7461 lhs = XEXP (x, 0);
7462 rhs = XEXP (x, 1);
7463 lhs = make_compound_operation (lhs, next_code);
7464 rhs = make_compound_operation (rhs, next_code);
7465 if (GET_CODE (lhs) == MULT && GET_CODE (XEXP (lhs, 0)) == NEG
7466 && SCALAR_INT_MODE_P (mode))
7467 {
7468 tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (lhs, 0), 0),
7469 XEXP (lhs, 1));
7470 new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
7471 }
7472 else if (GET_CODE (lhs) == MULT
7473 && (CONST_INT_P (XEXP (lhs, 1)) && INTVAL (XEXP (lhs, 1)) < 0))
7474 {
7475 tem = simplify_gen_binary (MULT, mode, XEXP (lhs, 0),
7476 simplify_gen_unary (NEG, mode,
7477 XEXP (lhs, 1),
7478 mode));
7479 new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
7480 }
7481 else
7482 {
7483 SUBST (XEXP (x, 0), lhs);
7484 SUBST (XEXP (x, 1), rhs);
7485 goto maybe_swap;
7486 }
7487 x = gen_lowpart (mode, new_rtx);
7488 goto maybe_swap;
7489
7490 case MINUS:
7491 lhs = XEXP (x, 0);
7492 rhs = XEXP (x, 1);
7493 lhs = make_compound_operation (lhs, next_code);
7494 rhs = make_compound_operation (rhs, next_code);
7495 if (GET_CODE (rhs) == MULT && GET_CODE (XEXP (rhs, 0)) == NEG
7496 && SCALAR_INT_MODE_P (mode))
7497 {
7498 tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (rhs, 0), 0),
7499 XEXP (rhs, 1));
7500 new_rtx = simplify_gen_binary (PLUS, mode, tem, lhs);
7501 }
7502 else if (GET_CODE (rhs) == MULT
7503 && (CONST_INT_P (XEXP (rhs, 1)) && INTVAL (XEXP (rhs, 1)) < 0))
7504 {
7505 tem = simplify_gen_binary (MULT, mode, XEXP (rhs, 0),
7506 simplify_gen_unary (NEG, mode,
7507 XEXP (rhs, 1),
7508 mode));
7509 new_rtx = simplify_gen_binary (PLUS, mode, tem, lhs);
7510 }
7511 else
7512 {
7513 SUBST (XEXP (x, 0), lhs);
7514 SUBST (XEXP (x, 1), rhs);
7515 return x;
7516 }
7517 return gen_lowpart (mode, new_rtx);
7518
7519 case AND:
7520 /* If the second operand is not a constant, we can't do anything
7521 with it. */
7522 if (!CONST_INT_P (XEXP (x, 1)))
7523 break;
7524
7525 /* If the constant is a power of two minus one and the first operand
7526 is a logical right shift, make an extraction. */
7527 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
7528 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
7529 {
7530 new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
7531 new_rtx = make_extraction (mode, new_rtx, 0, XEXP (XEXP (x, 0), 1), i, 1,
7532 0, in_code == COMPARE);
7533 }
7534
7535 /* Same as previous, but for (subreg (lshiftrt ...)) in first op. */
7536 else if (GET_CODE (XEXP (x, 0)) == SUBREG
7537 && subreg_lowpart_p (XEXP (x, 0))
7538 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == LSHIFTRT
7539 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
7540 {
7541 new_rtx = make_compound_operation (XEXP (SUBREG_REG (XEXP (x, 0)), 0),
7542 next_code);
7543 new_rtx = make_extraction (GET_MODE (SUBREG_REG (XEXP (x, 0))), new_rtx, 0,
7544 XEXP (SUBREG_REG (XEXP (x, 0)), 1), i, 1,
7545 0, in_code == COMPARE);
7546 }
7547 /* Same as previous, but for (xor/ior (lshiftrt...) (lshiftrt...)). */
7548 else if ((GET_CODE (XEXP (x, 0)) == XOR
7549 || GET_CODE (XEXP (x, 0)) == IOR)
7550 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LSHIFTRT
7551 && GET_CODE (XEXP (XEXP (x, 0), 1)) == LSHIFTRT
7552 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
7553 {
7554 /* Apply the distributive law, and then try to make extractions. */
7555 new_rtx = gen_rtx_fmt_ee (GET_CODE (XEXP (x, 0)), mode,
7556 gen_rtx_AND (mode, XEXP (XEXP (x, 0), 0),
7557 XEXP (x, 1)),
7558 gen_rtx_AND (mode, XEXP (XEXP (x, 0), 1),
7559 XEXP (x, 1)));
7560 new_rtx = make_compound_operation (new_rtx, in_code);
7561 }
7562
7563 /* If we have (and (rotate X C) M) and C is larger than the number
7564 of bits in M, this is an extraction. */
7565
7566 else if (GET_CODE (XEXP (x, 0)) == ROTATE
7567 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
7568 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0
7569 && i <= INTVAL (XEXP (XEXP (x, 0), 1)))
7570 {
7571 new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
7572 new_rtx = make_extraction (mode, new_rtx,
7573 (GET_MODE_BITSIZE (mode)
7574 - INTVAL (XEXP (XEXP (x, 0), 1))),
7575 NULL_RTX, i, 1, 0, in_code == COMPARE);
7576 }
7577
7578 /* On machines without logical shifts, if the operand of the AND is
7579 a logical shift and our mask turns off all the propagated sign
7580 bits, we can replace the logical shift with an arithmetic shift. */
7581 else if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
7582 && !have_insn_for (LSHIFTRT, mode)
7583 && have_insn_for (ASHIFTRT, mode)
7584 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
7585 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
7586 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
7587 && mode_width <= HOST_BITS_PER_WIDE_INT)
7588 {
7589 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
7590
7591 mask >>= INTVAL (XEXP (XEXP (x, 0), 1));
7592 if ((INTVAL (XEXP (x, 1)) & ~mask) == 0)
7593 SUBST (XEXP (x, 0),
7594 gen_rtx_ASHIFTRT (mode,
7595 make_compound_operation
7596 (XEXP (XEXP (x, 0), 0), next_code),
7597 XEXP (XEXP (x, 0), 1)));
7598 }
7599
7600 /* If the constant is one less than a power of two, this might be
7601 representable by an extraction even if no shift is present.
7602 If it doesn't end up being a ZERO_EXTEND, we will ignore it unless
7603 we are in a COMPARE. */
7604 else if ((i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
7605 new_rtx = make_extraction (mode,
7606 make_compound_operation (XEXP (x, 0),
7607 next_code),
7608 0, NULL_RTX, i, 1, 0, in_code == COMPARE);
7609
7610 /* If we are in a comparison and this is an AND with a power of two,
7611 convert this into the appropriate bit extract. */
7612 else if (in_code == COMPARE
7613 && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0)
7614 new_rtx = make_extraction (mode,
7615 make_compound_operation (XEXP (x, 0),
7616 next_code),
7617 i, NULL_RTX, 1, 1, 0, 1);
7618
7619 break;
7620
7621 case LSHIFTRT:
7622 /* If the sign bit is known to be zero, replace this with an
7623 arithmetic shift. */
7624 if (have_insn_for (ASHIFTRT, mode)
7625 && ! have_insn_for (LSHIFTRT, mode)
7626 && mode_width <= HOST_BITS_PER_WIDE_INT
7627 && (nonzero_bits (XEXP (x, 0), mode) & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1))) == 0)
7628 {
7629 new_rtx = gen_rtx_ASHIFTRT (mode,
7630 make_compound_operation (XEXP (x, 0),
7631 next_code),
7632 XEXP (x, 1));
7633 break;
7634 }
7635
7636 /* ... fall through ... */
7637
7638 case ASHIFTRT:
7639 lhs = XEXP (x, 0);
7640 rhs = XEXP (x, 1);
7641
7642 /* If we have (ashiftrt (ashift foo C1) C2) with C2 >= C1,
7643 this is a SIGN_EXTRACT. */
7644 if (CONST_INT_P (rhs)
7645 && GET_CODE (lhs) == ASHIFT
7646 && CONST_INT_P (XEXP (lhs, 1))
7647 && INTVAL (rhs) >= INTVAL (XEXP (lhs, 1))
7648 && INTVAL (rhs) < mode_width)
7649 {
7650 new_rtx = make_compound_operation (XEXP (lhs, 0), next_code);
7651 new_rtx = make_extraction (mode, new_rtx,
7652 INTVAL (rhs) - INTVAL (XEXP (lhs, 1)),
7653 NULL_RTX, mode_width - INTVAL (rhs),
7654 code == LSHIFTRT, 0, in_code == COMPARE);
7655 break;
7656 }
7657
7658 /* See if we have operations between an ASHIFTRT and an ASHIFT.
7659 If so, try to merge the shifts into a SIGN_EXTEND. We could
7660 also do this for some cases of SIGN_EXTRACT, but it doesn't
7661 seem worth the effort; the case checked for occurs on Alpha. */
7662
7663 if (!OBJECT_P (lhs)
7664 && ! (GET_CODE (lhs) == SUBREG
7665 && (OBJECT_P (SUBREG_REG (lhs))))
7666 && CONST_INT_P (rhs)
7667 && INTVAL (rhs) < HOST_BITS_PER_WIDE_INT
7668 && INTVAL (rhs) < mode_width
7669 && (new_rtx = extract_left_shift (lhs, INTVAL (rhs))) != 0)
7670 new_rtx = make_extraction (mode, make_compound_operation (new_rtx, next_code),
7671 0, NULL_RTX, mode_width - INTVAL (rhs),
7672 code == LSHIFTRT, 0, in_code == COMPARE);
7673
7674 break;
7675
7676 case SUBREG:
7677 /* Call ourselves recursively on the inner expression. If we are
7678 narrowing the object and it has a different RTL code from
7679 what it originally did, do this SUBREG as a force_to_mode. */
7680 {
7681 rtx inner = SUBREG_REG (x), simplified;
7682
7683 tem = make_compound_operation (inner, in_code);
7684
7685 simplified
7686 = simplify_subreg (mode, tem, GET_MODE (inner), SUBREG_BYTE (x));
7687 if (simplified)
7688 tem = simplified;
7689
7690 if (GET_CODE (tem) != GET_CODE (inner)
7691 && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (inner))
7692 && subreg_lowpart_p (x))
7693 {
7694 rtx newer
7695 = force_to_mode (tem, mode, ~(unsigned HOST_WIDE_INT) 0, 0);
7696
7697 /* If we have something other than a SUBREG, we might have
7698 done an expansion, so rerun ourselves. */
7699 if (GET_CODE (newer) != SUBREG)
7700 newer = make_compound_operation (newer, in_code);
7701
7702 /* force_to_mode can expand compounds. If it just re-expanded the
7703 compound, use gen_lowpart to convert to the desired mode. */
7704 if (rtx_equal_p (newer, x)
7705 /* Likewise if it re-expanded the compound only partially.
7706 This happens for SUBREG of ZERO_EXTRACT if they extract
7707 the same number of bits. */
7708 || (GET_CODE (newer) == SUBREG
7709 && (GET_CODE (SUBREG_REG (newer)) == LSHIFTRT
7710 || GET_CODE (SUBREG_REG (newer)) == ASHIFTRT)
7711 && GET_CODE (inner) == AND
7712 && rtx_equal_p (SUBREG_REG (newer), XEXP (inner, 0))))
7713 return gen_lowpart (GET_MODE (x), tem);
7714
7715 return newer;
7716 }
7717
7718 if (simplified)
7719 return tem;
7720 }
7721 break;
7722
7723 default:
7724 break;
7725 }
7726
7727 if (new_rtx)
7728 {
7729 x = gen_lowpart (mode, new_rtx);
7730 code = GET_CODE (x);
7731 }
7732
7733 /* Now recursively process each operand of this operation. */
7734 fmt = GET_RTX_FORMAT (code);
7735 for (i = 0; i < GET_RTX_LENGTH (code); i++)
7736 if (fmt[i] == 'e')
7737 {
7738 new_rtx = make_compound_operation (XEXP (x, i), next_code);
7739 SUBST (XEXP (x, i), new_rtx);
7740 }
7741 else if (fmt[i] == 'E')
7742 for (j = 0; j < XVECLEN (x, i); j++)
7743 {
7744 new_rtx = make_compound_operation (XVECEXP (x, i, j), next_code);
7745 SUBST (XVECEXP (x, i, j), new_rtx);
7746 }
7747
7748 maybe_swap:
7749 /* If this is a commutative operation, the changes to the operands
7750 may have made it noncanonical. */
7751 if (COMMUTATIVE_ARITH_P (x)
7752 && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
7753 {
7754 tem = XEXP (x, 0);
7755 SUBST (XEXP (x, 0), XEXP (x, 1));
7756 SUBST (XEXP (x, 1), tem);
7757 }
7758
7759 return x;
7760 }
7761 \f
7762 /* Given M, see if it is a value that would select a field of bits
7763 within an item, but not the entire word. Return -1 if not.
7764 Otherwise, return the starting position of the field, where 0 is the
7765 low-order bit.
7766
7767 *PLEN is set to the length of the field. */
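/* E.g. M == 0x0ff0 yields position 4 with *PLEN == 8, while a mask with a
   hole in it, such as 0x0ff1, yields -1 because it does not describe a
   single contiguous field.  */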
7768
7769 static int
7770 get_pos_from_mask (unsigned HOST_WIDE_INT m, unsigned HOST_WIDE_INT *plen)
7771 {
7772 /* Get the bit number of the first 1 bit from the right, -1 if none. */
7773 int pos = m ? ctz_hwi (m) : -1;
7774 int len = 0;
7775
7776 if (pos >= 0)
7777 /* Now shift off the low-order zero bits and see if we have a
7778 power of two minus 1. */
7779 len = exact_log2 ((m >> pos) + 1);
7780
7781 if (len <= 0)
7782 pos = -1;
7783
7784 *plen = len;
7785 return pos;
7786 }
7787 \f
7788 /* If X refers to a register that equals REG in value, replace these
7789 references with REG. */
7790 static rtx
7791 canon_reg_for_combine (rtx x, rtx reg)
7792 {
7793 rtx op0, op1, op2;
7794 const char *fmt;
7795 int i;
7796 bool copied;
7797
7798 enum rtx_code code = GET_CODE (x);
7799 switch (GET_RTX_CLASS (code))
7800 {
7801 case RTX_UNARY:
7802 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
7803 if (op0 != XEXP (x, 0))
7804 return simplify_gen_unary (GET_CODE (x), GET_MODE (x), op0,
7805 GET_MODE (reg));
7806 break;
7807
7808 case RTX_BIN_ARITH:
7809 case RTX_COMM_ARITH:
7810 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
7811 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
7812 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
7813 return simplify_gen_binary (GET_CODE (x), GET_MODE (x), op0, op1);
7814 break;
7815
7816 case RTX_COMPARE:
7817 case RTX_COMM_COMPARE:
7818 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
7819 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
7820 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
7821 return simplify_gen_relational (GET_CODE (x), GET_MODE (x),
7822 GET_MODE (op0), op0, op1);
7823 break;
7824
7825 case RTX_TERNARY:
7826 case RTX_BITFIELD_OPS:
7827 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
7828 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
7829 op2 = canon_reg_for_combine (XEXP (x, 2), reg);
7830 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1) || op2 != XEXP (x, 2))
7831 return simplify_gen_ternary (GET_CODE (x), GET_MODE (x),
7832 GET_MODE (op0), op0, op1, op2);
      break;
7833
7834 case RTX_OBJ:
7835 if (REG_P (x))
7836 {
7837 if (rtx_equal_p (get_last_value (reg), x)
7838 || rtx_equal_p (reg, get_last_value (x)))
7839 return reg;
7840 else
7841 break;
7842 }
7843
7844 /* fall through */
7845
7846 default:
7847 fmt = GET_RTX_FORMAT (code);
7848 copied = false;
7849 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
7850 if (fmt[i] == 'e')
7851 {
7852 rtx op = canon_reg_for_combine (XEXP (x, i), reg);
7853 if (op != XEXP (x, i))
7854 {
7855 if (!copied)
7856 {
7857 copied = true;
7858 x = copy_rtx (x);
7859 }
7860 XEXP (x, i) = op;
7861 }
7862 }
7863 else if (fmt[i] == 'E')
7864 {
7865 int j;
7866 for (j = 0; j < XVECLEN (x, i); j++)
7867 {
7868 rtx op = canon_reg_for_combine (XVECEXP (x, i, j), reg);
7869 if (op != XVECEXP (x, i, j))
7870 {
7871 if (!copied)
7872 {
7873 copied = true;
7874 x = copy_rtx (x);
7875 }
7876 XVECEXP (x, i, j) = op;
7877 }
7878 }
7879 }
7880
7881 break;
7882 }
7883
7884 return x;
7885 }
7886
7887 /* Return X converted to MODE. If the value is already truncated to
7888 MODE we can just return a subreg even though in the general case we
7889 would need an explicit truncation. */
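/* So, for instance, converting (reg:DI R) to SImode yields a lowpart
   SUBREG such as (subreg:SI (reg:DI R) 0) when the truncation is known to
   be a no-op or R is known to be already truncated, and an explicit
   TRUNCATE otherwise.  */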
7890
7891 static rtx
7892 gen_lowpart_or_truncate (enum machine_mode mode, rtx x)
7893 {
7894 if (!CONST_INT_P (x)
7895 && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (x))
7896 && !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
7897 GET_MODE_BITSIZE (GET_MODE (x)))
7898 && !(REG_P (x) && reg_truncated_to_mode (mode, x)))
7899 {
7900 /* Bit-cast X into an integer mode. */
7901 if (!SCALAR_INT_MODE_P (GET_MODE (x)))
7902 x = gen_lowpart (int_mode_for_mode (GET_MODE (x)), x);
7903 x = simplify_gen_unary (TRUNCATE, int_mode_for_mode (mode),
7904 x, GET_MODE (x));
7905 }
7906
7907 return gen_lowpart (mode, x);
7908 }
7909
7910 /* See if X can be simplified knowing that we will only refer to it in
7911 MODE and will only refer to those bits that are nonzero in MASK.
7912 If other bits are being computed or if masking operations are done
7913 that select a superset of the bits in MASK, they can sometimes be
7914 ignored.
7915
7916 Return a possibly simplified expression, but always convert X to
7917 MODE. If X is a CONST_INT, AND the CONST_INT with MASK.
7918
7919 If JUST_SELECT is nonzero, don't optimize by noticing that bits in MASK
7920 are all off in X. This is used when X will be complemented, by either
7921 NOT, NEG, or XOR. */
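/* As a small example, forcing (and:SI (reg:SI R) (const_int 255)) to
   SImode with MASK 0x0f can return just (reg:SI R): the caller only looks
   at the low four bits, and those are unchanged by the wider AND.  */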
7922
7923 static rtx
7924 force_to_mode (rtx x, enum machine_mode mode, unsigned HOST_WIDE_INT mask,
7925 int just_select)
7926 {
7927 enum rtx_code code = GET_CODE (x);
7928 int next_select = just_select || code == XOR || code == NOT || code == NEG;
7929 enum machine_mode op_mode;
7930 unsigned HOST_WIDE_INT fuller_mask, nonzero;
7931 rtx op0, op1, temp;
7932
7933 /* If this is a CALL or ASM_OPERANDS, don't do anything. Some of the
7934 code below will do the wrong thing since the mode of such an
7935 expression is VOIDmode.
7936
7937 Also do nothing if X is a CLOBBER; this can happen if X was
7938 the return value from a call to gen_lowpart. */
7939 if (code == CALL || code == ASM_OPERANDS || code == CLOBBER)
7940 return x;
7941
7942 /* We want to perform the operation in its present mode unless we know
7943 that the operation is valid in MODE, in which case we do the operation
7944 in MODE. */
7945 op_mode = ((GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (x))
7946 && have_insn_for (code, mode))
7947 ? mode : GET_MODE (x));
7948
7949 /* It is not valid to do a right-shift in a narrower mode
7950 than the one it came in with. */
7951 if ((code == LSHIFTRT || code == ASHIFTRT)
7952 && GET_MODE_BITSIZE (mode) < GET_MODE_BITSIZE (GET_MODE (x)))
7953 op_mode = GET_MODE (x);
7954
7955 /* Truncate MASK to fit OP_MODE. */
7956 if (op_mode)
7957 mask &= GET_MODE_MASK (op_mode);
7958
7959 /* When we have an arithmetic operation, or a shift whose count we
7960 do not know, we need to assume that all bits up to the highest-order
7961 bit in MASK will be needed. This is how we form such a mask. */
7962 if (mask & ((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)))
7963 fuller_mask = ~(unsigned HOST_WIDE_INT) 0;
7964 else
7965 fuller_mask = (((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mask) + 1))
7966 - 1);
7967
7968 /* Determine what bits of X are guaranteed to be (non)zero. */
7969 nonzero = nonzero_bits (x, mode);
7970
7971 /* If none of the bits in X are needed, return a zero. */
7972 if (!just_select && (nonzero & mask) == 0 && !side_effects_p (x))
7973 x = const0_rtx;
7974
7975 /* If X is a CONST_INT, return a new one. Do this here since the
7976 test below will fail. */
7977 if (CONST_INT_P (x))
7978 {
7979 if (SCALAR_INT_MODE_P (mode))
7980 return gen_int_mode (INTVAL (x) & mask, mode);
7981 else
7982 {
7983 x = GEN_INT (INTVAL (x) & mask);
7984 return gen_lowpart_common (mode, x);
7985 }
7986 }
7987
7988 /* If X is narrower than MODE and we want all the bits in X's mode, just
7989 get X in the proper mode. */
7990 if (GET_MODE_SIZE (GET_MODE (x)) < GET_MODE_SIZE (mode)
7991 && (GET_MODE_MASK (GET_MODE (x)) & ~mask) == 0)
7992 return gen_lowpart (mode, x);
7993
7994 /* We can ignore the effect of a SUBREG if it narrows the mode or
7995 if the constant masks to zero all the bits the mode doesn't have. */
7996 if (GET_CODE (x) == SUBREG
7997 && subreg_lowpart_p (x)
7998 && ((GET_MODE_SIZE (GET_MODE (x))
7999 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
8000 || (0 == (mask
8001 & GET_MODE_MASK (GET_MODE (x))
8002 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x)))))))
8003 return force_to_mode (SUBREG_REG (x), mode, mask, next_select);
8004
8005 /* The arithmetic simplifications here only work for scalar integer modes. */
8006 if (!SCALAR_INT_MODE_P (mode) || !SCALAR_INT_MODE_P (GET_MODE (x)))
8007 return gen_lowpart_or_truncate (mode, x);
8008
8009 switch (code)
8010 {
8011 case CLOBBER:
8012 /* If X is a (clobber (const_int)), return it since we know we are
8013 generating something that won't match. */
8014 return x;
8015
8016 case SIGN_EXTEND:
8017 case ZERO_EXTEND:
8018 case ZERO_EXTRACT:
8019 case SIGN_EXTRACT:
8020 x = expand_compound_operation (x);
8021 if (GET_CODE (x) != code)
8022 return force_to_mode (x, mode, mask, next_select);
8023 break;
8024
8025 case TRUNCATE:
8026 /* Similarly for a truncate. */
8027 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
8028
8029 case AND:
8030 /* If this is an AND with a constant, convert it into an AND
8031 whose constant is the AND of that constant with MASK. If it
8032 remains an AND of MASK, delete it since it is redundant. */
8033
8034 if (CONST_INT_P (XEXP (x, 1)))
8035 {
8036 x = simplify_and_const_int (x, op_mode, XEXP (x, 0),
8037 mask & INTVAL (XEXP (x, 1)));
8038
8039 /* If X is still an AND, see if it is an AND with a mask that
8040 is just some low-order bits. If so, and it is MASK, we don't
8041 need it. */
8042
8043 if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
8044 && ((INTVAL (XEXP (x, 1)) & GET_MODE_MASK (GET_MODE (x)))
8045 == mask))
8046 x = XEXP (x, 0);
8047
8048 /* If it remains an AND, try making another AND with the bits
8049 in the mode mask that aren't in MASK turned on. If the
8050 constant in the AND is wide enough, this might make a
8051 cheaper constant. */
8052
8053 if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
8054 && GET_MODE_MASK (GET_MODE (x)) != mask
8055 && GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT)
8056 {
8057 unsigned HOST_WIDE_INT cval
8058 = UINTVAL (XEXP (x, 1))
8059 | (GET_MODE_MASK (GET_MODE (x)) & ~mask);
8060 int width = GET_MODE_BITSIZE (GET_MODE (x));
8061 rtx y;
8062
8063 /* If MODE is narrower than HOST_WIDE_INT and CVAL is a negative
8064 number, sign extend it. */
8065 if (width > 0 && width < HOST_BITS_PER_WIDE_INT
8066 && (cval & ((unsigned HOST_WIDE_INT) 1 << (width - 1))) != 0)
8067 cval |= (unsigned HOST_WIDE_INT) -1 << width;
8068
8069 y = simplify_gen_binary (AND, GET_MODE (x),
8070 XEXP (x, 0), GEN_INT (cval));
8071 if (rtx_cost (y, SET, optimize_this_for_speed_p)
8072 < rtx_cost (x, SET, optimize_this_for_speed_p))
8073 x = y;
8074 }
8075
8076 break;
8077 }
8078
8079 goto binop;
8080
8081 case PLUS:
8082 /* In (and (plus FOO C1) M), if M is a mask that just turns off
8083 low-order bits (as in an alignment operation) and FOO is already
8084 aligned to that boundary, mask C1 to that boundary as well.
8085 This may eliminate that PLUS and, later, the AND. */
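      /* For instance, with MASK 0xfffffffc and FOO known to have its two
	 low bits clear, (plus FOO (const_int 9)) can be narrowed to
	 (plus FOO (const_int 8)): the bit that changes is masked off
	 anyway and cannot generate a carry into the bits that are kept.  */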
8086
8087 {
8088 unsigned int width = GET_MODE_BITSIZE (mode);
8089 unsigned HOST_WIDE_INT smask = mask;
8090
8091 /* If MODE is narrower than HOST_WIDE_INT and mask is a negative
8092 number, sign extend it. */
8093
8094 if (width < HOST_BITS_PER_WIDE_INT
8095 && (smask & ((unsigned HOST_WIDE_INT) 1 << (width - 1))) != 0)
8096 smask |= (unsigned HOST_WIDE_INT) (-1) << width;
8097
8098 if (CONST_INT_P (XEXP (x, 1))
8099 && exact_log2 (- smask) >= 0
8100 && (nonzero_bits (XEXP (x, 0), mode) & ~smask) == 0
8101 && (INTVAL (XEXP (x, 1)) & ~smask) != 0)
8102 return force_to_mode (plus_constant (XEXP (x, 0),
8103 (INTVAL (XEXP (x, 1)) & smask)),
8104 mode, smask, next_select);
8105 }
8106
8107 /* ... fall through ... */
8108
8109 case MULT:
8110 /* For PLUS, MINUS and MULT, we need any bits less significant than the
8111 most significant bit in MASK since carries from those bits will
8112 affect the bits we are interested in. */
8113 mask = fuller_mask;
8114 goto binop;
8115
8116 case MINUS:
8117 /* If X is (minus C Y) where C's least set bit is larger than any bit
8118 in the mask, then we may replace with (neg Y). */
8119 if (CONST_INT_P (XEXP (x, 0))
8120 && (((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 0))
8121 & -INTVAL (XEXP (x, 0))))
8122 > mask))
8123 {
8124 x = simplify_gen_unary (NEG, GET_MODE (x), XEXP (x, 1),
8125 GET_MODE (x));
8126 return force_to_mode (x, mode, mask, next_select);
8127 }
8128
8129 /* Similarly, if C contains every bit in the fuller_mask, then we may
8130 replace with (not Y). */
8131 if (CONST_INT_P (XEXP (x, 0))
8132 && ((UINTVAL (XEXP (x, 0)) | fuller_mask) == UINTVAL (XEXP (x, 0))))
8133 {
8134 x = simplify_gen_unary (NOT, GET_MODE (x),
8135 XEXP (x, 1), GET_MODE (x));
8136 return force_to_mode (x, mode, mask, next_select);
8137 }
8138
8139 mask = fuller_mask;
8140 goto binop;
8141
8142 case IOR:
8143 case XOR:
8144 /* If X is (ior (lshiftrt FOO C1) C2), try to commute the IOR and
8145 LSHIFTRT so we end up with an (and (lshiftrt (ior ...) ...) ...)
8146 operation which may be a bitfield extraction. Ensure that the
8147 constant we form is not wider than the mode of X. */
8148
8149 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8150 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8151 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
8152 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
8153 && CONST_INT_P (XEXP (x, 1))
8154 && ((INTVAL (XEXP (XEXP (x, 0), 1))
8155 + floor_log2 (INTVAL (XEXP (x, 1))))
8156 < GET_MODE_BITSIZE (GET_MODE (x)))
8157 && (UINTVAL (XEXP (x, 1))
8158 & ~nonzero_bits (XEXP (x, 0), GET_MODE (x))) == 0)
8159 {
8160 temp = GEN_INT ((INTVAL (XEXP (x, 1)) & mask)
8161 << INTVAL (XEXP (XEXP (x, 0), 1)));
8162 temp = simplify_gen_binary (GET_CODE (x), GET_MODE (x),
8163 XEXP (XEXP (x, 0), 0), temp);
8164 x = simplify_gen_binary (LSHIFTRT, GET_MODE (x), temp,
8165 XEXP (XEXP (x, 0), 1));
8166 return force_to_mode (x, mode, mask, next_select);
8167 }
8168
8169 binop:
8170 /* For most binary operations, just propagate into the operation and
8171 change the mode if we have an operation of that mode. */
8172
8173 op0 = force_to_mode (XEXP (x, 0), mode, mask, next_select);
8174 op1 = force_to_mode (XEXP (x, 1), mode, mask, next_select);
8175
8176 /* If we ended up truncating both operands, truncate the result of the
8177 operation instead. */
8178 if (GET_CODE (op0) == TRUNCATE
8179 && GET_CODE (op1) == TRUNCATE)
8180 {
8181 op0 = XEXP (op0, 0);
8182 op1 = XEXP (op1, 0);
8183 }
8184
8185 op0 = gen_lowpart_or_truncate (op_mode, op0);
8186 op1 = gen_lowpart_or_truncate (op_mode, op1);
8187
8188 if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8189 x = simplify_gen_binary (code, op_mode, op0, op1);
8190 break;
8191
8192 case ASHIFT:
8193 /* For left shifts, do the same, but just for the first operand.
8194 However, we cannot do anything with shifts where we cannot
8195 guarantee that the counts are smaller than the size of the mode
8196 because such a count will have a different meaning in a
8197 wider mode. */
8198
8199 if (! (CONST_INT_P (XEXP (x, 1))
8200 && INTVAL (XEXP (x, 1)) >= 0
8201 && INTVAL (XEXP (x, 1)) < GET_MODE_BITSIZE (mode))
8202 && ! (GET_MODE (XEXP (x, 1)) != VOIDmode
8203 && (nonzero_bits (XEXP (x, 1), GET_MODE (XEXP (x, 1)))
8204 < (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode))))
8205 break;
8206
8207 /* If the shift count is a constant and we can do arithmetic in
8208 the mode of the shift, refine which bits we need. Otherwise, use the
8209 conservative form of the mask. */
8210 if (CONST_INT_P (XEXP (x, 1))
8211 && INTVAL (XEXP (x, 1)) >= 0
8212 && INTVAL (XEXP (x, 1)) < GET_MODE_BITSIZE (op_mode)
8213 && GET_MODE_BITSIZE (op_mode) <= HOST_BITS_PER_WIDE_INT)
8214 mask >>= INTVAL (XEXP (x, 1));
8215 else
8216 mask = fuller_mask;
8217
8218 op0 = gen_lowpart_or_truncate (op_mode,
8219 force_to_mode (XEXP (x, 0), op_mode,
8220 mask, next_select));
8221
8222 if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0))
8223 x = simplify_gen_binary (code, op_mode, op0, XEXP (x, 1));
8224 break;
8225
8226 case LSHIFTRT:
8227 /* Here we can only do something if the shift count is a constant,
8228 this shift constant is valid for the host, and we can do arithmetic
8229 in OP_MODE. */
8230
8231 if (CONST_INT_P (XEXP (x, 1))
8232 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
8233 && GET_MODE_BITSIZE (op_mode) <= HOST_BITS_PER_WIDE_INT)
8234 {
8235 rtx inner = XEXP (x, 0);
8236 unsigned HOST_WIDE_INT inner_mask;
8237
8238 /* Select the mask of the bits we need for the shift operand. */
8239 inner_mask = mask << INTVAL (XEXP (x, 1));
8240
8241 /* We can only change the mode of the shift if we can do arithmetic
8242 in the mode of the shift and INNER_MASK is no wider than the
8243 width of X's mode. */
8244 if ((inner_mask & ~GET_MODE_MASK (GET_MODE (x))) != 0)
8245 op_mode = GET_MODE (x);
8246
8247 inner = force_to_mode (inner, op_mode, inner_mask, next_select);
8248
8249 if (GET_MODE (x) != op_mode || inner != XEXP (x, 0))
8250 x = simplify_gen_binary (LSHIFTRT, op_mode, inner, XEXP (x, 1));
8251 }
8252
8253 /* If we have (and (lshiftrt FOO C1) C2) where the combination of the
8254 shift and AND produces only copies of the sign bit (C2 is one less
8255 than a power of two), we can do this with just a shift. */
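/* For example, in SImode, if FOO is known to have at least four sign-bit
   copies, (and (lshiftrt FOO 28) 7) can be rewritten as the single shift
   (lshiftrt FOO 29).  (Illustrative example only.)  */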
8256
8257 if (GET_CODE (x) == LSHIFTRT
8258 && CONST_INT_P (XEXP (x, 1))
8259 /* The shift puts one of the sign bit copies in the least significant
8260 bit. */
8261 && ((INTVAL (XEXP (x, 1))
8262 + num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
8263 >= GET_MODE_BITSIZE (GET_MODE (x)))
8264 && exact_log2 (mask + 1) >= 0
8265 /* Number of bits left after the shift must be more than the mask
8266 needs. */
8267 && ((INTVAL (XEXP (x, 1)) + exact_log2 (mask + 1))
8268 <= GET_MODE_BITSIZE (GET_MODE (x)))
8269 /* Must be more sign bit copies than the mask needs. */
8270 && ((int) num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
8271 >= exact_log2 (mask + 1)))
8272 x = simplify_gen_binary (LSHIFTRT, GET_MODE (x), XEXP (x, 0),
8273 GEN_INT (GET_MODE_BITSIZE (GET_MODE (x))
8274 - exact_log2 (mask + 1)));
8275
8276 goto shiftrt;
8277
8278 case ASHIFTRT:
8279 /* If we are just looking for the sign bit, we don't need this shift at
8280 all, even if it has a variable count. */
8281 if (GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
8282 && (mask == ((unsigned HOST_WIDE_INT) 1
8283 << (GET_MODE_BITSIZE (GET_MODE (x)) - 1))))
8284 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
8285
8286 /* If this is a shift by a constant, get a mask that contains those bits
8287 that are not copies of the sign bit. We then have two cases: If
8288 MASK only includes those bits, this can be a logical shift, which may
8289 allow simplifications. If MASK is a single-bit field not within
8290 those bits, we are requesting a copy of the sign bit and hence can
8291 shift the sign bit to the appropriate location. */
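/* E.g. in SImode, (ashiftrt X 24) under a MASK of 0xff selects no sign-bit
   copies beyond those bits and can become the logical (lshiftrt X 24),
   while a single-bit MASK lying among the sign-bit copies instead turns
   into a shift that moves the sign bit to that position.  (Illustrative
   example only.)  */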
8292
8293 if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) >= 0
8294 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
8295 {
8296 int i;
8297
8298 /* If the considered data is wider than HOST_WIDE_INT, we can't
8299 represent a mask for all its bits in a single scalar.
8300 But we only care about the lower bits, so calculate these. */
8301
8302 if (GET_MODE_BITSIZE (GET_MODE (x)) > HOST_BITS_PER_WIDE_INT)
8303 {
8304 nonzero = ~(unsigned HOST_WIDE_INT) 0;
8305
8306 /* GET_MODE_BITSIZE (GET_MODE (x)) - INTVAL (XEXP (x, 1))
8307 is the number of bits a full-width mask would have set.
8308 We need only shift if these are fewer than nonzero can
8309 hold. If not, we must keep all bits set in nonzero. */
8310
8311 if (GET_MODE_BITSIZE (GET_MODE (x)) - INTVAL (XEXP (x, 1))
8312 < HOST_BITS_PER_WIDE_INT)
8313 nonzero >>= INTVAL (XEXP (x, 1))
8314 + HOST_BITS_PER_WIDE_INT
8315 - GET_MODE_BITSIZE (GET_MODE (x));
8316 }
8317 else
8318 {
8319 nonzero = GET_MODE_MASK (GET_MODE (x));
8320 nonzero >>= INTVAL (XEXP (x, 1));
8321 }
8322
8323 if ((mask & ~nonzero) == 0)
8324 {
8325 x = simplify_shift_const (NULL_RTX, LSHIFTRT, GET_MODE (x),
8326 XEXP (x, 0), INTVAL (XEXP (x, 1)));
8327 if (GET_CODE (x) != ASHIFTRT)
8328 return force_to_mode (x, mode, mask, next_select);
8329 }
8330
8331 else if ((i = exact_log2 (mask)) >= 0)
8332 {
8333 x = simplify_shift_const
8334 (NULL_RTX, LSHIFTRT, GET_MODE (x), XEXP (x, 0),
8335 GET_MODE_BITSIZE (GET_MODE (x)) - 1 - i);
8336
8337 if (GET_CODE (x) != ASHIFTRT)
8338 return force_to_mode (x, mode, mask, next_select);
8339 }
8340 }
8341
8342 /* If MASK is 1, convert this to an LSHIFTRT. This can be done
8343 even if the shift count isn't a constant. */
8344 if (mask == 1)
8345 x = simplify_gen_binary (LSHIFTRT, GET_MODE (x),
8346 XEXP (x, 0), XEXP (x, 1));
8347
8348 shiftrt:
8349
8350 /* If this is a zero- or sign-extension operation that just affects bits
8351 we don't care about, remove it. Be sure the call above returned
8352 something that is still a shift. */
8353
8354 if ((GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ASHIFTRT)
8355 && CONST_INT_P (XEXP (x, 1))
8356 && INTVAL (XEXP (x, 1)) >= 0
8357 && (INTVAL (XEXP (x, 1))
8358 <= GET_MODE_BITSIZE (GET_MODE (x)) - (floor_log2 (mask) + 1))
8359 && GET_CODE (XEXP (x, 0)) == ASHIFT
8360 && XEXP (XEXP (x, 0), 1) == XEXP (x, 1))
8361 return force_to_mode (XEXP (XEXP (x, 0), 0), mode, mask,
8362 next_select);
8363
8364 break;
8365
8366 case ROTATE:
8367 case ROTATERT:
8368 /* If the shift count is constant and we can do computations
8369 in the mode of X, compute where the bits we care about are.
8370 Otherwise, we can't do anything. Don't change the mode of
8371 the shift or propagate MODE into the shift, though. */
8372 if (CONST_INT_P (XEXP (x, 1))
8373 && INTVAL (XEXP (x, 1)) >= 0)
8374 {
8375 temp = simplify_binary_operation (code == ROTATE ? ROTATERT : ROTATE,
8376 GET_MODE (x), GEN_INT (mask),
8377 XEXP (x, 1));
8378 if (temp && CONST_INT_P (temp))
8379 SUBST (XEXP (x, 0),
8380 force_to_mode (XEXP (x, 0), GET_MODE (x),
8381 INTVAL (temp), next_select));
8382 }
8383 break;
8384
8385 case NEG:
8386 /* If we just want the low-order bit, the NEG isn't needed since it
8387 won't change the low-order bit. */
8388 if (mask == 1)
8389 return force_to_mode (XEXP (x, 0), mode, mask, just_select);
8390
8391 /* We need any bits less significant than the most significant bit in
8392 MASK since carries from those bits will affect the bits we are
8393 interested in. */
8394 mask = fuller_mask;
8395 goto unop;
8396
8397 case NOT:
8398 /* (not FOO) is (xor FOO CONST), so if FOO is an LSHIFTRT, we can do the
8399 same as the XOR case above. Ensure that the constant we form is not
8400 wider than the mode of X. */
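/* Concretely, (not (lshiftrt FOO C)) restricted to MASK becomes
   (lshiftrt (xor FOO (MASK << C)) C), which is what the code below
   builds.  (Illustrative restatement of the transformation.)  */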
8401
8402 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8403 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8404 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
8405 && (INTVAL (XEXP (XEXP (x, 0), 1)) + floor_log2 (mask)
8406 < GET_MODE_BITSIZE (GET_MODE (x)))
8407 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT)
8408 {
8409 temp = gen_int_mode (mask << INTVAL (XEXP (XEXP (x, 0), 1)),
8410 GET_MODE (x));
8411 temp = simplify_gen_binary (XOR, GET_MODE (x),
8412 XEXP (XEXP (x, 0), 0), temp);
8413 x = simplify_gen_binary (LSHIFTRT, GET_MODE (x),
8414 temp, XEXP (XEXP (x, 0), 1));
8415
8416 return force_to_mode (x, mode, mask, next_select);
8417 }
8418
8419 /* (and (not FOO) CONST) is (not (or FOO (not CONST))), so we must
8420 use the full mask inside the NOT. */
8421 mask = fuller_mask;
8422
8423 unop:
8424 op0 = gen_lowpart_or_truncate (op_mode,
8425 force_to_mode (XEXP (x, 0), mode, mask,
8426 next_select));
8427 if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0))
8428 x = simplify_gen_unary (code, op_mode, op0, op_mode);
8429 break;
8430
8431 case NE:
8432 /* (and (ne FOO 0) CONST) can be (and FOO CONST) if CONST is included
8433 in STORE_FLAG_VALUE and FOO has a single bit that might be nonzero,
8434 and that single bit is equal to STORE_FLAG_VALUE. */
8435 if ((mask & ~STORE_FLAG_VALUE) == 0
8436 && XEXP (x, 1) == const0_rtx
8437 && GET_MODE (XEXP (x, 0)) == mode
8438 && exact_log2 (nonzero_bits (XEXP (x, 0), mode)) >= 0
8439 && (nonzero_bits (XEXP (x, 0), mode)
8440 == (unsigned HOST_WIDE_INT) STORE_FLAG_VALUE))
8441 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
8442
8443 break;
8444
8445 case IF_THEN_ELSE:
8446 /* We have no way of knowing if the IF_THEN_ELSE can itself be
8447 written in a narrower mode. We play it safe and do not do so. */
8448
8449 SUBST (XEXP (x, 1),
8450 gen_lowpart_or_truncate (GET_MODE (x),
8451 force_to_mode (XEXP (x, 1), mode,
8452 mask, next_select)));
8453 SUBST (XEXP (x, 2),
8454 gen_lowpart_or_truncate (GET_MODE (x),
8455 force_to_mode (XEXP (x, 2), mode,
8456 mask, next_select)));
8457 break;
8458
8459 default:
8460 break;
8461 }
8462
8463 /* Ensure we return a value of the proper mode. */
8464 return gen_lowpart_or_truncate (mode, x);
8465 }
8466 \f
8467 /* Return nonzero if X is an expression that has one of two values depending on
8468 whether some other value is zero or nonzero. In that case, we return the
8469 value that is being tested, *PTRUE is set to the value of X when the rtx
8470 being returned is nonzero, and *PFALSE is set to the other alternative.
8471
8472 If we return zero, we set *PTRUE and *PFALSE to X. */
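/* For example, given (if_then_else (ne R (const_int 0)) A B) we return R,
   with *PTRUE set to A and *PFALSE set to B.  (Illustrative example.)  */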
8473
8474 static rtx
8475 if_then_else_cond (rtx x, rtx *ptrue, rtx *pfalse)
8476 {
8477 enum machine_mode mode = GET_MODE (x);
8478 enum rtx_code code = GET_CODE (x);
8479 rtx cond0, cond1, true0, true1, false0, false1;
8480 unsigned HOST_WIDE_INT nz;
8481
8482 /* If we are comparing a value against zero, we are done. */
8483 if ((code == NE || code == EQ)
8484 && XEXP (x, 1) == const0_rtx)
8485 {
8486 *ptrue = (code == NE) ? const_true_rtx : const0_rtx;
8487 *pfalse = (code == NE) ? const0_rtx : const_true_rtx;
8488 return XEXP (x, 0);
8489 }
8490
8491 /* If this is a unary operation whose operand has one of two values, apply
8492 our opcode to compute those values. */
8493 else if (UNARY_P (x)
8494 && (cond0 = if_then_else_cond (XEXP (x, 0), &true0, &false0)) != 0)
8495 {
8496 *ptrue = simplify_gen_unary (code, mode, true0, GET_MODE (XEXP (x, 0)));
8497 *pfalse = simplify_gen_unary (code, mode, false0,
8498 GET_MODE (XEXP (x, 0)));
8499 return cond0;
8500 }
8501
8502 /* If this is a COMPARE, do nothing, since the IF_THEN_ELSE we would
8503 make can't possibly match and would suppress other optimizations. */
8504 else if (code == COMPARE)
8505 ;
8506
8507 /* If this is a binary operation, see if either side has only one of two
8508 values. If either one does or if both do and they are conditional on
8509 the same value, compute the new true and false values. */
8510 else if (BINARY_P (x))
8511 {
8512 cond0 = if_then_else_cond (XEXP (x, 0), &true0, &false0);
8513 cond1 = if_then_else_cond (XEXP (x, 1), &true1, &false1);
8514
8515 if ((cond0 != 0 || cond1 != 0)
8516 && ! (cond0 != 0 && cond1 != 0 && ! rtx_equal_p (cond0, cond1)))
8517 {
8518 /* If if_then_else_cond returned zero, then true/false are the
8519 same rtl. We must copy one of them to prevent invalid rtl
8520 sharing. */
8521 if (cond0 == 0)
8522 true0 = copy_rtx (true0);
8523 else if (cond1 == 0)
8524 true1 = copy_rtx (true1);
8525
8526 if (COMPARISON_P (x))
8527 {
8528 *ptrue = simplify_gen_relational (code, mode, VOIDmode,
8529 true0, true1);
8530 *pfalse = simplify_gen_relational (code, mode, VOIDmode,
8531 false0, false1);
8532 }
8533 else
8534 {
8535 *ptrue = simplify_gen_binary (code, mode, true0, true1);
8536 *pfalse = simplify_gen_binary (code, mode, false0, false1);
8537 }
8538
8539 return cond0 ? cond0 : cond1;
8540 }
8541
8542 /* See if we have PLUS, IOR, XOR, MINUS or UMAX, where one of the
8543 operands is zero when the other is nonzero, and vice-versa,
8544 and STORE_FLAG_VALUE is 1 or -1. */
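/* With STORE_FLAG_VALUE == 1, a typical form is
   (plus (mult (ne A 0) B) (mult (eq A 0) C)), whose value is B when A is
   nonzero and C when A is zero.  (Illustrative example.)  */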
8545
8546 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
8547 && (code == PLUS || code == IOR || code == XOR || code == MINUS
8548 || code == UMAX)
8549 && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
8550 {
8551 rtx op0 = XEXP (XEXP (x, 0), 1);
8552 rtx op1 = XEXP (XEXP (x, 1), 1);
8553
8554 cond0 = XEXP (XEXP (x, 0), 0);
8555 cond1 = XEXP (XEXP (x, 1), 0);
8556
8557 if (COMPARISON_P (cond0)
8558 && COMPARISON_P (cond1)
8559 && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
8560 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
8561 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
8562 || ((swap_condition (GET_CODE (cond0))
8563 == reversed_comparison_code (cond1, NULL))
8564 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
8565 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
8566 && ! side_effects_p (x))
8567 {
8568 *ptrue = simplify_gen_binary (MULT, mode, op0, const_true_rtx);
8569 *pfalse = simplify_gen_binary (MULT, mode,
8570 (code == MINUS
8571 ? simplify_gen_unary (NEG, mode,
8572 op1, mode)
8573 : op1),
8574 const_true_rtx);
8575 return cond0;
8576 }
8577 }
8578
8579 /* Similarly for MULT, AND and UMIN, except that for these the result
8580 is always zero. */
8581 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
8582 && (code == MULT || code == AND || code == UMIN)
8583 && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
8584 {
8585 cond0 = XEXP (XEXP (x, 0), 0);
8586 cond1 = XEXP (XEXP (x, 1), 0);
8587
8588 if (COMPARISON_P (cond0)
8589 && COMPARISON_P (cond1)
8590 && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
8591 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
8592 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
8593 || ((swap_condition (GET_CODE (cond0))
8594 == reversed_comparison_code (cond1, NULL))
8595 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
8596 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
8597 && ! side_effects_p (x))
8598 {
8599 *ptrue = *pfalse = const0_rtx;
8600 return cond0;
8601 }
8602 }
8603 }
8604
8605 else if (code == IF_THEN_ELSE)
8606 {
8607 /* If we have IF_THEN_ELSE already, extract the condition and
8608 canonicalize it if it is NE or EQ. */
8609 cond0 = XEXP (x, 0);
8610 *ptrue = XEXP (x, 1), *pfalse = XEXP (x, 2);
8611 if (GET_CODE (cond0) == NE && XEXP (cond0, 1) == const0_rtx)
8612 return XEXP (cond0, 0);
8613 else if (GET_CODE (cond0) == EQ && XEXP (cond0, 1) == const0_rtx)
8614 {
8615 *ptrue = XEXP (x, 2), *pfalse = XEXP (x, 1);
8616 return XEXP (cond0, 0);
8617 }
8618 else
8619 return cond0;
8620 }
8621
8622 /* If X is a SUBREG, we can narrow both the true and false values
8623 of the inner expression, if there is a condition. */
8624 else if (code == SUBREG
8625 && 0 != (cond0 = if_then_else_cond (SUBREG_REG (x),
8626 &true0, &false0)))
8627 {
8628 true0 = simplify_gen_subreg (mode, true0,
8629 GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
8630 false0 = simplify_gen_subreg (mode, false0,
8631 GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
8632 if (true0 && false0)
8633 {
8634 *ptrue = true0;
8635 *pfalse = false0;
8636 return cond0;
8637 }
8638 }
8639
8640 /* If X is a constant, this isn't special and will cause confusion
8641 if we treat it as such. Likewise if it is equivalent to a constant. */
8642 else if (CONSTANT_P (x)
8643 || ((cond0 = get_last_value (x)) != 0 && CONSTANT_P (cond0)))
8644 ;
8645
8646 /* If we're in BImode, canonicalize on 0 and STORE_FLAG_VALUE, as that
8647 will be least confusing to the rest of the compiler. */
8648 else if (mode == BImode)
8649 {
8650 *ptrue = GEN_INT (STORE_FLAG_VALUE), *pfalse = const0_rtx;
8651 return x;
8652 }
8653
8654 /* If X is known to be either 0 or -1, those are the true and
8655 false values when testing X. */
8656 else if (x == constm1_rtx || x == const0_rtx
8657 || (mode != VOIDmode
8658 && num_sign_bit_copies (x, mode) == GET_MODE_BITSIZE (mode)))
8659 {
8660 *ptrue = constm1_rtx, *pfalse = const0_rtx;
8661 return x;
8662 }
8663
8664 /* Likewise for 0 or a single bit. */
8665 else if (SCALAR_INT_MODE_P (mode)
8666 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
8667 && exact_log2 (nz = nonzero_bits (x, mode)) >= 0)
8668 {
8669 *ptrue = gen_int_mode (nz, mode), *pfalse = const0_rtx;
8670 return x;
8671 }
8672
8673 /* Otherwise fail; show no condition with true and false values the same. */
8674 *ptrue = *pfalse = x;
8675 return 0;
8676 }
8677 \f
8678 /* Return the value of expression X given the fact that condition COND
8679 is known to be true when applied to REG as its first operand and VAL
8680 as its second. X is known to not be shared and so can be modified in
8681 place.
8682
8683 We only handle the simplest cases, and specifically those cases that
8684 arise with IF_THEN_ELSE expressions. */
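/* For instance, if COND is GE and VAL is (const_int 0), so that REG is
   known to be nonnegative, then (abs REG) simplifies to just REG.
   (Illustrative example.)  */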
8685
8686 static rtx
8687 known_cond (rtx x, enum rtx_code cond, rtx reg, rtx val)
8688 {
8689 enum rtx_code code = GET_CODE (x);
8690 rtx temp;
8691 const char *fmt;
8692 int i, j;
8693
8694 if (side_effects_p (x))
8695 return x;
8696
8697 /* If either operand of the condition is a floating point value,
8698 then we have to avoid collapsing an EQ comparison. */
8699 if (cond == EQ
8700 && rtx_equal_p (x, reg)
8701 && ! FLOAT_MODE_P (GET_MODE (x))
8702 && ! FLOAT_MODE_P (GET_MODE (val)))
8703 return val;
8704
8705 if (cond == UNEQ && rtx_equal_p (x, reg))
8706 return val;
8707
8708 /* If X is (abs REG) and we know something about REG's relationship
8709 with zero, we may be able to simplify this. */
8710
8711 if (code == ABS && rtx_equal_p (XEXP (x, 0), reg) && val == const0_rtx)
8712 switch (cond)
8713 {
8714 case GE: case GT: case EQ:
8715 return XEXP (x, 0);
8716 case LT: case LE:
8717 return simplify_gen_unary (NEG, GET_MODE (XEXP (x, 0)),
8718 XEXP (x, 0),
8719 GET_MODE (XEXP (x, 0)));
8720 default:
8721 break;
8722 }
8723
8724 /* The only other cases we handle are MIN, MAX, and comparisons if the
8725 operands are the same as REG and VAL. */
8726
8727 else if (COMPARISON_P (x) || COMMUTATIVE_ARITH_P (x))
8728 {
8729 if (rtx_equal_p (XEXP (x, 0), val))
8730 cond = swap_condition (cond), temp = val, val = reg, reg = temp;
8731
8732 if (rtx_equal_p (XEXP (x, 0), reg) && rtx_equal_p (XEXP (x, 1), val))
8733 {
8734 if (COMPARISON_P (x))
8735 {
8736 if (comparison_dominates_p (cond, code))
8737 return const_true_rtx;
8738
8739 code = reversed_comparison_code (x, NULL);
8740 if (code != UNKNOWN
8741 && comparison_dominates_p (cond, code))
8742 return const0_rtx;
8743 else
8744 return x;
8745 }
8746 else if (code == SMAX || code == SMIN
8747 || code == UMIN || code == UMAX)
8748 {
8749 int unsignedp = (code == UMIN || code == UMAX);
8750
8751 /* Do not reverse the condition when it is NE or EQ.
8752 This is because we cannot conclude anything about
8753 the value of 'SMAX (x, y)' when x is not equal to y,
8754 but we can when x equals y. */
8755 if ((code == SMAX || code == UMAX)
8756 && ! (cond == EQ || cond == NE))
8757 cond = reverse_condition (cond);
8758
8759 switch (cond)
8760 {
8761 case GE: case GT:
8762 return unsignedp ? x : XEXP (x, 1);
8763 case LE: case LT:
8764 return unsignedp ? x : XEXP (x, 0);
8765 case GEU: case GTU:
8766 return unsignedp ? XEXP (x, 1) : x;
8767 case LEU: case LTU:
8768 return unsignedp ? XEXP (x, 0) : x;
8769 default:
8770 break;
8771 }
8772 }
8773 }
8774 }
8775 else if (code == SUBREG)
8776 {
8777 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (x));
8778 rtx new_rtx, r = known_cond (SUBREG_REG (x), cond, reg, val);
8779
8780 if (SUBREG_REG (x) != r)
8781 {
8782 /* We must simplify subreg here, before we lose track of the
8783 original inner_mode. */
8784 new_rtx = simplify_subreg (GET_MODE (x), r,
8785 inner_mode, SUBREG_BYTE (x));
8786 if (new_rtx)
8787 return new_rtx;
8788 else
8789 SUBST (SUBREG_REG (x), r);
8790 }
8791
8792 return x;
8793 }
8794 /* We don't have to handle SIGN_EXTEND here, because even in the
8795 case of replacing something with a modeless CONST_INT, a
8796 CONST_INT is already (supposed to be) a valid sign extension for
8797 its narrower mode, which implies it's already properly
8798 sign-extended for the wider mode. Now, for ZERO_EXTEND, the
8799 story is different. */
8800 else if (code == ZERO_EXTEND)
8801 {
8802 enum machine_mode inner_mode = GET_MODE (XEXP (x, 0));
8803 rtx new_rtx, r = known_cond (XEXP (x, 0), cond, reg, val);
8804
8805 if (XEXP (x, 0) != r)
8806 {
8807 /* We must simplify the zero_extend here, before we lose
8808 track of the original inner_mode. */
8809 new_rtx = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
8810 r, inner_mode);
8811 if (new_rtx)
8812 return new_rtx;
8813 else
8814 SUBST (XEXP (x, 0), r);
8815 }
8816
8817 return x;
8818 }
8819
8820 fmt = GET_RTX_FORMAT (code);
8821 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
8822 {
8823 if (fmt[i] == 'e')
8824 SUBST (XEXP (x, i), known_cond (XEXP (x, i), cond, reg, val));
8825 else if (fmt[i] == 'E')
8826 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8827 SUBST (XVECEXP (x, i, j), known_cond (XVECEXP (x, i, j),
8828 cond, reg, val));
8829 }
8830
8831 return x;
8832 }
8833 \f
8834 /* See if X and Y are equal for the purposes of seeing if we can rewrite an
8835 assignment as a field assignment. */
8836
8837 static int
8838 rtx_equal_for_field_assignment_p (rtx x, rtx y)
8839 {
8840 if (x == y || rtx_equal_p (x, y))
8841 return 1;
8842
8843 if (x == 0 || y == 0 || GET_MODE (x) != GET_MODE (y))
8844 return 0;
8845
8846 /* Check for a paradoxical SUBREG of a MEM compared with the MEM.
8847 Note that all SUBREGs of MEM are paradoxical; otherwise they
8848 would have been rewritten. */
8849 if (MEM_P (x) && GET_CODE (y) == SUBREG
8850 && MEM_P (SUBREG_REG (y))
8851 && rtx_equal_p (SUBREG_REG (y),
8852 gen_lowpart (GET_MODE (SUBREG_REG (y)), x)))
8853 return 1;
8854
8855 if (MEM_P (y) && GET_CODE (x) == SUBREG
8856 && MEM_P (SUBREG_REG (x))
8857 && rtx_equal_p (SUBREG_REG (x),
8858 gen_lowpart (GET_MODE (SUBREG_REG (x)), y)))
8859 return 1;
8860
8861 /* We used to see if get_last_value of X and Y were the same but that's
8862 not correct. In one direction, we'll cause the assignment to have
8863 the wrong destination and in the other, we'll import a register into this
8864 insn that might already have been dead. So fail if none of the
8865 above cases are true. */
8866 return 0;
8867 }
8868 \f
8869 /* See if X, a SET operation, can be rewritten as a bit-field assignment.
8870 Return that assignment if so.
8871
8872 We only handle the most common cases. */
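/* E.g. (set R (ior (ashift (const_int 1) POS) R)) sets the single bit at
   POS in R; the code below rewrites it, when make_extraction succeeds, as
   a store of (const_int 1) into a one-bit field of R.  (Illustrative
   example.)  */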
8873
8874 static rtx
8875 make_field_assignment (rtx x)
8876 {
8877 rtx dest = SET_DEST (x);
8878 rtx src = SET_SRC (x);
8879 rtx assign;
8880 rtx rhs, lhs;
8881 HOST_WIDE_INT c1;
8882 HOST_WIDE_INT pos;
8883 unsigned HOST_WIDE_INT len;
8884 rtx other;
8885 enum machine_mode mode;
8886
8887 /* If SRC was (and (not (ashift (const_int 1) POS)) DEST), this is
8888 a clear of a one-bit field. We will have changed it to
8889 (and (rotate (const_int -2) POS) DEST), so check for that. Also check
8890 for a SUBREG. */
8891
8892 if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == ROTATE
8893 && CONST_INT_P (XEXP (XEXP (src, 0), 0))
8894 && INTVAL (XEXP (XEXP (src, 0), 0)) == -2
8895 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
8896 {
8897 assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
8898 1, 1, 1, 0);
8899 if (assign != 0)
8900 return gen_rtx_SET (VOIDmode, assign, const0_rtx);
8901 return x;
8902 }
8903
8904 if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == SUBREG
8905 && subreg_lowpart_p (XEXP (src, 0))
8906 && (GET_MODE_SIZE (GET_MODE (XEXP (src, 0)))
8907 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (XEXP (src, 0)))))
8908 && GET_CODE (SUBREG_REG (XEXP (src, 0))) == ROTATE
8909 && CONST_INT_P (XEXP (SUBREG_REG (XEXP (src, 0)), 0))
8910 && INTVAL (XEXP (SUBREG_REG (XEXP (src, 0)), 0)) == -2
8911 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
8912 {
8913 assign = make_extraction (VOIDmode, dest, 0,
8914 XEXP (SUBREG_REG (XEXP (src, 0)), 1),
8915 1, 1, 1, 0);
8916 if (assign != 0)
8917 return gen_rtx_SET (VOIDmode, assign, const0_rtx);
8918 return x;
8919 }
8920
8921 /* If SRC is (ior (ashift (const_int 1) POS) DEST), this is a set of a
8922 one-bit field. */
8923 if (GET_CODE (src) == IOR && GET_CODE (XEXP (src, 0)) == ASHIFT
8924 && XEXP (XEXP (src, 0), 0) == const1_rtx
8925 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
8926 {
8927 assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
8928 1, 1, 1, 0);
8929 if (assign != 0)
8930 return gen_rtx_SET (VOIDmode, assign, const1_rtx);
8931 return x;
8932 }
8933
8934 /* If DEST is already a field assignment, i.e. ZERO_EXTRACT, and the
8935 SRC is an AND with all bits of that field set, then we can discard
8936 the AND. */
8937 if (GET_CODE (dest) == ZERO_EXTRACT
8938 && CONST_INT_P (XEXP (dest, 1))
8939 && GET_CODE (src) == AND
8940 && CONST_INT_P (XEXP (src, 1)))
8941 {
8942 HOST_WIDE_INT width = INTVAL (XEXP (dest, 1));
8943 unsigned HOST_WIDE_INT and_mask = INTVAL (XEXP (src, 1));
8944 unsigned HOST_WIDE_INT ze_mask;
8945
8946 if (width >= HOST_BITS_PER_WIDE_INT)
8947 ze_mask = -1;
8948 else
8949 ze_mask = ((unsigned HOST_WIDE_INT)1 << width) - 1;
8950
8951 /* Complete overlap. We can remove the source AND. */
8952 if ((and_mask & ze_mask) == ze_mask)
8953 return gen_rtx_SET (VOIDmode, dest, XEXP (src, 0));
8954
8955 /* Partial overlap. We can reduce the source AND. */
8956 if ((and_mask & ze_mask) != and_mask)
8957 {
8958 mode = GET_MODE (src);
8959 src = gen_rtx_AND (mode, XEXP (src, 0),
8960 gen_int_mode (and_mask & ze_mask, mode));
8961 return gen_rtx_SET (VOIDmode, dest, src);
8962 }
8963 }
8964
8965 /* The other case we handle is assignments into a constant-position
8966 field. They look like (ior/xor (and DEST C1) OTHER). If C1 represents
8967 a mask that has all one bits except for a group of zero bits and
8968 OTHER is known to have zeros where C1 has ones, this is such an
8969 assignment. Compute the position and length from C1. Shift OTHER
8970 to the appropriate position, force it to the required mode, and
8971 make the extraction. Check for the AND in both operands. */
8972
8973 if (GET_CODE (src) != IOR && GET_CODE (src) != XOR)
8974 return x;
8975
8976 rhs = expand_compound_operation (XEXP (src, 0));
8977 lhs = expand_compound_operation (XEXP (src, 1));
8978
8979 if (GET_CODE (rhs) == AND
8980 && CONST_INT_P (XEXP (rhs, 1))
8981 && rtx_equal_for_field_assignment_p (XEXP (rhs, 0), dest))
8982 c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
8983 else if (GET_CODE (lhs) == AND
8984 && CONST_INT_P (XEXP (lhs, 1))
8985 && rtx_equal_for_field_assignment_p (XEXP (lhs, 0), dest))
8986 c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
8987 else
8988 return x;
8989
8990 pos = get_pos_from_mask ((~c1) & GET_MODE_MASK (GET_MODE (dest)), &len);
8991 if (pos < 0 || pos + len > GET_MODE_BITSIZE (GET_MODE (dest))
8992 || GET_MODE_BITSIZE (GET_MODE (dest)) > HOST_BITS_PER_WIDE_INT
8993 || (c1 & nonzero_bits (other, GET_MODE (dest))) != 0)
8994 return x;
8995
8996 assign = make_extraction (VOIDmode, dest, pos, NULL_RTX, len, 1, 1, 0);
8997 if (assign == 0)
8998 return x;
8999
9000 /* The mode to use for the source is the mode of the assignment, or of
9001 what is inside a possible STRICT_LOW_PART. */
9002 mode = (GET_CODE (assign) == STRICT_LOW_PART
9003 ? GET_MODE (XEXP (assign, 0)) : GET_MODE (assign));
9004
9005 /* Shift OTHER right POS places and make it the source, restricting it
9006 to the proper length and mode. */
9007
9008 src = canon_reg_for_combine (simplify_shift_const (NULL_RTX, LSHIFTRT,
9009 GET_MODE (src),
9010 other, pos),
9011 dest);
9012 src = force_to_mode (src, mode,
9013 GET_MODE_BITSIZE (mode) >= HOST_BITS_PER_WIDE_INT
9014 ? ~(unsigned HOST_WIDE_INT) 0
9015 : ((unsigned HOST_WIDE_INT) 1 << len) - 1,
9016 0);
9017
9018 /* If SRC is masked by an AND that does not make a difference in
9019 the value being stored, strip it. */
9020 if (GET_CODE (assign) == ZERO_EXTRACT
9021 && CONST_INT_P (XEXP (assign, 1))
9022 && INTVAL (XEXP (assign, 1)) < HOST_BITS_PER_WIDE_INT
9023 && GET_CODE (src) == AND
9024 && CONST_INT_P (XEXP (src, 1))
9025 && UINTVAL (XEXP (src, 1))
9026 == ((unsigned HOST_WIDE_INT) 1 << INTVAL (XEXP (assign, 1))) - 1)
9027 src = XEXP (src, 0);
9028
9029 return gen_rtx_SET (VOIDmode, assign, src);
9030 }
9031 \f
9032 /* See if X is of the form (+ (* A C) (* B C)) and convert to (* (+ A B) C)
9033 if so. */
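/* For instance, (ior (and A C) (and B C)) becomes (and (ior A B) C); the
   common operand C is factored out of the inner operations.  (Illustrative
   example.)  */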
9034
9035 static rtx
9036 apply_distributive_law (rtx x)
9037 {
9038 enum rtx_code code = GET_CODE (x);
9039 enum rtx_code inner_code;
9040 rtx lhs, rhs, other;
9041 rtx tem;
9042
9043 /* Distributivity is not true for floating point as it can change the
9044 value. So we don't do it unless -funsafe-math-optimizations. */
9045 if (FLOAT_MODE_P (GET_MODE (x))
9046 && ! flag_unsafe_math_optimizations)
9047 return x;
9048
9049 /* The outer operation can only be one of the following: */
9050 if (code != IOR && code != AND && code != XOR
9051 && code != PLUS && code != MINUS)
9052 return x;
9053
9054 lhs = XEXP (x, 0);
9055 rhs = XEXP (x, 1);
9056
9057 /* If either operand is a primitive we can't do anything, so get out
9058 fast. */
9059 if (OBJECT_P (lhs) || OBJECT_P (rhs))
9060 return x;
9061
9062 lhs = expand_compound_operation (lhs);
9063 rhs = expand_compound_operation (rhs);
9064 inner_code = GET_CODE (lhs);
9065 if (inner_code != GET_CODE (rhs))
9066 return x;
9067
9068 /* See if the inner and outer operations distribute. */
9069 switch (inner_code)
9070 {
9071 case LSHIFTRT:
9072 case ASHIFTRT:
9073 case AND:
9074 case IOR:
9075 /* These all distribute except over PLUS. */
9076 if (code == PLUS || code == MINUS)
9077 return x;
9078 break;
9079
9080 case MULT:
9081 if (code != PLUS && code != MINUS)
9082 return x;
9083 break;
9084
9085 case ASHIFT:
9086 /* This is also a multiply, so it distributes over everything. */
9087 break;
9088
9089 case SUBREG:
9090 /* Non-paradoxical SUBREGs distribute over all operations,
9091 provided the inner modes and byte offsets are the same, this
9092 is an extraction of a low-order part, we don't convert an fp
9093 operation to int or vice versa, this is not a vector mode,
9094 and we would not be converting a single-word operation into a
9095 multi-word operation. The latter test is not required, but
9096 it prevents generating unneeded multi-word operations. Some
9097 of the previous tests are redundant given the latter test,
9098 but are retained because they are required for correctness.
9099
9100 We produce the result slightly differently in this case. */
9101
9102 if (GET_MODE (SUBREG_REG (lhs)) != GET_MODE (SUBREG_REG (rhs))
9103 || SUBREG_BYTE (lhs) != SUBREG_BYTE (rhs)
9104 || ! subreg_lowpart_p (lhs)
9105 || (GET_MODE_CLASS (GET_MODE (lhs))
9106 != GET_MODE_CLASS (GET_MODE (SUBREG_REG (lhs))))
9107 || (GET_MODE_SIZE (GET_MODE (lhs))
9108 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (lhs))))
9109 || VECTOR_MODE_P (GET_MODE (lhs))
9110 || GET_MODE_SIZE (GET_MODE (SUBREG_REG (lhs))) > UNITS_PER_WORD
9111 /* Result might need to be truncated. Don't change mode if
9112 explicit truncation is needed. */
9113 || !TRULY_NOOP_TRUNCATION
9114 (GET_MODE_BITSIZE (GET_MODE (x)),
9115 GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (lhs)))))
9116 return x;
9117
9118 tem = simplify_gen_binary (code, GET_MODE (SUBREG_REG (lhs)),
9119 SUBREG_REG (lhs), SUBREG_REG (rhs));
9120 return gen_lowpart (GET_MODE (x), tem);
9121
9122 default:
9123 return x;
9124 }
9125
9126 /* Set LHS and RHS to the inner operands (A and B in the example
9127 above) and set OTHER to the common operand (C in the example).
9128 There is only one way to do this unless the inner operation is
9129 commutative. */
9130 if (COMMUTATIVE_ARITH_P (lhs)
9131 && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 0)))
9132 other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 1);
9133 else if (COMMUTATIVE_ARITH_P (lhs)
9134 && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 1)))
9135 other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 0);
9136 else if (COMMUTATIVE_ARITH_P (lhs)
9137 && rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 0)))
9138 other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 1);
9139 else if (rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 1)))
9140 other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 0);
9141 else
9142 return x;
9143
9144 /* Form the new inner operation, seeing if it simplifies first. */
9145 tem = simplify_gen_binary (code, GET_MODE (x), lhs, rhs);
9146
9147 /* There is one exception to the general way of distributing:
9148 (a | c) ^ (b | c) -> (a ^ b) & ~c */
9149 if (code == XOR && inner_code == IOR)
9150 {
9151 inner_code = AND;
9152 other = simplify_gen_unary (NOT, GET_MODE (x), other, GET_MODE (x));
9153 }
9154
9155 /* We may be able to continue distributing the result, so call
9156 ourselves recursively on the inner operation before forming the
9157 outer operation, which we return. */
9158 return simplify_gen_binary (inner_code, GET_MODE (x),
9159 apply_distributive_law (tem), other);
9160 }
9161
9162 /* See if X is of the form (* (+ A B) C), and if so convert to
9163 (+ (* A C) (* B C)) and try to simplify.
9164
9165 Most of the time, this results in no change. However, if some of
9166 the operands are the same or inverses of each other, simplifications
9167 will result.
9168
9169 For example, (and (ior A B) (not B)) can occur as the result of
9170 expanding a bit field assignment. When we apply the distributive
9171 law to this, we get (ior (and A (not B)) (and B (not B))),
9172 which then simplifies to (and A (not B)).
9173
9174 Note that no checks happen on the validity of applying the inverse
9175 distributive law. Such a check would be pointless, since it can be
9176 done in the few places where this routine is called.
9177
9178 N is the index of the term that is decomposed (the arithmetic operation,
9179 i.e. (+ A B) in the first example above). !N is the index of the term that
9180 is distributed, i.e. of C in the first example above. */
9181 static rtx
9182 distribute_and_simplify_rtx (rtx x, int n)
9183 {
9184 enum machine_mode mode;
9185 enum rtx_code outer_code, inner_code;
9186 rtx decomposed, distributed, inner_op0, inner_op1, new_op0, new_op1, tmp;
9187
9188 /* Distributivity is not true for floating point as it can change the
9189 value. So we don't do it unless -funsafe-math-optimizations. */
9190 if (FLOAT_MODE_P (GET_MODE (x))
9191 && ! flag_unsafe_math_optimizations)
9192 return NULL_RTX;
9193
9194 decomposed = XEXP (x, n);
9195 if (!ARITHMETIC_P (decomposed))
9196 return NULL_RTX;
9197
9198 mode = GET_MODE (x);
9199 outer_code = GET_CODE (x);
9200 distributed = XEXP (x, !n);
9201
9202 inner_code = GET_CODE (decomposed);
9203 inner_op0 = XEXP (decomposed, 0);
9204 inner_op1 = XEXP (decomposed, 1);
9205
9206 /* Special case (and (xor B C) (not A)), which is equivalent to
9207 (xor (ior A B) (ior A C)) */
9208 if (outer_code == AND && inner_code == XOR && GET_CODE (distributed) == NOT)
9209 {
9210 distributed = XEXP (distributed, 0);
9211 outer_code = IOR;
9212 }
9213
9214 if (n == 0)
9215 {
9216 /* Distribute the second term. */
9217 new_op0 = simplify_gen_binary (outer_code, mode, inner_op0, distributed);
9218 new_op1 = simplify_gen_binary (outer_code, mode, inner_op1, distributed);
9219 }
9220 else
9221 {
9222 /* Distribute the first term. */
9223 new_op0 = simplify_gen_binary (outer_code, mode, distributed, inner_op0);
9224 new_op1 = simplify_gen_binary (outer_code, mode, distributed, inner_op1);
9225 }
9226
9227 tmp = apply_distributive_law (simplify_gen_binary (inner_code, mode,
9228 new_op0, new_op1));
9229 if (GET_CODE (tmp) != outer_code
9230 && rtx_cost (tmp, SET, optimize_this_for_speed_p)
9231 < rtx_cost (x, SET, optimize_this_for_speed_p))
9232 return tmp;
9233
9234 return NULL_RTX;
9235 }
9236 \f
9237 /* Simplify a logical `and' of VAROP with the constant CONSTOP, to be done
9238 in MODE. Return an equivalent form, if different from (and VAROP
9239 (const_int CONSTOP)). Otherwise, return NULL_RTX. */
9240
9241 static rtx
9242 simplify_and_const_int_1 (enum machine_mode mode, rtx varop,
9243 unsigned HOST_WIDE_INT constop)
9244 {
9245 unsigned HOST_WIDE_INT nonzero;
9246 unsigned HOST_WIDE_INT orig_constop;
9247 rtx orig_varop;
9248 int i;
9249
9250 orig_varop = varop;
9251 orig_constop = constop;
9252 if (GET_CODE (varop) == CLOBBER)
9253 return NULL_RTX;
9254
9255 /* Simplify VAROP knowing that we will be only looking at some of the
9256 bits in it.
9257
9258 Note by passing in CONSTOP, we guarantee that the bits not set in
9259 CONSTOP are not significant and will never be examined. We must
9260 ensure that is the case by explicitly masking out those bits
9261 before returning. */
9262 varop = force_to_mode (varop, mode, constop, 0);
9263
9264 /* If VAROP is a CLOBBER, we will fail so return it. */
9265 if (GET_CODE (varop) == CLOBBER)
9266 return varop;
9267
9268 /* If VAROP is a CONST_INT, then we need to apply the mask in CONSTOP
9269 to VAROP and return the new constant. */
9270 if (CONST_INT_P (varop))
9271 return gen_int_mode (INTVAL (varop) & constop, mode);
9272
9273 /* See what bits may be nonzero in VAROP. Unlike the general case of
9274 a call to nonzero_bits, here we don't care about bits outside
9275 MODE. */
9276
9277 nonzero = nonzero_bits (varop, mode) & GET_MODE_MASK (mode);
9278
9279 /* Turn off all bits in the constant that are known to already be zero.
9280 Thus, if the AND isn't needed at all, we will have CONSTOP == NONZERO_BITS
9281 which is tested below. */
9282
9283 constop &= nonzero;
9284
9285 /* If we don't have any bits left, return zero. */
9286 if (constop == 0)
9287 return const0_rtx;
9288
9289 /* If VAROP is a NEG of something known to be zero or 1 and CONSTOP is
9290 a power of two, we can replace this with an ASHIFT. */
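/* E.g. (and (neg X) 4) with X known to be 0 or 1 becomes (ashift X 2),
   since (neg X) is then either 0 or all ones.  (Illustrative example.)  */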
9291 if (GET_CODE (varop) == NEG && nonzero_bits (XEXP (varop, 0), mode) == 1
9292 && (i = exact_log2 (constop)) >= 0)
9293 return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (varop, 0), i);
9294
9295 /* If VAROP is an IOR or XOR, apply the AND to both branches of the IOR
9296 or XOR, then try to apply the distributive law. This may eliminate
9297 operations if either branch can be simplified because of the AND.
9298 It may also make some cases more complex, but those cases probably
9299 won't match a pattern either with or without this. */
9300
9301 if (GET_CODE (varop) == IOR || GET_CODE (varop) == XOR)
9302 return
9303 gen_lowpart
9304 (mode,
9305 apply_distributive_law
9306 (simplify_gen_binary (GET_CODE (varop), GET_MODE (varop),
9307 simplify_and_const_int (NULL_RTX,
9308 GET_MODE (varop),
9309 XEXP (varop, 0),
9310 constop),
9311 simplify_and_const_int (NULL_RTX,
9312 GET_MODE (varop),
9313 XEXP (varop, 1),
9314 constop))));
9315
9316 /* If VAROP is PLUS, and the constant is a mask of low bits, distribute
9317 the AND and see if one of the operands simplifies to zero. If so, we
9318 may eliminate it. */
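/* E.g. (and (plus X (const_int 4)) (const_int 3)): since 4 & 3 is 0, the
   addend cannot change the two low bits, and the result is simply
   (and X 3).  (Illustrative example.)  */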
9319
9320 if (GET_CODE (varop) == PLUS
9321 && exact_log2 (constop + 1) >= 0)
9322 {
9323 rtx o0, o1;
9324
9325 o0 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 0), constop);
9326 o1 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 1), constop);
9327 if (o0 == const0_rtx)
9328 return o1;
9329 if (o1 == const0_rtx)
9330 return o0;
9331 }
9332
9333 /* Make a SUBREG if necessary. If we can't make it, fail. */
9334 varop = gen_lowpart (mode, varop);
9335 if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
9336 return NULL_RTX;
9337
9338 /* If we are only masking insignificant bits, return VAROP. */
9339 if (constop == nonzero)
9340 return varop;
9341
9342 if (varop == orig_varop && constop == orig_constop)
9343 return NULL_RTX;
9344
9345 /* Otherwise, return an AND. */
9346 return simplify_gen_binary (AND, mode, varop, gen_int_mode (constop, mode));
9347 }
9348
9349
9350 /* We have X, a logical `and' of VAROP with the constant CONSTOP, to be done
9351 in MODE.
9352
9353 Return an equivalent form, if different from X. Otherwise, return X. If
9354 X is zero, we are to always construct the equivalent form. */
9355
9356 static rtx
9357 simplify_and_const_int (rtx x, enum machine_mode mode, rtx varop,
9358 unsigned HOST_WIDE_INT constop)
9359 {
9360 rtx tem = simplify_and_const_int_1 (mode, varop, constop);
9361 if (tem)
9362 return tem;
9363
9364 if (!x)
9365 x = simplify_gen_binary (AND, GET_MODE (varop), varop,
9366 gen_int_mode (constop, mode));
9367 if (GET_MODE (x) != mode)
9368 x = gen_lowpart (mode, x);
9369 return x;
9370 }
9371 \f
9372 /* Given a REG, X, compute which bits in X can be nonzero.
9373 We don't care about bits outside of those defined in MODE.
9374
9375 For most X this is simply GET_MODE_MASK (GET_MODE (X)), but if X is
9376 a shift, AND, or zero_extract, we can do better. */
9377
9378 static rtx
9379 reg_nonzero_bits_for_combine (const_rtx x, enum machine_mode mode,
9380 const_rtx known_x ATTRIBUTE_UNUSED,
9381 enum machine_mode known_mode ATTRIBUTE_UNUSED,
9382 unsigned HOST_WIDE_INT known_ret ATTRIBUTE_UNUSED,
9383 unsigned HOST_WIDE_INT *nonzero)
9384 {
9385 rtx tem;
9386 reg_stat_type *rsp;
9387
9388 /* If X is a register whose nonzero bits value is current, use it.
9389 Otherwise, if X is a register whose value we can find, use that
9390 value. Otherwise, use the previously-computed global nonzero bits
9391 for this register. */
9392
9393 rsp = VEC_index (reg_stat_type, reg_stat, REGNO (x));
9394 if (rsp->last_set_value != 0
9395 && (rsp->last_set_mode == mode
9396 || (GET_MODE_CLASS (rsp->last_set_mode) == MODE_INT
9397 && GET_MODE_CLASS (mode) == MODE_INT))
9398 && ((rsp->last_set_label >= label_tick_ebb_start
9399 && rsp->last_set_label < label_tick)
9400 || (rsp->last_set_label == label_tick
9401 && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
9402 || (REGNO (x) >= FIRST_PSEUDO_REGISTER
9403 && REG_N_SETS (REGNO (x)) == 1
9404 && !REGNO_REG_SET_P
9405 (DF_LR_IN (ENTRY_BLOCK_PTR->next_bb), REGNO (x)))))
9406 {
9407 *nonzero &= rsp->last_set_nonzero_bits;
9408 return NULL;
9409 }
9410
9411 tem = get_last_value (x);
9412
9413 if (tem)
9414 {
9415 #ifdef SHORT_IMMEDIATES_SIGN_EXTEND
9416 /* If X is narrower than MODE and TEM is a non-negative
9417 constant that would appear negative in the mode of X,
9418 sign-extend it for use in reg_nonzero_bits because some
9419 machines (maybe most) will actually do the sign-extension
9420 and this is the conservative approach.
9421
9422 ??? For 2.5, try to tighten up the MD files in this regard
9423 instead of this kludge. */
9424
9425 if (GET_MODE_BITSIZE (GET_MODE (x)) < GET_MODE_BITSIZE (mode)
9426 && CONST_INT_P (tem)
9427 && INTVAL (tem) > 0
9428 && 0 != (UINTVAL (tem)
9429 & ((unsigned HOST_WIDE_INT) 1
9430 << (GET_MODE_BITSIZE (GET_MODE (x)) - 1))))
9431 tem = GEN_INT (UINTVAL (tem)
9432 | ((unsigned HOST_WIDE_INT) (-1)
9433 << GET_MODE_BITSIZE (GET_MODE (x))));
9434 #endif
9435 return tem;
9436 }
9437 else if (nonzero_sign_valid && rsp->nonzero_bits)
9438 {
9439 unsigned HOST_WIDE_INT mask = rsp->nonzero_bits;
9440
9441 if (GET_MODE_BITSIZE (GET_MODE (x)) < GET_MODE_BITSIZE (mode))
9442 /* We don't know anything about the upper bits. */
9443 mask |= GET_MODE_MASK (mode) ^ GET_MODE_MASK (GET_MODE (x));
9444 *nonzero &= mask;
9445 }
9446
9447 return NULL;
9448 }
9449
9450 /* Return the number of bits at the high-order end of X that are known to
9451 be equal to the sign bit. X will be used in mode MODE; if MODE is
9452 VOIDmode, X will be used in its own mode. The returned value will always
9453 be between 1 and the number of bits in MODE. */
9454
9455 static rtx
9456 reg_num_sign_bit_copies_for_combine (const_rtx x, enum machine_mode mode,
9457 const_rtx known_x ATTRIBUTE_UNUSED,
9458 enum machine_mode known_mode
9459 ATTRIBUTE_UNUSED,
9460 unsigned int known_ret ATTRIBUTE_UNUSED,
9461 unsigned int *result)
9462 {
9463 rtx tem;
9464 reg_stat_type *rsp;
9465
9466 rsp = VEC_index (reg_stat_type, reg_stat, REGNO (x));
9467 if (rsp->last_set_value != 0
9468 && rsp->last_set_mode == mode
9469 && ((rsp->last_set_label >= label_tick_ebb_start
9470 && rsp->last_set_label < label_tick)
9471 || (rsp->last_set_label == label_tick
9472 && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
9473 || (REGNO (x) >= FIRST_PSEUDO_REGISTER
9474 && REG_N_SETS (REGNO (x)) == 1
9475 && !REGNO_REG_SET_P
9476 (DF_LR_IN (ENTRY_BLOCK_PTR->next_bb), REGNO (x)))))
9477 {
9478 *result = rsp->last_set_sign_bit_copies;
9479 return NULL;
9480 }
9481
9482 tem = get_last_value (x);
9483 if (tem != 0)
9484 return tem;
9485
9486 if (nonzero_sign_valid && rsp->sign_bit_copies != 0
9487 && GET_MODE_BITSIZE (GET_MODE (x)) == GET_MODE_BITSIZE (mode))
9488 *result = rsp->sign_bit_copies;
9489
9490 return NULL;
9491 }
9492 \f
9493 /* Return the number of "extended" bits there are in X, when interpreted
9494 as a quantity in MODE whose signedness is indicated by UNSIGNEDP. For
9495 unsigned quantities, this is the number of high-order zero bits.
9496 For signed quantities, this is the number of copies of the sign bit
9497 minus 1. In both cases, this function returns the number of "spare"
9498 bits. For example, if two quantities for which this function returns
9499 at least 1 are added, the addition is known not to overflow.
9500
9501 This function will always return 0 unless called during combine, which
9502 implies that it must be called from a define_split. */
9503
9504 unsigned int
9505 extended_count (const_rtx x, enum machine_mode mode, int unsignedp)
9506 {
9507 if (nonzero_sign_valid == 0)
9508 return 0;
9509
9510 return (unsignedp
9511 ? (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
9512 ? (unsigned int) (GET_MODE_BITSIZE (mode) - 1
9513 - floor_log2 (nonzero_bits (x, mode)))
9514 : 0)
9515 : num_sign_bit_copies (x, mode) - 1);
9516 }
9517 \f
9518 /* This function is called from `simplify_shift_const' to merge two
9519 outer operations. Specifically, we have already found that we need
9520 to perform operation *POP0 with constant *PCONST0 at the outermost
9521 position. We would now like to also perform OP1 with constant CONST1
9522 (with *POP0 being done last).
9523
9524 Return 1 if we can do the operation and update *POP0 and *PCONST0 with
9525 the resulting operation. *PCOMP_P is set to 1 if we would need to
9526 complement the innermost operand, otherwise it is unchanged.
9527
9528 MODE is the mode in which the operation will be done. No bits outside
9529 the width of this mode matter. It is assumed that the width of this mode
9530 is smaller than or equal to HOST_BITS_PER_WIDE_INT.
9531
9532 If *POP0 or OP1 is UNKNOWN, it means no operation is required. Only NEG, PLUS,
9533 IOR, XOR, and AND are supported. We may set *POP0 to SET if the proper
9534 result is simply *PCONST0.
9535
9536 If the resulting operation cannot be expressed as one operation, we
9537 return 0 and do not change *POP0, *PCONST0, and *PCOMP_P. */
9538
9539 static int
9540 merge_outer_ops (enum rtx_code *pop0, HOST_WIDE_INT *pconst0, enum rtx_code op1, HOST_WIDE_INT const1, enum machine_mode mode, int *pcomp_p)
9541 {
9542 enum rtx_code op0 = *pop0;
9543 HOST_WIDE_INT const0 = *pconst0;
9544
9545 const0 &= GET_MODE_MASK (mode);
9546 const1 &= GET_MODE_MASK (mode);
9547
9548 /* If OP0 is an AND, clear unimportant bits in CONST1. */
9549 if (op0 == AND)
9550 const1 &= const0;
9551
9552 /* If OP0 or OP1 is UNKNOWN, this is easy. Similarly if they are the same or
9553 if OP0 is SET. */
9554
9555 if (op1 == UNKNOWN || op0 == SET)
9556 return 1;
9557
9558 else if (op0 == UNKNOWN)
9559 op0 = op1, const0 = const1;
9560
9561 else if (op0 == op1)
9562 {
9563 switch (op0)
9564 {
9565 case AND:
9566 const0 &= const1;
9567 break;
9568 case IOR:
9569 const0 |= const1;
9570 break;
9571 case XOR:
9572 const0 ^= const1;
9573 break;
9574 case PLUS:
9575 const0 += const1;
9576 break;
9577 case NEG:
9578 op0 = UNKNOWN;
9579 break;
9580 default:
9581 break;
9582 }
9583 }
9584
9585 /* Otherwise, if either is a PLUS or NEG, we can't do anything. */
9586 else if (op0 == PLUS || op1 == PLUS || op0 == NEG || op1 == NEG)
9587 return 0;
9588
9589 /* If the two constants aren't the same, we can't do anything. The
9590 remaining six cases can all be done. */
9591 else if (const0 != const1)
9592 return 0;
9593
9594 else
9595 switch (op0)
9596 {
9597 case IOR:
9598 if (op1 == AND)
9599 /* (a & b) | b == b */
9600 op0 = SET;
9601 else /* op1 == XOR */
9602 /* (a ^ b) | b == a | b */
9603 {;}
9604 break;
9605
9606 case XOR:
9607 if (op1 == AND)
9608 /* (a & b) ^ b == (~a) & b */
9609 op0 = AND, *pcomp_p = 1;
9610 else /* op1 == IOR */
9611 /* (a | b) ^ b == a & ~b */
9612 op0 = AND, const0 = ~const0;
9613 break;
9614
9615 case AND:
9616 if (op1 == IOR)
9617 /* (a | b) & b == b */
9618 op0 = SET;
9619 else /* op1 == XOR */
9620 /* (a ^ b) & b == (~a) & b */
9621 *pcomp_p = 1;
9622 break;
9623 default:
9624 break;
9625 }
9626
9627 /* Check for NO-OP cases. */
9628 const0 &= GET_MODE_MASK (mode);
9629 if (const0 == 0
9630 && (op0 == IOR || op0 == XOR || op0 == PLUS))
9631 op0 = UNKNOWN;
9632 else if (const0 == 0 && op0 == AND)
9633 op0 = SET;
9634 else if ((unsigned HOST_WIDE_INT) const0 == GET_MODE_MASK (mode)
9635 && op0 == AND)
9636 op0 = UNKNOWN;
9637
9638 *pop0 = op0;
9639
9640 /* ??? Slightly redundant with the above mask, but not entirely.
9641 Moving this above means we'd have to sign-extend the mode mask
9642 for the final test. */
9643 if (op0 != UNKNOWN && op0 != NEG)
9644 *pconst0 = trunc_int_for_mode (const0, mode);
9645
9646 return 1;
9647 }
9648 \f
9649 /* A helper to simplify_shift_const_1 to determine the mode we can perform
9650 the shift in. The original shift operation CODE is performed on OP in
9651 ORIG_MODE. Return the wider mode MODE if we can perform the operation
9652 in that mode. Return ORIG_MODE otherwise. We can also assume that the
9653 result of the shift is subject to operation OUTER_CODE with operand
9654 OUTER_CONST. */
9655
9656 static enum machine_mode
9657 try_widen_shift_mode (enum rtx_code code, rtx op, int count,
9658 enum machine_mode orig_mode, enum machine_mode mode,
9659 enum rtx_code outer_code, HOST_WIDE_INT outer_const)
9660 {
9661 if (orig_mode == mode)
9662 return mode;
9663 gcc_assert (GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (orig_mode));
9664
9665 /* In general we can't perform the shift in a wider mode for right shifts and rotates. */
9666 switch (code)
9667 {
9668 case ASHIFTRT:
9669 /* We can still widen if the bits brought in from the left are identical
9670 to the sign bit of ORIG_MODE. */
9671 if (num_sign_bit_copies (op, mode)
9672 > (unsigned) (GET_MODE_BITSIZE (mode)
9673 - GET_MODE_BITSIZE (orig_mode)))
9674 return mode;
9675 return orig_mode;
9676
9677 case LSHIFTRT:
9678 /* Similarly here but with zero bits. */
9679 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
9680 && (nonzero_bits (op, mode) & ~GET_MODE_MASK (orig_mode)) == 0)
9681 return mode;
9682
9683 /* We can also widen if the bits brought in will be masked off. This
9684 operation is performed in ORIG_MODE. */
9685 if (outer_code == AND)
9686 {
9687 int care_bits = low_bitmask_len (orig_mode, outer_const);
9688
9689 if (care_bits >= 0
9690 && GET_MODE_BITSIZE (orig_mode) - care_bits >= count)
9691 return mode;
9692 }
9693 /* fall through */
9694
9695 case ROTATE:
9696 return orig_mode;
9697
9698 case ROTATERT:
9699 gcc_unreachable ();
9700
9701 default:
9702 return mode;
9703 }
9704 }
9705
9706 /* Simplify a shift of VAROP by COUNT bits. CODE says what kind of shift.
9707 The result of the shift is RESULT_MODE. Return NULL_RTX if we cannot
9708 simplify it. Otherwise, return a simplified value.
9709
9710 The shift is normally computed in the widest mode we find in VAROP, as
9711 long as it isn't a different number of words than RESULT_MODE. Exceptions
9712 are ASHIFTRT and ROTATE, which are always done in their original mode. */
9713
9714 static rtx
9715 simplify_shift_const_1 (enum rtx_code code, enum machine_mode result_mode,
9716 rtx varop, int orig_count)
9717 {
9718 enum rtx_code orig_code = code;
9719 rtx orig_varop = varop;
9720 int count;
9721 enum machine_mode mode = result_mode;
9722 enum machine_mode shift_mode, tmode;
9723 unsigned int mode_words
9724 = (GET_MODE_SIZE (mode) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD;
9725 /* We form (outer_op (code varop count) (outer_const)). */
9726 enum rtx_code outer_op = UNKNOWN;
9727 HOST_WIDE_INT outer_const = 0;
9728 int complement_p = 0;
9729 rtx new_rtx, x;
9730
9731 /* Make sure to truncate the "natural" shift on the way in. We don't
9732 want to do this inside the loop as it makes it more difficult to
9733 combine shifts. */
9734 if (SHIFT_COUNT_TRUNCATED)
9735 orig_count &= GET_MODE_BITSIZE (mode) - 1;
9736
9737 /* If we were given an invalid count, don't do anything except exactly
9738 what was requested. */
9739
9740 if (orig_count < 0 || orig_count >= (int) GET_MODE_BITSIZE (mode))
9741 return NULL_RTX;
9742
9743 count = orig_count;
9744
9745 /* Unless one of the branches of the `if' in this loop does a `continue',
9746 we will `break' the loop after the `if'. */
9747
9748 while (count != 0)
9749 {
9750 /* If we have an operand of (clobber (const_int 0)), fail. */
9751 if (GET_CODE (varop) == CLOBBER)
9752 return NULL_RTX;
9753
9754 /* Convert ROTATERT to ROTATE. */
9755 if (code == ROTATERT)
9756 {
9757 unsigned int bitsize = GET_MODE_BITSIZE (result_mode);
9758 code = ROTATE;
9759 if (VECTOR_MODE_P (result_mode))
9760 count = bitsize / GET_MODE_NUNITS (result_mode) - count;
9761 else
9762 count = bitsize - count;
9763 }
9764
9765 shift_mode = try_widen_shift_mode (code, varop, count, result_mode,
9766 mode, outer_op, outer_const);
9767
9768 /* Handle cases where the count is greater than the size of the mode
9769 minus 1. For ASHIFT, use the size minus one as the count (this can
9770 occur when simplifying (lshiftrt (ashiftrt ..))). For rotates,
9771 take the count modulo the size. For other shifts, the result is
9772 zero.
9773
9774 Since these shifts are being produced by the compiler by combining
9775 multiple operations, each of which are defined, we know what the
9776 result is supposed to be. */
9777
9778 if (count > (GET_MODE_BITSIZE (shift_mode) - 1))
9779 {
9780 if (code == ASHIFTRT)
9781 count = GET_MODE_BITSIZE (shift_mode) - 1;
9782 else if (code == ROTATE || code == ROTATERT)
9783 count %= GET_MODE_BITSIZE (shift_mode);
9784 else
9785 {
9786 /* We can't simply return zero because there may be an
9787 outer op. */
9788 varop = const0_rtx;
9789 count = 0;
9790 break;
9791 }
9792 }
9793
9794 /* If we discovered we had to complement VAROP, leave. Making a NOT
9795 here would cause an infinite loop. */
9796 if (complement_p)
9797 break;
9798
9799 /* An arithmetic right shift of a quantity known to be -1 or 0
9800 is a no-op. */
9801 if (code == ASHIFTRT
9802 && (num_sign_bit_copies (varop, shift_mode)
9803 == GET_MODE_BITSIZE (shift_mode)))
9804 {
9805 count = 0;
9806 break;
9807 }
9808
9809 /* If we are doing an arithmetic right shift and discarding all but
9810 the sign bit copies, this is equivalent to doing a shift by the
9811 bitsize minus one. Convert it into that shift because it will often
9812 allow other simplifications. */
9813
9814 if (code == ASHIFTRT
9815 && (count + num_sign_bit_copies (varop, shift_mode)
9816 >= GET_MODE_BITSIZE (shift_mode)))
9817 count = GET_MODE_BITSIZE (shift_mode) - 1;
9818
9819 /* We simplify the tests below and elsewhere by converting
9820 ASHIFTRT to LSHIFTRT if we know the sign bit is clear.
9821 `make_compound_operation' will convert it to an ASHIFTRT for
9822 those machines (such as VAX) that don't have an LSHIFTRT. */
9823 if (GET_MODE_BITSIZE (shift_mode) <= HOST_BITS_PER_WIDE_INT
9824 && code == ASHIFTRT
9825 && ((nonzero_bits (varop, shift_mode)
9826 & ((unsigned HOST_WIDE_INT) 1
9827 << (GET_MODE_BITSIZE (shift_mode) - 1))) == 0))
9828 code = LSHIFTRT;
9829
9830 if (((code == LSHIFTRT
9831 && GET_MODE_BITSIZE (shift_mode) <= HOST_BITS_PER_WIDE_INT
9832 && !(nonzero_bits (varop, shift_mode) >> count))
9833 || (code == ASHIFT
9834 && GET_MODE_BITSIZE (shift_mode) <= HOST_BITS_PER_WIDE_INT
9835 && !((nonzero_bits (varop, shift_mode) << count)
9836 & GET_MODE_MASK (shift_mode))))
9837 && !side_effects_p (varop))
9838 varop = const0_rtx;
9839
9840 switch (GET_CODE (varop))
9841 {
9842 case SIGN_EXTEND:
9843 case ZERO_EXTEND:
9844 case SIGN_EXTRACT:
9845 case ZERO_EXTRACT:
9846 new_rtx = expand_compound_operation (varop);
9847 if (new_rtx != varop)
9848 {
9849 varop = new_rtx;
9850 continue;
9851 }
9852 break;
9853
9854 case MEM:
9855 /* If we have (xshiftrt (mem ...) C) and C is MODE_WIDTH
9856 minus the width of a smaller mode, we can do this with a
9857 SIGN_EXTEND or ZERO_EXTEND from the narrower memory location. */
9858 if ((code == ASHIFTRT || code == LSHIFTRT)
9859 && ! mode_dependent_address_p (XEXP (varop, 0))
9860 && ! MEM_VOLATILE_P (varop)
9861 && (tmode = mode_for_size (GET_MODE_BITSIZE (mode) - count,
9862 MODE_INT, 1)) != BLKmode)
9863 {
9864 new_rtx = adjust_address_nv (varop, tmode,
9865 BYTES_BIG_ENDIAN ? 0
9866 : count / BITS_PER_UNIT);
9867
9868 varop = gen_rtx_fmt_e (code == ASHIFTRT ? SIGN_EXTEND
9869 : ZERO_EXTEND, mode, new_rtx);
9870 count = 0;
9871 continue;
9872 }
9873 break;
9874
9875 case SUBREG:
9876 /* If VAROP is a SUBREG, strip it as long as the inner operand has
9877 the same number of words as what we've seen so far. Then store
9878 the widest mode in MODE. */
9879 if (subreg_lowpart_p (varop)
9880 && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (varop)))
9881 > GET_MODE_SIZE (GET_MODE (varop)))
9882 && (unsigned int) ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (varop)))
9883 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
9884 == mode_words
9885 && GET_MODE_CLASS (GET_MODE (varop)) == MODE_INT
9886 && GET_MODE_CLASS (GET_MODE (SUBREG_REG (varop))) == MODE_INT)
9887 {
9888 varop = SUBREG_REG (varop);
9889 if (GET_MODE_SIZE (GET_MODE (varop)) > GET_MODE_SIZE (mode))
9890 mode = GET_MODE (varop);
9891 continue;
9892 }
9893 break;
9894
9895 case MULT:
9896 /* Some machines use MULT instead of ASHIFT because MULT
9897 is cheaper. But it is still better on those machines to
9898 merge two shifts into one. */
9899 if (CONST_INT_P (XEXP (varop, 1))
9900 && exact_log2 (UINTVAL (XEXP (varop, 1))) >= 0)
9901 {
9902 varop
9903 = simplify_gen_binary (ASHIFT, GET_MODE (varop),
9904 XEXP (varop, 0),
9905 GEN_INT (exact_log2 (
9906 UINTVAL (XEXP (varop, 1)))));
9907 continue;
9908 }
9909 break;
9910
9911 case UDIV:
9912 /* Similar, for when divides are cheaper. */
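/* E.g. (lshiftrt (udiv X 4) 2) becomes (lshiftrt (lshiftrt X 2) 2),
   which the nested-shift handling later merges into (lshiftrt X 4);
   this is valid because the division is unsigned and 4 is an exact
   power of two.  */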
9913 if (CONST_INT_P (XEXP (varop, 1))
9914 && exact_log2 (UINTVAL (XEXP (varop, 1))) >= 0)
9915 {
9916 varop
9917 = simplify_gen_binary (LSHIFTRT, GET_MODE (varop),
9918 XEXP (varop, 0),
9919 GEN_INT (exact_log2 (
9920 UINTVAL (XEXP (varop, 1)))));
9921 continue;
9922 }
9923 break;
9924
9925 case ASHIFTRT:
9926 /* If we are extracting just the sign bit of an arithmetic
9927 right shift, that shift is not needed. However, the sign
9928 bit of a wider mode may be different from what would be
9929 interpreted as the sign bit in a narrower mode, so, if
9930 the result is narrower, don't discard the shift. */
9931 if (code == LSHIFTRT
9932 && count == (GET_MODE_BITSIZE (result_mode) - 1)
9933 && (GET_MODE_BITSIZE (result_mode)
9934 >= GET_MODE_BITSIZE (GET_MODE (varop))))
9935 {
9936 varop = XEXP (varop, 0);
9937 continue;
9938 }
9939
9940 /* ... fall through ... */
9941
9942 case LSHIFTRT:
9943 case ASHIFT:
9944 case ROTATE:
9945 /* Here we have two nested shifts. The result is usually the
9946 AND of a new shift with a mask. We compute the result below. */
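/* E.g. (ashift (lshiftrt X 3) 4) in a 32-bit mode, with nothing known
   about X, becomes (and (ashift X 1) (const_int 0xfffffff0)): the
   opposing counts are subtracted and the mask records which low bits
   the inner shift had cleared; the AND is handed to merge_outer_ops
   as an outer operation.  */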
9947 if (CONST_INT_P (XEXP (varop, 1))
9948 && INTVAL (XEXP (varop, 1)) >= 0
9949 && INTVAL (XEXP (varop, 1)) < GET_MODE_BITSIZE (GET_MODE (varop))
9950 && GET_MODE_BITSIZE (result_mode) <= HOST_BITS_PER_WIDE_INT
9951 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
9952 && !VECTOR_MODE_P (result_mode))
9953 {
9954 enum rtx_code first_code = GET_CODE (varop);
9955 unsigned int first_count = INTVAL (XEXP (varop, 1));
9956 unsigned HOST_WIDE_INT mask;
9957 rtx mask_rtx;
9958
9959 /* We have one common special case. We can't do any merging if
9960 the inner code is an ASHIFTRT of a smaller mode. However, if
9961 we have (ashift:M1 (subreg:M1 (ashiftrt:M2 FOO C1) 0) C2)
9962 with C2 == GET_MODE_BITSIZE (M1) - GET_MODE_BITSIZE (M2),
9963 we can convert it to
9964 (ashiftrt:M1 (ashift:M1 (and:M1 (subreg:M1 FOO 0) C3) C2) C1).
9965 This simplifies certain SIGN_EXTEND operations. */
9966 if (code == ASHIFT && first_code == ASHIFTRT
9967 && count == (GET_MODE_BITSIZE (result_mode)
9968 - GET_MODE_BITSIZE (GET_MODE (varop))))
9969 {
9970 /* C3 has the low-order C1 bits zero. */
9971
9972 mask = GET_MODE_MASK (mode)
9973 & ~(((unsigned HOST_WIDE_INT) 1 << first_count) - 1);
9974
9975 varop = simplify_and_const_int (NULL_RTX, result_mode,
9976 XEXP (varop, 0), mask);
9977 varop = simplify_shift_const (NULL_RTX, ASHIFT, result_mode,
9978 varop, count);
9979 count = first_count;
9980 code = ASHIFTRT;
9981 continue;
9982 }
9983
9984 /* If this was (ashiftrt (ashift foo C1) C2) and FOO has more
9985 than C1 high-order bits equal to the sign bit, we can convert
9986 this to either an ASHIFT or an ASHIFTRT depending on the
9987 two counts.
9988
9989 We cannot do this if VAROP's mode is not SHIFT_MODE. */
9990
9991 if (code == ASHIFTRT && first_code == ASHIFT
9992 && GET_MODE (varop) == shift_mode
9993 && (num_sign_bit_copies (XEXP (varop, 0), shift_mode)
9994 > first_count))
9995 {
9996 varop = XEXP (varop, 0);
9997 count -= first_count;
9998 if (count < 0)
9999 {
10000 count = -count;
10001 code = ASHIFT;
10002 }
10003
10004 continue;
10005 }
10006
10007 /* There are some cases we can't do. If CODE is ASHIFTRT,
10008 we can only do this if FIRST_CODE is also ASHIFTRT.
10009
10010 We can't do the case when CODE is ROTATE and FIRST_CODE is
10011 ASHIFTRT.
10012
10013 If the mode of this shift is not the mode of the outer shift,
10014 we can't do this if either shift is a right shift or ROTATE.
10015
10016 Finally, we can't do any of these if the mode is too wide
10017 unless the codes are the same.
10018
10019 Handle the case where the shift codes are the same
10020 first. */
10021
10022 if (code == first_code)
10023 {
10024 if (GET_MODE (varop) != result_mode
10025 && (code == ASHIFTRT || code == LSHIFTRT
10026 || code == ROTATE))
10027 break;
10028
10029 count += first_count;
10030 varop = XEXP (varop, 0);
10031 continue;
10032 }
10033
10034 if (code == ASHIFTRT
10035 || (code == ROTATE && first_code == ASHIFTRT)
10036 || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT
10037 || (GET_MODE (varop) != result_mode
10038 && (first_code == ASHIFTRT || first_code == LSHIFTRT
10039 || first_code == ROTATE
10040 || code == ROTATE)))
10041 break;
10042
10043 /* To compute the mask to apply after the shift, shift the
10044 nonzero bits of the inner shift the same way the
10045 outer shift will. */
10046
10047 mask_rtx = GEN_INT (nonzero_bits (varop, GET_MODE (varop)));
10048
10049 mask_rtx
10050 = simplify_const_binary_operation (code, result_mode, mask_rtx,
10051 GEN_INT (count));
10052
10053 /* Give up if we can't compute an outer operation to use. */
10054 if (mask_rtx == 0
10055 || !CONST_INT_P (mask_rtx)
10056 || ! merge_outer_ops (&outer_op, &outer_const, AND,
10057 INTVAL (mask_rtx),
10058 result_mode, &complement_p))
10059 break;
10060
10061 /* If the shifts are in the same direction, we add the
10062 counts. Otherwise, we subtract them. */
10063 if ((code == ASHIFTRT || code == LSHIFTRT)
10064 == (first_code == ASHIFTRT || first_code == LSHIFTRT))
10065 count += first_count;
10066 else
10067 count -= first_count;
10068
10069 /* If COUNT is positive, the new shift is usually CODE,
10070 except for the two cases below, in which case it is
10071 FIRST_CODE. If the count is negative, FIRST_CODE should
10072 always be used.  */
10073 if (count > 0
10074 && ((first_code == ROTATE && code == ASHIFT)
10075 || (first_code == ASHIFTRT && code == LSHIFTRT)))
10076 code = first_code;
10077 else if (count < 0)
10078 code = first_code, count = -count;
10079
10080 varop = XEXP (varop, 0);
10081 continue;
10082 }
10083
10084 /* If we have (A << B << C) for any shift, we can convert this to
10085 (A << C << B). This wins if A is a constant. Only try this if
10086 B is not a constant. */
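/* E.g. (ashift (ashift (const_int 1) B) 3) becomes
   (ashift (const_int 8) B); both orderings compute 1 << (B + 3), and
   folding the constant shift first may enable further simplification.  */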
10087
10088 else if (GET_CODE (varop) == code
10089 && CONST_INT_P (XEXP (varop, 0))
10090 && !CONST_INT_P (XEXP (varop, 1)))
10091 {
10092 rtx new_rtx = simplify_const_binary_operation (code, mode,
10093 XEXP (varop, 0),
10094 GEN_INT (count));
10095 varop = gen_rtx_fmt_ee (code, mode, new_rtx, XEXP (varop, 1));
10096 count = 0;
10097 continue;
10098 }
10099 break;
10100
10101 case NOT:
10102 if (VECTOR_MODE_P (mode))
10103 break;
10104
10105 /* Make this fit the case below. */
10106 varop = gen_rtx_XOR (mode, XEXP (varop, 0),
10107 GEN_INT (GET_MODE_MASK (mode)));
10108 continue;
10109
10110 case IOR:
10111 case AND:
10112 case XOR:
10113 /* If we have (xshiftrt (ior (plus X (const_int -1)) X) C)
10114 with C the width of VAROP's mode minus 1 and the shift is logical if
10115 STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
10116 we have an (le X 0) operation. If we have an arithmetic shift
10117 and STORE_FLAG_VALUE is 1 or we have a logical shift with
10118 STORE_FLAG_VALUE of -1, we have a (neg (le X 0)) operation. */
10119
10120 if (GET_CODE (varop) == IOR && GET_CODE (XEXP (varop, 0)) == PLUS
10121 && XEXP (XEXP (varop, 0), 1) == constm1_rtx
10122 && (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
10123 && (code == LSHIFTRT || code == ASHIFTRT)
10124 && count == (GET_MODE_BITSIZE (GET_MODE (varop)) - 1)
10125 && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
10126 {
10127 count = 0;
10128 varop = gen_rtx_LE (GET_MODE (varop), XEXP (varop, 1),
10129 const0_rtx);
10130
10131 if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
10132 varop = gen_rtx_NEG (GET_MODE (varop), varop);
10133
10134 continue;
10135 }
10136
10137 /* If we have (shift (logical)), move the logical to the outside
10138 to allow it to possibly combine with another logical and the
10139 shift to combine with another shift. This also canonicalizes to
10140 what a ZERO_EXTRACT looks like. Also, some machines have
10141 (and (shift)) insns. */
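/* E.g. (lshiftrt (and X (const_int 0xf0)) 4) becomes (lshiftrt X 4)
   with an outer (and ... 0xf) recorded through merge_outer_ops, i.e.
   the canonical (and (lshiftrt X 4) (const_int 0xf)) form.  */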
10142
10143 if (CONST_INT_P (XEXP (varop, 1))
10144 /* We can't do this if we have (ashiftrt (xor)) and the
10145 constant has its sign bit set in shift_mode. */
10146 && !(code == ASHIFTRT && GET_CODE (varop) == XOR
10147 && 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
10148 shift_mode))
10149 && (new_rtx = simplify_const_binary_operation (code, result_mode,
10150 XEXP (varop, 1),
10151 GEN_INT (count))) != 0
10152 && CONST_INT_P (new_rtx)
10153 && merge_outer_ops (&outer_op, &outer_const, GET_CODE (varop),
10154 INTVAL (new_rtx), result_mode, &complement_p))
10155 {
10156 varop = XEXP (varop, 0);
10157 continue;
10158 }
10159
10160 /* If we can't do that, try to simplify the shift in each arm of the
10161 logical expression, make a new logical expression, and apply
10162 the inverse distributive law. This also can't be done
10163 for some (ashiftrt (xor)). */
10164 if (CONST_INT_P (XEXP (varop, 1))
10165 && !(code == ASHIFTRT && GET_CODE (varop) == XOR
10166 && 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
10167 shift_mode)))
10168 {
10169 rtx lhs = simplify_shift_const (NULL_RTX, code, shift_mode,
10170 XEXP (varop, 0), count);
10171 rtx rhs = simplify_shift_const (NULL_RTX, code, shift_mode,
10172 XEXP (varop, 1), count);
10173
10174 varop = simplify_gen_binary (GET_CODE (varop), shift_mode,
10175 lhs, rhs);
10176 varop = apply_distributive_law (varop);
10177
10178 count = 0;
10179 continue;
10180 }
10181 break;
10182
10183 case EQ:
10184 /* Convert (lshiftrt (eq FOO 0) C) to (xor FOO 1) if STORE_FLAG_VALUE
10185 says that the sign bit can be tested, FOO has mode MODE, C is
10186 GET_MODE_BITSIZE (MODE) - 1, and only the low-order bit of FOO
10187 may be nonzero. */
10188 if (code == LSHIFTRT
10189 && XEXP (varop, 1) == const0_rtx
10190 && GET_MODE (XEXP (varop, 0)) == result_mode
10191 && count == (GET_MODE_BITSIZE (result_mode) - 1)
10192 && GET_MODE_BITSIZE (result_mode) <= HOST_BITS_PER_WIDE_INT
10193 && STORE_FLAG_VALUE == -1
10194 && nonzero_bits (XEXP (varop, 0), result_mode) == 1
10195 && merge_outer_ops (&outer_op, &outer_const, XOR, 1, result_mode,
10196 &complement_p))
10197 {
10198 varop = XEXP (varop, 0);
10199 count = 0;
10200 continue;
10201 }
10202 break;
10203
10204 case NEG:
10205 /* (lshiftrt (neg A) C) where A is either 0 or 1 and C is one less
10206 than the number of bits in the mode is equivalent to A. */
10207 if (code == LSHIFTRT
10208 && count == (GET_MODE_BITSIZE (result_mode) - 1)
10209 && nonzero_bits (XEXP (varop, 0), result_mode) == 1)
10210 {
10211 varop = XEXP (varop, 0);
10212 count = 0;
10213 continue;
10214 }
10215
10216 /* NEG commutes with ASHIFT since it is multiplication. Move the
10217 NEG outside to allow shifts to combine. */
10218 if (code == ASHIFT
10219 && merge_outer_ops (&outer_op, &outer_const, NEG, 0, result_mode,
10220 &complement_p))
10221 {
10222 varop = XEXP (varop, 0);
10223 continue;
10224 }
10225 break;
10226
10227 case PLUS:
10228 /* (lshiftrt (plus A -1) C) where A is either 0 or 1 and C
10229 is one less than the number of bits in the mode is
10230 equivalent to (xor A 1). */
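/* E.g. in a 32-bit mode with A restricted to 0 or 1:
   (1 - 1) >> 31 == 0 == (xor 1 1) and, since 0 - 1 is all ones,
   (0 - 1) >> 31 == 1 == (xor 0 1).  */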
10231 if (code == LSHIFTRT
10232 && count == (GET_MODE_BITSIZE (result_mode) - 1)
10233 && XEXP (varop, 1) == constm1_rtx
10234 && nonzero_bits (XEXP (varop, 0), result_mode) == 1
10235 && merge_outer_ops (&outer_op, &outer_const, XOR, 1, result_mode,
10236 &complement_p))
10237 {
10238 count = 0;
10239 varop = XEXP (varop, 0);
10240 continue;
10241 }
10242
10243 /* If we have (xshiftrt (plus FOO BAR) C), and the only bits
10244 that might be nonzero in BAR are those being shifted out and those
10245 bits are known zero in FOO, we can replace the PLUS with FOO.
10246 Similarly in the other operand order. This code occurs when
10247 we are computing the size of a variable-size array. */
10248
10249 if ((code == ASHIFTRT || code == LSHIFTRT)
10250 && count < HOST_BITS_PER_WIDE_INT
10251 && nonzero_bits (XEXP (varop, 1), result_mode) >> count == 0
10252 && (nonzero_bits (XEXP (varop, 1), result_mode)
10253 & nonzero_bits (XEXP (varop, 0), result_mode)) == 0)
10254 {
10255 varop = XEXP (varop, 0);
10256 continue;
10257 }
10258 else if ((code == ASHIFTRT || code == LSHIFTRT)
10259 && count < HOST_BITS_PER_WIDE_INT
10260 && GET_MODE_BITSIZE (result_mode) <= HOST_BITS_PER_WIDE_INT
10261 && 0 == (nonzero_bits (XEXP (varop, 0), result_mode)
10262 >> count)
10263 && 0 == (nonzero_bits (XEXP (varop, 0), result_mode)
10264 & nonzero_bits (XEXP (varop, 1),
10265 result_mode)))
10266 {
10267 varop = XEXP (varop, 1);
10268 continue;
10269 }
10270
10271 /* (ashift (plus foo C) N) is (plus (ashift foo N) C'). */
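/* E.g. (ashift (plus X 5) 2) becomes (ashift X 2) with an outer
   (plus ... 20), since (X + 5) << 2 == (X << 2) + 20 in any mode.  */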
10272 if (code == ASHIFT
10273 && CONST_INT_P (XEXP (varop, 1))
10274 && (new_rtx = simplify_const_binary_operation (ASHIFT, result_mode,
10275 XEXP (varop, 1),
10276 GEN_INT (count))) != 0
10277 && CONST_INT_P (new_rtx)
10278 && merge_outer_ops (&outer_op, &outer_const, PLUS,
10279 INTVAL (new_rtx), result_mode, &complement_p))
10280 {
10281 varop = XEXP (varop, 0);
10282 continue;
10283 }
10284
10285 /* Check for 'PLUS signbit', which is the canonical form of 'XOR
10286 signbit', and attempt to change the PLUS to an XOR and move it to
10287 the outer operation, as is done above for a shift of a logical
10288 operation in the AND/IOR/XOR case. See the logical handling
10289 above for the reasoning. */
10290 if (code == LSHIFTRT
10291 && CONST_INT_P (XEXP (varop, 1))
10292 && mode_signbit_p (result_mode, XEXP (varop, 1))
10293 && (new_rtx = simplify_const_binary_operation (code, result_mode,
10294 XEXP (varop, 1),
10295 GEN_INT (count))) != 0
10296 && CONST_INT_P (new_rtx)
10297 && merge_outer_ops (&outer_op, &outer_const, XOR,
10298 INTVAL (new_rtx), result_mode, &complement_p))
10299 {
10300 varop = XEXP (varop, 0);
10301 continue;
10302 }
10303
10304 break;
10305
10306 case MINUS:
10307 /* If we have (xshiftrt (minus (ashiftrt X C) X) C)
10308 with C the width of VAROP's mode minus 1 and the shift is logical if
10309 STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
10310 we have a (gt X 0) operation. If the shift is arithmetic with
10311 STORE_FLAG_VALUE of 1 or logical with STORE_FLAG_VALUE == -1,
10312 we have a (neg (gt X 0)) operation. */
10313
10314 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
10315 && GET_CODE (XEXP (varop, 0)) == ASHIFTRT
10316 && count == (GET_MODE_BITSIZE (GET_MODE (varop)) - 1)
10317 && (code == LSHIFTRT || code == ASHIFTRT)
10318 && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
10319 && INTVAL (XEXP (XEXP (varop, 0), 1)) == count
10320 && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
10321 {
10322 count = 0;
10323 varop = gen_rtx_GT (GET_MODE (varop), XEXP (varop, 1),
10324 const0_rtx);
10325
10326 if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
10327 varop = gen_rtx_NEG (GET_MODE (varop), varop);
10328
10329 continue;
10330 }
10331 break;
10332
10333 case TRUNCATE:
10334 /* Change (lshiftrt (truncate (lshiftrt))) to (truncate (lshiftrt))
10335 if the truncate does not affect the value. */
10336 if (code == LSHIFTRT
10337 && GET_CODE (XEXP (varop, 0)) == LSHIFTRT
10338 && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
10339 && (INTVAL (XEXP (XEXP (varop, 0), 1))
10340 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (varop, 0)))
10341 - GET_MODE_BITSIZE (GET_MODE (varop)))))
10342 {
10343 rtx varop_inner = XEXP (varop, 0);
10344
10345 varop_inner
10346 = gen_rtx_LSHIFTRT (GET_MODE (varop_inner),
10347 XEXP (varop_inner, 0),
10348 GEN_INT
10349 (count + INTVAL (XEXP (varop_inner, 1))));
10350 varop = gen_rtx_TRUNCATE (GET_MODE (varop), varop_inner);
10351 count = 0;
10352 continue;
10353 }
10354 break;
10355
10356 default:
10357 break;
10358 }
10359
10360 break;
10361 }
10362
10363 shift_mode = try_widen_shift_mode (code, varop, count, result_mode, mode,
10364 outer_op, outer_const);
10365
10366 /* We have now finished analyzing the shift. The result should be
10367 a shift of type CODE with SHIFT_MODE shifting VAROP COUNT places. If
10368 OUTER_OP is non-UNKNOWN, it is an operation that needs to be applied
10369 to the result of the shift. OUTER_CONST is the relevant constant,
10370 but we must turn off all bits turned off in the shift. */
10371
10372 if (outer_op == UNKNOWN
10373 && orig_code == code && orig_count == count
10374 && varop == orig_varop
10375 && shift_mode == GET_MODE (varop))
10376 return NULL_RTX;
10377
10378 /* Make a SUBREG if necessary. If we can't make it, fail. */
10379 varop = gen_lowpart (shift_mode, varop);
10380 if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
10381 return NULL_RTX;
10382
10383 /* If we have an outer operation and we just made a shift, it is
10384 possible that we could have simplified the shift were it not
10385 for the outer operation. So try to do the simplification
10386 recursively. */
10387
10388 if (outer_op != UNKNOWN)
10389 x = simplify_shift_const_1 (code, shift_mode, varop, count);
10390 else
10391 x = NULL_RTX;
10392
10393 if (x == NULL_RTX)
10394 x = simplify_gen_binary (code, shift_mode, varop, GEN_INT (count));
10395
10396 /* If we were doing an LSHIFTRT in a wider mode than it was originally,
10397 turn off all the bits that the shift would have turned off. */
10398 if (orig_code == LSHIFTRT && result_mode != shift_mode)
10399 x = simplify_and_const_int (NULL_RTX, shift_mode, x,
10400 GET_MODE_MASK (result_mode) >> orig_count);
10401
10402 /* Do the remainder of the processing in RESULT_MODE. */
10403 x = gen_lowpart_or_truncate (result_mode, x);
10404
10405 /* If COMPLEMENT_P is set, we have to complement X before doing the outer
10406 operation. */
10407 if (complement_p)
10408 x = simplify_gen_unary (NOT, result_mode, x, result_mode);
10409
10410 if (outer_op != UNKNOWN)
10411 {
10412 if (GET_RTX_CLASS (outer_op) != RTX_UNARY
10413 && GET_MODE_BITSIZE (result_mode) < HOST_BITS_PER_WIDE_INT)
10414 outer_const = trunc_int_for_mode (outer_const, result_mode);
10415
10416 if (outer_op == AND)
10417 x = simplify_and_const_int (NULL_RTX, result_mode, x, outer_const);
10418 else if (outer_op == SET)
10419 {
10420 /* This means that we have determined that the result is
10421 equivalent to a constant. This should be rare. */
10422 if (!side_effects_p (x))
10423 x = GEN_INT (outer_const);
10424 }
10425 else if (GET_RTX_CLASS (outer_op) == RTX_UNARY)
10426 x = simplify_gen_unary (outer_op, result_mode, x, result_mode);
10427 else
10428 x = simplify_gen_binary (outer_op, result_mode, x,
10429 GEN_INT (outer_const));
10430 }
10431
10432 return x;
10433 }
10434
10435 /* Simplify a shift of VAROP by COUNT bits. CODE says what kind of shift.
10436 The result of the shift is RESULT_MODE. If we cannot simplify it,
10437 return X (or, if X is NULL, synthesize the expression with
10438 simplify_gen_binary). Otherwise, return a simplified value.
10439
10440 The shift is normally computed in the widest mode we find in VAROP, as
10441 long as it isn't a different number of words than RESULT_MODE. Exceptions
10442 are ASHIFTRT and ROTATE, which are always done in their original mode. */
10443
10444 static rtx
10445 simplify_shift_const (rtx x, enum rtx_code code, enum machine_mode result_mode,
10446 rtx varop, int count)
10447 {
10448 rtx tem = simplify_shift_const_1 (code, result_mode, varop, count);
10449 if (tem)
10450 return tem;
10451
10452 if (!x)
10453 x = simplify_gen_binary (code, GET_MODE (varop), varop, GEN_INT (count));
10454 if (GET_MODE (x) != result_mode)
10455 x = gen_lowpart (result_mode, x);
10456 return x;
10457 }
10458
10459 \f
10460 /* Like recog, but we receive the address of a pointer to a new pattern.
10461 We try to match the rtx that the pointer points to.
10462 If that fails, we may try to modify or replace the pattern,
10463 storing the replacement into the same pointer object.
10464
10465 Modifications include deletion or addition of CLOBBERs.
10466
10467 PNOTES is a pointer to a location where any REG_UNUSED notes added for
10468 the CLOBBERs are placed.
10469
10470 The value is the final insn code from the pattern ultimately matched,
10471 or -1. */
10472
10473 static int
10474 recog_for_combine (rtx *pnewpat, rtx insn, rtx *pnotes)
10475 {
10476 rtx pat = *pnewpat;
10477 int insn_code_number;
10478 int num_clobbers_to_add = 0;
10479 int i;
10480 rtx notes = 0;
10481 rtx old_notes, old_pat;
10482
10483 /* If PAT is a PARALLEL, check to see if it contains the CLOBBER
10484 we use to indicate that something didn't match. If we find such a
10485 thing, force rejection. */
10486 if (GET_CODE (pat) == PARALLEL)
10487 for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
10488 if (GET_CODE (XVECEXP (pat, 0, i)) == CLOBBER
10489 && XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
10490 return -1;
10491
10492 old_pat = PATTERN (insn);
10493 old_notes = REG_NOTES (insn);
10494 PATTERN (insn) = pat;
10495 REG_NOTES (insn) = 0;
10496
10497 insn_code_number = recog (pat, insn, &num_clobbers_to_add);
10498 if (dump_file && (dump_flags & TDF_DETAILS))
10499 {
10500 if (insn_code_number < 0)
10501 fputs ("Failed to match this instruction:\n", dump_file);
10502 else
10503 fputs ("Successfully matched this instruction:\n", dump_file);
10504 print_rtl_single (dump_file, pat);
10505 }
10506
10507 /* If the pattern wasn't recognized, there is the possibility that we previously had an insn
10508 that clobbered some register as a side effect, but the combined
10509 insn doesn't need to do that. So try once more without the clobbers
10510 unless this represents an ASM insn. */
10511
10512 if (insn_code_number < 0 && ! check_asm_operands (pat)
10513 && GET_CODE (pat) == PARALLEL)
10514 {
10515 int pos;
10516
10517 for (pos = 0, i = 0; i < XVECLEN (pat, 0); i++)
10518 if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER)
10519 {
10520 if (i != pos)
10521 SUBST (XVECEXP (pat, 0, pos), XVECEXP (pat, 0, i));
10522 pos++;
10523 }
10524
10525 SUBST_INT (XVECLEN (pat, 0), pos);
10526
10527 if (pos == 1)
10528 pat = XVECEXP (pat, 0, 0);
10529
10530 PATTERN (insn) = pat;
10531 insn_code_number = recog (pat, insn, &num_clobbers_to_add);
10532 if (dump_file && (dump_flags & TDF_DETAILS))
10533 {
10534 if (insn_code_number < 0)
10535 fputs ("Failed to match this instruction:\n", dump_file);
10536 else
10537 fputs ("Successfully matched this instruction:\n", dump_file);
10538 print_rtl_single (dump_file, pat);
10539 }
10540 }
10541 PATTERN (insn) = old_pat;
10542 REG_NOTES (insn) = old_notes;
10543
10544 /* Recognize all noop sets; these will be killed by a follow-up pass. */
10545 if (insn_code_number < 0 && GET_CODE (pat) == SET && set_noop_p (pat))
10546 insn_code_number = NOOP_MOVE_INSN_CODE, num_clobbers_to_add = 0;
10547
10548 /* If we had any clobbers to add, make a new pattern that contains
10549 them. Then check to make sure that all of them are dead. */
10550 if (num_clobbers_to_add)
10551 {
10552 rtx newpat = gen_rtx_PARALLEL (VOIDmode,
10553 rtvec_alloc (GET_CODE (pat) == PARALLEL
10554 ? (XVECLEN (pat, 0)
10555 + num_clobbers_to_add)
10556 : num_clobbers_to_add + 1));
10557
10558 if (GET_CODE (pat) == PARALLEL)
10559 for (i = 0; i < XVECLEN (pat, 0); i++)
10560 XVECEXP (newpat, 0, i) = XVECEXP (pat, 0, i);
10561 else
10562 XVECEXP (newpat, 0, 0) = pat;
10563
10564 add_clobbers (newpat, insn_code_number);
10565
10566 for (i = XVECLEN (newpat, 0) - num_clobbers_to_add;
10567 i < XVECLEN (newpat, 0); i++)
10568 {
10569 if (REG_P (XEXP (XVECEXP (newpat, 0, i), 0))
10570 && ! reg_dead_at_p (XEXP (XVECEXP (newpat, 0, i), 0), insn))
10571 return -1;
10572 if (GET_CODE (XEXP (XVECEXP (newpat, 0, i), 0)) != SCRATCH)
10573 {
10574 gcc_assert (REG_P (XEXP (XVECEXP (newpat, 0, i), 0)));
10575 notes = alloc_reg_note (REG_UNUSED,
10576 XEXP (XVECEXP (newpat, 0, i), 0), notes);
10577 }
10578 }
10579 pat = newpat;
10580 }
10581
10582 *pnewpat = pat;
10583 *pnotes = notes;
10584
10585 return insn_code_number;
10586 }
10587 \f
10588 /* Like gen_lowpart_general but for use by combine. In combine it
10589 is not possible to create any new pseudoregs. However, it is
10590 safe to create invalid memory addresses, because combine will
10591 try to recognize them and all they will do is make the combine
10592 attempt fail.
10593
10594 If for some reason this cannot do its job, an rtx
10595 (clobber (const_int 0)) is returned.
10596 An insn containing that will not be recognized. */
10597
10598 static rtx
10599 gen_lowpart_for_combine (enum machine_mode omode, rtx x)
10600 {
10601 enum machine_mode imode = GET_MODE (x);
10602 unsigned int osize = GET_MODE_SIZE (omode);
10603 unsigned int isize = GET_MODE_SIZE (imode);
10604 rtx result;
10605
10606 if (omode == imode)
10607 return x;
10608
10609 /* Return identity if this is a CONST or symbolic reference. */
10610 if (omode == Pmode
10611 && (GET_CODE (x) == CONST
10612 || GET_CODE (x) == SYMBOL_REF
10613 || GET_CODE (x) == LABEL_REF))
10614 return x;
10615
10616 /* We can only support OMODE being wider than a word if X is a
10617 constant integer or has a mode the same size. */
10618 if (GET_MODE_SIZE (omode) > UNITS_PER_WORD
10619 && ! ((imode == VOIDmode
10620 && (CONST_INT_P (x)
10621 || GET_CODE (x) == CONST_DOUBLE))
10622 || isize == osize))
10623 goto fail;
10624
10625 /* X might be a paradoxical (subreg (mem)). In that case, gen_lowpart
10626 won't know what to do. So we will strip off the SUBREG here and
10627 process normally. */
10628 if (GET_CODE (x) == SUBREG && MEM_P (SUBREG_REG (x)))
10629 {
10630 x = SUBREG_REG (x);
10631
10632 /* For use in case we fall down into the address adjustments
10633 further below, we need to adjust the known mode and size of
10634 x; imode and isize, since we just adjusted x. */
10635 imode = GET_MODE (x);
10636
10637 if (imode == omode)
10638 return x;
10639
10640 isize = GET_MODE_SIZE (imode);
10641 }
10642
10643 result = gen_lowpart_common (omode, x);
10644
10645 if (result)
10646 return result;
10647
10648 if (MEM_P (x))
10649 {
10650 int offset = 0;
10651
10652 /* Refuse to work on a volatile memory ref or one with a mode-dependent
10653 address. */
10654 if (MEM_VOLATILE_P (x) || mode_dependent_address_p (XEXP (x, 0)))
10655 goto fail;
10656
10657 /* If we want to refer to something bigger than the original memref,
10658 generate a paradoxical subreg instead. That will force a reload
10659 of the original memref X. */
10660 if (isize < osize)
10661 return gen_rtx_SUBREG (omode, x, 0);
10662
10663 if (WORDS_BIG_ENDIAN)
10664 offset = MAX (isize, UNITS_PER_WORD) - MAX (osize, UNITS_PER_WORD);
10665
10666 /* Adjust the address so that the address-after-the-data is
10667 unchanged. */
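/* E.g. taking the QImode lowpart of a 4-byte MEM on a big-endian
   target with UNITS_PER_WORD == 4 yields an offset of 3, so the new
   byte reference ends at the same address the old SImode reference
   did and names its least significant byte.  */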
10668 if (BYTES_BIG_ENDIAN)
10669 offset -= MIN (UNITS_PER_WORD, osize) - MIN (UNITS_PER_WORD, isize);
10670
10671 return adjust_address_nv (x, omode, offset);
10672 }
10673
10674 /* If X is a comparison operator, rewrite it in a new mode. This
10675 probably won't match, but may allow further simplifications. */
10676 else if (COMPARISON_P (x))
10677 return gen_rtx_fmt_ee (GET_CODE (x), omode, XEXP (x, 0), XEXP (x, 1));
10678
10679 /* If we couldn't simplify X any other way, just enclose it in a
10680 SUBREG. Normally, this SUBREG won't match, but some patterns may
10681 include an explicit SUBREG or we may simplify it further in combine. */
10682 else
10683 {
10684 int offset = 0;
10685 rtx res;
10686
10687 offset = subreg_lowpart_offset (omode, imode);
10688 if (imode == VOIDmode)
10689 {
10690 imode = int_mode_for_mode (omode);
10691 x = gen_lowpart_common (imode, x);
10692 if (x == NULL)
10693 goto fail;
10694 }
10695 res = simplify_gen_subreg (omode, x, imode, offset);
10696 if (res)
10697 return res;
10698 }
10699
10700 fail:
10701 return gen_rtx_CLOBBER (omode, const0_rtx);
10702 }
10703 \f
10704 /* Simplify a comparison between *POP0 and *POP1 where CODE is the
10705 comparison code that will be tested.
10706
10707 The result is a possibly different comparison code to use. *POP0 and
10708 *POP1 may be updated.
10709
10710 It is possible that we might detect that a comparison is either always
10711 true or always false. However, we do not perform general constant
10712 folding in combine, so this knowledge isn't useful. Such tautologies
10713 should have been detected earlier. Hence we ignore all such cases. */
10714
10715 static enum rtx_code
10716 simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
10717 {
10718 rtx op0 = *pop0;
10719 rtx op1 = *pop1;
10720 rtx tem, tem1;
10721 int i;
10722 enum machine_mode mode, tmode;
10723
10724 /* Try a few ways of applying the same transformation to both operands. */
10725 while (1)
10726 {
10727 #ifndef WORD_REGISTER_OPERATIONS
10728 /* The test below this one won't handle SIGN_EXTENDs on these machines,
10729 so check specially. */
10730 if (code != GTU && code != GEU && code != LTU && code != LEU
10731 && GET_CODE (op0) == ASHIFTRT && GET_CODE (op1) == ASHIFTRT
10732 && GET_CODE (XEXP (op0, 0)) == ASHIFT
10733 && GET_CODE (XEXP (op1, 0)) == ASHIFT
10734 && GET_CODE (XEXP (XEXP (op0, 0), 0)) == SUBREG
10735 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SUBREG
10736 && (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0)))
10737 == GET_MODE (SUBREG_REG (XEXP (XEXP (op1, 0), 0))))
10738 && CONST_INT_P (XEXP (op0, 1))
10739 && XEXP (op0, 1) == XEXP (op1, 1)
10740 && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
10741 && XEXP (op0, 1) == XEXP (XEXP (op1, 0), 1)
10742 && (INTVAL (XEXP (op0, 1))
10743 == (GET_MODE_BITSIZE (GET_MODE (op0))
10744 - (GET_MODE_BITSIZE
10745 (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0))))))))
10746 {
10747 op0 = SUBREG_REG (XEXP (XEXP (op0, 0), 0));
10748 op1 = SUBREG_REG (XEXP (XEXP (op1, 0), 0));
10749 }
10750 #endif
10751
10752 /* If both operands are the same constant shift, see if we can ignore the
10753 shift. We can if the shift is a rotate or if the bits shifted out of
10754 this shift are known to be zero for both inputs and if the type of
10755 comparison is compatible with the shift. */
10756 if (GET_CODE (op0) == GET_CODE (op1)
10757 && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT
10758 && ((GET_CODE (op0) == ROTATE && (code == NE || code == EQ))
10759 || ((GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFT)
10760 && (code != GT && code != LT && code != GE && code != LE))
10761 || (GET_CODE (op0) == ASHIFTRT
10762 && (code != GTU && code != LTU
10763 && code != GEU && code != LEU)))
10764 && CONST_INT_P (XEXP (op0, 1))
10765 && INTVAL (XEXP (op0, 1)) >= 0
10766 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
10767 && XEXP (op0, 1) == XEXP (op1, 1))
10768 {
10769 enum machine_mode mode = GET_MODE (op0);
10770 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
10771 int shift_count = INTVAL (XEXP (op0, 1));
10772
10773 if (GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFTRT)
10774 mask &= (mask >> shift_count) << shift_count;
10775 else if (GET_CODE (op0) == ASHIFT)
10776 mask = (mask & (mask << shift_count)) >> shift_count;
10777
10778 if ((nonzero_bits (XEXP (op0, 0), mode) & ~mask) == 0
10779 && (nonzero_bits (XEXP (op1, 0), mode) & ~mask) == 0)
10780 op0 = XEXP (op0, 0), op1 = XEXP (op1, 0);
10781 else
10782 break;
10783 }
10784
10785 /* If both operands are AND's of a paradoxical SUBREG by constant, the
10786 SUBREGs are of the same mode, and, in both cases, the AND would
10787 be redundant if the comparison was done in the narrower mode,
10788 do the comparison in the narrower mode (e.g., we are AND'ing with 1
10789 and the operand's possibly nonzero bits are 0xffffff01; in that case
10790 if we only care about QImode, we don't need the AND). This case
10791 occurs if the output mode of an scc insn is not SImode and
10792 STORE_FLAG_VALUE == 1 (e.g., the 386).
10793
10794 Similarly, check for a case where the AND's are ZERO_EXTEND
10795 operations from some narrower mode even though a SUBREG is not
10796 present. */
10797
10798 else if (GET_CODE (op0) == AND && GET_CODE (op1) == AND
10799 && CONST_INT_P (XEXP (op0, 1))
10800 && CONST_INT_P (XEXP (op1, 1)))
10801 {
10802 rtx inner_op0 = XEXP (op0, 0);
10803 rtx inner_op1 = XEXP (op1, 0);
10804 HOST_WIDE_INT c0 = INTVAL (XEXP (op0, 1));
10805 HOST_WIDE_INT c1 = INTVAL (XEXP (op1, 1));
10806 int changed = 0;
10807
10808 if (GET_CODE (inner_op0) == SUBREG && GET_CODE (inner_op1) == SUBREG
10809 && (GET_MODE_SIZE (GET_MODE (inner_op0))
10810 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner_op0))))
10811 && (GET_MODE (SUBREG_REG (inner_op0))
10812 == GET_MODE (SUBREG_REG (inner_op1)))
10813 && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (inner_op0)))
10814 <= HOST_BITS_PER_WIDE_INT)
10815 && (0 == ((~c0) & nonzero_bits (SUBREG_REG (inner_op0),
10816 GET_MODE (SUBREG_REG (inner_op0)))))
10817 && (0 == ((~c1) & nonzero_bits (SUBREG_REG (inner_op1),
10818 GET_MODE (SUBREG_REG (inner_op1))))))
10819 {
10820 op0 = SUBREG_REG (inner_op0);
10821 op1 = SUBREG_REG (inner_op1);
10822
10823 /* The resulting comparison is always unsigned since we masked
10824 off the original sign bit. */
10825 code = unsigned_condition (code);
10826
10827 changed = 1;
10828 }
10829
10830 else if (c0 == c1)
10831 for (tmode = GET_CLASS_NARROWEST_MODE
10832 (GET_MODE_CLASS (GET_MODE (op0)));
10833 tmode != GET_MODE (op0); tmode = GET_MODE_WIDER_MODE (tmode))
10834 if ((unsigned HOST_WIDE_INT) c0 == GET_MODE_MASK (tmode))
10835 {
10836 op0 = gen_lowpart (tmode, inner_op0);
10837 op1 = gen_lowpart (tmode, inner_op1);
10838 code = unsigned_condition (code);
10839 changed = 1;
10840 break;
10841 }
10842
10843 if (! changed)
10844 break;
10845 }
10846
10847 /* If both operands are NOT, we can strip off the outer operation
10848 and adjust the comparison code for swapped operands; similarly for
10849 NEG, except that this must be an equality comparison. */
10850 else if ((GET_CODE (op0) == NOT && GET_CODE (op1) == NOT)
10851 || (GET_CODE (op0) == NEG && GET_CODE (op1) == NEG
10852 && (code == EQ || code == NE)))
10853 op0 = XEXP (op0, 0), op1 = XEXP (op1, 0), code = swap_condition (code);
10854
10855 else
10856 break;
10857 }
10858
10859 /* If the first operand is a constant, swap the operands and adjust the
10860 comparison code appropriately, but don't do this if the second operand
10861 is already a constant integer. */
10862 if (swap_commutative_operands_p (op0, op1))
10863 {
10864 tem = op0, op0 = op1, op1 = tem;
10865 code = swap_condition (code);
10866 }
10867
10868 /* We now enter a loop during which we will try to simplify the comparison.
10869 For the most part, we are only concerned with comparisons with zero,
10870 but some things may really be comparisons with zero but not start
10871 out looking that way. */
10872
10873 while (CONST_INT_P (op1))
10874 {
10875 enum machine_mode mode = GET_MODE (op0);
10876 unsigned int mode_width = GET_MODE_BITSIZE (mode);
10877 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
10878 int equality_comparison_p;
10879 int sign_bit_comparison_p;
10880 int unsigned_comparison_p;
10881 HOST_WIDE_INT const_op;
10882
10883 /* We only want to handle integral modes. This catches VOIDmode,
10884 CCmode, and the floating-point modes. An exception is that we
10885 can handle VOIDmode if OP0 is a COMPARE or a comparison
10886 operation. */
10887
10888 if (GET_MODE_CLASS (mode) != MODE_INT
10889 && ! (mode == VOIDmode
10890 && (GET_CODE (op0) == COMPARE || COMPARISON_P (op0))))
10891 break;
10892
10893 /* Get the constant we are comparing against and turn off all bits
10894 not on in our mode. */
10895 const_op = INTVAL (op1);
10896 if (mode != VOIDmode)
10897 const_op = trunc_int_for_mode (const_op, mode);
10898 op1 = GEN_INT (const_op);
10899
10900 /* If we are comparing against a constant power of two and the value
10901 being compared can only have that single bit nonzero (e.g., it was
10902 `and'ed with that bit), we can replace this with a comparison
10903 with zero. */
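/* E.g. if OP0 is (and X 8), only bit 3 can be nonzero, so a test like
   (eq (and X 8) 8) is the same as (ne (and X 8) 0): the comparison is
   replaced by a test against zero with the code reversed.  */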
10904 if (const_op
10905 && (code == EQ || code == NE || code == GE || code == GEU
10906 || code == LT || code == LTU)
10907 && mode_width <= HOST_BITS_PER_WIDE_INT
10908 && exact_log2 (const_op) >= 0
10909 && nonzero_bits (op0, mode) == (unsigned HOST_WIDE_INT) const_op)
10910 {
10911 code = (code == EQ || code == GE || code == GEU ? NE : EQ);
10912 op1 = const0_rtx, const_op = 0;
10913 }
10914
10915 /* Similarly, if we are comparing a value known to be either -1 or
10916 0 with -1, change it to the opposite comparison against zero. */
10917
10918 if (const_op == -1
10919 && (code == EQ || code == NE || code == GT || code == LE
10920 || code == GEU || code == LTU)
10921 && num_sign_bit_copies (op0, mode) == mode_width)
10922 {
10923 code = (code == EQ || code == LE || code == GEU ? NE : EQ);
10924 op1 = const0_rtx, const_op = 0;
10925 }
10926
10927 /* Do some canonicalizations based on the comparison code. We prefer
10928 comparisons against zero and then prefer equality comparisons.
10929 If we can reduce the size of a constant, we will do that too. */
10930
10931 switch (code)
10932 {
10933 case LT:
10934 /* < C is equivalent to <= (C - 1) */
10935 if (const_op > 0)
10936 {
10937 const_op -= 1;
10938 op1 = GEN_INT (const_op);
10939 code = LE;
10940 /* ... fall through to LE case below. */
10941 }
10942 else
10943 break;
10944
10945 case LE:
10946 /* <= C is equivalent to < (C + 1); we do this for C < 0 */
10947 if (const_op < 0)
10948 {
10949 const_op += 1;
10950 op1 = GEN_INT (const_op);
10951 code = LT;
10952 }
10953
10954 /* If we are doing a <= 0 comparison on a value known to have
10955 a zero sign bit, we can replace this with == 0. */
10956 else if (const_op == 0
10957 && mode_width <= HOST_BITS_PER_WIDE_INT
10958 && (nonzero_bits (op0, mode)
10959 & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
10960 == 0)
10961 code = EQ;
10962 break;
10963
10964 case GE:
10965 /* >= C is equivalent to > (C - 1). */
10966 if (const_op > 0)
10967 {
10968 const_op -= 1;
10969 op1 = GEN_INT (const_op);
10970 code = GT;
10971 /* ... fall through to GT below. */
10972 }
10973 else
10974 break;
10975
10976 case GT:
10977 /* > C is equivalent to >= (C + 1); we do this for C < 0. */
10978 if (const_op < 0)
10979 {
10980 const_op += 1;
10981 op1 = GEN_INT (const_op);
10982 code = GE;
10983 }
10984
10985 /* If we are doing a > 0 comparison on a value known to have
10986 a zero sign bit, we can replace this with != 0. */
10987 else if (const_op == 0
10988 && mode_width <= HOST_BITS_PER_WIDE_INT
10989 && (nonzero_bits (op0, mode)
10990 & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
10991 == 0)
10992 code = NE;
10993 break;
10994
10995 case LTU:
10996 /* < C is equivalent to <= (C - 1). */
10997 if (const_op > 0)
10998 {
10999 const_op -= 1;
11000 op1 = GEN_INT (const_op);
11001 code = LEU;
11002 /* ... fall through ... */
11003 }
11004
11005 /* (unsigned) < 0x80000000 is equivalent to >= 0. */
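/* E.g. in a 32-bit mode, x <u 0x80000000 holds exactly when the sign
   bit of x is clear, i.e. when x >= 0 as a signed value.  */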
11006 else if (mode_width <= HOST_BITS_PER_WIDE_INT
11007 && (unsigned HOST_WIDE_INT) const_op
11008 == (unsigned HOST_WIDE_INT) 1 << (mode_width - 1))
11009 {
11010 const_op = 0, op1 = const0_rtx;
11011 code = GE;
11012 break;
11013 }
11014 else
11015 break;
11016
11017 case LEU:
11018 /* unsigned <= 0 is equivalent to == 0 */
11019 if (const_op == 0)
11020 code = EQ;
11021
11022 /* (unsigned) <= 0x7fffffff is equivalent to >= 0. */
11023 else if (mode_width <= HOST_BITS_PER_WIDE_INT
11024 && (unsigned HOST_WIDE_INT) const_op
11025 == ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)) - 1)
11026 {
11027 const_op = 0, op1 = const0_rtx;
11028 code = GE;
11029 }
11030 break;
11031
11032 case GEU:
11033 /* >= C is equivalent to > (C - 1). */
11034 if (const_op > 1)
11035 {
11036 const_op -= 1;
11037 op1 = GEN_INT (const_op);
11038 code = GTU;
11039 /* ... fall through ... */
11040 }
11041
11042 /* (unsigned) >= 0x80000000 is equivalent to < 0. */
11043 else if (mode_width <= HOST_BITS_PER_WIDE_INT
11044 && (unsigned HOST_WIDE_INT) const_op
11045 == (unsigned HOST_WIDE_INT) 1 << (mode_width - 1))
11046 {
11047 const_op = 0, op1 = const0_rtx;
11048 code = LT;
11049 break;
11050 }
11051 else
11052 break;
11053
11054 case GTU:
11055 /* unsigned > 0 is equivalent to != 0 */
11056 if (const_op == 0)
11057 code = NE;
11058
11059 /* (unsigned) > 0x7fffffff is equivalent to < 0. */
11060 else if (mode_width <= HOST_BITS_PER_WIDE_INT
11061 && (unsigned HOST_WIDE_INT) const_op
11062 == ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)) - 1)
11063 {
11064 const_op = 0, op1 = const0_rtx;
11065 code = LT;
11066 }
11067 break;
11068
11069 default:
11070 break;
11071 }
11072
11073 /* Compute some predicates to simplify code below. */
11074
11075 equality_comparison_p = (code == EQ || code == NE);
11076 sign_bit_comparison_p = ((code == LT || code == GE) && const_op == 0);
11077 unsigned_comparison_p = (code == LTU || code == LEU || code == GTU
11078 || code == GEU);
11079
11080 /* If this is a sign bit comparison and we can do arithmetic in
11081 MODE, say that we will only be needing the sign bit of OP0. */
11082 if (sign_bit_comparison_p
11083 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
11084 op0 = force_to_mode (op0, mode,
11085 (unsigned HOST_WIDE_INT) 1
11086 << (GET_MODE_BITSIZE (mode) - 1),
11087 0);
11088
11089 /* Now try cases based on the opcode of OP0. If none of the cases
11090 does a "continue", we exit this loop immediately after the
11091 switch. */
11092
11093 switch (GET_CODE (op0))
11094 {
11095 case ZERO_EXTRACT:
11096 /* If we are extracting a single bit from a variable position in
11097 a constant that has only a single bit set and are comparing it
11098 with zero, we can convert this into an equality comparison
11099 between the position and the location of the single bit. */
11100 /* Except we can't if SHIFT_COUNT_TRUNCATED is set, since we might
11101 have already reduced the shift count modulo the word size. */
11102 if (!SHIFT_COUNT_TRUNCATED
11103 && CONST_INT_P (XEXP (op0, 0))
11104 && XEXP (op0, 1) == const1_rtx
11105 && equality_comparison_p && const_op == 0
11106 && (i = exact_log2 (UINTVAL (XEXP (op0, 0)))) >= 0)
11107 {
11108 if (BITS_BIG_ENDIAN)
11109 {
11110 enum machine_mode new_mode
11111 = mode_for_extraction (EP_extzv, 1);
11112 if (new_mode == MAX_MACHINE_MODE)
11113 i = BITS_PER_WORD - 1 - i;
11114 else
11115 {
11116 mode = new_mode;
11117 i = (GET_MODE_BITSIZE (mode) - 1 - i);
11118 }
11119 }
11120
11121 op0 = XEXP (op0, 2);
11122 op1 = GEN_INT (i);
11123 const_op = i;
11124
11125 /* Result is nonzero iff shift count is equal to I. */
11126 code = reverse_condition (code);
11127 continue;
11128 }
11129
11130 /* ... fall through ... */
11131
11132 case SIGN_EXTRACT:
11133 tem = expand_compound_operation (op0);
11134 if (tem != op0)
11135 {
11136 op0 = tem;
11137 continue;
11138 }
11139 break;
11140
11141 case NOT:
11142 /* If testing for equality, we can take the NOT of the constant. */
11143 if (equality_comparison_p
11144 && (tem = simplify_unary_operation (NOT, mode, op1, mode)) != 0)
11145 {
11146 op0 = XEXP (op0, 0);
11147 op1 = tem;
11148 continue;
11149 }
11150
11151 /* If just looking at the sign bit, reverse the sense of the
11152 comparison. */
11153 if (sign_bit_comparison_p)
11154 {
11155 op0 = XEXP (op0, 0);
11156 code = (code == GE ? LT : GE);
11157 continue;
11158 }
11159 break;
11160
11161 case NEG:
11162 /* If testing for equality, we can take the NEG of the constant. */
11163 if (equality_comparison_p
11164 && (tem = simplify_unary_operation (NEG, mode, op1, mode)) != 0)
11165 {
11166 op0 = XEXP (op0, 0);
11167 op1 = tem;
11168 continue;
11169 }
11170
11171 /* The remaining cases only apply to comparisons with zero. */
11172 if (const_op != 0)
11173 break;
11174
11175 /* When X is ABS or is known positive,
11176 (neg X) is < 0 if and only if X != 0. */
11177
11178 if (sign_bit_comparison_p
11179 && (GET_CODE (XEXP (op0, 0)) == ABS
11180 || (mode_width <= HOST_BITS_PER_WIDE_INT
11181 && (nonzero_bits (XEXP (op0, 0), mode)
11182 & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
11183 == 0)))
11184 {
11185 op0 = XEXP (op0, 0);
11186 code = (code == LT ? NE : EQ);
11187 continue;
11188 }
11189
11190 /* If we have NEG of something whose two high-order bits are the
11191 same, we know that "(-a) < 0" is equivalent to "a > 0". */
11192 if (num_sign_bit_copies (op0, mode) >= 2)
11193 {
11194 op0 = XEXP (op0, 0);
11195 code = swap_condition (code);
11196 continue;
11197 }
11198 break;
11199
11200 case ROTATE:
11201 /* If we are testing equality and our count is a constant, we
11202 can perform the inverse operation on our RHS. */
11203 if (equality_comparison_p && CONST_INT_P (XEXP (op0, 1))
11204 && (tem = simplify_binary_operation (ROTATERT, mode,
11205 op1, XEXP (op0, 1))) != 0)
11206 {
11207 op0 = XEXP (op0, 0);
11208 op1 = tem;
11209 continue;
11210 }
11211
11212 /* If we are doing a < 0 or >= 0 comparison, it means we are testing
11213 a particular bit. Convert it to an AND of a constant of that
11214 bit. This will be converted into a ZERO_EXTRACT. */
11215 if (const_op == 0 && sign_bit_comparison_p
11216 && CONST_INT_P (XEXP (op0, 1))
11217 && mode_width <= HOST_BITS_PER_WIDE_INT)
11218 {
11219 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
11220 ((unsigned HOST_WIDE_INT) 1
11221 << (mode_width - 1
11222 - INTVAL (XEXP (op0, 1)))));
11223 code = (code == LT ? NE : EQ);
11224 continue;
11225 }
11226
11227 /* Fall through. */
11228
11229 case ABS:
11230 /* ABS is ignorable inside an equality comparison with zero. */
11231 if (const_op == 0 && equality_comparison_p)
11232 {
11233 op0 = XEXP (op0, 0);
11234 continue;
11235 }
11236 break;
11237
11238 case SIGN_EXTEND:
11239 /* Can simplify (compare (zero/sign_extend FOO) CONST) to
11240 (compare FOO CONST) if CONST fits in FOO's mode and we
11241 are either testing inequality or have an unsigned
11242 comparison with ZERO_EXTEND or a signed comparison with
11243 SIGN_EXTEND. But don't do it if we don't have a compare
11244 insn of the given mode, since we'd have to revert it
11245 later on, and then we wouldn't know whether to sign- or
11246 zero-extend. */
11247 mode = GET_MODE (XEXP (op0, 0));
11248 if (mode != VOIDmode && GET_MODE_CLASS (mode) == MODE_INT
11249 && ! unsigned_comparison_p
11250 && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
11251 && ((unsigned HOST_WIDE_INT) const_op
11252 < (((unsigned HOST_WIDE_INT) 1
11253 << (GET_MODE_BITSIZE (mode) - 1))))
11254 && have_insn_for (COMPARE, mode))
11255 {
11256 op0 = XEXP (op0, 0);
11257 continue;
11258 }
11259 break;
11260
11261 case SUBREG:
11262 /* Check for the case where we are comparing A - C1 with C2, that is
11263
11264 (subreg:MODE (plus (A) (-C1))) op (C2)
11265
11266 with C1 a constant, and try to lift the SUBREG, i.e. to do the
11267 comparison in the wider mode. One of the following two conditions
11268 must be true in order for this to be valid:
11269
11270 1. The mode extension results in the same bit pattern being added
11271 on both sides and the comparison is equality or unsigned. As
11272 C2 has been truncated to fit in MODE, the pattern can only be
11273 all 0s or all 1s.
11274
11275 2. The mode extension results in the sign bit being copied on
11276 each side.
11277
11278 The difficulty here is that we have predicates for A but not for
11279 (A - C1) so we need to check that C1 is within proper bounds so
11280 as to perturb A as little as possible. */
11281
11282 if (mode_width <= HOST_BITS_PER_WIDE_INT
11283 && subreg_lowpart_p (op0)
11284 && GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0))) > mode_width
11285 && GET_CODE (SUBREG_REG (op0)) == PLUS
11286 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1)))
11287 {
11288 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
11289 rtx a = XEXP (SUBREG_REG (op0), 0);
11290 HOST_WIDE_INT c1 = -INTVAL (XEXP (SUBREG_REG (op0), 1));
11291
11292 if ((c1 > 0
11293 && (unsigned HOST_WIDE_INT) c1
11294 < (unsigned HOST_WIDE_INT) 1 << (mode_width - 1)
11295 && (equality_comparison_p || unsigned_comparison_p)
11296 /* (A - C1) zero-extends if it is positive and sign-extends
11297 if it is negative, C2 both zero- and sign-extends. */
11298 && ((0 == (nonzero_bits (a, inner_mode)
11299 & ~GET_MODE_MASK (mode))
11300 && const_op >= 0)
11301 /* (A - C1) sign-extends if it is positive and 1-extends
11302 if it is negative, C2 both sign- and 1-extends. */
11303 || (num_sign_bit_copies (a, inner_mode)
11304 > (unsigned int) (GET_MODE_BITSIZE (inner_mode)
11305 - mode_width)
11306 && const_op < 0)))
11307 || ((unsigned HOST_WIDE_INT) c1
11308 < (unsigned HOST_WIDE_INT) 1 << (mode_width - 2)
11309 /* (A - C1) always sign-extends, like C2. */
11310 && num_sign_bit_copies (a, inner_mode)
11311 > (unsigned int) (GET_MODE_BITSIZE (inner_mode)
11312 - (mode_width - 1))))
11313 {
11314 op0 = SUBREG_REG (op0);
11315 continue;
11316 }
11317 }
11318
11319 /* If the inner mode is narrower and we are extracting the low part,
11320 we can treat the SUBREG as if it were a ZERO_EXTEND. */
11321 if (subreg_lowpart_p (op0)
11322 && GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0))) < mode_width)
11323 /* Fall through */ ;
11324 else
11325 break;
11326
11327 /* ... fall through ... */
11328
11329 case ZERO_EXTEND:
11330 mode = GET_MODE (XEXP (op0, 0));
11331 if (mode != VOIDmode && GET_MODE_CLASS (mode) == MODE_INT
11332 && (unsigned_comparison_p || equality_comparison_p)
11333 && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
11334 && ((unsigned HOST_WIDE_INT) const_op < GET_MODE_MASK (mode))
11335 && have_insn_for (COMPARE, mode))
11336 {
11337 op0 = XEXP (op0, 0);
11338 continue;
11339 }
11340 break;
11341
11342 case PLUS:
11343 /* (eq (plus X A) B) -> (eq X (minus B A)). We can only do
11344 this for equality comparisons due to pathological cases involving
11345 overflows. */
11346 if (equality_comparison_p
11347 && 0 != (tem = simplify_binary_operation (MINUS, mode,
11348 op1, XEXP (op0, 1))))
11349 {
11350 op0 = XEXP (op0, 0);
11351 op1 = tem;
11352 continue;
11353 }
11354
11355 /* (plus (abs X) (const_int -1)) is < 0 if and only if X == 0. */
11356 if (const_op == 0 && XEXP (op0, 1) == constm1_rtx
11357 && GET_CODE (XEXP (op0, 0)) == ABS && sign_bit_comparison_p)
11358 {
11359 op0 = XEXP (XEXP (op0, 0), 0);
11360 code = (code == LT ? EQ : NE);
11361 continue;
11362 }
11363 break;
11364
11365 case MINUS:
11366 /* We used to optimize signed comparisons against zero, but that
11367 was incorrect. Unsigned comparisons against zero (GTU, LEU)
11368 arrive here as equality comparisons, or (GEU, LTU) are
11369 optimized away. No need to special-case them. */
11370
11371 /* (eq (minus A B) C) -> (eq A (plus B C)) or
11372 (eq B (minus A C)), whichever simplifies. We can only do
11373 this for equality comparisons due to pathological cases involving
11374 overflows. */
11375 if (equality_comparison_p
11376 && 0 != (tem = simplify_binary_operation (PLUS, mode,
11377 XEXP (op0, 1), op1)))
11378 {
11379 op0 = XEXP (op0, 0);
11380 op1 = tem;
11381 continue;
11382 }
11383
11384 if (equality_comparison_p
11385 && 0 != (tem = simplify_binary_operation (MINUS, mode,
11386 XEXP (op0, 0), op1)))
11387 {
11388 op0 = XEXP (op0, 1);
11389 op1 = tem;
11390 continue;
11391 }
11392
11393 /* The sign bit of (minus (ashiftrt X C) X), where C is the number
11394 of bits in X minus 1, is one iff X > 0. */
11395 if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == ASHIFTRT
11396 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
11397 && UINTVAL (XEXP (XEXP (op0, 0), 1)) == mode_width - 1
11398 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
11399 {
11400 op0 = XEXP (op0, 1);
11401 code = (code == GE ? LE : GT);
11402 continue;
11403 }
11404 break;
11405
11406 case XOR:
11407 /* (eq (xor A B) C) -> (eq A (xor B C)). This is a simplification
11408 if C is zero or B is a constant. */
11409 if (equality_comparison_p
11410 && 0 != (tem = simplify_binary_operation (XOR, mode,
11411 XEXP (op0, 1), op1)))
11412 {
11413 op0 = XEXP (op0, 0);
11414 op1 = tem;
11415 continue;
11416 }
11417 break;
11418
11419 case EQ: case NE:
11420 case UNEQ: case LTGT:
11421 case LT: case LTU: case UNLT: case LE: case LEU: case UNLE:
11422 case GT: case GTU: case UNGT: case GE: case GEU: case UNGE:
11423 case UNORDERED: case ORDERED:
11424 /* We can't do anything if OP0 is a condition code value, rather
11425 than an actual data value. */
11426 if (const_op != 0
11427 || CC0_P (XEXP (op0, 0))
11428 || GET_MODE_CLASS (GET_MODE (XEXP (op0, 0))) == MODE_CC)
11429 break;
11430
11431 /* Get the two operands being compared. */
11432 if (GET_CODE (XEXP (op0, 0)) == COMPARE)
11433 tem = XEXP (XEXP (op0, 0), 0), tem1 = XEXP (XEXP (op0, 0), 1);
11434 else
11435 tem = XEXP (op0, 0), tem1 = XEXP (op0, 1);
11436
11437 /* Check for the cases where we simply want the result of the
11438 earlier test or the opposite of that result. */
11439 if (code == NE || code == EQ
11440 || (GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT
11441 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT
11442 && (STORE_FLAG_VALUE
11443 & (((unsigned HOST_WIDE_INT) 1
11444 << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
11445 && (code == LT || code == GE)))
11446 {
11447 enum rtx_code new_code;
11448 if (code == LT || code == NE)
11449 new_code = GET_CODE (op0);
11450 else
11451 new_code = reversed_comparison_code (op0, NULL);
11452
11453 if (new_code != UNKNOWN)
11454 {
11455 code = new_code;
11456 op0 = tem;
11457 op1 = tem1;
11458 continue;
11459 }
11460 }
11461 break;
11462
11463 case IOR:
11464 /* The sign bit of (ior (plus X (const_int -1)) X) is nonzero
11465 iff X <= 0. */
11466 if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == PLUS
11467 && XEXP (XEXP (op0, 0), 1) == constm1_rtx
11468 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
11469 {
11470 op0 = XEXP (op0, 1);
11471 code = (code == GE ? GT : LE);
11472 continue;
11473 }
11474 break;
11475
11476 case AND:
11477 /* Convert (and (xshift 1 X) Y) to (and (lshiftrt Y X) 1). This
11478 will be converted to a ZERO_EXTRACT later. */
11479 if (const_op == 0 && equality_comparison_p
11480 && GET_CODE (XEXP (op0, 0)) == ASHIFT
11481 && XEXP (XEXP (op0, 0), 0) == const1_rtx)
11482 {
11483 op0 = gen_rtx_LSHIFTRT (mode, XEXP (op0, 1),
11484 XEXP (XEXP (op0, 0), 1));
11485 op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
11486 continue;
11487 }
11488
11489 /* If we are comparing (and (lshiftrt X C1) C2) for equality with
11490 zero and X is a comparison and C1 and C2 describe only bits set
11491 in STORE_FLAG_VALUE, we can compare with X. */
11492 if (const_op == 0 && equality_comparison_p
11493 && mode_width <= HOST_BITS_PER_WIDE_INT
11494 && CONST_INT_P (XEXP (op0, 1))
11495 && GET_CODE (XEXP (op0, 0)) == LSHIFTRT
11496 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
11497 && INTVAL (XEXP (XEXP (op0, 0), 1)) >= 0
11498 && INTVAL (XEXP (XEXP (op0, 0), 1)) < HOST_BITS_PER_WIDE_INT)
11499 {
11500 mask = ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
11501 << INTVAL (XEXP (XEXP (op0, 0), 1)));
11502 if ((~STORE_FLAG_VALUE & mask) == 0
11503 && (COMPARISON_P (XEXP (XEXP (op0, 0), 0))
11504 || ((tem = get_last_value (XEXP (XEXP (op0, 0), 0))) != 0
11505 && COMPARISON_P (tem))))
11506 {
11507 op0 = XEXP (XEXP (op0, 0), 0);
11508 continue;
11509 }
11510 }
11511
11512 /* If we are doing an equality comparison of an AND of a bit equal
11513 to the sign bit, replace this with a LT or GE comparison of
11514 the underlying value. */
11515 if (equality_comparison_p
11516 && const_op == 0
11517 && CONST_INT_P (XEXP (op0, 1))
11518 && mode_width <= HOST_BITS_PER_WIDE_INT
11519 && ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
11520 == (unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
11521 {
11522 op0 = XEXP (op0, 0);
11523 code = (code == EQ ? GE : LT);
11524 continue;
11525 }
11526
11527 /* If this AND operation is really a ZERO_EXTEND from a narrower
11528 mode, the constant fits within that mode, and this is either an
11529 equality or unsigned comparison, try to do this comparison in
11530 the narrower mode.
11531
11532 Note that the transformation:
11533
11534 (ne:DI (and:DI (reg:DI 4) (const_int 0xffffffff)) (const_int 0))
11535 -> (ne:DI (reg:SI 4) (const_int 0))
11536
11537 is invalid unless TRULY_NOOP_TRUNCATION allows it or the
11538 register is known to hold a value of the required
11539 mode. */
11540 if ((equality_comparison_p || unsigned_comparison_p)
11541 && CONST_INT_P (XEXP (op0, 1))
11542 && (i = exact_log2 ((UINTVAL (XEXP (op0, 1))
11543 & GET_MODE_MASK (mode))
11544 + 1)) >= 0
11545 && const_op >> i == 0
11546 && (tmode = mode_for_size (i, MODE_INT, 1)) != BLKmode
11547 && (TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (tmode),
11548 GET_MODE_BITSIZE (GET_MODE (op0)))
11549 || (REG_P (XEXP (op0, 0))
11550 && reg_truncated_to_mode (tmode, XEXP (op0, 0)))))
11551 {
11552 op0 = gen_lowpart (tmode, XEXP (op0, 0));
11553 continue;
11554 }
11555
11556 /* If this is (and:M1 (subreg:M2 X 0) (const_int C1)) where C1
11557 fits in both M1 and M2 and the SUBREG is either paradoxical
11558 or represents the low part, permute the SUBREG and the AND
11559 and try again. */
11560 if (GET_CODE (XEXP (op0, 0)) == SUBREG)
11561 {
11562 unsigned HOST_WIDE_INT c1;
11563 tmode = GET_MODE (SUBREG_REG (XEXP (op0, 0)));
11564 /* Require an integral mode, to avoid creating something like
11565 (AND:SF ...). */
11566 if (SCALAR_INT_MODE_P (tmode)
11567 /* It is unsafe to commute the AND into the SUBREG if the
11568 SUBREG is paradoxical and WORD_REGISTER_OPERATIONS is
11569 not defined. As originally written the upper bits
11570 have a defined value due to the AND operation.
11571 However, if we commute the AND inside the SUBREG then
11572 they no longer have defined values and the meaning of
11573 the code has been changed. */
11574 && (0
11575 #ifdef WORD_REGISTER_OPERATIONS
11576 || (mode_width > GET_MODE_BITSIZE (tmode)
11577 && mode_width <= BITS_PER_WORD)
11578 #endif
11579 || (mode_width <= GET_MODE_BITSIZE (tmode)
11580 && subreg_lowpart_p (XEXP (op0, 0))))
11581 && CONST_INT_P (XEXP (op0, 1))
11582 && mode_width <= HOST_BITS_PER_WIDE_INT
11583 && GET_MODE_BITSIZE (tmode) <= HOST_BITS_PER_WIDE_INT
11584 && ((c1 = INTVAL (XEXP (op0, 1))) & ~mask) == 0
11585 && (c1 & ~GET_MODE_MASK (tmode)) == 0
11586 && c1 != mask
11587 && c1 != GET_MODE_MASK (tmode))
11588 {
11589 op0 = simplify_gen_binary (AND, tmode,
11590 SUBREG_REG (XEXP (op0, 0)),
11591 gen_int_mode (c1, tmode));
11592 op0 = gen_lowpart (mode, op0);
11593 continue;
11594 }
11595 }
11596
11597 /* Convert (ne (and (not X) 1) 0) to (eq (and X 1) 0). */
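/* This is valid because the low bit of (not X) is simply the complement
of the low bit of X. */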
11598 if (const_op == 0 && equality_comparison_p
11599 && XEXP (op0, 1) == const1_rtx
11600 && GET_CODE (XEXP (op0, 0)) == NOT)
11601 {
11602 op0 = simplify_and_const_int (NULL_RTX, mode,
11603 XEXP (XEXP (op0, 0), 0), 1);
11604 code = (code == NE ? EQ : NE);
11605 continue;
11606 }
11607
11608 /* Convert (ne (and (lshiftrt (not X)) 1) 0) to
11609 (eq (and (lshiftrt X) 1) 0).
11610 Also handle the case where (not X) is expressed using xor. */
11611 if (const_op == 0 && equality_comparison_p
11612 && XEXP (op0, 1) == const1_rtx
11613 && GET_CODE (XEXP (op0, 0)) == LSHIFTRT)
11614 {
11615 rtx shift_op = XEXP (XEXP (op0, 0), 0);
11616 rtx shift_count = XEXP (XEXP (op0, 0), 1);
11617
11618 if (GET_CODE (shift_op) == NOT
11619 || (GET_CODE (shift_op) == XOR
11620 && CONST_INT_P (XEXP (shift_op, 1))
11621 && CONST_INT_P (shift_count)
11622 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
11623 && (UINTVAL (XEXP (shift_op, 1))
11624 == (unsigned HOST_WIDE_INT) 1
11625 << INTVAL (shift_count))))
11626 {
11627 op0
11628 = gen_rtx_LSHIFTRT (mode, XEXP (shift_op, 0), shift_count);
11629 op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
11630 code = (code == NE ? EQ : NE);
11631 continue;
11632 }
11633 }
11634 break;
11635
11636 case ASHIFT:
11637 /* If we have (compare (ashift FOO N) (const_int C)) and
11638 the high order N bits of FOO (N+1 if an inequality comparison)
11639 are known to be zero, we can do this by comparing FOO with C
11640 shifted right N bits so long as the low-order N bits of C are
11641 zero. */
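/* Illustrative example (equality case): (eq (ashift X (const_int 2))
(const_int 12)) can become (eq X (const_int 3)) when the top two bits
of X are known to be zero, since 12 has its low two bits clear. */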
11642 if (CONST_INT_P (XEXP (op0, 1))
11643 && INTVAL (XEXP (op0, 1)) >= 0
11644 && ((INTVAL (XEXP (op0, 1)) + ! equality_comparison_p)
11645 < HOST_BITS_PER_WIDE_INT)
11646 && (((unsigned HOST_WIDE_INT) const_op
11647 & (((unsigned HOST_WIDE_INT) 1 << INTVAL (XEXP (op0, 1)))
11648 - 1)) == 0)
11649 && mode_width <= HOST_BITS_PER_WIDE_INT
11650 && (nonzero_bits (XEXP (op0, 0), mode)
11651 & ~(mask >> (INTVAL (XEXP (op0, 1))
11652 + ! equality_comparison_p))) == 0)
11653 {
11654 /* We must perform a logical shift, not an arithmetic one,
11655 as we want the top N bits of C to be zero. */
11656 unsigned HOST_WIDE_INT temp = const_op & GET_MODE_MASK (mode);
11657
11658 temp >>= INTVAL (XEXP (op0, 1));
11659 op1 = gen_int_mode (temp, mode);
11660 op0 = XEXP (op0, 0);
11661 continue;
11662 }
11663
11664 /* If we are doing a sign bit comparison, it means we are testing
11665 a particular bit. Convert it to the appropriate AND. */
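/* For instance (SImode assumed), (lt (ashift X (const_int 3))
(const_int 0)) holds exactly when bit 28 of X is set, so it becomes
(ne (and X (const_int 0x10000000)) (const_int 0)). */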
11666 if (sign_bit_comparison_p && CONST_INT_P (XEXP (op0, 1))
11667 && mode_width <= HOST_BITS_PER_WIDE_INT)
11668 {
11669 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
11670 ((unsigned HOST_WIDE_INT) 1
11671 << (mode_width - 1
11672 - INTVAL (XEXP (op0, 1)))));
11673 code = (code == LT ? NE : EQ);
11674 continue;
11675 }
11676
11677 /* If this is an equality comparison with zero and we are shifting
11678 the low bit to the sign bit, we can convert this to an AND of the
11679 low-order bit. */
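/* For instance (SImode assumed), (eq (ashift X (const_int 31))
(const_int 0)) is equivalent to (eq (and X (const_int 1))
(const_int 0)). */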
11680 if (const_op == 0 && equality_comparison_p
11681 && CONST_INT_P (XEXP (op0, 1))
11682 && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
11683 {
11684 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0), 1);
11685 continue;
11686 }
11687 break;
11688
11689 case ASHIFTRT:
11690 /* If this is an equality comparison with zero, we can do this
11691 as a logical shift, which might be much simpler. */
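/* This is safe because an arithmetic and a logical right shift by the
same amount yield zero for exactly the same operand values. */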
11692 if (equality_comparison_p && const_op == 0
11693 && CONST_INT_P (XEXP (op0, 1)))
11694 {
11695 op0 = simplify_shift_const (NULL_RTX, LSHIFTRT, mode,
11696 XEXP (op0, 0),
11697 INTVAL (XEXP (op0, 1)));
11698 continue;
11699 }
11700
11701 /* If OP0 is a sign extension and CODE is not an unsigned comparison,
11702 do the comparison in a narrower mode. */
11703 if (! unsigned_comparison_p
11704 && CONST_INT_P (XEXP (op0, 1))
11705 && GET_CODE (XEXP (op0, 0)) == ASHIFT
11706 && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
11707 && (tmode = mode_for_size (mode_width - INTVAL (XEXP (op0, 1)),
11708 MODE_INT, 1)) != BLKmode
11709 && (((unsigned HOST_WIDE_INT) const_op
11710 + (GET_MODE_MASK (tmode) >> 1) + 1)
11711 <= GET_MODE_MASK (tmode)))
11712 {
11713 op0 = gen_lowpart (tmode, XEXP (XEXP (op0, 0), 0));
11714 continue;
11715 }
11716
11717 /* Likewise if OP0 is a PLUS of a sign extension with a
11718 constant, which is usually represented with the PLUS
11719 between the shifts. */
11720 if (! unsigned_comparison_p
11721 && CONST_INT_P (XEXP (op0, 1))
11722 && GET_CODE (XEXP (op0, 0)) == PLUS
11723 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
11724 && GET_CODE (XEXP (XEXP (op0, 0), 0)) == ASHIFT
11725 && XEXP (op0, 1) == XEXP (XEXP (XEXP (op0, 0), 0), 1)
11726 && (tmode = mode_for_size (mode_width - INTVAL (XEXP (op0, 1)),
11727 MODE_INT, 1)) != BLKmode
11728 && (((unsigned HOST_WIDE_INT) const_op
11729 + (GET_MODE_MASK (tmode) >> 1) + 1)
11730 <= GET_MODE_MASK (tmode)))
11731 {
11732 rtx inner = XEXP (XEXP (XEXP (op0, 0), 0), 0);
11733 rtx add_const = XEXP (XEXP (op0, 0), 1);
11734 rtx new_const = simplify_gen_binary (ASHIFTRT, GET_MODE (op0),
11735 add_const, XEXP (op0, 1));
11736
11737 op0 = simplify_gen_binary (PLUS, tmode,
11738 gen_lowpart (tmode, inner),
11739 new_const);
11740 continue;
11741 }
11742
11743 /* ... fall through ... */
11744 case LSHIFTRT:
11745 /* If we have (compare (xshiftrt FOO N) (const_int C)) and
11746 the low order N bits of FOO are known to be zero, we can do this
11747 by comparing FOO with C shifted left N bits so long as no
11748 overflow occurs. Even if the low order N bits of FOO aren't known
11749 to be zero, if the comparison is >= or < we can use the same
11750 optimization, and for > or <= we can do so by setting all the low
11751 order N bits in the comparison constant. */
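/* Illustrative example: (ltu (lshiftrt X (const_int 2)) (const_int 5))
becomes (ltu X (const_int 20)); (gtu (lshiftrt X (const_int 2))
(const_int 5)) becomes (gtu X (const_int 23)) when the low two bits
of X are not known to be zero. */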
11752 if (CONST_INT_P (XEXP (op0, 1))
11753 && INTVAL (XEXP (op0, 1)) > 0
11754 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
11755 && mode_width <= HOST_BITS_PER_WIDE_INT
11756 && (((unsigned HOST_WIDE_INT) const_op
11757 + (GET_CODE (op0) != LSHIFTRT
11758 ? ((GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1)) >> 1)
11759 + 1)
11760 : 0))
11761 <= GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1))))
11762 {
11763 unsigned HOST_WIDE_INT low_bits
11764 = (nonzero_bits (XEXP (op0, 0), mode)
11765 & (((unsigned HOST_WIDE_INT) 1
11766 << INTVAL (XEXP (op0, 1))) - 1));
11767 if (low_bits == 0 || !equality_comparison_p)
11768 {
11769 /* If the shift was logical, then we must make the condition
11770 unsigned. */
11771 if (GET_CODE (op0) == LSHIFTRT)
11772 code = unsigned_condition (code);
11773
11774 const_op <<= INTVAL (XEXP (op0, 1));
11775 if (low_bits != 0
11776 && (code == GT || code == GTU
11777 || code == LE || code == LEU))
11778 const_op
11779 |= (((HOST_WIDE_INT) 1 << INTVAL (XEXP (op0, 1))) - 1);
11780 op1 = GEN_INT (const_op);
11781 op0 = XEXP (op0, 0);
11782 continue;
11783 }
11784 }
11785
11786 /* If we are using this shift to extract just the sign bit, we
11787 can replace this with an LT or GE comparison. */
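/* For instance (SImode assumed), (ne (lshiftrt X (const_int 31))
(const_int 0)) becomes (lt X (const_int 0)). */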
11788 if (const_op == 0
11789 && (equality_comparison_p || sign_bit_comparison_p)
11790 && CONST_INT_P (XEXP (op0, 1))
11791 && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
11792 {
11793 op0 = XEXP (op0, 0);
11794 code = (code == NE || code == GT ? LT : GE);
11795 continue;
11796 }
11797 break;
11798
11799 default:
11800 break;
11801 }
11802
11803 break;
11804 }
11805
11806 /* Now make any compound operations involved in this comparison. Then,
11807 check for an outermost SUBREG on OP0 that is not doing anything or is
11808 paradoxical. The latter transformation must only be performed when
11809 it is known that the "extra" bits will be the same in op0 and op1 or
11810 that they don't matter. There are three cases to consider:
11811
11812 1. SUBREG_REG (op0) is a register. In this case the bits are don't
11813 care bits and we can assume they have any convenient value. So
11814 making the transformation is safe.
11815
11816 2. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is not defined.
11817 In this case the upper bits of op0 are undefined. We should not make
11818 the simplification in that case as we do not know the contents of
11819 those bits.
11820
11821 3. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is defined and not
11822 UNKNOWN. In that case we know those bits are zeros or ones. We must
11823 also be sure that they are the same as the upper bits of op1.
11824
11825 We can never remove a SUBREG for a non-equality comparison because
11826 the sign bit is in a different place in the underlying object. */
11827
11828 op0 = make_compound_operation (op0, op1 == const0_rtx ? COMPARE : SET);
11829 op1 = make_compound_operation (op1, SET);
11830
11831 if (GET_CODE (op0) == SUBREG && subreg_lowpart_p (op0)
11832 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT
11833 && GET_MODE_CLASS (GET_MODE (SUBREG_REG (op0))) == MODE_INT
11834 && (code == NE || code == EQ))
11835 {
11836 if (GET_MODE_SIZE (GET_MODE (op0))
11837 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0))))
11838 {
11839 /* For paradoxical subregs, allow case 1 as above. Case 3 isn't
11840 implemented. */
11841 if (REG_P (SUBREG_REG (op0)))
11842 {
11843 op0 = SUBREG_REG (op0);
11844 op1 = gen_lowpart (GET_MODE (op0), op1);
11845 }
11846 }
11847 else if ((GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
11848 <= HOST_BITS_PER_WIDE_INT)
11849 && (nonzero_bits (SUBREG_REG (op0),
11850 GET_MODE (SUBREG_REG (op0)))
11851 & ~GET_MODE_MASK (GET_MODE (op0))) == 0)
11852 {
11853 tem = gen_lowpart (GET_MODE (SUBREG_REG (op0)), op1);
11854
11855 if ((nonzero_bits (tem, GET_MODE (SUBREG_REG (op0)))
11856 & ~GET_MODE_MASK (GET_MODE (op0))) == 0)
11857 op0 = SUBREG_REG (op0), op1 = tem;
11858 }
11859 }
11860
11861 /* We now do the opposite procedure: Some machines don't have compare
11862 insns in all modes. If OP0's mode is an integer mode smaller than a
11863 word and we can't do a compare in that mode, see if there is a larger
11864 mode for which we can do the compare. There are a number of cases in
11865 which we can use the wider mode. */
11866
11867 mode = GET_MODE (op0);
11868 if (mode != VOIDmode && GET_MODE_CLASS (mode) == MODE_INT
11869 && GET_MODE_SIZE (mode) < UNITS_PER_WORD
11870 && ! have_insn_for (COMPARE, mode))
11871 for (tmode = GET_MODE_WIDER_MODE (mode);
11872 (tmode != VOIDmode
11873 && GET_MODE_BITSIZE (tmode) <= HOST_BITS_PER_WIDE_INT);
11874 tmode = GET_MODE_WIDER_MODE (tmode))
11875 if (have_insn_for (COMPARE, tmode))
11876 {
11877 int zero_extended;
11878
11879 /* If this is a test for negative, we can make an explicit
11880 test of the sign bit. Test this first so we can use
11881 a paradoxical subreg to extend OP0. */
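/* Illustrative example: if OP0 is QImode and only SImode compares
exist, (lt OP0 (const_int 0)) becomes a test of
(and:SI (subreg:SI OP0) (const_int 128)) against zero. */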
11882
11883 if (op1 == const0_rtx && (code == LT || code == GE)
11884 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
11885 {
11886 op0 = simplify_gen_binary (AND, tmode,
11887 gen_lowpart (tmode, op0),
11888 GEN_INT ((unsigned HOST_WIDE_INT) 1
11889 << (GET_MODE_BITSIZE (mode)
11890 - 1)));
11891 code = (code == LT) ? NE : EQ;
11892 break;
11893 }
11894
11895 /* If the only nonzero bits in OP0 and OP1 are those in the
11896 narrower mode and this is an equality or unsigned comparison,
11897 we can use the wider mode. Similarly for sign-extended
11898 values, in which case it is true for all comparisons. */
11899 zero_extended = ((code == EQ || code == NE
11900 || code == GEU || code == GTU
11901 || code == LEU || code == LTU)
11902 && (nonzero_bits (op0, tmode)
11903 & ~GET_MODE_MASK (mode)) == 0
11904 && ((CONST_INT_P (op1)
11905 || (nonzero_bits (op1, tmode)
11906 & ~GET_MODE_MASK (mode)) == 0)));
11907
11908 if (zero_extended
11909 || ((num_sign_bit_copies (op0, tmode)
11910 > (unsigned int) (GET_MODE_BITSIZE (tmode)
11911 - GET_MODE_BITSIZE (mode)))
11912 && (num_sign_bit_copies (op1, tmode)
11913 > (unsigned int) (GET_MODE_BITSIZE (tmode)
11914 - GET_MODE_BITSIZE (mode)))))
11915 {
11916 /* If OP0 is an AND and we don't have an AND in MODE either,
11917 make a new AND in the proper mode. */
11918 if (GET_CODE (op0) == AND
11919 && !have_insn_for (AND, mode))
11920 op0 = simplify_gen_binary (AND, tmode,
11921 gen_lowpart (tmode,
11922 XEXP (op0, 0)),
11923 gen_lowpart (tmode,
11924 XEXP (op0, 1)));
11925 else
11926 {
11927 if (zero_extended)
11928 {
11929 op0 = simplify_gen_unary (ZERO_EXTEND, tmode, op0, mode);
11930 op1 = simplify_gen_unary (ZERO_EXTEND, tmode, op1, mode);
11931 }
11932 else
11933 {
11934 op0 = simplify_gen_unary (SIGN_EXTEND, tmode, op0, mode);
11935 op1 = simplify_gen_unary (SIGN_EXTEND, tmode, op1, mode);
11936 }
11937 break;
11938 }
11939 }
11940 }
11941
11942 #ifdef CANONICALIZE_COMPARISON
11943 /* If this machine only supports a subset of valid comparisons, see if we
11944 can convert an unsupported one into a supported one. */
11945 CANONICALIZE_COMPARISON (code, op0, op1);
11946 #endif
11947
11948 *pop0 = op0;
11949 *pop1 = op1;
11950
11951 return code;
11952 }
11953 \f
11954 /* Utility function for record_value_for_reg. Count number of
11955 rtxs in X. */
11956 static int
11957 count_rtxs (rtx x)
11958 {
11959 enum rtx_code code = GET_CODE (x);
11960 const char *fmt;
11961 int i, j, ret = 1;
11962
11963 if (GET_RTX_CLASS (code) == RTX_BIN_ARITH
11964 || GET_RTX_CLASS (code) == RTX_COMM_ARITH)
11965 {
11966 rtx x0 = XEXP (x, 0);
11967 rtx x1 = XEXP (x, 1);
11968
11969 if (x0 == x1)
11970 return 1 + 2 * count_rtxs (x0);
11971
11972 if ((GET_RTX_CLASS (GET_CODE (x1)) == RTX_BIN_ARITH
11973 || GET_RTX_CLASS (GET_CODE (x1)) == RTX_COMM_ARITH)
11974 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
11975 return 2 + 2 * count_rtxs (x0)
11976 + count_rtxs (x0 == XEXP (x1, 0)
11977 ? XEXP (x1, 1) : XEXP (x1, 0));
11978
11979 if ((GET_RTX_CLASS (GET_CODE (x0)) == RTX_BIN_ARITH
11980 || GET_RTX_CLASS (GET_CODE (x0)) == RTX_COMM_ARITH)
11981 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
11982 return 2 + 2 * count_rtxs (x1)
11983 + count_rtxs (x1 == XEXP (x0, 0)
11984 ? XEXP (x0, 1) : XEXP (x0, 0));
11985 }
11986
11987 fmt = GET_RTX_FORMAT (code);
11988 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
11989 if (fmt[i] == 'e')
11990 ret += count_rtxs (XEXP (x, i));
11991 else if (fmt[i] == 'E')
11992 for (j = 0; j < XVECLEN (x, i); j++)
11993 ret += count_rtxs (XVECEXP (x, i, j));
11994
11995 return ret;
11996 }
11997 \f
11998 /* Utility function for following routine. Called when X is part of a value
11999 being stored into last_set_value. Sets last_set_table_tick
12000 for each register mentioned. Similar to mention_regs in cse.c */
12001
12002 static void
12003 update_table_tick (rtx x)
12004 {
12005 enum rtx_code code = GET_CODE (x);
12006 const char *fmt = GET_RTX_FORMAT (code);
12007 int i, j;
12008
12009 if (code == REG)
12010 {
12011 unsigned int regno = REGNO (x);
12012 unsigned int endregno = END_REGNO (x);
12013 unsigned int r;
12014
12015 for (r = regno; r < endregno; r++)
12016 {
12017 reg_stat_type *rsp = VEC_index (reg_stat_type, reg_stat, r);
12018 rsp->last_set_table_tick = label_tick;
12019 }
12020
12021 return;
12022 }
12023
12024 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
12025 if (fmt[i] == 'e')
12026 {
12027 /* Check for identical subexpressions. If x contains
12028 identical subexpression we only have to traverse one of
12029 them. */
12030 if (i == 0 && ARITHMETIC_P (x))
12031 {
12032 /* Note that at this point x1 has already been
12033 processed. */
12034 rtx x0 = XEXP (x, 0);
12035 rtx x1 = XEXP (x, 1);
12036
12037 /* If x0 and x1 are identical then there is no need to
12038 process x0. */
12039 if (x0 == x1)
12040 break;
12041
12042 /* If x0 is identical to a subexpression of x1 then while
12043 processing x1, x0 has already been processed. Thus we
12044 are done with x. */
12045 if (ARITHMETIC_P (x1)
12046 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
12047 break;
12048
12049 /* If x1 is identical to a subexpression of x0 then we
12050 still have to process the rest of x0. */
12051 if (ARITHMETIC_P (x0)
12052 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
12053 {
12054 update_table_tick (XEXP (x0, x1 == XEXP (x0, 0) ? 1 : 0));
12055 break;
12056 }
12057 }
12058
12059 update_table_tick (XEXP (x, i));
12060 }
12061 else if (fmt[i] == 'E')
12062 for (j = 0; j < XVECLEN (x, i); j++)
12063 update_table_tick (XVECEXP (x, i, j));
12064 }
12065
12066 /* Record that REG is set to VALUE in insn INSN. If VALUE is zero, we
12067 are saying that the register is clobbered and we no longer know its
12068 value. If INSN is zero, don't update reg_stat[].last_set; this is
12069 only permitted with VALUE also zero and is used to invalidate the
12070 register. */
12071
12072 static void
12073 record_value_for_reg (rtx reg, rtx insn, rtx value)
12074 {
12075 unsigned int regno = REGNO (reg);
12076 unsigned int endregno = END_REGNO (reg);
12077 unsigned int i;
12078 reg_stat_type *rsp;
12079
12080 /* If VALUE contains REG and we have a previous value for REG, substitute
12081 the previous value. */
12082 if (value && insn && reg_overlap_mentioned_p (reg, value))
12083 {
12084 rtx tem;
12085
12086 /* Set things up so get_last_value is allowed to see anything set up to
12087 our insn. */
12088 subst_low_luid = DF_INSN_LUID (insn);
12089 tem = get_last_value (reg);
12090
12091 /* If TEM is simply a binary operation with two CLOBBERs as operands,
12092 it isn't going to be useful and will take a lot of time to process,
12093 so just use the CLOBBER. */
12094
12095 if (tem)
12096 {
12097 if (ARITHMETIC_P (tem)
12098 && GET_CODE (XEXP (tem, 0)) == CLOBBER
12099 && GET_CODE (XEXP (tem, 1)) == CLOBBER)
12100 tem = XEXP (tem, 0);
12101 else if (count_occurrences (value, reg, 1) >= 2)
12102 {
12103 /* If there are two or more occurrences of REG in VALUE,
12104 prevent the value from growing too much. */
12105 if (count_rtxs (tem) > MAX_LAST_VALUE_RTL)
12106 tem = gen_rtx_CLOBBER (GET_MODE (tem), const0_rtx);
12107 }
12108
12109 value = replace_rtx (copy_rtx (value), reg, tem);
12110 }
12111 }
12112
12113 /* For each register modified, show we don't know its value, that
12114 we don't know about its bitwise content, that its value has been
12115 updated, and that we don't know the location of the death of the
12116 register. */
12117 for (i = regno; i < endregno; i++)
12118 {
12119 rsp = VEC_index (reg_stat_type, reg_stat, i);
12120
12121 if (insn)
12122 rsp->last_set = insn;
12123
12124 rsp->last_set_value = 0;
12125 rsp->last_set_mode = VOIDmode;
12126 rsp->last_set_nonzero_bits = 0;
12127 rsp->last_set_sign_bit_copies = 0;
12128 rsp->last_death = 0;
12129 rsp->truncated_to_mode = VOIDmode;
12130 }
12131
12132 /* Mark registers that are being referenced in this value. */
12133 if (value)
12134 update_table_tick (value);
12135
12136 /* Now update the status of each register being set.
12137 If someone is using this register in this block, set this register
12138 to invalid since we will get confused between the two lives in this
12139 basic block. This makes using this register always invalid. In cse, we
12140 scan the table to invalidate all entries using this register, but this
12141 is too much work for us. */
12142
12143 for (i = regno; i < endregno; i++)
12144 {
12145 rsp = VEC_index (reg_stat_type, reg_stat, i);
12146 rsp->last_set_label = label_tick;
12147 if (!insn
12148 || (value && rsp->last_set_table_tick >= label_tick_ebb_start))
12149 rsp->last_set_invalid = 1;
12150 else
12151 rsp->last_set_invalid = 0;
12152 }
12153
12154 /* The value being assigned might refer to X (like in "x++;"). In that
12155 case, we must replace it with (clobber (const_int 0)) to prevent
12156 infinite loops. */
12157 rsp = VEC_index (reg_stat_type, reg_stat, regno);
12158 if (value && !get_last_value_validate (&value, insn, label_tick, 0))
12159 {
12160 value = copy_rtx (value);
12161 if (!get_last_value_validate (&value, insn, label_tick, 1))
12162 value = 0;
12163 }
12164
12165 /* For the main register being modified, update the value, the mode, the
12166 nonzero bits, and the number of sign bit copies. */
12167
12168 rsp->last_set_value = value;
12169
12170 if (value)
12171 {
12172 enum machine_mode mode = GET_MODE (reg);
12173 subst_low_luid = DF_INSN_LUID (insn);
12174 rsp->last_set_mode = mode;
12175 if (GET_MODE_CLASS (mode) == MODE_INT
12176 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
12177 mode = nonzero_bits_mode;
12178 rsp->last_set_nonzero_bits = nonzero_bits (value, mode);
12179 rsp->last_set_sign_bit_copies
12180 = num_sign_bit_copies (value, GET_MODE (reg));
12181 }
12182 }
12183
12184 /* Called via note_stores from record_dead_and_set_regs to handle one
12185 SET or CLOBBER in an insn. DATA is the instruction in which the
12186 set is occurring. */
12187
12188 static void
12189 record_dead_and_set_regs_1 (rtx dest, const_rtx setter, void *data)
12190 {
12191 rtx record_dead_insn = (rtx) data;
12192
12193 if (GET_CODE (dest) == SUBREG)
12194 dest = SUBREG_REG (dest);
12195
12196 if (!record_dead_insn)
12197 {
12198 if (REG_P (dest))
12199 record_value_for_reg (dest, NULL_RTX, NULL_RTX);
12200 return;
12201 }
12202
12203 if (REG_P (dest))
12204 {
12205 /* If we are setting the whole register, we know its value. Otherwise
12206 show that we don't know the value. We can handle SUBREG in
12207 some cases. */
12208 if (GET_CODE (setter) == SET && dest == SET_DEST (setter))
12209 record_value_for_reg (dest, record_dead_insn, SET_SRC (setter));
12210 else if (GET_CODE (setter) == SET
12211 && GET_CODE (SET_DEST (setter)) == SUBREG
12212 && SUBREG_REG (SET_DEST (setter)) == dest
12213 && GET_MODE_BITSIZE (GET_MODE (dest)) <= BITS_PER_WORD
12214 && subreg_lowpart_p (SET_DEST (setter)))
12215 record_value_for_reg (dest, record_dead_insn,
12216 gen_lowpart (GET_MODE (dest),
12217 SET_SRC (setter)));
12218 else
12219 record_value_for_reg (dest, record_dead_insn, NULL_RTX);
12220 }
12221 else if (MEM_P (dest)
12222 /* Ignore pushes, they clobber nothing. */
12223 && ! push_operand (dest, GET_MODE (dest)))
12224 mem_last_set = DF_INSN_LUID (record_dead_insn);
12225 }
12226
12227 /* Update the records of when each REG was most recently set or killed
12228 for the things done by INSN. This is the last thing done in processing
12229 INSN in the combiner loop.
12230
12231 We update reg_stat[], in particular fields last_set, last_set_value,
12232 last_set_mode, last_set_nonzero_bits, last_set_sign_bit_copies,
12233 last_death, and also the similar information mem_last_set (which insn
12234 most recently modified memory) and last_call_luid (which insn was the
12235 most recent subroutine call). */
12236
12237 static void
12238 record_dead_and_set_regs (rtx insn)
12239 {
12240 rtx link;
12241 unsigned int i;
12242
12243 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
12244 {
12245 if (REG_NOTE_KIND (link) == REG_DEAD
12246 && REG_P (XEXP (link, 0)))
12247 {
12248 unsigned int regno = REGNO (XEXP (link, 0));
12249 unsigned int endregno = END_REGNO (XEXP (link, 0));
12250
12251 for (i = regno; i < endregno; i++)
12252 {
12253 reg_stat_type *rsp;
12254
12255 rsp = VEC_index (reg_stat_type, reg_stat, i);
12256 rsp->last_death = insn;
12257 }
12258 }
12259 else if (REG_NOTE_KIND (link) == REG_INC)
12260 record_value_for_reg (XEXP (link, 0), insn, NULL_RTX);
12261 }
12262
12263 if (CALL_P (insn))
12264 {
12265 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
12266 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
12267 {
12268 reg_stat_type *rsp;
12269
12270 rsp = VEC_index (reg_stat_type, reg_stat, i);
12271 rsp->last_set_invalid = 1;
12272 rsp->last_set = insn;
12273 rsp->last_set_value = 0;
12274 rsp->last_set_mode = VOIDmode;
12275 rsp->last_set_nonzero_bits = 0;
12276 rsp->last_set_sign_bit_copies = 0;
12277 rsp->last_death = 0;
12278 rsp->truncated_to_mode = VOIDmode;
12279 }
12280
12281 last_call_luid = mem_last_set = DF_INSN_LUID (insn);
12282
12283 /* We can't combine into a call pattern. Remember, though, that
12284 the return value register is set at this LUID. We could
12285 still replace a register with the return value from the
12286 wrong subroutine call! */
12287 note_stores (PATTERN (insn), record_dead_and_set_regs_1, NULL_RTX);
12288 }
12289 else
12290 note_stores (PATTERN (insn), record_dead_and_set_regs_1, insn);
12291 }
12292
12293 /* If a SUBREG has the promoted bit set, it is in fact a property of the
12294 register present in the SUBREG, so for each such SUBREG go back and
12295 adjust nonzero and sign bit information of the registers that are
12296 known to have some zero/sign bits set.
12297
12298 This is needed because when combine blows the SUBREGs away, the
12299 information on zero/sign bits is lost and further combines can be
12300 missed because of that. */
12301
12302 static void
12303 record_promoted_value (rtx insn, rtx subreg)
12304 {
12305 rtx links, set;
12306 unsigned int regno = REGNO (SUBREG_REG (subreg));
12307 enum machine_mode mode = GET_MODE (subreg);
12308
12309 if (GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT)
12310 return;
12311
12312 for (links = LOG_LINKS (insn); links;)
12313 {
12314 reg_stat_type *rsp;
12315
12316 insn = XEXP (links, 0);
12317 set = single_set (insn);
12318
12319 if (! set || !REG_P (SET_DEST (set))
12320 || REGNO (SET_DEST (set)) != regno
12321 || GET_MODE (SET_DEST (set)) != GET_MODE (SUBREG_REG (subreg)))
12322 {
12323 links = XEXP (links, 1);
12324 continue;
12325 }
12326
12327 rsp = VEC_index (reg_stat_type, reg_stat, regno);
12328 if (rsp->last_set == insn)
12329 {
12330 if (SUBREG_PROMOTED_UNSIGNED_P (subreg) > 0)
12331 rsp->last_set_nonzero_bits &= GET_MODE_MASK (mode);
12332 }
12333
12334 if (REG_P (SET_SRC (set)))
12335 {
12336 regno = REGNO (SET_SRC (set));
12337 links = LOG_LINKS (insn);
12338 }
12339 else
12340 break;
12341 }
12342 }
12343
12344 /* Check if X, a register, is known to contain a value already
12345 truncated to MODE. In this case we can use a subreg to refer to
12346 the truncated value even though in the generic case we would need
12347 an explicit truncation. */
12348
12349 static bool
12350 reg_truncated_to_mode (enum machine_mode mode, const_rtx x)
12351 {
12352 reg_stat_type *rsp = VEC_index (reg_stat_type, reg_stat, REGNO (x));
12353 enum machine_mode truncated = rsp->truncated_to_mode;
12354
12355 if (truncated == 0
12356 || rsp->truncation_label < label_tick_ebb_start)
12357 return false;
12358 if (GET_MODE_SIZE (truncated) <= GET_MODE_SIZE (mode))
12359 return true;
12360 if (TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
12361 GET_MODE_BITSIZE (truncated)))
12362 return true;
12363 return false;
12364 }
12365
12366 /* Callback for for_each_rtx. If *P is a hard reg or a subreg, record the mode
12367 that the register is accessed in. For non-TRULY_NOOP_TRUNCATION targets we
12368 might be able to turn a truncate into a subreg using this information.
12369 Return -1 if traversing *P is complete or 0 otherwise. */
12370
12371 static int
12372 record_truncated_value (rtx *p, void *data ATTRIBUTE_UNUSED)
12373 {
12374 rtx x = *p;
12375 enum machine_mode truncated_mode;
12376 reg_stat_type *rsp;
12377
12378 if (GET_CODE (x) == SUBREG && REG_P (SUBREG_REG (x)))
12379 {
12380 enum machine_mode original_mode = GET_MODE (SUBREG_REG (x));
12381 truncated_mode = GET_MODE (x);
12382
12383 if (GET_MODE_SIZE (original_mode) <= GET_MODE_SIZE (truncated_mode))
12384 return -1;
12385
12386 if (TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (truncated_mode),
12387 GET_MODE_BITSIZE (original_mode)))
12388 return -1;
12389
12390 x = SUBREG_REG (x);
12391 }
12392 /* ??? For hard-regs we now record everything. We might be able to
12393 optimize this using last_set_mode. */
12394 else if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
12395 truncated_mode = GET_MODE (x);
12396 else
12397 return 0;
12398
12399 rsp = VEC_index (reg_stat_type, reg_stat, REGNO (x));
12400 if (rsp->truncated_to_mode == 0
12401 || rsp->truncation_label < label_tick_ebb_start
12402 || (GET_MODE_SIZE (truncated_mode)
12403 < GET_MODE_SIZE (rsp->truncated_to_mode)))
12404 {
12405 rsp->truncated_to_mode = truncated_mode;
12406 rsp->truncation_label = label_tick;
12407 }
12408
12409 return -1;
12410 }
12411
12412 /* Callback for note_uses. Find hardregs and subregs of pseudos and
12413 the modes they are used in. This can help turn TRUNCATEs into
12414 SUBREGs. */
12415
12416 static void
12417 record_truncated_values (rtx *x, void *data ATTRIBUTE_UNUSED)
12418 {
12419 for_each_rtx (x, record_truncated_value, NULL);
12420 }
12421
12422 /* Scan X for promoted SUBREGs. For each one found,
12423 note what it implies to the registers used in it. */
12424
12425 static void
12426 check_promoted_subreg (rtx insn, rtx x)
12427 {
12428 if (GET_CODE (x) == SUBREG
12429 && SUBREG_PROMOTED_VAR_P (x)
12430 && REG_P (SUBREG_REG (x)))
12431 record_promoted_value (insn, x);
12432 else
12433 {
12434 const char *format = GET_RTX_FORMAT (GET_CODE (x));
12435 int i, j;
12436
12437 for (i = 0; i < GET_RTX_LENGTH (GET_CODE (x)); i++)
12438 switch (format[i])
12439 {
12440 case 'e':
12441 check_promoted_subreg (insn, XEXP (x, i));
12442 break;
12443 case 'V':
12444 case 'E':
12445 if (XVEC (x, i) != 0)
12446 for (j = 0; j < XVECLEN (x, i); j++)
12447 check_promoted_subreg (insn, XVECEXP (x, i, j));
12448 break;
12449 }
12450 }
12451 }
12452 \f
12453 /* Verify that all the registers and memory references mentioned in *LOC are
12454 still valid. *LOC was part of a value set in INSN when label_tick was
12455 equal to TICK. Return 0 if some are not. If REPLACE is nonzero, replace
12456 the invalid references with (clobber (const_int 0)) and return 1. This
12457 replacement is useful because we often can get useful information about
12458 the form of a value (e.g., if it was produced by a shift that always
12459 produces -1 or 0) even though we don't know exactly what registers it
12460 was produced from. */
12461
12462 static int
12463 get_last_value_validate (rtx *loc, rtx insn, int tick, int replace)
12464 {
12465 rtx x = *loc;
12466 const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
12467 int len = GET_RTX_LENGTH (GET_CODE (x));
12468 int i, j;
12469
12470 if (REG_P (x))
12471 {
12472 unsigned int regno = REGNO (x);
12473 unsigned int endregno = END_REGNO (x);
12474 unsigned int j;
12475
12476 for (j = regno; j < endregno; j++)
12477 {
12478 reg_stat_type *rsp = VEC_index (reg_stat_type, reg_stat, j);
12479 if (rsp->last_set_invalid
12480 /* If this is a pseudo-register that was only set once and not
12481 live at the beginning of the function, it is always valid. */
12482 || (! (regno >= FIRST_PSEUDO_REGISTER
12483 && REG_N_SETS (regno) == 1
12484 && (!REGNO_REG_SET_P
12485 (DF_LR_IN (ENTRY_BLOCK_PTR->next_bb), regno)))
12486 && rsp->last_set_label > tick))
12487 {
12488 if (replace)
12489 *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
12490 return replace;
12491 }
12492 }
12493
12494 return 1;
12495 }
12496 /* If this is a memory reference, make sure that there were no stores after
12497 it that might have clobbered the value. We don't have alias info, so we
12498 assume any store invalidates it. Moreover, we only have local UIDs, so
12499 we also assume that there were stores in the intervening basic blocks. */
12500 else if (MEM_P (x) && !MEM_READONLY_P (x)
12501 && (tick != label_tick || DF_INSN_LUID (insn) <= mem_last_set))
12502 {
12503 if (replace)
12504 *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
12505 return replace;
12506 }
12507
12508 for (i = 0; i < len; i++)
12509 {
12510 if (fmt[i] == 'e')
12511 {
12512 /* Check for identical subexpressions. If x contains
12513 identical subexpression we only have to traverse one of
12514 them. */
12515 if (i == 1 && ARITHMETIC_P (x))
12516 {
12517 /* Note that at this point x0 has already been checked
12518 and found valid. */
12519 rtx x0 = XEXP (x, 0);
12520 rtx x1 = XEXP (x, 1);
12521
12522 /* If x0 and x1 are identical then x is also valid. */
12523 if (x0 == x1)
12524 return 1;
12525
12526 /* If x1 is identical to a subexpression of x0 then
12527 while checking x0, x1 has already been checked. Thus
12528 it is valid and so is x. */
12529 if (ARITHMETIC_P (x0)
12530 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
12531 return 1;
12532
12533 /* If x0 is identical to a subexpression of x1 then x is
12534 valid iff the rest of x1 is valid. */
12535 if (ARITHMETIC_P (x1)
12536 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
12537 return
12538 get_last_value_validate (&XEXP (x1,
12539 x0 == XEXP (x1, 0) ? 1 : 0),
12540 insn, tick, replace);
12541 }
12542
12543 if (get_last_value_validate (&XEXP (x, i), insn, tick,
12544 replace) == 0)
12545 return 0;
12546 }
12547 else if (fmt[i] == 'E')
12548 for (j = 0; j < XVECLEN (x, i); j++)
12549 if (get_last_value_validate (&XVECEXP (x, i, j),
12550 insn, tick, replace) == 0)
12551 return 0;
12552 }
12553
12554 /* If we haven't found a reason for it to be invalid, it is valid. */
12555 return 1;
12556 }
12557
12558 /* Get the last value assigned to X, if known. Some registers
12559 in the value may be replaced with (clobber (const_int 0)) if their value
12560 is no longer known reliably. */
12561
12562 static rtx
12563 get_last_value (const_rtx x)
12564 {
12565 unsigned int regno;
12566 rtx value;
12567 reg_stat_type *rsp;
12568
12569 /* If this is a non-paradoxical SUBREG, get the value of its operand and
12570 then convert it to the desired mode. If this is a paradoxical SUBREG,
12571 we cannot predict what values the "extra" bits might have. */
12572 if (GET_CODE (x) == SUBREG
12573 && subreg_lowpart_p (x)
12574 && (GET_MODE_SIZE (GET_MODE (x))
12575 <= GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
12576 && (value = get_last_value (SUBREG_REG (x))) != 0)
12577 return gen_lowpart (GET_MODE (x), value);
12578
12579 if (!REG_P (x))
12580 return 0;
12581
12582 regno = REGNO (x);
12583 rsp = VEC_index (reg_stat_type, reg_stat, regno);
12584 value = rsp->last_set_value;
12585
12586 /* If we don't have a value, or if it isn't for this basic block and
12587 it's either a hard register, set more than once, or it's live
12588 at the beginning of the function, return 0.
12589
12590 Because if it's not live at the beginning of the function then the reg
12591 is always set before being used (is never used without being set).
12592 And, if it's set only once, and it's always set before use, then all
12593 uses must have the same last value, even if it's not from this basic
12594 block. */
12595
12596 if (value == 0
12597 || (rsp->last_set_label < label_tick_ebb_start
12598 && (regno < FIRST_PSEUDO_REGISTER
12599 || REG_N_SETS (regno) != 1
12600 || REGNO_REG_SET_P
12601 (DF_LR_IN (ENTRY_BLOCK_PTR->next_bb), regno))))
12602 return 0;
12603
12604 /* If the value was set in a later insn than the ones we are processing,
12605 we can't use it even if the register was only set once. */
12606 if (rsp->last_set_label == label_tick
12607 && DF_INSN_LUID (rsp->last_set) >= subst_low_luid)
12608 return 0;
12609
12610 /* If the value has all its registers valid, return it. */
12611 if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 0))
12612 return value;
12613
12614 /* Otherwise, make a copy and replace any invalid register with
12615 (clobber (const_int 0)). If that fails for some reason, return 0. */
12616
12617 value = copy_rtx (value);
12618 if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 1))
12619 return value;
12620
12621 return 0;
12622 }
12623 \f
12624 /* Return nonzero if expression X refers to a REG or to memory
12625 that is set in an instruction more recent than FROM_LUID. */
12626
12627 static int
12628 use_crosses_set_p (const_rtx x, int from_luid)
12629 {
12630 const char *fmt;
12631 int i;
12632 enum rtx_code code = GET_CODE (x);
12633
12634 if (code == REG)
12635 {
12636 unsigned int regno = REGNO (x);
12637 unsigned endreg = END_REGNO (x);
12638
12639 #ifdef PUSH_ROUNDING
12640 /* Don't allow uses of the stack pointer to be moved,
12641 because we don't know whether the move crosses a push insn. */
12642 if (regno == STACK_POINTER_REGNUM && PUSH_ARGS)
12643 return 1;
12644 #endif
12645 for (; regno < endreg; regno++)
12646 {
12647 reg_stat_type *rsp = VEC_index (reg_stat_type, reg_stat, regno);
12648 if (rsp->last_set
12649 && rsp->last_set_label == label_tick
12650 && DF_INSN_LUID (rsp->last_set) > from_luid)
12651 return 1;
12652 }
12653 return 0;
12654 }
12655
12656 if (code == MEM && mem_last_set > from_luid)
12657 return 1;
12658
12659 fmt = GET_RTX_FORMAT (code);
12660
12661 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
12662 {
12663 if (fmt[i] == 'E')
12664 {
12665 int j;
12666 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
12667 if (use_crosses_set_p (XVECEXP (x, i, j), from_luid))
12668 return 1;
12669 }
12670 else if (fmt[i] == 'e'
12671 && use_crosses_set_p (XEXP (x, i), from_luid))
12672 return 1;
12673 }
12674 return 0;
12675 }
12676 \f
12677 /* Define three variables used for communication between the following
12678 routines. */
12679
12680 static unsigned int reg_dead_regno, reg_dead_endregno;
12681 static int reg_dead_flag;
12682
12683 /* Function called via note_stores from reg_dead_at_p.
12684
12685 If DEST is within [reg_dead_regno, reg_dead_endregno), set
12686 reg_dead_flag to 1 if X is a CLOBBER and to -1 if it is a SET. */
12687
12688 static void
12689 reg_dead_at_p_1 (rtx dest, const_rtx x, void *data ATTRIBUTE_UNUSED)
12690 {
12691 unsigned int regno, endregno;
12692
12693 if (!REG_P (dest))
12694 return;
12695
12696 regno = REGNO (dest);
12697 endregno = END_REGNO (dest);
12698 if (reg_dead_endregno > regno && reg_dead_regno < endregno)
12699 reg_dead_flag = (GET_CODE (x) == CLOBBER) ? 1 : -1;
12700 }
12701
12702 /* Return nonzero if REG is known to be dead at INSN.
12703
12704 We scan backwards from INSN. If we hit a REG_DEAD note or a CLOBBER
12705 referencing REG, it is dead. If we hit a SET referencing REG, it is
12706 live. Otherwise, see if it is live or dead at the start of the basic
12707 block we are in. Hard regs marked as being live in NEWPAT_USED_REGS
12708 must be assumed to be always live. */
12709
12710 static int
12711 reg_dead_at_p (rtx reg, rtx insn)
12712 {
12713 basic_block block;
12714 unsigned int i;
12715
12716 /* Set variables for reg_dead_at_p_1. */
12717 reg_dead_regno = REGNO (reg);
12718 reg_dead_endregno = END_REGNO (reg);
12719
12720 reg_dead_flag = 0;
12721
12722 /* Check that reg isn't mentioned in NEWPAT_USED_REGS. For fixed registers
12723 we allow the machine description to decide whether use-and-clobber
12724 patterns are OK. */
12725 if (reg_dead_regno < FIRST_PSEUDO_REGISTER)
12726 {
12727 for (i = reg_dead_regno; i < reg_dead_endregno; i++)
12728 if (!fixed_regs[i] && TEST_HARD_REG_BIT (newpat_used_regs, i))
12729 return 0;
12730 }
12731
12732 /* Scan backwards until we find a REG_DEAD note, SET, CLOBBER, or
12733 beginning of basic block. */
12734 block = BLOCK_FOR_INSN (insn);
12735 for (;;)
12736 {
12737 if (INSN_P (insn))
12738 {
12739 note_stores (PATTERN (insn), reg_dead_at_p_1, NULL);
12740 if (reg_dead_flag)
12741 return reg_dead_flag == 1 ? 1 : 0;
12742
12743 if (find_regno_note (insn, REG_DEAD, reg_dead_regno))
12744 return 1;
12745 }
12746
12747 if (insn == BB_HEAD (block))
12748 break;
12749
12750 insn = PREV_INSN (insn);
12751 }
12752
12753 /* Look at live-in sets for the basic block that we were in. */
12754 for (i = reg_dead_regno; i < reg_dead_endregno; i++)
12755 if (REGNO_REG_SET_P (df_get_live_in (block), i))
12756 return 0;
12757
12758 return 1;
12759 }
12760 \f
12761 /* Note hard registers in X that are used. */
12762
12763 static void
12764 mark_used_regs_combine (rtx x)
12765 {
12766 RTX_CODE code = GET_CODE (x);
12767 unsigned int regno;
12768 int i;
12769
12770 switch (code)
12771 {
12772 case LABEL_REF:
12773 case SYMBOL_REF:
12774 case CONST_INT:
12775 case CONST:
12776 case CONST_DOUBLE:
12777 case CONST_VECTOR:
12778 case PC:
12779 case ADDR_VEC:
12780 case ADDR_DIFF_VEC:
12781 case ASM_INPUT:
12782 #ifdef HAVE_cc0
12783 /* CC0 must die in the insn after it is set, so we don't need to take
12784 special note of it here. */
12785 case CC0:
12786 #endif
12787 return;
12788
12789 case CLOBBER:
12790 /* If we are clobbering a MEM, mark any hard registers inside the
12791 address as used. */
12792 if (MEM_P (XEXP (x, 0)))
12793 mark_used_regs_combine (XEXP (XEXP (x, 0), 0));
12794 return;
12795
12796 case REG:
12797 regno = REGNO (x);
12798 /* A hard reg in a wide mode may really be multiple registers.
12799 If so, mark all of them just like the first. */
12800 if (regno < FIRST_PSEUDO_REGISTER)
12801 {
12802 /* None of this applies to the stack, frame or arg pointers. */
12803 if (regno == STACK_POINTER_REGNUM
12804 #if !HARD_FRAME_POINTER_IS_FRAME_POINTER
12805 || regno == HARD_FRAME_POINTER_REGNUM
12806 #endif
12807 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
12808 || (regno == ARG_POINTER_REGNUM && fixed_regs[regno])
12809 #endif
12810 || regno == FRAME_POINTER_REGNUM)
12811 return;
12812
12813 add_to_hard_reg_set (&newpat_used_regs, GET_MODE (x), regno);
12814 }
12815 return;
12816
12817 case SET:
12818 {
12819 /* If setting a MEM, or a SUBREG of a MEM, then note any hard regs in
12820 the address. */
12821 rtx testreg = SET_DEST (x);
12822
12823 while (GET_CODE (testreg) == SUBREG
12824 || GET_CODE (testreg) == ZERO_EXTRACT
12825 || GET_CODE (testreg) == STRICT_LOW_PART)
12826 testreg = XEXP (testreg, 0);
12827
12828 if (MEM_P (testreg))
12829 mark_used_regs_combine (XEXP (testreg, 0));
12830
12831 mark_used_regs_combine (SET_SRC (x));
12832 }
12833 return;
12834
12835 default:
12836 break;
12837 }
12838
12839 /* Recursively scan the operands of this expression. */
12840
12841 {
12842 const char *fmt = GET_RTX_FORMAT (code);
12843
12844 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
12845 {
12846 if (fmt[i] == 'e')
12847 mark_used_regs_combine (XEXP (x, i));
12848 else if (fmt[i] == 'E')
12849 {
12850 int j;
12851
12852 for (j = 0; j < XVECLEN (x, i); j++)
12853 mark_used_regs_combine (XVECEXP (x, i, j));
12854 }
12855 }
12856 }
12857 }
12858 \f
12859 /* Remove register number REGNO from the dead registers list of INSN.
12860
12861 Return the note used to record the death, if there was one. */
12862
12863 rtx
12864 remove_death (unsigned int regno, rtx insn)
12865 {
12866 rtx note = find_regno_note (insn, REG_DEAD, regno);
12867
12868 if (note)
12869 remove_note (insn, note);
12870
12871 return note;
12872 }
12873
12874 /* For each register (hardware or pseudo) used within expression X, if its
12875 death is in an instruction with luid between FROM_LUID (inclusive) and
12876 the luid of TO_INSN (exclusive), put a REG_DEAD note for that register in the
12877 list headed by PNOTES.
12878
12879 That said, don't move registers killed by maybe_kill_insn.
12880
12881 This is done when X is being merged by combination into TO_INSN. These
12882 notes will then be distributed as needed. */
12883
12884 static void
12885 move_deaths (rtx x, rtx maybe_kill_insn, int from_luid, rtx to_insn,
12886 rtx *pnotes)
12887 {
12888 const char *fmt;
12889 int len, i;
12890 enum rtx_code code = GET_CODE (x);
12891
12892 if (code == REG)
12893 {
12894 unsigned int regno = REGNO (x);
12895 rtx where_dead = VEC_index (reg_stat_type, reg_stat, regno)->last_death;
12896
12897 /* Don't move the register if it gets killed in between from and to. */
12898 if (maybe_kill_insn && reg_set_p (x, maybe_kill_insn)
12899 && ! reg_referenced_p (x, maybe_kill_insn))
12900 return;
12901
12902 if (where_dead
12903 && BLOCK_FOR_INSN (where_dead) == BLOCK_FOR_INSN (to_insn)
12904 && DF_INSN_LUID (where_dead) >= from_luid
12905 && DF_INSN_LUID (where_dead) < DF_INSN_LUID (to_insn))
12906 {
12907 rtx note = remove_death (regno, where_dead);
12908
12909 /* It is possible for the call above to return 0. This can occur
12910 when last_death points to I2 or I1 that we combined with.
12911 In that case make a new note.
12912
12913 We must also check for the case where X is a hard register
12914 and NOTE is a death note for a range of hard registers
12915 including X. In that case, we must put REG_DEAD notes for
12916 the remaining registers in place of NOTE. */
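/* Illustrative example: if X is hard register 4 in SImode but NOTE
covers a DImode value in registers 4 and 5, a REG_DEAD note for
register 5 is left at WHERE_DEAD. */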
12917
12918 if (note != 0 && regno < FIRST_PSEUDO_REGISTER
12919 && (GET_MODE_SIZE (GET_MODE (XEXP (note, 0)))
12920 > GET_MODE_SIZE (GET_MODE (x))))
12921 {
12922 unsigned int deadregno = REGNO (XEXP (note, 0));
12923 unsigned int deadend = END_HARD_REGNO (XEXP (note, 0));
12924 unsigned int ourend = END_HARD_REGNO (x);
12925 unsigned int i;
12926
12927 for (i = deadregno; i < deadend; i++)
12928 if (i < regno || i >= ourend)
12929 add_reg_note (where_dead, REG_DEAD, regno_reg_rtx[i]);
12930 }
12931
12932 /* If we didn't find any note, or if we found a REG_DEAD note that
12933 covers only part of the given reg, and we have a multi-reg hard
12934 register, then to be safe we must check for REG_DEAD notes
12935 for each register other than the first. They could have
12936 their own REG_DEAD notes lying around. */
12937 else if ((note == 0
12938 || (note != 0
12939 && (GET_MODE_SIZE (GET_MODE (XEXP (note, 0)))
12940 < GET_MODE_SIZE (GET_MODE (x)))))
12941 && regno < FIRST_PSEUDO_REGISTER
12942 && hard_regno_nregs[regno][GET_MODE (x)] > 1)
12943 {
12944 unsigned int ourend = END_HARD_REGNO (x);
12945 unsigned int i, offset;
12946 rtx oldnotes = 0;
12947
12948 if (note)
12949 offset = hard_regno_nregs[regno][GET_MODE (XEXP (note, 0))];
12950 else
12951 offset = 1;
12952
12953 for (i = regno + offset; i < ourend; i++)
12954 move_deaths (regno_reg_rtx[i],
12955 maybe_kill_insn, from_luid, to_insn, &oldnotes);
12956 }
12957
12958 if (note != 0 && GET_MODE (XEXP (note, 0)) == GET_MODE (x))
12959 {
12960 XEXP (note, 1) = *pnotes;
12961 *pnotes = note;
12962 }
12963 else
12964 *pnotes = alloc_reg_note (REG_DEAD, x, *pnotes);
12965 }
12966
12967 return;
12968 }
12969
12970 else if (GET_CODE (x) == SET)
12971 {
12972 rtx dest = SET_DEST (x);
12973
12974 move_deaths (SET_SRC (x), maybe_kill_insn, from_luid, to_insn, pnotes);
12975
12976 /* In the case of a ZERO_EXTRACT, a STRICT_LOW_PART, or a SUBREG
12977 that accesses one word of a multi-word item, some
12978 piece of every register in the expression is used by
12979 this insn, so remove any old death. */
12980 /* ??? So why do we test for equality of the sizes? */
12981
12982 if (GET_CODE (dest) == ZERO_EXTRACT
12983 || GET_CODE (dest) == STRICT_LOW_PART
12984 || (GET_CODE (dest) == SUBREG
12985 && (((GET_MODE_SIZE (GET_MODE (dest))
12986 + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
12987 == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
12988 + UNITS_PER_WORD - 1) / UNITS_PER_WORD))))
12989 {
12990 move_deaths (dest, maybe_kill_insn, from_luid, to_insn, pnotes);
12991 return;
12992 }
12993
12994 /* If this is some other SUBREG, we know it replaces the entire
12995 value, so use that as the destination. */
12996 if (GET_CODE (dest) == SUBREG)
12997 dest = SUBREG_REG (dest);
12998
12999 /* If this is a MEM, adjust deaths of anything used in the address.
13000 For a REG (the only other possibility), the entire value is
13001 being replaced so the old value is not used in this insn. */
13002
13003 if (MEM_P (dest))
13004 move_deaths (XEXP (dest, 0), maybe_kill_insn, from_luid,
13005 to_insn, pnotes);
13006 return;
13007 }
13008
13009 else if (GET_CODE (x) == CLOBBER)
13010 return;
13011
13012 len = GET_RTX_LENGTH (code);
13013 fmt = GET_RTX_FORMAT (code);
13014
13015 for (i = 0; i < len; i++)
13016 {
13017 if (fmt[i] == 'E')
13018 {
13019 int j;
13020 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
13021 move_deaths (XVECEXP (x, i, j), maybe_kill_insn, from_luid,
13022 to_insn, pnotes);
13023 }
13024 else if (fmt[i] == 'e')
13025 move_deaths (XEXP (x, i), maybe_kill_insn, from_luid, to_insn, pnotes);
13026 }
13027 }
13028 \f
13029 /* Return 1 if X is the target of a bit-field assignment in BODY, the
13030 pattern of an insn. X must be a REG. */
13031
13032 static int
13033 reg_bitfield_target_p (rtx x, rtx body)
13034 {
13035 int i;
13036
13037 if (GET_CODE (body) == SET)
13038 {
13039 rtx dest = SET_DEST (body);
13040 rtx target;
13041 unsigned int regno, tregno, endregno, endtregno;
13042
13043 if (GET_CODE (dest) == ZERO_EXTRACT)
13044 target = XEXP (dest, 0);
13045 else if (GET_CODE (dest) == STRICT_LOW_PART)
13046 target = SUBREG_REG (XEXP (dest, 0));
13047 else
13048 return 0;
13049
13050 if (GET_CODE (target) == SUBREG)
13051 target = SUBREG_REG (target);
13052
13053 if (!REG_P (target))
13054 return 0;
13055
13056 tregno = REGNO (target), regno = REGNO (x);
13057 if (tregno >= FIRST_PSEUDO_REGISTER || regno >= FIRST_PSEUDO_REGISTER)
13058 return target == x;
13059
13060 endtregno = end_hard_regno (GET_MODE (target), tregno);
13061 endregno = end_hard_regno (GET_MODE (x), regno);
13062
13063 return endregno > tregno && regno < endtregno;
13064 }
13065
13066 else if (GET_CODE (body) == PARALLEL)
13067 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
13068 if (reg_bitfield_target_p (x, XVECEXP (body, 0, i)))
13069 return 1;
13070
13071 return 0;
13072 }
13073 \f
13074 /* Given a chain of REG_NOTES originally from FROM_INSN, try to place them
13075 as appropriate. I3 and I2 are the insns resulting from the combination
13076 insns including FROM (I2 may be zero).
13077
13078 ELIM_I2 and ELIM_I1 are either zero or registers that we know will
13079 not need REG_DEAD notes because they are being substituted for. This
13080 saves searching in the most common cases.
13081
13082 Each note in the list is either ignored or placed on some insns, depending
13083 on the type of note. */
13084
13085 static void
13086 distribute_notes (rtx notes, rtx from_insn, rtx i3, rtx i2, rtx elim_i2,
13087 rtx elim_i1, rtx elim_i0)
13088 {
13089 rtx note, next_note;
13090 rtx tem;
13091
13092 for (note = notes; note; note = next_note)
13093 {
13094 rtx place = 0, place2 = 0;
13095
13096 next_note = XEXP (note, 1);
13097 switch (REG_NOTE_KIND (note))
13098 {
13099 case REG_BR_PROB:
13100 case REG_BR_PRED:
13101 /* Doesn't matter much where we put this, as long as it's somewhere.
13102 It is preferable to keep these notes on branches, which is most
13103 likely to be i3. */
13104 place = i3;
13105 break;
13106
13107 case REG_VALUE_PROFILE:
13108 /* Just get rid of this note, as it is unused later anyway. */
13109 break;
13110
13111 case REG_NON_LOCAL_GOTO:
13112 if (JUMP_P (i3))
13113 place = i3;
13114 else
13115 {
13116 gcc_assert (i2 && JUMP_P (i2));
13117 place = i2;
13118 }
13119 break;
13120
13121 case REG_EH_REGION:
13122 /* These notes must remain with the call or trapping instruction. */
13123 if (CALL_P (i3))
13124 place = i3;
13125 else if (i2 && CALL_P (i2))
13126 place = i2;
13127 else
13128 {
13129 gcc_assert (cfun->can_throw_non_call_exceptions);
13130 if (may_trap_p (i3))
13131 place = i3;
13132 else if (i2 && may_trap_p (i2))
13133 place = i2;
13134 /* ??? Otherwise assume we've combined things such that we
13135 can now prove that the instructions can't trap. Drop the
13136 note in this case. */
13137 }
13138 break;
13139
13140 case REG_NORETURN:
13141 case REG_SETJMP:
13142 /* These notes must remain with the call. It should not be
13143 possible for both I2 and I3 to be a call. */
13144 if (CALL_P (i3))
13145 place = i3;
13146 else
13147 {
13148 gcc_assert (i2 && CALL_P (i2));
13149 place = i2;
13150 }
13151 break;
13152
13153 case REG_UNUSED:
13154 /* Any clobbers for i3 may still exist, and so we must process
13155 REG_UNUSED notes from that insn.
13156
13157 Any clobbers from i2 or i1 can only exist if they were added by
13158 recog_for_combine. In that case, recog_for_combine created the
13159 necessary REG_UNUSED notes. Trying to keep any original
13160 REG_UNUSED notes from these insns can cause incorrect output
13161 if it is for the same register as the original i3 dest.
13162 In that case, we will notice that the register is set in i3,
13163 and then add a REG_UNUSED note for the destination of i3, which
13164 is wrong. However, it is possible to have REG_UNUSED notes from
13165 i2 or i1 for registers which were both used and clobbered, so
13166 we keep notes from i2 or i1 if they will turn into REG_DEAD
13167 notes. */
13168
13169 /* If this register is set or clobbered in I3, put the note there
13170 unless there is one already. */
13171 if (reg_set_p (XEXP (note, 0), PATTERN (i3)))
13172 {
13173 if (from_insn != i3)
13174 break;
13175
13176 if (! (REG_P (XEXP (note, 0))
13177 ? find_regno_note (i3, REG_UNUSED, REGNO (XEXP (note, 0)))
13178 : find_reg_note (i3, REG_UNUSED, XEXP (note, 0))))
13179 place = i3;
13180 }
13181 /* Otherwise, if this register is used by I3, then this register
13182 now dies here, so we must put a REG_DEAD note here unless there
13183 is one already. */
13184 else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3))
13185 && ! (REG_P (XEXP (note, 0))
13186 ? find_regno_note (i3, REG_DEAD,
13187 REGNO (XEXP (note, 0)))
13188 : find_reg_note (i3, REG_DEAD, XEXP (note, 0))))
13189 {
13190 PUT_REG_NOTE_KIND (note, REG_DEAD);
13191 place = i3;
13192 }
13193 break;
13194
13195 case REG_EQUAL:
13196 case REG_EQUIV:
13197 case REG_NOALIAS:
13198 /* These notes say something about results of an insn. We can
13199 only support them if they used to be on I3 in which case they
13200 remain on I3. Otherwise they are ignored.
13201
13202 If the note refers to an expression that is not a constant, we
13203 must also ignore the note since we cannot tell whether the
13204 equivalence is still true. It might be possible to do
13205 slightly better than this (we only have a problem if I2DEST
13206 or I1DEST is present in the expression), but it doesn't
13207 seem worth the trouble. */
13208
13209 if (from_insn == i3
13210 && (XEXP (note, 0) == 0 || CONSTANT_P (XEXP (note, 0))))
13211 place = i3;
13212 break;
13213
13214 case REG_INC:
13215 /* These notes say something about how a register is used. They must
13216 be present on any use of the register in I2 or I3. */
13217 if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3)))
13218 place = i3;
13219
13220 if (i2 && reg_mentioned_p (XEXP (note, 0), PATTERN (i2)))
13221 {
13222 if (place)
13223 place2 = i2;
13224 else
13225 place = i2;
13226 }
13227 break;
13228
13229 case REG_LABEL_TARGET:
13230 case REG_LABEL_OPERAND:
13231 /* This can show up in several ways -- either directly in the
13232 pattern, or hidden off in the constant pool with (or without?)
13233 a REG_EQUAL note. */
13234 /* ??? Ignore the without-reg_equal-note problem for now. */
13235 if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3))
13236 || ((tem = find_reg_note (i3, REG_EQUAL, NULL_RTX))
13237 && GET_CODE (XEXP (tem, 0)) == LABEL_REF
13238 && XEXP (XEXP (tem, 0), 0) == XEXP (note, 0)))
13239 place = i3;
13240
13241 if (i2
13242 && (reg_mentioned_p (XEXP (note, 0), PATTERN (i2))
13243 || ((tem = find_reg_note (i2, REG_EQUAL, NULL_RTX))
13244 && GET_CODE (XEXP (tem, 0)) == LABEL_REF
13245 && XEXP (XEXP (tem, 0), 0) == XEXP (note, 0))))
13246 {
13247 if (place)
13248 place2 = i2;
13249 else
13250 place = i2;
13251 }
13252
13253 /* For REG_LABEL_TARGET on a JUMP_P, we prefer to put the note
13254 as a JUMP_LABEL or decrement LABEL_NUSES if it's already
13255 there. */
13256 if (place && JUMP_P (place)
13257 && REG_NOTE_KIND (note) == REG_LABEL_TARGET
13258 && (JUMP_LABEL (place) == NULL
13259 || JUMP_LABEL (place) == XEXP (note, 0)))
13260 {
13261 rtx label = JUMP_LABEL (place);
13262
13263 if (!label)
13264 JUMP_LABEL (place) = XEXP (note, 0);
13265 else if (LABEL_P (label))
13266 LABEL_NUSES (label)--;
13267 }
13268
13269 if (place2 && JUMP_P (place2)
13270 && REG_NOTE_KIND (note) == REG_LABEL_TARGET
13271 && (JUMP_LABEL (place2) == NULL
13272 || JUMP_LABEL (place2) == XEXP (note, 0)))
13273 {
13274 rtx label = JUMP_LABEL (place2);
13275
13276 if (!label)
13277 JUMP_LABEL (place2) = XEXP (note, 0);
13278 else if (LABEL_P (label))
13279 LABEL_NUSES (label)--;
13280 place2 = 0;
13281 }
13282 break;
13283
13284 case REG_NONNEG:
13285 /* This note says something about the value of a register prior
13286 to the execution of an insn. It is too much trouble to see
13287 if the note is still correct in all situations. It is better
13288 to simply delete it. */
13289 break;
13290
13291 case REG_DEAD:
13292 /* If we replaced the right hand side of FROM_INSN with a
13293 REG_EQUAL note, the original use of the dying register
13294 will not have been combined into I3 and I2. In such cases,
13295 FROM_INSN is guaranteed to be the first of the combined
13296 instructions, so we simply need to search back before
13297 FROM_INSN for the previous use or set of this register,
13298 then alter the notes there appropriately.
13299
13300 If the register is used as an input in I3, it dies there.
13301 Similarly for I2, if it is nonzero and adjacent to I3.
13302
13303 If the register is not used as an input in either I3 or I2
13304 and it is not one of the registers we were supposed to eliminate,
13305 there are two possibilities. We might have a non-adjacent I2
13306 or we might have somehow eliminated an additional register
13307 from a computation. For example, we might have had A & B where
13308 we discover that B will always be zero. In this case we will
13309 eliminate the reference to A.
13310
13311 In both cases, we must search to see if we can find a previous
13312 use of A and put the death note there. */
13313
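/* A hypothetical sketch of the second case above (insn shapes and
   register numbers invented for illustration): suppose the insns being
   combined contained

     (set (reg:SI 130) (and:SI (reg:SI 128) (reg:SI 129)))

   and we have discovered that reg 129 is always zero, so the combined
   pattern sets reg 130 to (const_int 0) and the reference to reg 128
   disappears.  The REG_DEAD note for reg 128 can no longer stay on I3
   or I2; we walk backwards from FROM_INSN (or I3) to the previous use
   or set of reg 128 and attach the note, or a REG_UNUSED note, there.  */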
13314 if (from_insn
13315 && from_insn == i2mod
13316 && !reg_overlap_mentioned_p (XEXP (note, 0), i2mod_new_rhs))
13317 tem = from_insn;
13318 else
13319 {
13320 if (from_insn
13321 && CALL_P (from_insn)
13322 && find_reg_fusage (from_insn, USE, XEXP (note, 0)))
13323 place = from_insn;
13324 else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3)))
13325 place = i3;
13326 else if (i2 != 0 && next_nonnote_nondebug_insn (i2) == i3
13327 && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
13328 place = i2;
13329 else if ((rtx_equal_p (XEXP (note, 0), elim_i2)
13330 && !(i2mod
13331 && reg_overlap_mentioned_p (XEXP (note, 0),
13332 i2mod_old_rhs)))
13333 || rtx_equal_p (XEXP (note, 0), elim_i1)
13334 || rtx_equal_p (XEXP (note, 0), elim_i0))
13335 break;
13336 tem = i3;
13337 }
13338
13339 if (place == 0)
13340 {
13341 basic_block bb = this_basic_block;
13342
13343 for (tem = PREV_INSN (tem); place == 0; tem = PREV_INSN (tem))
13344 {
13345 if (!NONDEBUG_INSN_P (tem))
13346 {
13347 if (tem == BB_HEAD (bb))
13348 break;
13349 continue;
13350 }
13351
13352 /* If the register is being set at TEM, see if that is all
13353 TEM is doing. If so, delete TEM. Otherwise, make this
13354 into a REG_UNUSED note instead. Don't delete sets to
13355 global register vars. */
13356 if ((REGNO (XEXP (note, 0)) >= FIRST_PSEUDO_REGISTER
13357 || !global_regs[REGNO (XEXP (note, 0))])
13358 && reg_set_p (XEXP (note, 0), PATTERN (tem)))
13359 {
13360 rtx set = single_set (tem);
13361 rtx inner_dest = 0;
13362 #ifdef HAVE_cc0
13363 rtx cc0_setter = NULL_RTX;
13364 #endif
13365
13366 if (set != 0)
13367 for (inner_dest = SET_DEST (set);
13368 (GET_CODE (inner_dest) == STRICT_LOW_PART
13369 || GET_CODE (inner_dest) == SUBREG
13370 || GET_CODE (inner_dest) == ZERO_EXTRACT);
13371 inner_dest = XEXP (inner_dest, 0))
13372 ;
13373
13374 /* Verify that it was the set, and not a clobber that
13375 modified the register.
13376
13377 CC0 targets must be careful to maintain setter/user
13378 pairs. If we cannot delete the setter due to side
13379 effects, mark the user with an UNUSED note instead
13380 of deleting it. */
13381
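/* On a CC0 target the setter/user pair looks roughly like this
   (hypothetical registers):

     (set (cc0) (compare (reg:SI 170) (const_int 0)))
     (set (reg:SI 171) (eq:SI (cc0) (const_int 0)))

   Deleting the user while leaving the setter behind would leave an
   orphaned cc0 set, hence the extra bookkeeping below.  */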
13382 if (set != 0 && ! side_effects_p (SET_SRC (set))
13383 && rtx_equal_p (XEXP (note, 0), inner_dest)
13384 #ifdef HAVE_cc0
13385 && (! reg_mentioned_p (cc0_rtx, SET_SRC (set))
13386 || ((cc0_setter = prev_cc0_setter (tem)) != NULL
13387 && sets_cc0_p (PATTERN (cc0_setter)) > 0))
13388 #endif
13389 )
13390 {
13391 /* Move the notes and links of TEM elsewhere.
13392 This might delete other dead insns recursively.
13393 First set the pattern to something that won't use
13394 any register. */
13395 rtx old_notes = REG_NOTES (tem);
13396
13397 PATTERN (tem) = pc_rtx;
13398 REG_NOTES (tem) = NULL;
13399
13400 distribute_notes (old_notes, tem, tem, NULL_RTX,
13401 NULL_RTX, NULL_RTX, NULL_RTX);
13402 distribute_links (LOG_LINKS (tem));
13403
13404 SET_INSN_DELETED (tem);
13405 if (tem == i2)
13406 i2 = NULL_RTX;
13407
13408 #ifdef HAVE_cc0
13409 /* Delete the setter too. */
13410 if (cc0_setter)
13411 {
13412 PATTERN (cc0_setter) = pc_rtx;
13413 old_notes = REG_NOTES (cc0_setter);
13414 REG_NOTES (cc0_setter) = NULL;
13415
13416 distribute_notes (old_notes, cc0_setter,
13417 cc0_setter, NULL_RTX,
13418 NULL_RTX, NULL_RTX, NULL_RTX);
13419 distribute_links (LOG_LINKS (cc0_setter));
13420
13421 SET_INSN_DELETED (cc0_setter);
13422 if (cc0_setter == i2)
13423 i2 = NULL_RTX;
13424 }
13425 #endif
13426 }
13427 else
13428 {
13429 PUT_REG_NOTE_KIND (note, REG_UNUSED);
13430
13431 /* If there isn't already a REG_UNUSED note, put one
13432 here. Do not place a REG_DEAD note, even if
13433 the register is also used here; that would not
13434 match the algorithm used in lifetime analysis
13435 and can cause the consistency check in the
13436 scheduler to fail. */
13437 if (! find_regno_note (tem, REG_UNUSED,
13438 REGNO (XEXP (note, 0))))
13439 place = tem;
13440 break;
13441 }
13442 }
13443 else if (reg_referenced_p (XEXP (note, 0), PATTERN (tem))
13444 || (CALL_P (tem)
13445 && find_reg_fusage (tem, USE, XEXP (note, 0))))
13446 {
13447 place = tem;
13448
13449 /* If we are doing a 3->2 combination, and we have a
13450 register which formerly died in i3 and was not used
13451 by i2, but which now no longer dies in i3, is used in
13452 i2, and does not die in i2, and place is between i2
13453 and i3, then we may need to move a link from place
13454 to i2. */
13455 if (i2 && DF_INSN_LUID (place) > DF_INSN_LUID (i2)
13456 && from_insn
13457 && DF_INSN_LUID (from_insn) > DF_INSN_LUID (i2)
13458 && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
13459 {
13460 rtx links = LOG_LINKS (place);
13461 LOG_LINKS (place) = 0;
13462 distribute_links (links);
13463 }
13464 break;
13465 }
13466
13467 if (tem == BB_HEAD (bb))
13468 break;
13469 }
13470
13471 }
13472
13473 /* If the register is set or already dead at PLACE, we needn't do
13474 anything with this note if it is still a REG_DEAD note.
13475 We check here if it is set at all, not if it is totally replaced,
13476 which is what `dead_or_set_p' checks, so also check for it being
13477 set partially. */
13478
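/* For instance (a hypothetical pattern): if PLACE contains

     (set (zero_extract:SI (reg:SI 160) (const_int 8) (const_int 0)) ...)

   then reg 160 is set only partially; dead_or_set_p alone would not
   notice this, which is why reg_bitfield_target_p is checked as well.  */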
13479 if (place && REG_NOTE_KIND (note) == REG_DEAD)
13480 {
13481 unsigned int regno = REGNO (XEXP (note, 0));
13482 reg_stat_type *rsp = VEC_index (reg_stat_type, reg_stat, regno);
13483
13484 if (dead_or_set_p (place, XEXP (note, 0))
13485 || reg_bitfield_target_p (XEXP (note, 0), PATTERN (place)))
13486 {
13487 /* Unless the register previously died in PLACE, clear
13488 last_death. [I no longer understand why this is
13489 being done.] */
13490 if (rsp->last_death != place)
13491 rsp->last_death = 0;
13492 place = 0;
13493 }
13494 else
13495 rsp->last_death = place;
13496
13497 /* If this is a death note for a hard reg that is occupying
13498 multiple registers, ensure that we are still using all
13499 parts of the object. If we find a piece of the object
13500 that is unused, we must arrange for an appropriate REG_DEAD
13501 note to be added for it. However, we can't just emit a USE
13502 and tag the note to it, since the register might actually
13503 be dead; so we recurse, and the recursive call then finds
13504 the previous insn that used this register. */
13505
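/* A hypothetical sketch, assuming a target where a DImode value
   occupies the hard register pair r4/r5: a note (REG_DEAD (reg:DI 4))
   covers both halves.  If PLACE turns out to reference only r4, the
   note is split: a fresh (REG_DEAD (reg:SI 4)) is distributed to PLACE,
   and a fresh note for r5 is distributed recursively, so the recursive
   call walks back to the previous insn that set or used r5 (where it
   may end up as REG_UNUSED).  The original DImode note is then
   discarded.  */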
13506 if (place && regno < FIRST_PSEUDO_REGISTER
13507 && hard_regno_nregs[regno][GET_MODE (XEXP (note, 0))] > 1)
13508 {
13509 unsigned int endregno = END_HARD_REGNO (XEXP (note, 0));
13510 int all_used = 1;
13511 unsigned int i;
13512
13513 for (i = regno; i < endregno; i++)
13514 if ((! refers_to_regno_p (i, i + 1, PATTERN (place), 0)
13515 && ! find_regno_fusage (place, USE, i))
13516 || dead_or_set_regno_p (place, i))
13517 all_used = 0;
13518
13519 if (! all_used)
13520 {
13521 /* Put only REG_DEAD notes for pieces that are
13522 not already dead or set. */
13523
13524 for (i = regno; i < endregno;
13525 i += hard_regno_nregs[i][reg_raw_mode[i]])
13526 {
13527 rtx piece = regno_reg_rtx[i];
13528 basic_block bb = this_basic_block;
13529
13530 if (! dead_or_set_p (place, piece)
13531 && ! reg_bitfield_target_p (piece,
13532 PATTERN (place)))
13533 {
13534 rtx new_note = alloc_reg_note (REG_DEAD, piece,
13535 NULL_RTX);
13536
13537 distribute_notes (new_note, place, place,
13538 NULL_RTX, NULL_RTX, NULL_RTX,
13539 NULL_RTX);
13540 }
13541 else if (! refers_to_regno_p (i, i + 1,
13542 PATTERN (place), 0)
13543 && ! find_regno_fusage (place, USE, i))
13544 for (tem = PREV_INSN (place); ;
13545 tem = PREV_INSN (tem))
13546 {
13547 if (!NONDEBUG_INSN_P (tem))
13548 {
13549 if (tem == BB_HEAD (bb))
13550 break;
13551 continue;
13552 }
13553 if (dead_or_set_p (tem, piece)
13554 || reg_bitfield_target_p (piece,
13555 PATTERN (tem)))
13556 {
13557 add_reg_note (tem, REG_UNUSED, piece);
13558 break;
13559 }
13560 }
13561
13562 }
13563
13564 place = 0;
13565 }
13566 }
13567 }
13568 break;
13569
13570 default:
13571 /* Any other notes should not be present at this point in the
13572 compilation. */
13573 gcc_unreachable ();
13574 }
13575
13576 if (place)
13577 {
13578 XEXP (note, 1) = REG_NOTES (place);
13579 REG_NOTES (place) = note;
13580 }
13581
13582 if (place2)
13583 add_reg_note (place2, REG_NOTE_KIND (note), XEXP (note, 0));
13584 }
13585 }
13586 \f
13587 /* Similarly to above, distribute the LOG_LINKS that used to be present on
13588 I3, I2, and I1 to new locations. This is also called to add a link
13589 pointing at I3 when I3's destination is changed. */
13590
13591 static void
13592 distribute_links (rtx links)
13593 {
13594 rtx link, next_link;
13595
13596 for (link = links; link; link = next_link)
13597 {
13598 rtx place = 0;
13599 rtx insn;
13600 rtx set, reg;
13601
13602 next_link = XEXP (link, 1);
13603
13604 /* If the insn that this link points to is a NOTE or isn't a single
13605 set, ignore it. In the latter case, it isn't clear what we
13606 can do other than ignore the link, since we can't tell which
13607 register it was for. Such links wouldn't be used by combine
13608 anyway.
13609
13610 It is not possible for the destination of the target of the link to
13611 have been changed by combine. The only way this could happen is if we
13612 replace I3, I2, and I1 by I3 and I2. But in that case the
13613 destination of I2 also remains unchanged. */
13614
13615 if (NOTE_P (XEXP (link, 0))
13616 || (set = single_set (XEXP (link, 0))) == 0)
13617 continue;
13618
13619 reg = SET_DEST (set);
13620 while (GET_CODE (reg) == SUBREG || GET_CODE (reg) == ZERO_EXTRACT
13621 || GET_CODE (reg) == STRICT_LOW_PART)
13622 reg = XEXP (reg, 0);
13623
13624 /* A LOG_LINK is defined as being placed on the first insn that uses
13625 a register and points to the insn that sets the register. Start
13626 searching at the next insn after the target of the link and stop
13627 when we reach a set of the register or the end of the basic block.
13628
13629 Note that this correctly handles the link that used to point from
13630 I3 to I2. Also note that not much searching is typically done here
13631 since most links don't point very far away. */
13632
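/* A hypothetical illustration (insn UIDs and register numbers made up):

     insn 10: (set (reg:SI 140) (const_int 7))
     insn 11: (set (reg:SI 141) (plus:SI (reg:SI 140) (reg:SI 142)))

   The LOG_LINK lives on insn 11 and points back at insn 10.  If insn 11
   is consumed by a combination, we scan forward from insn 10 looking
   for the next insn in the block that uses reg 140 and move the link
   there; if we first reach another set of reg 140 or the end of the
   basic block, the link is simply dropped.  */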
13633 for (insn = NEXT_INSN (XEXP (link, 0));
13634 (insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR
13635 || BB_HEAD (this_basic_block->next_bb) != insn));
13636 insn = NEXT_INSN (insn))
13637 if (DEBUG_INSN_P (insn))
13638 continue;
13639 else if (INSN_P (insn) && reg_overlap_mentioned_p (reg, PATTERN (insn)))
13640 {
13641 if (reg_referenced_p (reg, PATTERN (insn)))
13642 place = insn;
13643 break;
13644 }
13645 else if (CALL_P (insn)
13646 && find_reg_fusage (insn, USE, reg))
13647 {
13648 place = insn;
13649 break;
13650 }
13651 else if (INSN_P (insn) && reg_set_p (reg, insn))
13652 break;
13653
13654 /* If we found a place to put the link, place it there unless there
13655 is already a link to the same insn as LINK at that point. */
13656
13657 if (place)
13658 {
13659 rtx link2;
13660
13661 for (link2 = LOG_LINKS (place); link2; link2 = XEXP (link2, 1))
13662 if (XEXP (link2, 0) == XEXP (link, 0))
13663 break;
13664
13665 if (link2 == 0)
13666 {
13667 XEXP (link, 1) = LOG_LINKS (place);
13668 LOG_LINKS (place) = link;
13669
13670 /* Set added_links_insn to the earliest insn we added a
13671 link to. */
13672 if (added_links_insn == 0
13673 || DF_INSN_LUID (added_links_insn) > DF_INSN_LUID (place))
13674 added_links_insn = place;
13675 }
13676 }
13677 }
13678 }
13679 \f
13680 /* Subroutine of unmentioned_reg_p and callback from for_each_rtx.
13681 Check whether the expression pointed to by LOC is a register or
13682 memory, and if so return 1 if it isn't mentioned in the rtx EXPR.
13683 Otherwise return zero. */
13684
13685 static int
13686 unmentioned_reg_p_1 (rtx *loc, void *expr)
13687 {
13688 rtx x = *loc;
13689
13690 if (x != NULL_RTX
13691 && (REG_P (x) || MEM_P (x))
13692 && ! reg_mentioned_p (x, (rtx) expr))
13693 return 1;
13694 return 0;
13695 }
13696
13697 /* Check for any register or memory mentioned in EQUIV that is not
13698 mentioned in EXPR. This is used to restrict EQUIV to "specializations"
13699 of EXPR where some registers may have been replaced by constants. */
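/* For example (hypothetical operands, purely for illustration): with
   EXPR = (plus:SI (reg:SI 150) (reg:SI 151)), an EQUIV of
   (plus:SI (reg:SI 150) (const_int 4)) mentions no register or memory
   absent from EXPR, so the result is false; an EQUIV of
   (plus:SI (reg:SI 152) (const_int 4)) yields true, since reg 152 does
   not appear in EXPR.  */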
13700
13701 static bool
13702 unmentioned_reg_p (rtx equiv, rtx expr)
13703 {
13704 return for_each_rtx (&equiv, unmentioned_reg_p_1, expr);
13705 }
13706 \f
13707 void
13708 dump_combine_stats (FILE *file)
13709 {
13710 fprintf
13711 (file,
13712 ";; Combiner statistics: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n\n",
13713 combine_attempts, combine_merges, combine_extras, combine_successes);
13714 }
13715
13716 void
13717 dump_combine_total_stats (FILE *file)
13718 {
13719 fprintf
13720 (file,
13721 "\n;; Combiner totals: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n",
13722 total_attempts, total_merges, total_extras, total_successes);
13723 }
13724 \f
13725 static bool
13726 gate_handle_combine (void)
13727 {
13728 return (optimize > 0);
13729 }
13730
13731 /* Try combining insns through substitution. */
13732 static unsigned int
13733 rest_of_handle_combine (void)
13734 {
13735 int rebuild_jump_labels_after_combine;
13736
13737 df_set_flags (DF_LR_RUN_DCE + DF_DEFER_INSN_RESCAN);
13738 df_note_add_problem ();
13739 df_analyze ();
13740
13741 regstat_init_n_sets_and_refs ();
13742
13743 rebuild_jump_labels_after_combine
13744 = combine_instructions (get_insns (), max_reg_num ());
13745
13746 /* Combining insns may have turned an indirect jump into a
13747 direct jump. Rebuild the JUMP_LABEL fields of jumping
13748 instructions. */
13749 if (rebuild_jump_labels_after_combine)
13750 {
13751 timevar_push (TV_JUMP);
13752 rebuild_jump_labels (get_insns ());
13753 cleanup_cfg (0);
13754 timevar_pop (TV_JUMP);
13755 }
13756
13757 regstat_free_n_sets_and_refs ();
13758 return 0;
13759 }
13760
13761 struct rtl_opt_pass pass_combine =
13762 {
13763 {
13764 RTL_PASS,
13765 "combine", /* name */
13766 gate_handle_combine, /* gate */
13767 rest_of_handle_combine, /* execute */
13768 NULL, /* sub */
13769 NULL, /* next */
13770 0, /* static_pass_number */
13771 TV_COMBINE, /* tv_id */
13772 PROP_cfglayout, /* properties_required */
13773 0, /* properties_provided */
13774 0, /* properties_destroyed */
13775 0, /* todo_flags_start */
13776 TODO_dump_func |
13777 TODO_df_finish | TODO_verify_rtl_sharing |
13778 TODO_ggc_collect, /* todo_flags_finish */
13779 }
13780 };