re PR rtl-optimization/46490 (ACATS c460007 fails at -O2 or above)
[gcc.git] / gcc / combine.c
1 /* Optimize by combining instructions for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 /* This module is essentially the "combiner" phase of the U. of Arizona
23 Portable Optimizer, but redone to work on our list-structured
24 representation for RTL instead of their string representation.
25
26 The LOG_LINKS of each insn identify the most recent assignment
27 to each REG used in the insn. It is a list of previous insns,
28 each of which contains a SET for a REG that is used in this insn
29 and not used or set in between. LOG_LINKs never cross basic blocks.
30 They were set up by the preceding pass (lifetime analysis).
31
32 We try to combine each pair of insns joined by a logical link.
33 We also try to combine triples of insns A, B and C when
34 C has a link back to B and B has a link back to A.
35
36 LOG_LINKS does not have links for use of the CC0. They don't
37 need to, because the insn that sets the CC0 is always immediately
38 before the insn that tests it. So we always regard a branch
39 insn as having a logical link to the preceding insn. The same is true
40 for an insn explicitly using CC0.
41
42 We check (with use_crosses_set_p) to avoid combining in such a way
43 as to move a computation to a place where its value would be different.
44
45 Combination is done by mathematically substituting the previous
46 insn(s) values for the regs they set into the expressions in
47 the later insns that refer to these regs. If the result is a valid insn
48 for our target machine, according to the machine description,
49 we install it, delete the earlier insns, and update the data flow
50 information (LOG_LINKS and REG_NOTES) for what we did.
51
52 There are a few exceptions where the dataflow information isn't
53 completely updated (however this is only a local issue since it is
54 regenerated before the next pass that uses it):
55
56 - reg_live_length is not updated
57 - reg_n_refs is not adjusted in the rare case when a register is
58 no longer required in a computation
59 - there are extremely rare cases (see distribute_notes) when a
60 REG_DEAD note is lost
61 - a LOG_LINKS entry that refers to an insn with multiple SETs may be
62 removed because there is no way to know which register it was
63 linking
64
65 To simplify substitution, we combine only when the earlier insn(s)
66 consist of only a single assignment. To simplify updating afterward,
67 we never combine when a subroutine call appears in the middle.
68
69 Since we do not represent assignments to CC0 explicitly except when that
70 is all an insn does, there is no LOG_LINKS entry in an insn that uses
71 the condition code for the insn that set the condition code.
72 Fortunately, these two insns must be consecutive.
73 Therefore, every JUMP_INSN is taken to have an implicit logical link
74 to the preceding insn. This is not quite right, since non-jumps can
75 also use the condition code; but in practice such insns would not
76 combine anyway. */
77
78 #include "config.h"
79 #include "system.h"
80 #include "coretypes.h"
81 #include "tm.h"
82 #include "rtl.h"
83 #include "tree.h"
84 #include "tm_p.h"
85 #include "flags.h"
86 #include "regs.h"
87 #include "hard-reg-set.h"
88 #include "basic-block.h"
89 #include "insn-config.h"
90 #include "function.h"
91 /* Include expr.h after insn-config.h so we get HAVE_conditional_move. */
92 #include "expr.h"
93 #include "insn-attr.h"
94 #include "recog.h"
95 #include "diagnostic-core.h"
96 #include "toplev.h"
97 #include "target.h"
98 #include "optabs.h"
99 #include "insn-codes.h"
100 #include "rtlhooks-def.h"
101 /* Include output.h for dump_file. */
102 #include "output.h"
103 #include "params.h"
104 #include "timevar.h"
105 #include "tree-pass.h"
106 #include "df.h"
107 #include "cgraph.h"
108
109 /* Number of attempts to combine instructions in this function. */
110
111 static int combine_attempts;
112
113 /* Number of attempts that got as far as substitution in this function. */
114
115 static int combine_merges;
116
117 /* Number of instructions combined with added SETs in this function. */
118
119 static int combine_extras;
120
121 /* Number of instructions combined in this function. */
122
123 static int combine_successes;
124
125 /* Totals over entire compilation. */
126
127 static int total_attempts, total_merges, total_extras, total_successes;
128
129 /* combine_instructions may try to replace the right hand side of the
130 second instruction with the value of an associated REG_EQUAL note
131 before throwing it at try_combine. That is problematic when there
132 is a REG_DEAD note for a register used in the old right hand side
133 and can cause distribute_notes to do wrong things. This is the
134 second instruction if it has been so modified, null otherwise. */
135
136 static rtx i2mod;
137
138 /* When I2MOD is nonnull, this is a copy of the old right hand side. */
139
140 static rtx i2mod_old_rhs;
141
142 /* When I2MOD is nonnull, this is a copy of the new right hand side. */
143
144 static rtx i2mod_new_rhs;
145 \f
146 typedef struct reg_stat_struct {
147 /* Record last point of death of (hard or pseudo) register n. */
148 rtx last_death;
149
150 /* Record last point of modification of (hard or pseudo) register n. */
151 rtx last_set;
152
153 /* The next group of fields allows the recording of the last value assigned
154 to (hard or pseudo) register n. We use this information to see if an
155 operation being processed is redundant given a prior operation performed
156 on the register. For example, an `and' with a constant is redundant if
157 all the zero bits are already known to be turned off.
158
159 We use an approach similar to that used by cse, but change it in the
160 following ways:
161
162 (1) We do not want to reinitialize at each label.
163 (2) It is useful, but not critical, to know the actual value assigned
164 to a register. Often just its form is helpful.
165
166 Therefore, we maintain the following fields:
167
168 last_set_value the last value assigned
169 last_set_label records the value of label_tick when the
170 register was assigned
171 last_set_table_tick records the value of label_tick when a
172 value using the register is assigned
173 last_set_invalid set to nonzero when it is not valid
174 to use the value of this register in some
175 register's value
176
177 To understand the usage of these tables, it is important to understand
178 the distinction between the value in last_set_value being valid and
179 the register being validly contained in some other expression in the
180 table.
181
182 (The next two parameters are out of date).
183
184 reg_stat[i].last_set_value is valid if it is nonzero, and either
185 reg_n_sets[i] is 1 or reg_stat[i].last_set_label == label_tick.
186
187 Register I may validly appear in any expression returned for the value
188 of another register if reg_n_sets[i] is 1. It may also appear in the
189 value for register J if reg_stat[j].last_set_invalid is zero, or
190 reg_stat[i].last_set_label < reg_stat[j].last_set_label.
191
192 If an expression is found in the table containing a register which may
193 not validly appear in an expression, the register is replaced by
194 something that won't match, (clobber (const_int 0)). */
195
196 /* Record last value assigned to (hard or pseudo) register n. */
197
198 rtx last_set_value;
199
200 /* Record the value of label_tick when an expression involving register n
201 is placed in last_set_value. */
202
203 int last_set_table_tick;
204
205 /* Record the value of label_tick when the value for register n is placed in
206 last_set_value. */
207
208 int last_set_label;
209
210 /* These fields are maintained in parallel with last_set_value and are
211 used to store the mode in which the register was last set, the bits
212 that were known to be zero when it was last set, and the number of
213 sign bits copies it was known to have when it was last set. */
214
215 unsigned HOST_WIDE_INT last_set_nonzero_bits;
216 char last_set_sign_bit_copies;
217 ENUM_BITFIELD(machine_mode) last_set_mode : 8;
218
219 /* Set nonzero if references to register n in expressions should not be
220 used. last_set_invalid is set nonzero when this register is being
221 assigned to and last_set_table_tick == label_tick. */
222
223 char last_set_invalid;
224
225 /* Some registers that are set more than once and used in more than one
226 basic block are nevertheless always set in similar ways. For example,
227 a QImode register may be loaded from memory in two places on a machine
228 where byte loads zero extend.
229
230 We record in the following fields if a register has some leading bits
231 that are always equal to the sign bit, and what we know about the
232 nonzero bits of a register, specifically which bits are known to be
233 zero.
234
235 If an entry is zero, it means that we don't know anything special. */
236
237 unsigned char sign_bit_copies;
238
239 unsigned HOST_WIDE_INT nonzero_bits;
240
241 /* Record the value of the label_tick when the last truncation
242 happened. The field truncated_to_mode is only valid if
243 truncation_label == label_tick. */
244
245 int truncation_label;
246
247 /* Record the last truncation seen for this register. If truncation
248 is not a nop to this mode we might be able to save an explicit
249 truncation if we know that value already contains a truncated
250 value. */
251
252 ENUM_BITFIELD(machine_mode) truncated_to_mode : 8;
253 } reg_stat_type;
254
255 DEF_VEC_O(reg_stat_type);
256 DEF_VEC_ALLOC_O(reg_stat_type,heap);
257
258 static VEC(reg_stat_type,heap) *reg_stat;
259
260 /* Record the luid of the last insn that invalidated memory
261 (anything that writes memory, and subroutine calls, but not pushes). */
262
263 static int mem_last_set;
264
265 /* Record the luid of the last CALL_INSN
266 so we can tell whether a potential combination crosses any calls. */
267
268 static int last_call_luid;
269
270 /* When `subst' is called, this is the insn that is being modified
271 (by combining in a previous insn). The PATTERN of this insn
272 is still the old pattern partially modified and it should not be
273 looked at, but this may be used to examine the successors of the insn
274 to judge whether a simplification is valid. */
275
276 static rtx subst_insn;
277
278 /* This is the lowest LUID that `subst' is currently dealing with.
279 get_last_value will not return a value if the register was set at or
280 after this LUID. If not for this mechanism, we could get confused if
281 I2 or I1 in try_combine were an insn that used the old value of a register
282 to obtain a new value. In that case, we might erroneously get the
283 new value of the register when we wanted the old one. */
284
285 static int subst_low_luid;
286
287 /* This contains any hard registers that are used in newpat; reg_dead_at_p
288 must consider all these registers to be always live. */
289
290 static HARD_REG_SET newpat_used_regs;
291
292 /* This is an insn to which a LOG_LINKS entry has been added. If this
293 insn is the earlier than I2 or I3, combine should rescan starting at
294 that location. */
295
296 static rtx added_links_insn;
297
298 /* Basic block in which we are performing combines. */
299 static basic_block this_basic_block;
300 static bool optimize_this_for_speed_p;
301
302 \f
303 /* Length of the currently allocated uid_insn_cost array. */
304
305 static int max_uid_known;
306
307 /* The following array records the insn_rtx_cost for every insn
308 in the instruction stream. */
309
310 static int *uid_insn_cost;
311
312 /* The following array records the LOG_LINKS for every insn in the
313 instruction stream as an INSN_LIST rtx. */
314
315 static rtx *uid_log_links;
316
317 #define INSN_COST(INSN) (uid_insn_cost[INSN_UID (INSN)])
318 #define LOG_LINKS(INSN) (uid_log_links[INSN_UID (INSN)])
319
320 /* Incremented for each basic block. */
321
322 static int label_tick;
323
324 /* Reset to label_tick for each extended basic block in scanning order. */
325
326 static int label_tick_ebb_start;
327
328 /* Mode used to compute significance in reg_stat[].nonzero_bits. It is the
329 largest integer mode that can fit in HOST_BITS_PER_WIDE_INT. */
330
331 static enum machine_mode nonzero_bits_mode;
332
333 /* Nonzero when reg_stat[].nonzero_bits and reg_stat[].sign_bit_copies can
334 be safely used. It is zero while computing them and after combine has
335 completed. This former test prevents propagating values based on
336 previously set values, which can be incorrect if a variable is modified
337 in a loop. */
338
339 static int nonzero_sign_valid;
340
341 \f
342 /* Record one modification to rtl structure
343 to be undone by storing old_contents into *where. */
344
345 enum undo_kind { UNDO_RTX, UNDO_INT, UNDO_MODE };
346
347 struct undo
348 {
349 struct undo *next;
350 enum undo_kind kind;
351 union { rtx r; int i; enum machine_mode m; } old_contents;
352 union { rtx *r; int *i; } where;
353 };
354
355 /* Record a bunch of changes to be undone, up to MAX_UNDO of them.
356 num_undo says how many are currently recorded.
357
358 other_insn is nonzero if we have modified some other insn in the process
359 of working on subst_insn. It must be verified too. */
360
361 struct undobuf
362 {
363 struct undo *undos;
364 struct undo *frees;
365 rtx other_insn;
366 };
367
368 static struct undobuf undobuf;
369
370 /* Number of times the pseudo being substituted for
371 was found and replaced. */
372
373 static int n_occurrences;
374
375 static rtx reg_nonzero_bits_for_combine (const_rtx, enum machine_mode, const_rtx,
376 enum machine_mode,
377 unsigned HOST_WIDE_INT,
378 unsigned HOST_WIDE_INT *);
379 static rtx reg_num_sign_bit_copies_for_combine (const_rtx, enum machine_mode, const_rtx,
380 enum machine_mode,
381 unsigned int, unsigned int *);
382 static void do_SUBST (rtx *, rtx);
383 static void do_SUBST_INT (int *, int);
384 static void init_reg_last (void);
385 static void setup_incoming_promotions (rtx);
386 static void set_nonzero_bits_and_sign_copies (rtx, const_rtx, void *);
387 static int cant_combine_insn_p (rtx);
388 static int can_combine_p (rtx, rtx, rtx, rtx, rtx, rtx, rtx *, rtx *);
389 static int combinable_i3pat (rtx, rtx *, rtx, rtx, rtx, int, int, rtx *);
390 static int contains_muldiv (rtx);
391 static rtx try_combine (rtx, rtx, rtx, rtx, int *);
392 static void undo_all (void);
393 static void undo_commit (void);
394 static rtx *find_split_point (rtx *, rtx, bool);
395 static rtx subst (rtx, rtx, rtx, int, int);
396 static rtx combine_simplify_rtx (rtx, enum machine_mode, int);
397 static rtx simplify_if_then_else (rtx);
398 static rtx simplify_set (rtx);
399 static rtx simplify_logical (rtx);
400 static rtx expand_compound_operation (rtx);
401 static const_rtx expand_field_assignment (const_rtx);
402 static rtx make_extraction (enum machine_mode, rtx, HOST_WIDE_INT,
403 rtx, unsigned HOST_WIDE_INT, int, int, int);
404 static rtx extract_left_shift (rtx, int);
405 static rtx make_compound_operation (rtx, enum rtx_code);
406 static int get_pos_from_mask (unsigned HOST_WIDE_INT,
407 unsigned HOST_WIDE_INT *);
408 static rtx canon_reg_for_combine (rtx, rtx);
409 static rtx force_to_mode (rtx, enum machine_mode,
410 unsigned HOST_WIDE_INT, int);
411 static rtx if_then_else_cond (rtx, rtx *, rtx *);
412 static rtx known_cond (rtx, enum rtx_code, rtx, rtx);
413 static int rtx_equal_for_field_assignment_p (rtx, rtx);
414 static rtx make_field_assignment (rtx);
415 static rtx apply_distributive_law (rtx);
416 static rtx distribute_and_simplify_rtx (rtx, int);
417 static rtx simplify_and_const_int_1 (enum machine_mode, rtx,
418 unsigned HOST_WIDE_INT);
419 static rtx simplify_and_const_int (rtx, enum machine_mode, rtx,
420 unsigned HOST_WIDE_INT);
421 static int merge_outer_ops (enum rtx_code *, HOST_WIDE_INT *, enum rtx_code,
422 HOST_WIDE_INT, enum machine_mode, int *);
423 static rtx simplify_shift_const_1 (enum rtx_code, enum machine_mode, rtx, int);
424 static rtx simplify_shift_const (rtx, enum rtx_code, enum machine_mode, rtx,
425 int);
426 static int recog_for_combine (rtx *, rtx, rtx *);
427 static rtx gen_lowpart_for_combine (enum machine_mode, rtx);
428 static enum rtx_code simplify_comparison (enum rtx_code, rtx *, rtx *);
429 static void update_table_tick (rtx);
430 static void record_value_for_reg (rtx, rtx, rtx);
431 static void check_promoted_subreg (rtx, rtx);
432 static void record_dead_and_set_regs_1 (rtx, const_rtx, void *);
433 static void record_dead_and_set_regs (rtx);
434 static int get_last_value_validate (rtx *, rtx, int, int);
435 static rtx get_last_value (const_rtx);
436 static int use_crosses_set_p (const_rtx, int);
437 static void reg_dead_at_p_1 (rtx, const_rtx, void *);
438 static int reg_dead_at_p (rtx, rtx);
439 static void move_deaths (rtx, rtx, int, rtx, rtx *);
440 static int reg_bitfield_target_p (rtx, rtx);
441 static void distribute_notes (rtx, rtx, rtx, rtx, rtx, rtx, rtx);
442 static void distribute_links (rtx);
443 static void mark_used_regs_combine (rtx);
444 static void record_promoted_value (rtx, rtx);
445 static int unmentioned_reg_p_1 (rtx *, void *);
446 static bool unmentioned_reg_p (rtx, rtx);
447 static int record_truncated_value (rtx *, void *);
448 static void record_truncated_values (rtx *, void *);
449 static bool reg_truncated_to_mode (enum machine_mode, const_rtx);
450 static rtx gen_lowpart_or_truncate (enum machine_mode, rtx);
451 \f
452
453 /* It is not safe to use ordinary gen_lowpart in combine.
454 See comments in gen_lowpart_for_combine. */
455 #undef RTL_HOOKS_GEN_LOWPART
456 #define RTL_HOOKS_GEN_LOWPART gen_lowpart_for_combine
457
458 /* Our implementation of gen_lowpart never emits a new pseudo. */
459 #undef RTL_HOOKS_GEN_LOWPART_NO_EMIT
460 #define RTL_HOOKS_GEN_LOWPART_NO_EMIT gen_lowpart_for_combine
461
462 #undef RTL_HOOKS_REG_NONZERO_REG_BITS
463 #define RTL_HOOKS_REG_NONZERO_REG_BITS reg_nonzero_bits_for_combine
464
465 #undef RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES
466 #define RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES reg_num_sign_bit_copies_for_combine
467
468 #undef RTL_HOOKS_REG_TRUNCATED_TO_MODE
469 #define RTL_HOOKS_REG_TRUNCATED_TO_MODE reg_truncated_to_mode
470
471 static const struct rtl_hooks combine_rtl_hooks = RTL_HOOKS_INITIALIZER;
472
473 \f
474 /* Try to split PATTERN found in INSN. This returns NULL_RTX if
475 PATTERN can not be split. Otherwise, it returns an insn sequence.
476 This is a wrapper around split_insns which ensures that the
477 reg_stat vector is made larger if the splitter creates a new
478 register. */
479
480 static rtx
481 combine_split_insns (rtx pattern, rtx insn)
482 {
483 rtx ret;
484 unsigned int nregs;
485
486 ret = split_insns (pattern, insn);
487 nregs = max_reg_num ();
488 if (nregs > VEC_length (reg_stat_type, reg_stat))
489 VEC_safe_grow_cleared (reg_stat_type, heap, reg_stat, nregs);
490 return ret;
491 }
492
493 /* This is used by find_single_use to locate an rtx in LOC that
494 contains exactly one use of DEST, which is typically either a REG
495 or CC0. It returns a pointer to the innermost rtx expression
496 containing DEST. Appearances of DEST that are being used to
497 totally replace it are not counted. */
498
499 static rtx *
500 find_single_use_1 (rtx dest, rtx *loc)
501 {
502 rtx x = *loc;
503 enum rtx_code code = GET_CODE (x);
504 rtx *result = NULL;
505 rtx *this_result;
506 int i;
507 const char *fmt;
508
509 switch (code)
510 {
511 case CONST_INT:
512 case CONST:
513 case LABEL_REF:
514 case SYMBOL_REF:
515 case CONST_DOUBLE:
516 case CONST_VECTOR:
517 case CLOBBER:
518 return 0;
519
520 case SET:
521 /* If the destination is anything other than CC0, PC, a REG or a SUBREG
522 of a REG that occupies all of the REG, the insn uses DEST if
523 it is mentioned in the destination or the source. Otherwise, we
524 need just check the source. */
525 if (GET_CODE (SET_DEST (x)) != CC0
526 && GET_CODE (SET_DEST (x)) != PC
527 && !REG_P (SET_DEST (x))
528 && ! (GET_CODE (SET_DEST (x)) == SUBREG
529 && REG_P (SUBREG_REG (SET_DEST (x)))
530 && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (x))))
531 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
532 == ((GET_MODE_SIZE (GET_MODE (SET_DEST (x)))
533 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))))
534 break;
535
536 return find_single_use_1 (dest, &SET_SRC (x));
537
538 case MEM:
539 case SUBREG:
540 return find_single_use_1 (dest, &XEXP (x, 0));
541
542 default:
543 break;
544 }
545
546 /* If it wasn't one of the common cases above, check each expression and
547 vector of this code. Look for a unique usage of DEST. */
548
549 fmt = GET_RTX_FORMAT (code);
550 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
551 {
552 if (fmt[i] == 'e')
553 {
554 if (dest == XEXP (x, i)
555 || (REG_P (dest) && REG_P (XEXP (x, i))
556 && REGNO (dest) == REGNO (XEXP (x, i))))
557 this_result = loc;
558 else
559 this_result = find_single_use_1 (dest, &XEXP (x, i));
560
561 if (result == NULL)
562 result = this_result;
563 else if (this_result)
564 /* Duplicate usage. */
565 return NULL;
566 }
567 else if (fmt[i] == 'E')
568 {
569 int j;
570
571 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
572 {
573 if (XVECEXP (x, i, j) == dest
574 || (REG_P (dest)
575 && REG_P (XVECEXP (x, i, j))
576 && REGNO (XVECEXP (x, i, j)) == REGNO (dest)))
577 this_result = loc;
578 else
579 this_result = find_single_use_1 (dest, &XVECEXP (x, i, j));
580
581 if (result == NULL)
582 result = this_result;
583 else if (this_result)
584 return NULL;
585 }
586 }
587 }
588
589 return result;
590 }
591
592
593 /* See if DEST, produced in INSN, is used only a single time in the
594 sequel. If so, return a pointer to the innermost rtx expression in which
595 it is used.
596
597 If PLOC is nonzero, *PLOC is set to the insn containing the single use.
598
599 If DEST is cc0_rtx, we look only at the next insn. In that case, we don't
600 care about REG_DEAD notes or LOG_LINKS.
601
602 Otherwise, we find the single use by finding an insn that has a
603 LOG_LINKS pointing at INSN and has a REG_DEAD note for DEST. If DEST is
604 only referenced once in that insn, we know that it must be the first
605 and last insn referencing DEST. */
606
607 static rtx *
608 find_single_use (rtx dest, rtx insn, rtx *ploc)
609 {
610 basic_block bb;
611 rtx next;
612 rtx *result;
613 rtx link;
614
615 #ifdef HAVE_cc0
616 if (dest == cc0_rtx)
617 {
618 next = NEXT_INSN (insn);
619 if (next == 0
620 || (!NONJUMP_INSN_P (next) && !JUMP_P (next)))
621 return 0;
622
623 result = find_single_use_1 (dest, &PATTERN (next));
624 if (result && ploc)
625 *ploc = next;
626 return result;
627 }
628 #endif
629
630 if (!REG_P (dest))
631 return 0;
632
633 bb = BLOCK_FOR_INSN (insn);
634 for (next = NEXT_INSN (insn);
635 next && BLOCK_FOR_INSN (next) == bb;
636 next = NEXT_INSN (next))
637 if (INSN_P (next) && dead_or_set_p (next, dest))
638 {
639 for (link = LOG_LINKS (next); link; link = XEXP (link, 1))
640 if (XEXP (link, 0) == insn)
641 break;
642
643 if (link)
644 {
645 result = find_single_use_1 (dest, &PATTERN (next));
646 if (ploc)
647 *ploc = next;
648 return result;
649 }
650 }
651
652 return 0;
653 }
654 \f
655 /* Substitute NEWVAL, an rtx expression, into INTO, a place in some
656 insn. The substitution can be undone by undo_all. If INTO is already
657 set to NEWVAL, do not record this change. Because computing NEWVAL might
658 also call SUBST, we have to compute it before we put anything into
659 the undo table. */
660
661 static void
662 do_SUBST (rtx *into, rtx newval)
663 {
664 struct undo *buf;
665 rtx oldval = *into;
666
667 if (oldval == newval)
668 return;
669
670 /* We'd like to catch as many invalid transformations here as
671 possible. Unfortunately, there are way too many mode changes
672 that are perfectly valid, so we'd waste too much effort for
673 little gain doing the checks here. Focus on catching invalid
674 transformations involving integer constants. */
675 if (GET_MODE_CLASS (GET_MODE (oldval)) == MODE_INT
676 && CONST_INT_P (newval))
677 {
678 /* Sanity check that we're replacing oldval with a CONST_INT
679 that is a valid sign-extension for the original mode. */
680 gcc_assert (INTVAL (newval)
681 == trunc_int_for_mode (INTVAL (newval), GET_MODE (oldval)));
682
683 /* Replacing the operand of a SUBREG or a ZERO_EXTEND with a
684 CONST_INT is not valid, because after the replacement, the
685 original mode would be gone. Unfortunately, we can't tell
686 when do_SUBST is called to replace the operand thereof, so we
687 perform this test on oldval instead, checking whether an
688 invalid replacement took place before we got here. */
689 gcc_assert (!(GET_CODE (oldval) == SUBREG
690 && CONST_INT_P (SUBREG_REG (oldval))));
691 gcc_assert (!(GET_CODE (oldval) == ZERO_EXTEND
692 && CONST_INT_P (XEXP (oldval, 0))));
693 }
694
695 if (undobuf.frees)
696 buf = undobuf.frees, undobuf.frees = buf->next;
697 else
698 buf = XNEW (struct undo);
699
700 buf->kind = UNDO_RTX;
701 buf->where.r = into;
702 buf->old_contents.r = oldval;
703 *into = newval;
704
705 buf->next = undobuf.undos, undobuf.undos = buf;
706 }
707
708 #define SUBST(INTO, NEWVAL) do_SUBST(&(INTO), (NEWVAL))
709
710 /* Similar to SUBST, but NEWVAL is an int expression. Note that substitution
711 for the value of a HOST_WIDE_INT value (including CONST_INT) is
712 not safe. */
713
714 static void
715 do_SUBST_INT (int *into, int newval)
716 {
717 struct undo *buf;
718 int oldval = *into;
719
720 if (oldval == newval)
721 return;
722
723 if (undobuf.frees)
724 buf = undobuf.frees, undobuf.frees = buf->next;
725 else
726 buf = XNEW (struct undo);
727
728 buf->kind = UNDO_INT;
729 buf->where.i = into;
730 buf->old_contents.i = oldval;
731 *into = newval;
732
733 buf->next = undobuf.undos, undobuf.undos = buf;
734 }
735
736 #define SUBST_INT(INTO, NEWVAL) do_SUBST_INT(&(INTO), (NEWVAL))
737
738 /* Similar to SUBST, but just substitute the mode. This is used when
739 changing the mode of a pseudo-register, so that any other
740 references to the entry in the regno_reg_rtx array will change as
741 well. */
742
743 static void
744 do_SUBST_MODE (rtx *into, enum machine_mode newval)
745 {
746 struct undo *buf;
747 enum machine_mode oldval = GET_MODE (*into);
748
749 if (oldval == newval)
750 return;
751
752 if (undobuf.frees)
753 buf = undobuf.frees, undobuf.frees = buf->next;
754 else
755 buf = XNEW (struct undo);
756
757 buf->kind = UNDO_MODE;
758 buf->where.r = into;
759 buf->old_contents.m = oldval;
760 adjust_reg_mode (*into, newval);
761
762 buf->next = undobuf.undos, undobuf.undos = buf;
763 }
764
765 #define SUBST_MODE(INTO, NEWVAL) do_SUBST_MODE(&(INTO), (NEWVAL))
766 \f
767 /* Subroutine of try_combine. Determine whether the combine replacement
768 patterns NEWPAT, NEWI2PAT and NEWOTHERPAT are cheaper according to
769 insn_rtx_cost that the original instruction sequence I0, I1, I2, I3 and
770 undobuf.other_insn. Note that I1 and/or NEWI2PAT may be NULL_RTX.
771 NEWOTHERPAT and undobuf.other_insn may also both be NULL_RTX. This
772 function returns false, if the costs of all instructions can be
773 estimated, and the replacements are more expensive than the original
774 sequence. */
775
776 static bool
777 combine_validate_cost (rtx i0, rtx i1, rtx i2, rtx i3, rtx newpat,
778 rtx newi2pat, rtx newotherpat)
779 {
780 int i0_cost, i1_cost, i2_cost, i3_cost;
781 int new_i2_cost, new_i3_cost;
782 int old_cost, new_cost;
783
784 /* Lookup the original insn_rtx_costs. */
785 i2_cost = INSN_COST (i2);
786 i3_cost = INSN_COST (i3);
787
788 if (i1)
789 {
790 i1_cost = INSN_COST (i1);
791 if (i0)
792 {
793 i0_cost = INSN_COST (i0);
794 old_cost = (i0_cost > 0 && i1_cost > 0 && i2_cost > 0 && i3_cost > 0
795 ? i0_cost + i1_cost + i2_cost + i3_cost : 0);
796 }
797 else
798 {
799 old_cost = (i1_cost > 0 && i2_cost > 0 && i3_cost > 0
800 ? i1_cost + i2_cost + i3_cost : 0);
801 i0_cost = 0;
802 }
803 }
804 else
805 {
806 old_cost = (i2_cost > 0 && i3_cost > 0) ? i2_cost + i3_cost : 0;
807 i1_cost = i0_cost = 0;
808 }
809
810 /* Calculate the replacement insn_rtx_costs. */
811 new_i3_cost = insn_rtx_cost (newpat, optimize_this_for_speed_p);
812 if (newi2pat)
813 {
814 new_i2_cost = insn_rtx_cost (newi2pat, optimize_this_for_speed_p);
815 new_cost = (new_i2_cost > 0 && new_i3_cost > 0)
816 ? new_i2_cost + new_i3_cost : 0;
817 }
818 else
819 {
820 new_cost = new_i3_cost;
821 new_i2_cost = 0;
822 }
823
824 if (undobuf.other_insn)
825 {
826 int old_other_cost, new_other_cost;
827
828 old_other_cost = INSN_COST (undobuf.other_insn);
829 new_other_cost = insn_rtx_cost (newotherpat, optimize_this_for_speed_p);
830 if (old_other_cost > 0 && new_other_cost > 0)
831 {
832 old_cost += old_other_cost;
833 new_cost += new_other_cost;
834 }
835 else
836 old_cost = 0;
837 }
838
839 /* Disallow this recombination if both new_cost and old_cost are
840 greater than zero, and new_cost is greater than old cost. */
841 if (old_cost > 0
842 && new_cost > old_cost)
843 {
844 if (dump_file)
845 {
846 if (i0)
847 {
848 fprintf (dump_file,
849 "rejecting combination of insns %d, %d, %d and %d\n",
850 INSN_UID (i0), INSN_UID (i1), INSN_UID (i2),
851 INSN_UID (i3));
852 fprintf (dump_file, "original costs %d + %d + %d + %d = %d\n",
853 i0_cost, i1_cost, i2_cost, i3_cost, old_cost);
854 }
855 else if (i1)
856 {
857 fprintf (dump_file,
858 "rejecting combination of insns %d, %d and %d\n",
859 INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
860 fprintf (dump_file, "original costs %d + %d + %d = %d\n",
861 i1_cost, i2_cost, i3_cost, old_cost);
862 }
863 else
864 {
865 fprintf (dump_file,
866 "rejecting combination of insns %d and %d\n",
867 INSN_UID (i2), INSN_UID (i3));
868 fprintf (dump_file, "original costs %d + %d = %d\n",
869 i2_cost, i3_cost, old_cost);
870 }
871
872 if (newi2pat)
873 {
874 fprintf (dump_file, "replacement costs %d + %d = %d\n",
875 new_i2_cost, new_i3_cost, new_cost);
876 }
877 else
878 fprintf (dump_file, "replacement cost %d\n", new_cost);
879 }
880
881 return false;
882 }
883
884 /* Update the uid_insn_cost array with the replacement costs. */
885 INSN_COST (i2) = new_i2_cost;
886 INSN_COST (i3) = new_i3_cost;
887 if (i1)
888 INSN_COST (i1) = 0;
889
890 return true;
891 }
892
893
894 /* Delete any insns that copy a register to itself. */
895
896 static void
897 delete_noop_moves (void)
898 {
899 rtx insn, next;
900 basic_block bb;
901
902 FOR_EACH_BB (bb)
903 {
904 for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = next)
905 {
906 next = NEXT_INSN (insn);
907 if (INSN_P (insn) && noop_move_p (insn))
908 {
909 if (dump_file)
910 fprintf (dump_file, "deleting noop move %d\n", INSN_UID (insn));
911
912 delete_insn_and_edges (insn);
913 }
914 }
915 }
916 }
917
918 \f
919 /* Fill in log links field for all insns. */
920
921 static void
922 create_log_links (void)
923 {
924 basic_block bb;
925 rtx *next_use, insn;
926 df_ref *def_vec, *use_vec;
927
928 next_use = XCNEWVEC (rtx, max_reg_num ());
929
930 /* Pass through each block from the end, recording the uses of each
931 register and establishing log links when def is encountered.
932 Note that we do not clear next_use array in order to save time,
933 so we have to test whether the use is in the same basic block as def.
934
935 There are a few cases below when we do not consider the definition or
936 usage -- these are taken from original flow.c did. Don't ask me why it is
937 done this way; I don't know and if it works, I don't want to know. */
938
939 FOR_EACH_BB (bb)
940 {
941 FOR_BB_INSNS_REVERSE (bb, insn)
942 {
943 if (!NONDEBUG_INSN_P (insn))
944 continue;
945
946 /* Log links are created only once. */
947 gcc_assert (!LOG_LINKS (insn));
948
949 for (def_vec = DF_INSN_DEFS (insn); *def_vec; def_vec++)
950 {
951 df_ref def = *def_vec;
952 int regno = DF_REF_REGNO (def);
953 rtx use_insn;
954
955 if (!next_use[regno])
956 continue;
957
958 /* Do not consider if it is pre/post modification in MEM. */
959 if (DF_REF_FLAGS (def) & DF_REF_PRE_POST_MODIFY)
960 continue;
961
962 /* Do not make the log link for frame pointer. */
963 if ((regno == FRAME_POINTER_REGNUM
964 && (! reload_completed || frame_pointer_needed))
965 #if !HARD_FRAME_POINTER_IS_FRAME_POINTER
966 || (regno == HARD_FRAME_POINTER_REGNUM
967 && (! reload_completed || frame_pointer_needed))
968 #endif
969 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
970 || (regno == ARG_POINTER_REGNUM && fixed_regs[regno])
971 #endif
972 )
973 continue;
974
975 use_insn = next_use[regno];
976 if (BLOCK_FOR_INSN (use_insn) == bb)
977 {
978 /* flow.c claimed:
979
980 We don't build a LOG_LINK for hard registers contained
981 in ASM_OPERANDs. If these registers get replaced,
982 we might wind up changing the semantics of the insn,
983 even if reload can make what appear to be valid
984 assignments later. */
985 if (regno >= FIRST_PSEUDO_REGISTER
986 || asm_noperands (PATTERN (use_insn)) < 0)
987 {
988 /* Don't add duplicate links between instructions. */
989 rtx links;
990 for (links = LOG_LINKS (use_insn); links;
991 links = XEXP (links, 1))
992 if (insn == XEXP (links, 0))
993 break;
994
995 if (!links)
996 LOG_LINKS (use_insn) =
997 alloc_INSN_LIST (insn, LOG_LINKS (use_insn));
998 }
999 }
1000 next_use[regno] = NULL_RTX;
1001 }
1002
1003 for (use_vec = DF_INSN_USES (insn); *use_vec; use_vec++)
1004 {
1005 df_ref use = *use_vec;
1006 int regno = DF_REF_REGNO (use);
1007
1008 /* Do not consider the usage of the stack pointer
1009 by function call. */
1010 if (DF_REF_FLAGS (use) & DF_REF_CALL_STACK_USAGE)
1011 continue;
1012
1013 next_use[regno] = insn;
1014 }
1015 }
1016 }
1017
1018 free (next_use);
1019 }
1020
1021 /* Clear LOG_LINKS fields of insns. */
1022
1023 static void
1024 clear_log_links (void)
1025 {
1026 rtx insn;
1027
1028 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
1029 if (INSN_P (insn))
1030 free_INSN_LIST_list (&LOG_LINKS (insn));
1031 }
1032
1033 /* Walk the LOG_LINKS of insn B to see if we find a reference to A. Return
1034 true if we found a LOG_LINK that proves that A feeds B. This only works
1035 if there are no instructions between A and B which could have a link
1036 depending on A, since in that case we would not record a link for B. */
1037
1038 static bool
1039 insn_a_feeds_b (rtx a, rtx b)
1040 {
1041 rtx links;
1042 for (links = LOG_LINKS (b); links; links = XEXP (links, 1))
1043 if (XEXP (links, 0) == a)
1044 return true;
1045 return false;
1046 }
1047 \f
1048 /* Main entry point for combiner. F is the first insn of the function.
1049 NREGS is the first unused pseudo-reg number.
1050
1051 Return nonzero if the combiner has turned an indirect jump
1052 instruction into a direct jump. */
1053 static int
1054 combine_instructions (rtx f, unsigned int nregs)
1055 {
1056 rtx insn, next;
1057 #ifdef HAVE_cc0
1058 rtx prev;
1059 #endif
1060 rtx links, nextlinks;
1061 rtx first;
1062 basic_block last_bb;
1063
1064 int new_direct_jump_p = 0;
1065
1066 for (first = f; first && !INSN_P (first); )
1067 first = NEXT_INSN (first);
1068 if (!first)
1069 return 0;
1070
1071 combine_attempts = 0;
1072 combine_merges = 0;
1073 combine_extras = 0;
1074 combine_successes = 0;
1075
1076 rtl_hooks = combine_rtl_hooks;
1077
1078 VEC_safe_grow_cleared (reg_stat_type, heap, reg_stat, nregs);
1079
1080 init_recog_no_volatile ();
1081
1082 /* Allocate array for insn info. */
1083 max_uid_known = get_max_uid ();
1084 uid_log_links = XCNEWVEC (rtx, max_uid_known + 1);
1085 uid_insn_cost = XCNEWVEC (int, max_uid_known + 1);
1086
1087 nonzero_bits_mode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);
1088
1089 /* Don't use reg_stat[].nonzero_bits when computing it. This can cause
1090 problems when, for example, we have j <<= 1 in a loop. */
1091
1092 nonzero_sign_valid = 0;
1093 label_tick = label_tick_ebb_start = 1;
1094
1095 /* Scan all SETs and see if we can deduce anything about what
1096 bits are known to be zero for some registers and how many copies
1097 of the sign bit are known to exist for those registers.
1098
1099 Also set any known values so that we can use it while searching
1100 for what bits are known to be set. */
1101
1102 setup_incoming_promotions (first);
1103 /* Allow the entry block and the first block to fall into the same EBB.
1104 Conceptually the incoming promotions are assigned to the entry block. */
1105 last_bb = ENTRY_BLOCK_PTR;
1106
1107 create_log_links ();
1108 FOR_EACH_BB (this_basic_block)
1109 {
1110 optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
1111 last_call_luid = 0;
1112 mem_last_set = -1;
1113
1114 label_tick++;
1115 if (!single_pred_p (this_basic_block)
1116 || single_pred (this_basic_block) != last_bb)
1117 label_tick_ebb_start = label_tick;
1118 last_bb = this_basic_block;
1119
1120 FOR_BB_INSNS (this_basic_block, insn)
1121 if (INSN_P (insn) && BLOCK_FOR_INSN (insn))
1122 {
1123 subst_low_luid = DF_INSN_LUID (insn);
1124 subst_insn = insn;
1125
1126 note_stores (PATTERN (insn), set_nonzero_bits_and_sign_copies,
1127 insn);
1128 record_dead_and_set_regs (insn);
1129
1130 #ifdef AUTO_INC_DEC
1131 for (links = REG_NOTES (insn); links; links = XEXP (links, 1))
1132 if (REG_NOTE_KIND (links) == REG_INC)
1133 set_nonzero_bits_and_sign_copies (XEXP (links, 0), NULL_RTX,
1134 insn);
1135 #endif
1136
1137 /* Record the current insn_rtx_cost of this instruction. */
1138 if (NONJUMP_INSN_P (insn))
1139 INSN_COST (insn) = insn_rtx_cost (PATTERN (insn),
1140 optimize_this_for_speed_p);
1141 if (dump_file)
1142 fprintf(dump_file, "insn_cost %d: %d\n",
1143 INSN_UID (insn), INSN_COST (insn));
1144 }
1145 }
1146
1147 nonzero_sign_valid = 1;
1148
1149 /* Now scan all the insns in forward order. */
1150 label_tick = label_tick_ebb_start = 1;
1151 init_reg_last ();
1152 setup_incoming_promotions (first);
1153 last_bb = ENTRY_BLOCK_PTR;
1154
1155 FOR_EACH_BB (this_basic_block)
1156 {
1157 optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
1158 last_call_luid = 0;
1159 mem_last_set = -1;
1160
1161 label_tick++;
1162 if (!single_pred_p (this_basic_block)
1163 || single_pred (this_basic_block) != last_bb)
1164 label_tick_ebb_start = label_tick;
1165 last_bb = this_basic_block;
1166
1167 rtl_profile_for_bb (this_basic_block);
1168 for (insn = BB_HEAD (this_basic_block);
1169 insn != NEXT_INSN (BB_END (this_basic_block));
1170 insn = next ? next : NEXT_INSN (insn))
1171 {
1172 next = 0;
1173 if (NONDEBUG_INSN_P (insn))
1174 {
1175 /* See if we know about function return values before this
1176 insn based upon SUBREG flags. */
1177 check_promoted_subreg (insn, PATTERN (insn));
1178
1179 /* See if we can find hardregs and subreg of pseudos in
1180 narrower modes. This could help turning TRUNCATEs
1181 into SUBREGs. */
1182 note_uses (&PATTERN (insn), record_truncated_values, NULL);
1183
1184 /* Try this insn with each insn it links back to. */
1185
1186 for (links = LOG_LINKS (insn); links; links = XEXP (links, 1))
1187 if ((next = try_combine (insn, XEXP (links, 0), NULL_RTX,
1188 NULL_RTX, &new_direct_jump_p)) != 0)
1189 goto retry;
1190
1191 /* Try each sequence of three linked insns ending with this one. */
1192
1193 for (links = LOG_LINKS (insn); links; links = XEXP (links, 1))
1194 {
1195 rtx link = XEXP (links, 0);
1196
1197 /* If the linked insn has been replaced by a note, then there
1198 is no point in pursuing this chain any further. */
1199 if (NOTE_P (link))
1200 continue;
1201
1202 for (nextlinks = LOG_LINKS (link);
1203 nextlinks;
1204 nextlinks = XEXP (nextlinks, 1))
1205 if ((next = try_combine (insn, link, XEXP (nextlinks, 0),
1206 NULL_RTX,
1207 &new_direct_jump_p)) != 0)
1208 goto retry;
1209 }
1210
1211 #ifdef HAVE_cc0
1212 /* Try to combine a jump insn that uses CC0
1213 with a preceding insn that sets CC0, and maybe with its
1214 logical predecessor as well.
1215 This is how we make decrement-and-branch insns.
1216 We need this special code because data flow connections
1217 via CC0 do not get entered in LOG_LINKS. */
1218
1219 if (JUMP_P (insn)
1220 && (prev = prev_nonnote_insn (insn)) != 0
1221 && NONJUMP_INSN_P (prev)
1222 && sets_cc0_p (PATTERN (prev)))
1223 {
1224 if ((next = try_combine (insn, prev, NULL_RTX, NULL_RTX,
1225 &new_direct_jump_p)) != 0)
1226 goto retry;
1227
1228 for (nextlinks = LOG_LINKS (prev); nextlinks;
1229 nextlinks = XEXP (nextlinks, 1))
1230 if ((next = try_combine (insn, prev, XEXP (nextlinks, 0),
1231 NULL_RTX,
1232 &new_direct_jump_p)) != 0)
1233 goto retry;
1234 }
1235
1236 /* Do the same for an insn that explicitly references CC0. */
1237 if (NONJUMP_INSN_P (insn)
1238 && (prev = prev_nonnote_insn (insn)) != 0
1239 && NONJUMP_INSN_P (prev)
1240 && sets_cc0_p (PATTERN (prev))
1241 && GET_CODE (PATTERN (insn)) == SET
1242 && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (insn))))
1243 {
1244 if ((next = try_combine (insn, prev, NULL_RTX, NULL_RTX,
1245 &new_direct_jump_p)) != 0)
1246 goto retry;
1247
1248 for (nextlinks = LOG_LINKS (prev); nextlinks;
1249 nextlinks = XEXP (nextlinks, 1))
1250 if ((next = try_combine (insn, prev, XEXP (nextlinks, 0),
1251 NULL_RTX,
1252 &new_direct_jump_p)) != 0)
1253 goto retry;
1254 }
1255
1256 /* Finally, see if any of the insns that this insn links to
1257 explicitly references CC0. If so, try this insn, that insn,
1258 and its predecessor if it sets CC0. */
1259 for (links = LOG_LINKS (insn); links; links = XEXP (links, 1))
1260 if (NONJUMP_INSN_P (XEXP (links, 0))
1261 && GET_CODE (PATTERN (XEXP (links, 0))) == SET
1262 && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (XEXP (links, 0))))
1263 && (prev = prev_nonnote_insn (XEXP (links, 0))) != 0
1264 && NONJUMP_INSN_P (prev)
1265 && sets_cc0_p (PATTERN (prev))
1266 && (next = try_combine (insn, XEXP (links, 0),
1267 prev, NULL_RTX,
1268 &new_direct_jump_p)) != 0)
1269 goto retry;
1270 #endif
1271
1272 /* Try combining an insn with two different insns whose results it
1273 uses. */
1274 for (links = LOG_LINKS (insn); links; links = XEXP (links, 1))
1275 for (nextlinks = XEXP (links, 1); nextlinks;
1276 nextlinks = XEXP (nextlinks, 1))
1277 if ((next = try_combine (insn, XEXP (links, 0),
1278 XEXP (nextlinks, 0), NULL_RTX,
1279 &new_direct_jump_p)) != 0)
1280 goto retry;
1281
1282 /* Try four-instruction combinations. */
1283 for (links = LOG_LINKS (insn); links; links = XEXP (links, 1))
1284 {
1285 rtx next1;
1286 rtx link = XEXP (links, 0);
1287
1288 /* If the linked insn has been replaced by a note, then there
1289 is no point in pursuing this chain any further. */
1290 if (NOTE_P (link))
1291 continue;
1292
1293 for (next1 = LOG_LINKS (link); next1; next1 = XEXP (next1, 1))
1294 {
1295 rtx link1 = XEXP (next1, 0);
1296 if (NOTE_P (link1))
1297 continue;
1298 /* I0 -> I1 -> I2 -> I3. */
1299 for (nextlinks = LOG_LINKS (link1); nextlinks;
1300 nextlinks = XEXP (nextlinks, 1))
1301 if ((next = try_combine (insn, link, link1,
1302 XEXP (nextlinks, 0),
1303 &new_direct_jump_p)) != 0)
1304 goto retry;
1305 /* I0, I1 -> I2, I2 -> I3. */
1306 for (nextlinks = XEXP (next1, 1); nextlinks;
1307 nextlinks = XEXP (nextlinks, 1))
1308 if ((next = try_combine (insn, link, link1,
1309 XEXP (nextlinks, 0),
1310 &new_direct_jump_p)) != 0)
1311 goto retry;
1312 }
1313
1314 for (next1 = XEXP (links, 1); next1; next1 = XEXP (next1, 1))
1315 {
1316 rtx link1 = XEXP (next1, 0);
1317 if (NOTE_P (link1))
1318 continue;
1319 /* I0 -> I2; I1, I2 -> I3. */
1320 for (nextlinks = LOG_LINKS (link); nextlinks;
1321 nextlinks = XEXP (nextlinks, 1))
1322 if ((next = try_combine (insn, link, link1,
1323 XEXP (nextlinks, 0),
1324 &new_direct_jump_p)) != 0)
1325 goto retry;
1326 /* I0 -> I1; I1, I2 -> I3. */
1327 for (nextlinks = LOG_LINKS (link1); nextlinks;
1328 nextlinks = XEXP (nextlinks, 1))
1329 if ((next = try_combine (insn, link, link1,
1330 XEXP (nextlinks, 0),
1331 &new_direct_jump_p)) != 0)
1332 goto retry;
1333 }
1334 }
1335
1336 /* Try this insn with each REG_EQUAL note it links back to. */
1337 for (links = LOG_LINKS (insn); links; links = XEXP (links, 1))
1338 {
1339 rtx set, note;
1340 rtx temp = XEXP (links, 0);
1341 if ((set = single_set (temp)) != 0
1342 && (note = find_reg_equal_equiv_note (temp)) != 0
1343 && (note = XEXP (note, 0), GET_CODE (note)) != EXPR_LIST
1344 /* Avoid using a register that may already been marked
1345 dead by an earlier instruction. */
1346 && ! unmentioned_reg_p (note, SET_SRC (set))
1347 && (GET_MODE (note) == VOIDmode
1348 ? SCALAR_INT_MODE_P (GET_MODE (SET_DEST (set)))
1349 : GET_MODE (SET_DEST (set)) == GET_MODE (note)))
1350 {
1351 /* Temporarily replace the set's source with the
1352 contents of the REG_EQUAL note. The insn will
1353 be deleted or recognized by try_combine. */
1354 rtx orig = SET_SRC (set);
1355 SET_SRC (set) = note;
1356 i2mod = temp;
1357 i2mod_old_rhs = copy_rtx (orig);
1358 i2mod_new_rhs = copy_rtx (note);
1359 next = try_combine (insn, i2mod, NULL_RTX, NULL_RTX,
1360 &new_direct_jump_p);
1361 i2mod = NULL_RTX;
1362 if (next)
1363 goto retry;
1364 SET_SRC (set) = orig;
1365 }
1366 }
1367
1368 if (!NOTE_P (insn))
1369 record_dead_and_set_regs (insn);
1370
1371 retry:
1372 ;
1373 }
1374 }
1375 }
1376
1377 default_rtl_profile ();
1378 clear_log_links ();
1379 clear_bb_flags ();
1380 new_direct_jump_p |= purge_all_dead_edges ();
1381 delete_noop_moves ();
1382
1383 /* Clean up. */
1384 free (uid_log_links);
1385 free (uid_insn_cost);
1386 VEC_free (reg_stat_type, heap, reg_stat);
1387
1388 {
1389 struct undo *undo, *next;
1390 for (undo = undobuf.frees; undo; undo = next)
1391 {
1392 next = undo->next;
1393 free (undo);
1394 }
1395 undobuf.frees = 0;
1396 }
1397
1398 total_attempts += combine_attempts;
1399 total_merges += combine_merges;
1400 total_extras += combine_extras;
1401 total_successes += combine_successes;
1402
1403 nonzero_sign_valid = 0;
1404 rtl_hooks = general_rtl_hooks;
1405
1406 /* Make recognizer allow volatile MEMs again. */
1407 init_recog ();
1408
1409 return new_direct_jump_p;
1410 }
1411
1412 /* Wipe the last_xxx fields of reg_stat in preparation for another pass. */
1413
1414 static void
1415 init_reg_last (void)
1416 {
1417 unsigned int i;
1418 reg_stat_type *p;
1419
1420 FOR_EACH_VEC_ELT (reg_stat_type, reg_stat, i, p)
1421 memset (p, 0, offsetof (reg_stat_type, sign_bit_copies));
1422 }
1423 \f
1424 /* Set up any promoted values for incoming argument registers. */
1425
1426 static void
1427 setup_incoming_promotions (rtx first)
1428 {
1429 tree arg;
1430 bool strictly_local = false;
1431
1432 for (arg = DECL_ARGUMENTS (current_function_decl); arg;
1433 arg = DECL_CHAIN (arg))
1434 {
1435 rtx x, reg = DECL_INCOMING_RTL (arg);
1436 int uns1, uns3;
1437 enum machine_mode mode1, mode2, mode3, mode4;
1438
1439 /* Only continue if the incoming argument is in a register. */
1440 if (!REG_P (reg))
1441 continue;
1442
1443 /* Determine, if possible, whether all call sites of the current
1444 function lie within the current compilation unit. (This does
1445 take into account the exporting of a function via taking its
1446 address, and so forth.) */
1447 strictly_local = cgraph_local_info (current_function_decl)->local;
1448
1449 /* The mode and signedness of the argument before any promotions happen
1450 (equal to the mode of the pseudo holding it at that stage). */
1451 mode1 = TYPE_MODE (TREE_TYPE (arg));
1452 uns1 = TYPE_UNSIGNED (TREE_TYPE (arg));
1453
1454 /* The mode and signedness of the argument after any source language and
1455 TARGET_PROMOTE_PROTOTYPES-driven promotions. */
1456 mode2 = TYPE_MODE (DECL_ARG_TYPE (arg));
1457 uns3 = TYPE_UNSIGNED (DECL_ARG_TYPE (arg));
1458
1459 /* The mode and signedness of the argument as it is actually passed,
1460 after any TARGET_PROMOTE_FUNCTION_ARGS-driven ABI promotions. */
1461 mode3 = promote_function_mode (DECL_ARG_TYPE (arg), mode2, &uns3,
1462 TREE_TYPE (cfun->decl), 0);
1463
1464 /* The mode of the register in which the argument is being passed. */
1465 mode4 = GET_MODE (reg);
1466
1467 /* Eliminate sign extensions in the callee when:
1468 (a) A mode promotion has occurred; */
1469 if (mode1 == mode3)
1470 continue;
1471 /* (b) The mode of the register is the same as the mode of
1472 the argument as it is passed; */
1473 if (mode3 != mode4)
1474 continue;
1475 /* (c) There's no language level extension; */
1476 if (mode1 == mode2)
1477 ;
1478 /* (c.1) All callers are from the current compilation unit. If that's
1479 the case we don't have to rely on an ABI, we only have to know
1480 what we're generating right now, and we know that we will do the
1481 mode1 to mode2 promotion with the given sign. */
1482 else if (!strictly_local)
1483 continue;
1484 /* (c.2) The combination of the two promotions is useful. This is
1485 true when the signs match, or if the first promotion is unsigned.
1486 In the later case, (sign_extend (zero_extend x)) is the same as
1487 (zero_extend (zero_extend x)), so make sure to force UNS3 true. */
1488 else if (uns1)
1489 uns3 = true;
1490 else if (uns3)
1491 continue;
1492
1493 /* Record that the value was promoted from mode1 to mode3,
1494 so that any sign extension at the head of the current
1495 function may be eliminated. */
1496 x = gen_rtx_CLOBBER (mode1, const0_rtx);
1497 x = gen_rtx_fmt_e ((uns3 ? ZERO_EXTEND : SIGN_EXTEND), mode3, x);
1498 record_value_for_reg (reg, first, x);
1499 }
1500 }
1501
1502 /* Called via note_stores. If X is a pseudo that is narrower than
1503 HOST_BITS_PER_WIDE_INT and is being set, record what bits are known zero.
1504
1505 If we are setting only a portion of X and we can't figure out what
1506 portion, assume all bits will be used since we don't know what will
1507 be happening.
1508
1509 Similarly, set how many bits of X are known to be copies of the sign bit
1510 at all locations in the function. This is the smallest number implied
1511 by any set of X. */
1512
1513 static void
1514 set_nonzero_bits_and_sign_copies (rtx x, const_rtx set, void *data)
1515 {
1516 rtx insn = (rtx) data;
1517 unsigned int num;
1518
1519 if (REG_P (x)
1520 && REGNO (x) >= FIRST_PSEUDO_REGISTER
1521 /* If this register is undefined at the start of the file, we can't
1522 say what its contents were. */
1523 && ! REGNO_REG_SET_P
1524 (DF_LR_IN (ENTRY_BLOCK_PTR->next_bb), REGNO (x))
1525 && GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT)
1526 {
1527 reg_stat_type *rsp = VEC_index (reg_stat_type, reg_stat, REGNO (x));
1528
1529 if (set == 0 || GET_CODE (set) == CLOBBER)
1530 {
1531 rsp->nonzero_bits = GET_MODE_MASK (GET_MODE (x));
1532 rsp->sign_bit_copies = 1;
1533 return;
1534 }
1535
1536 /* If this register is being initialized using itself, and the
1537 register is uninitialized in this basic block, and there are
1538 no LOG_LINKS which set the register, then part of the
1539 register is uninitialized. In that case we can't assume
1540 anything about the number of nonzero bits.
1541
1542 ??? We could do better if we checked this in
1543 reg_{nonzero_bits,num_sign_bit_copies}_for_combine. Then we
1544 could avoid making assumptions about the insn which initially
1545 sets the register, while still using the information in other
1546 insns. We would have to be careful to check every insn
1547 involved in the combination. */
1548
1549 if (insn
1550 && reg_referenced_p (x, PATTERN (insn))
1551 && !REGNO_REG_SET_P (DF_LR_IN (BLOCK_FOR_INSN (insn)),
1552 REGNO (x)))
1553 {
1554 rtx link;
1555
1556 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
1557 {
1558 if (dead_or_set_p (XEXP (link, 0), x))
1559 break;
1560 }
1561 if (!link)
1562 {
1563 rsp->nonzero_bits = GET_MODE_MASK (GET_MODE (x));
1564 rsp->sign_bit_copies = 1;
1565 return;
1566 }
1567 }
1568
1569 /* If this is a complex assignment, see if we can convert it into a
1570 simple assignment. */
1571 set = expand_field_assignment (set);
1572
1573 /* If this is a simple assignment, or we have a paradoxical SUBREG,
1574 set what we know about X. */
1575
1576 if (SET_DEST (set) == x
1577 || (GET_CODE (SET_DEST (set)) == SUBREG
1578 && (GET_MODE_SIZE (GET_MODE (SET_DEST (set)))
1579 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (set)))))
1580 && SUBREG_REG (SET_DEST (set)) == x))
1581 {
1582 rtx src = SET_SRC (set);
1583
1584 #ifdef SHORT_IMMEDIATES_SIGN_EXTEND
1585 /* If X is narrower than a word and SRC is a non-negative
1586 constant that would appear negative in the mode of X,
1587 sign-extend it for use in reg_stat[].nonzero_bits because some
1588 machines (maybe most) will actually do the sign-extension
1589 and this is the conservative approach.
1590
1591 ??? For 2.5, try to tighten up the MD files in this regard
1592 instead of this kludge. */
1593
1594 if (GET_MODE_BITSIZE (GET_MODE (x)) < BITS_PER_WORD
1595 && CONST_INT_P (src)
1596 && INTVAL (src) > 0
1597 && 0 != (UINTVAL (src)
1598 & ((unsigned HOST_WIDE_INT) 1
1599 << (GET_MODE_BITSIZE (GET_MODE (x)) - 1))))
1600 src = GEN_INT (UINTVAL (src)
1601 | ((unsigned HOST_WIDE_INT) (-1)
1602 << GET_MODE_BITSIZE (GET_MODE (x))));
1603 #endif
1604
1605 /* Don't call nonzero_bits if it cannot change anything. */
1606 if (rsp->nonzero_bits != ~(unsigned HOST_WIDE_INT) 0)
1607 rsp->nonzero_bits |= nonzero_bits (src, nonzero_bits_mode);
1608 num = num_sign_bit_copies (SET_SRC (set), GET_MODE (x));
1609 if (rsp->sign_bit_copies == 0
1610 || rsp->sign_bit_copies > num)
1611 rsp->sign_bit_copies = num;
1612 }
1613 else
1614 {
1615 rsp->nonzero_bits = GET_MODE_MASK (GET_MODE (x));
1616 rsp->sign_bit_copies = 1;
1617 }
1618 }
1619 }
1620 \f
1621 /* See if INSN can be combined into I3. PRED, PRED2, SUCC and SUCC2 are
1622 optionally insns that were previously combined into I3 or that will be
1623 combined into the merger of INSN and I3. The order is PRED, PRED2,
1624 INSN, SUCC, SUCC2, I3.
1625
1626 Return 0 if the combination is not allowed for any reason.
1627
1628 If the combination is allowed, *PDEST will be set to the single
1629 destination of INSN and *PSRC to the single source, and this function
1630 will return 1. */
1631
1632 static int
1633 can_combine_p (rtx insn, rtx i3, rtx pred ATTRIBUTE_UNUSED,
1634 rtx pred2 ATTRIBUTE_UNUSED, rtx succ, rtx succ2,
1635 rtx *pdest, rtx *psrc)
1636 {
1637 int i;
1638 const_rtx set = 0;
1639 rtx src, dest;
1640 rtx p;
1641 #ifdef AUTO_INC_DEC
1642 rtx link;
1643 #endif
1644 bool all_adjacent = true;
1645
1646 if (succ)
1647 {
1648 if (succ2)
1649 {
1650 if (next_active_insn (succ2) != i3)
1651 all_adjacent = false;
1652 if (next_active_insn (succ) != succ2)
1653 all_adjacent = false;
1654 }
1655 else if (next_active_insn (succ) != i3)
1656 all_adjacent = false;
1657 if (next_active_insn (insn) != succ)
1658 all_adjacent = false;
1659 }
1660 else if (next_active_insn (insn) != i3)
1661 all_adjacent = false;
1662
1663 /* Can combine only if previous insn is a SET of a REG, a SUBREG or CC0.
1664 or a PARALLEL consisting of such a SET and CLOBBERs.
1665
1666 If INSN has CLOBBER parallel parts, ignore them for our processing.
1667 By definition, these happen during the execution of the insn. When it
1668 is merged with another insn, all bets are off. If they are, in fact,
1669 needed and aren't also supplied in I3, they may be added by
1670 recog_for_combine. Otherwise, it won't match.
1671
1672 We can also ignore a SET whose SET_DEST is mentioned in a REG_UNUSED
1673 note.
1674
1675 Get the source and destination of INSN. If more than one, can't
1676 combine. */
1677
1678 if (GET_CODE (PATTERN (insn)) == SET)
1679 set = PATTERN (insn);
1680 else if (GET_CODE (PATTERN (insn)) == PARALLEL
1681 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET)
1682 {
1683 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
1684 {
1685 rtx elt = XVECEXP (PATTERN (insn), 0, i);
1686
1687 switch (GET_CODE (elt))
1688 {
1689 /* This is important to combine floating point insns
1690 for the SH4 port. */
1691 case USE:
1692 /* Combining an isolated USE doesn't make sense.
1693 We depend here on combinable_i3pat to reject them. */
1694 /* The code below this loop only verifies that the inputs of
1695 the SET in INSN do not change. We call reg_set_between_p
1696 to verify that the REG in the USE does not change between
1697 I3 and INSN.
1698 If the USE in INSN was for a pseudo register, the matching
1699 insn pattern will likely match any register; combining this
1700 with any other USE would only be safe if we knew that the
1701 used registers have identical values, or if there was
1702 something to tell them apart, e.g. different modes. For
1703 now, we forgo such complicated tests and simply disallow
1704 combining of USES of pseudo registers with any other USE. */
1705 if (REG_P (XEXP (elt, 0))
1706 && GET_CODE (PATTERN (i3)) == PARALLEL)
1707 {
1708 rtx i3pat = PATTERN (i3);
1709 int i = XVECLEN (i3pat, 0) - 1;
1710 unsigned int regno = REGNO (XEXP (elt, 0));
1711
1712 do
1713 {
1714 rtx i3elt = XVECEXP (i3pat, 0, i);
1715
1716 if (GET_CODE (i3elt) == USE
1717 && REG_P (XEXP (i3elt, 0))
1718 && (REGNO (XEXP (i3elt, 0)) == regno
1719 ? reg_set_between_p (XEXP (elt, 0),
1720 PREV_INSN (insn), i3)
1721 : regno >= FIRST_PSEUDO_REGISTER))
1722 return 0;
1723 }
1724 while (--i >= 0);
1725 }
1726 break;
1727
1728 /* We can ignore CLOBBERs. */
1729 case CLOBBER:
1730 break;
1731
1732 case SET:
1733 /* Ignore SETs whose result isn't used but not those that
1734 have side-effects. */
1735 if (find_reg_note (insn, REG_UNUSED, SET_DEST (elt))
1736 && insn_nothrow_p (insn)
1737 && !side_effects_p (elt))
1738 break;
1739
1740 /* If we have already found a SET, this is a second one and
1741 so we cannot combine with this insn. */
1742 if (set)
1743 return 0;
1744
1745 set = elt;
1746 break;
1747
1748 default:
1749 /* Anything else means we can't combine. */
1750 return 0;
1751 }
1752 }
1753
1754 if (set == 0
1755 /* If SET_SRC is an ASM_OPERANDS we can't throw away these CLOBBERs,
1756 so don't do anything with it. */
1757 || GET_CODE (SET_SRC (set)) == ASM_OPERANDS)
1758 return 0;
1759 }
1760 else
1761 return 0;
1762
1763 if (set == 0)
1764 return 0;
1765
1766 set = expand_field_assignment (set);
1767 src = SET_SRC (set), dest = SET_DEST (set);
1768
1769 /* Don't eliminate a store in the stack pointer. */
1770 if (dest == stack_pointer_rtx
1771 /* Don't combine with an insn that sets a register to itself if it has
1772 a REG_EQUAL note. This may be part of a LIBCALL sequence. */
1773 || (rtx_equal_p (src, dest) && find_reg_note (insn, REG_EQUAL, NULL_RTX))
1774 /* Can't merge an ASM_OPERANDS. */
1775 || GET_CODE (src) == ASM_OPERANDS
1776 /* Can't merge a function call. */
1777 || GET_CODE (src) == CALL
1778 /* Don't eliminate a function call argument. */
1779 || (CALL_P (i3)
1780 && (find_reg_fusage (i3, USE, dest)
1781 || (REG_P (dest)
1782 && REGNO (dest) < FIRST_PSEUDO_REGISTER
1783 && global_regs[REGNO (dest)])))
1784 /* Don't substitute into an incremented register. */
1785 || FIND_REG_INC_NOTE (i3, dest)
1786 || (succ && FIND_REG_INC_NOTE (succ, dest))
1787 || (succ2 && FIND_REG_INC_NOTE (succ2, dest))
1788 /* Don't substitute into a non-local goto, this confuses CFG. */
1789 || (JUMP_P (i3) && find_reg_note (i3, REG_NON_LOCAL_GOTO, NULL_RTX))
1790 /* Make sure that DEST is not used after SUCC but before I3. */
1791 || (!all_adjacent
1792 && ((succ2
1793 && (reg_used_between_p (dest, succ2, i3)
1794 || reg_used_between_p (dest, succ, succ2)))
1795 || (!succ2 && succ && reg_used_between_p (dest, succ, i3))))
1796 /* Make sure that the value that is to be substituted for the register
1797 does not use any registers whose values alter in between. However,
1798 If the insns are adjacent, a use can't cross a set even though we
1799 think it might (this can happen for a sequence of insns each setting
1800 the same destination; last_set of that register might point to
1801 a NOTE). If INSN has a REG_EQUIV note, the register is always
1802 equivalent to the memory so the substitution is valid even if there
1803 are intervening stores. Also, don't move a volatile asm or
1804 UNSPEC_VOLATILE across any other insns. */
1805 || (! all_adjacent
1806 && (((!MEM_P (src)
1807 || ! find_reg_note (insn, REG_EQUIV, src))
1808 && use_crosses_set_p (src, DF_INSN_LUID (insn)))
1809 || (GET_CODE (src) == ASM_OPERANDS && MEM_VOLATILE_P (src))
1810 || GET_CODE (src) == UNSPEC_VOLATILE))
1811 /* Don't combine across a CALL_INSN, because that would possibly
1812 change whether the life span of some REGs crosses calls or not,
1813 and it is a pain to update that information.
1814 Exception: if source is a constant, moving it later can't hurt.
1815 Accept that as a special case. */
1816 || (DF_INSN_LUID (insn) < last_call_luid && ! CONSTANT_P (src)))
1817 return 0;
1818
1819 /* DEST must either be a REG or CC0. */
1820 if (REG_P (dest))
1821 {
1822 /* If register alignment is being enforced for multi-word items in all
1823 cases except for parameters, it is possible to have a register copy
1824 insn referencing a hard register that is not allowed to contain the
1825 mode being copied and which would not be valid as an operand of most
1826 insns. Eliminate this problem by not combining with such an insn.
1827
1828 Also, on some machines we don't want to extend the life of a hard
1829 register. */
1830
1831 if (REG_P (src)
1832 && ((REGNO (dest) < FIRST_PSEUDO_REGISTER
1833 && ! HARD_REGNO_MODE_OK (REGNO (dest), GET_MODE (dest)))
1834 /* Don't extend the life of a hard register unless it is
1835 user variable (if we have few registers) or it can't
1836 fit into the desired register (meaning something special
1837 is going on).
1838 Also avoid substituting a return register into I3, because
1839 reload can't handle a conflict with constraints of other
1840 inputs. */
1841 || (REGNO (src) < FIRST_PSEUDO_REGISTER
1842 && ! HARD_REGNO_MODE_OK (REGNO (src), GET_MODE (src)))))
1843 return 0;
1844 }
1845 else if (GET_CODE (dest) != CC0)
1846 return 0;
1847
1848
1849 if (GET_CODE (PATTERN (i3)) == PARALLEL)
1850 for (i = XVECLEN (PATTERN (i3), 0) - 1; i >= 0; i--)
1851 if (GET_CODE (XVECEXP (PATTERN (i3), 0, i)) == CLOBBER)
1852 {
1853 /* Don't substitute for a register intended as a clobberable
1854 operand. */
1855 rtx reg = XEXP (XVECEXP (PATTERN (i3), 0, i), 0);
1856 if (rtx_equal_p (reg, dest))
1857 return 0;
1858
1859 /* If the clobber represents an earlyclobber operand, we must not
1860 substitute an expression containing the clobbered register.
1861 As we do not analyze the constraint strings here, we have to
1862 make the conservative assumption. However, if the register is
1863 a fixed hard reg, the clobber cannot represent any operand;
1864 we leave it up to the machine description to either accept or
1865 reject use-and-clobber patterns. */
1866 if (!REG_P (reg)
1867 || REGNO (reg) >= FIRST_PSEUDO_REGISTER
1868 || !fixed_regs[REGNO (reg)])
1869 if (reg_overlap_mentioned_p (reg, src))
1870 return 0;
1871 }
1872
1873 /* If INSN contains anything volatile, or is an `asm' (whether volatile
1874 or not), reject, unless nothing volatile comes between it and I3 */
1875
1876 if (GET_CODE (src) == ASM_OPERANDS || volatile_refs_p (src))
1877 {
1878 /* Make sure neither succ nor succ2 contains a volatile reference. */
1879 if (succ2 != 0 && volatile_refs_p (PATTERN (succ2)))
1880 return 0;
1881 if (succ != 0 && volatile_refs_p (PATTERN (succ)))
1882 return 0;
1883 /* We'll check insns between INSN and I3 below. */
1884 }
1885
1886 /* If INSN is an asm, and DEST is a hard register, reject, since it has
1887 to be an explicit register variable, and was chosen for a reason. */
1888
1889 if (GET_CODE (src) == ASM_OPERANDS
1890 && REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER)
1891 return 0;
1892
1893 /* If there are any volatile insns between INSN and I3, reject, because
1894 they might affect machine state. */
1895
1896 for (p = NEXT_INSN (insn); p != i3; p = NEXT_INSN (p))
1897 if (INSN_P (p) && p != succ && p != succ2 && volatile_insn_p (PATTERN (p)))
1898 return 0;
1899
1900 /* If INSN contains an autoincrement or autodecrement, make sure that
1901 register is not used between there and I3, and not already used in
1902 I3 either. Neither must it be used in PRED or SUCC, if they exist.
1903 Also insist that I3 not be a jump; if it were one
1904 and the incremented register were spilled, we would lose. */
1905
1906 #ifdef AUTO_INC_DEC
1907 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1908 if (REG_NOTE_KIND (link) == REG_INC
1909 && (JUMP_P (i3)
1910 || reg_used_between_p (XEXP (link, 0), insn, i3)
1911 || (pred != NULL_RTX
1912 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred)))
1913 || (pred2 != NULL_RTX
1914 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred2)))
1915 || (succ != NULL_RTX
1916 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ)))
1917 || (succ2 != NULL_RTX
1918 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ2)))
1919 || reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i3))))
1920 return 0;
1921 #endif
1922
1923 #ifdef HAVE_cc0
1924 /* Don't combine an insn that follows a CC0-setting insn.
1925 An insn that uses CC0 must not be separated from the one that sets it.
1926 We do, however, allow I2 to follow a CC0-setting insn if that insn
1927 is passed as I1; in that case it will be deleted also.
1928 We also allow combining in this case if all the insns are adjacent
1929 because that would leave the two CC0 insns adjacent as well.
1930 It would be more logical to test whether CC0 occurs inside I1 or I2,
1931 but that would be much slower, and this ought to be equivalent. */
1932
1933 p = prev_nonnote_insn (insn);
1934 if (p && p != pred && NONJUMP_INSN_P (p) && sets_cc0_p (PATTERN (p))
1935 && ! all_adjacent)
1936 return 0;
1937 #endif
1938
1939 /* If we get here, we have passed all the tests and the combination is
1940 to be allowed. */
1941
1942 *pdest = dest;
1943 *psrc = src;
1944
1945 return 1;
1946 }
1947 \f
1948 /* LOC is the location within I3 that contains its pattern or the component
1949 of a PARALLEL of the pattern. We validate that it is valid for combining.
1950
1951 One problem is if I3 modifies its output, as opposed to replacing it
1952 entirely, we can't allow the output to contain I2DEST, I1DEST or I0DEST as
1953 doing so would produce an insn that is not equivalent to the original insns.
1954
1955 Consider:
1956
1957 (set (reg:DI 101) (reg:DI 100))
1958 (set (subreg:SI (reg:DI 101) 0) <foo>)
1959
1960 This is NOT equivalent to:
1961
1962 (parallel [(set (subreg:SI (reg:DI 100) 0) <foo>)
1963 (set (reg:DI 101) (reg:DI 100))])
1964
1965 Not only does this modify 100 (in which case it might still be valid
1966 if 100 were dead in I2), it sets 101 to the ORIGINAL value of 100.
1967
1968 We can also run into a problem if I2 sets a register that I1
1969 uses and I1 gets directly substituted into I3 (not via I2). In that
1970 case, we would be getting the wrong value of I2DEST into I3, so we
1971 must reject the combination. This case occurs when I2 and I1 both
1972 feed into I3, rather than when I1 feeds into I2, which feeds into I3.
1973 If I1_NOT_IN_SRC is nonzero, it means that finding I1 in the source
1974 of a SET must prevent combination from occurring. The same situation
1975 can occur for I0, in which case I0_NOT_IN_SRC is set.
1976
1977 Before doing the above check, we first try to expand a field assignment
1978 into a set of logical operations.
1979
1980 If PI3_DEST_KILLED is nonzero, it is a pointer to a location in which
1981 we place a register that is both set and used within I3. If more than one
1982 such register is detected, we fail.
1983
1984 Return 1 if the combination is valid, zero otherwise. */
1985
1986 static int
1987 combinable_i3pat (rtx i3, rtx *loc, rtx i2dest, rtx i1dest, rtx i0dest,
1988 int i1_not_in_src, int i0_not_in_src, rtx *pi3dest_killed)
1989 {
1990 rtx x = *loc;
1991
1992 if (GET_CODE (x) == SET)
1993 {
1994 rtx set = x ;
1995 rtx dest = SET_DEST (set);
1996 rtx src = SET_SRC (set);
1997 rtx inner_dest = dest;
1998 rtx subdest;
1999
2000 while (GET_CODE (inner_dest) == STRICT_LOW_PART
2001 || GET_CODE (inner_dest) == SUBREG
2002 || GET_CODE (inner_dest) == ZERO_EXTRACT)
2003 inner_dest = XEXP (inner_dest, 0);
2004
2005 /* Check for the case where I3 modifies its output, as discussed
2006 above. We don't want to prevent pseudos from being combined
2007 into the address of a MEM, so only prevent the combination if
2008 i1 or i2 set the same MEM. */
2009 if ((inner_dest != dest &&
2010 (!MEM_P (inner_dest)
2011 || rtx_equal_p (i2dest, inner_dest)
2012 || (i1dest && rtx_equal_p (i1dest, inner_dest))
2013 || (i0dest && rtx_equal_p (i0dest, inner_dest)))
2014 && (reg_overlap_mentioned_p (i2dest, inner_dest)
2015 || (i1dest && reg_overlap_mentioned_p (i1dest, inner_dest))
2016 || (i0dest && reg_overlap_mentioned_p (i0dest, inner_dest))))
2017
2018 /* This is the same test done in can_combine_p except we can't test
2019 all_adjacent; we don't have to, since this instruction will stay
2020 in place, thus we are not considering increasing the lifetime of
2021 INNER_DEST.
2022
2023 Also, if this insn sets a function argument, combining it with
2024 something that might need a spill could clobber a previous
2025 function argument; the all_adjacent test in can_combine_p also
2026 checks this; here, we do a more specific test for this case. */
2027
2028 || (REG_P (inner_dest)
2029 && REGNO (inner_dest) < FIRST_PSEUDO_REGISTER
2030 && (! HARD_REGNO_MODE_OK (REGNO (inner_dest),
2031 GET_MODE (inner_dest))))
2032 || (i1_not_in_src && reg_overlap_mentioned_p (i1dest, src))
2033 || (i0_not_in_src && reg_overlap_mentioned_p (i0dest, src)))
2034 return 0;
2035
2036 /* If DEST is used in I3, it is being killed in this insn, so
2037 record that for later. We have to consider paradoxical
2038 subregs here, since they kill the whole register, but we
2039 ignore partial subregs, STRICT_LOW_PART, etc.
2040 Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
2041 STACK_POINTER_REGNUM, since these are always considered to be
2042 live. Similarly for ARG_POINTER_REGNUM if it is fixed. */
2043 subdest = dest;
2044 if (GET_CODE (subdest) == SUBREG
2045 && (GET_MODE_SIZE (GET_MODE (subdest))
2046 >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (subdest)))))
2047 subdest = SUBREG_REG (subdest);
2048 if (pi3dest_killed
2049 && REG_P (subdest)
2050 && reg_referenced_p (subdest, PATTERN (i3))
2051 && REGNO (subdest) != FRAME_POINTER_REGNUM
2052 #if !HARD_FRAME_POINTER_IS_FRAME_POINTER
2053 && REGNO (subdest) != HARD_FRAME_POINTER_REGNUM
2054 #endif
2055 #if ARG_POINTER_REGNUM != FRAME_POINTER_REGNUM
2056 && (REGNO (subdest) != ARG_POINTER_REGNUM
2057 || ! fixed_regs [REGNO (subdest)])
2058 #endif
2059 && REGNO (subdest) != STACK_POINTER_REGNUM)
2060 {
2061 if (*pi3dest_killed)
2062 return 0;
2063
2064 *pi3dest_killed = subdest;
2065 }
2066 }
2067
2068 else if (GET_CODE (x) == PARALLEL)
2069 {
2070 int i;
2071
2072 for (i = 0; i < XVECLEN (x, 0); i++)
2073 if (! combinable_i3pat (i3, &XVECEXP (x, 0, i), i2dest, i1dest, i0dest,
2074 i1_not_in_src, i0_not_in_src, pi3dest_killed))
2075 return 0;
2076 }
2077
2078 return 1;
2079 }
2080 \f
2081 /* Return 1 if X is an arithmetic expression that contains a multiplication
2082 and division. We don't count multiplications by powers of two here. */
2083
2084 static int
2085 contains_muldiv (rtx x)
2086 {
2087 switch (GET_CODE (x))
2088 {
2089 case MOD: case DIV: case UMOD: case UDIV:
2090 return 1;
2091
2092 case MULT:
2093 return ! (CONST_INT_P (XEXP (x, 1))
2094 && exact_log2 (UINTVAL (XEXP (x, 1))) >= 0);
2095 default:
2096 if (BINARY_P (x))
2097 return contains_muldiv (XEXP (x, 0))
2098 || contains_muldiv (XEXP (x, 1));
2099
2100 if (UNARY_P (x))
2101 return contains_muldiv (XEXP (x, 0));
2102
2103 return 0;
2104 }
2105 }
2106 \f
2107 /* Determine whether INSN can be used in a combination. Return nonzero if
2108 not. This is used in try_combine to detect early some cases where we
2109 can't perform combinations. */
2110
2111 static int
2112 cant_combine_insn_p (rtx insn)
2113 {
2114 rtx set;
2115 rtx src, dest;
2116
2117 /* If this isn't really an insn, we can't do anything.
2118 This can occur when flow deletes an insn that it has merged into an
2119 auto-increment address. */
2120 if (! INSN_P (insn))
2121 return 1;
2122
2123 /* Never combine loads and stores involving hard regs that are likely
2124 to be spilled. The register allocator can usually handle such
2125 reg-reg moves by tying. If we allow the combiner to make
2126 substitutions of likely-spilled regs, reload might die.
2127 As an exception, we allow combinations involving fixed regs; these are
2128 not available to the register allocator so there's no risk involved. */
2129
2130 set = single_set (insn);
2131 if (! set)
2132 return 0;
2133 src = SET_SRC (set);
2134 dest = SET_DEST (set);
2135 if (GET_CODE (src) == SUBREG)
2136 src = SUBREG_REG (src);
2137 if (GET_CODE (dest) == SUBREG)
2138 dest = SUBREG_REG (dest);
2139 if (REG_P (src) && REG_P (dest)
2140 && ((HARD_REGISTER_P (src)
2141 && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (src))
2142 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (src))))
2143 || (HARD_REGISTER_P (dest)
2144 && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (dest))
2145 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (dest))))))
2146 return 1;
2147
2148 return 0;
2149 }
2150
2151 struct likely_spilled_retval_info
2152 {
2153 unsigned regno, nregs;
2154 unsigned mask;
2155 };
2156
2157 /* Called via note_stores by likely_spilled_retval_p. Remove from info->mask
2158 hard registers that are known to be written to / clobbered in full. */
2159 static void
2160 likely_spilled_retval_1 (rtx x, const_rtx set, void *data)
2161 {
2162 struct likely_spilled_retval_info *const info =
2163 (struct likely_spilled_retval_info *) data;
2164 unsigned regno, nregs;
2165 unsigned new_mask;
2166
2167 if (!REG_P (XEXP (set, 0)))
2168 return;
2169 regno = REGNO (x);
2170 if (regno >= info->regno + info->nregs)
2171 return;
2172 nregs = hard_regno_nregs[regno][GET_MODE (x)];
2173 if (regno + nregs <= info->regno)
2174 return;
2175 new_mask = (2U << (nregs - 1)) - 1;
2176 if (regno < info->regno)
2177 new_mask >>= info->regno - regno;
2178 else
2179 new_mask <<= regno - info->regno;
2180 info->mask &= ~new_mask;
2181 }
2182
2183 /* Return nonzero iff part of the return value is live during INSN, and
2184 it is likely spilled. This can happen when more than one insn is needed
2185 to copy the return value, e.g. when we consider to combine into the
2186 second copy insn for a complex value. */
2187
2188 static int
2189 likely_spilled_retval_p (rtx insn)
2190 {
2191 rtx use = BB_END (this_basic_block);
2192 rtx reg, p;
2193 unsigned regno, nregs;
2194 /* We assume here that no machine mode needs more than
2195 32 hard registers when the value overlaps with a register
2196 for which TARGET_FUNCTION_VALUE_REGNO_P is true. */
2197 unsigned mask;
2198 struct likely_spilled_retval_info info;
2199
2200 if (!NONJUMP_INSN_P (use) || GET_CODE (PATTERN (use)) != USE || insn == use)
2201 return 0;
2202 reg = XEXP (PATTERN (use), 0);
2203 if (!REG_P (reg) || !targetm.calls.function_value_regno_p (REGNO (reg)))
2204 return 0;
2205 regno = REGNO (reg);
2206 nregs = hard_regno_nregs[regno][GET_MODE (reg)];
2207 if (nregs == 1)
2208 return 0;
2209 mask = (2U << (nregs - 1)) - 1;
2210
2211 /* Disregard parts of the return value that are set later. */
2212 info.regno = regno;
2213 info.nregs = nregs;
2214 info.mask = mask;
2215 for (p = PREV_INSN (use); info.mask && p != insn; p = PREV_INSN (p))
2216 if (INSN_P (p))
2217 note_stores (PATTERN (p), likely_spilled_retval_1, &info);
2218 mask = info.mask;
2219
2220 /* Check if any of the (probably) live return value registers is
2221 likely spilled. */
2222 nregs --;
2223 do
2224 {
2225 if ((mask & 1 << nregs)
2226 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (regno + nregs)))
2227 return 1;
2228 } while (nregs--);
2229 return 0;
2230 }
2231
2232 /* Adjust INSN after we made a change to its destination.
2233
2234 Changing the destination can invalidate notes that say something about
2235 the results of the insn and a LOG_LINK pointing to the insn. */
2236
2237 static void
2238 adjust_for_new_dest (rtx insn)
2239 {
2240 /* For notes, be conservative and simply remove them. */
2241 remove_reg_equal_equiv_notes (insn);
2242
2243 /* The new insn will have a destination that was previously the destination
2244 of an insn just above it. Call distribute_links to make a LOG_LINK from
2245 the next use of that destination. */
2246 distribute_links (gen_rtx_INSN_LIST (VOIDmode, insn, NULL_RTX));
2247
2248 df_insn_rescan (insn);
2249 }
2250
2251 /* Return TRUE if combine can reuse reg X in mode MODE.
2252 ADDED_SETS is nonzero if the original set is still required. */
2253 static bool
2254 can_change_dest_mode (rtx x, int added_sets, enum machine_mode mode)
2255 {
2256 unsigned int regno;
2257
2258 if (!REG_P(x))
2259 return false;
2260
2261 regno = REGNO (x);
2262 /* Allow hard registers if the new mode is legal, and occupies no more
2263 registers than the old mode. */
2264 if (regno < FIRST_PSEUDO_REGISTER)
2265 return (HARD_REGNO_MODE_OK (regno, mode)
2266 && (hard_regno_nregs[regno][GET_MODE (x)]
2267 >= hard_regno_nregs[regno][mode]));
2268
2269 /* Or a pseudo that is only used once. */
2270 return (REG_N_SETS (regno) == 1 && !added_sets
2271 && !REG_USERVAR_P (x));
2272 }
2273
2274
2275 /* Check whether X, the destination of a set, refers to part of
2276 the register specified by REG. */
2277
2278 static bool
2279 reg_subword_p (rtx x, rtx reg)
2280 {
2281 /* Check that reg is an integer mode register. */
2282 if (!REG_P (reg) || GET_MODE_CLASS (GET_MODE (reg)) != MODE_INT)
2283 return false;
2284
2285 if (GET_CODE (x) == STRICT_LOW_PART
2286 || GET_CODE (x) == ZERO_EXTRACT)
2287 x = XEXP (x, 0);
2288
2289 return GET_CODE (x) == SUBREG
2290 && SUBREG_REG (x) == reg
2291 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT;
2292 }
2293
2294 #ifdef AUTO_INC_DEC
2295 /* Replace auto-increment addressing modes with explicit operations to access
2296 the same addresses without modifying the corresponding registers. */
2297
2298 static rtx
2299 cleanup_auto_inc_dec (rtx src, enum machine_mode mem_mode)
2300 {
2301 rtx x = src;
2302 const RTX_CODE code = GET_CODE (x);
2303 int i;
2304 const char *fmt;
2305
2306 switch (code)
2307 {
2308 case REG:
2309 case CONST_INT:
2310 case CONST_DOUBLE:
2311 case CONST_FIXED:
2312 case CONST_VECTOR:
2313 case SYMBOL_REF:
2314 case CODE_LABEL:
2315 case PC:
2316 case CC0:
2317 case SCRATCH:
2318 /* SCRATCH must be shared because they represent distinct values. */
2319 return x;
2320 case CLOBBER:
2321 if (REG_P (XEXP (x, 0)) && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER)
2322 return x;
2323 break;
2324
2325 case CONST:
2326 if (shared_const_p (x))
2327 return x;
2328 break;
2329
2330 case MEM:
2331 mem_mode = GET_MODE (x);
2332 break;
2333
2334 case PRE_INC:
2335 case PRE_DEC:
2336 gcc_assert (mem_mode != VOIDmode && mem_mode != BLKmode);
2337 return gen_rtx_PLUS (GET_MODE (x),
2338 cleanup_auto_inc_dec (XEXP (x, 0), mem_mode),
2339 GEN_INT (code == PRE_INC
2340 ? GET_MODE_SIZE (mem_mode)
2341 : -GET_MODE_SIZE (mem_mode)));
2342
2343 case POST_INC:
2344 case POST_DEC:
2345 case PRE_MODIFY:
2346 case POST_MODIFY:
2347 return cleanup_auto_inc_dec (code == PRE_MODIFY
2348 ? XEXP (x, 1) : XEXP (x, 0),
2349 mem_mode);
2350
2351 default:
2352 break;
2353 }
2354
2355 /* Copy the various flags, fields, and other information. We assume
2356 that all fields need copying, and then clear the fields that should
2357 not be copied. That is the sensible default behavior, and forces
2358 us to explicitly document why we are *not* copying a flag. */
2359 x = shallow_copy_rtx (x);
2360
2361 /* We do not copy the USED flag, which is used as a mark bit during
2362 walks over the RTL. */
2363 RTX_FLAG (x, used) = 0;
2364
2365 /* We do not copy FRAME_RELATED for INSNs. */
2366 if (INSN_P (x))
2367 RTX_FLAG (x, frame_related) = 0;
2368
2369 fmt = GET_RTX_FORMAT (code);
2370 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2371 if (fmt[i] == 'e')
2372 XEXP (x, i) = cleanup_auto_inc_dec (XEXP (x, i), mem_mode);
2373 else if (fmt[i] == 'E' || fmt[i] == 'V')
2374 {
2375 int j;
2376 XVEC (x, i) = rtvec_alloc (XVECLEN (x, i));
2377 for (j = 0; j < XVECLEN (x, i); j++)
2378 XVECEXP (x, i, j)
2379 = cleanup_auto_inc_dec (XVECEXP (src, i, j), mem_mode);
2380 }
2381
2382 return x;
2383 }
2384 #endif
2385
2386 /* Auxiliary data structure for propagate_for_debug_stmt. */
2387
2388 struct rtx_subst_pair
2389 {
2390 rtx to;
2391 bool adjusted;
2392 };
2393
2394 /* DATA points to an rtx_subst_pair. Return the value that should be
2395 substituted. */
2396
2397 static rtx
2398 propagate_for_debug_subst (rtx from, const_rtx old_rtx, void *data)
2399 {
2400 struct rtx_subst_pair *pair = (struct rtx_subst_pair *)data;
2401
2402 if (!rtx_equal_p (from, old_rtx))
2403 return NULL_RTX;
2404 if (!pair->adjusted)
2405 {
2406 pair->adjusted = true;
2407 #ifdef AUTO_INC_DEC
2408 pair->to = cleanup_auto_inc_dec (pair->to, VOIDmode);
2409 #else
2410 pair->to = copy_rtx (pair->to);
2411 #endif
2412 pair->to = make_compound_operation (pair->to, SET);
2413 return pair->to;
2414 }
2415 return copy_rtx (pair->to);
2416 }
2417
2418 /* Replace all the occurrences of DEST with SRC in DEBUG_INSNs between INSN
2419 and LAST. */
2420
2421 static void
2422 propagate_for_debug (rtx insn, rtx last, rtx dest, rtx src)
2423 {
2424 rtx next, loc;
2425
2426 struct rtx_subst_pair p;
2427 p.to = src;
2428 p.adjusted = false;
2429
2430 next = NEXT_INSN (insn);
2431 while (next != last)
2432 {
2433 insn = next;
2434 next = NEXT_INSN (insn);
2435 if (DEBUG_INSN_P (insn))
2436 {
2437 loc = simplify_replace_fn_rtx (INSN_VAR_LOCATION_LOC (insn),
2438 dest, propagate_for_debug_subst, &p);
2439 if (loc == INSN_VAR_LOCATION_LOC (insn))
2440 continue;
2441 INSN_VAR_LOCATION_LOC (insn) = loc;
2442 df_insn_rescan (insn);
2443 }
2444 }
2445 }
2446
2447 /* Delete the unconditional jump INSN and adjust the CFG correspondingly.
2448 Note that the INSN should be deleted *after* removing dead edges, so
2449 that the kept edge is the fallthrough edge for a (set (pc) (pc))
2450 but not for a (set (pc) (label_ref FOO)). */
2451
2452 static void
2453 update_cfg_for_uncondjump (rtx insn)
2454 {
2455 basic_block bb = BLOCK_FOR_INSN (insn);
2456 bool at_end = (BB_END (bb) == insn);
2457
2458 if (at_end)
2459 purge_dead_edges (bb);
2460
2461 delete_insn (insn);
2462 if (at_end && EDGE_COUNT (bb->succs) == 1)
2463 single_succ_edge (bb)->flags |= EDGE_FALLTHRU;
2464 }
2465
2466 /* Try to combine the insns I0, I1 and I2 into I3.
2467 Here I0, I1 and I2 appear earlier than I3.
2468 I0 and I1 can be zero; then we combine just I2 into I3, or I1 and I2 into
2469 I3.
2470
2471 If we are combining more than two insns and the resulting insn is not
2472 recognized, try splitting it into two insns. If that happens, I2 and I3
2473 are retained and I1/I0 are pseudo-deleted by turning them into a NOTE.
2474 Otherwise, I0, I1 and I2 are pseudo-deleted.
2475
2476 Return 0 if the combination does not work. Then nothing is changed.
2477 If we did the combination, return the insn at which combine should
2478 resume scanning.
2479
2480 Set NEW_DIRECT_JUMP_P to a nonzero value if try_combine creates a
2481 new direct jump instruction. */
2482
2483 static rtx
2484 try_combine (rtx i3, rtx i2, rtx i1, rtx i0, int *new_direct_jump_p)
2485 {
2486 /* New patterns for I3 and I2, respectively. */
2487 rtx newpat, newi2pat = 0;
2488 rtvec newpat_vec_with_clobbers = 0;
2489 int substed_i2 = 0, substed_i1 = 0, substed_i0 = 0;
2490 /* Indicates need to preserve SET in I0, I1 or I2 in I3 if it is not
2491 dead. */
2492 int added_sets_0, added_sets_1, added_sets_2;
2493 /* Total number of SETs to put into I3. */
2494 int total_sets;
2495 /* Nonzero if I2's or I1's body now appears in I3. */
2496 int i2_is_used = 0, i1_is_used = 0;
2497 /* INSN_CODEs for new I3, new I2, and user of condition code. */
2498 int insn_code_number, i2_code_number = 0, other_code_number = 0;
2499 /* Contains I3 if the destination of I3 is used in its source, which means
2500 that the old life of I3 is being killed. If that usage is placed into
2501 I2 and not in I3, a REG_DEAD note must be made. */
2502 rtx i3dest_killed = 0;
2503 /* SET_DEST and SET_SRC of I2, I1 and I0. */
2504 rtx i2dest = 0, i2src = 0, i1dest = 0, i1src = 0, i0dest = 0, i0src = 0;
2505 /* Copy of SET_SRC of I1, if needed. */
2506 rtx i1src_copy = 0;
2507 /* Set if I2DEST was reused as a scratch register. */
2508 bool i2scratch = false;
2509 /* The PATTERNs of I0, I1, and I2, or a copy of them in certain cases. */
2510 rtx i0pat = 0, i1pat = 0, i2pat = 0;
2511 /* Indicates if I2DEST or I1DEST is in I2SRC or I1_SRC. */
2512 int i2dest_in_i2src = 0, i1dest_in_i1src = 0, i2dest_in_i1src = 0;
2513 int i0dest_in_i0src = 0, i1dest_in_i0src = 0, i2dest_in_i0src = 0;
2514 int i2dest_killed = 0, i1dest_killed = 0, i0dest_killed = 0;
2515 int i1_feeds_i2_n = 0, i0_feeds_i2_n = 0, i0_feeds_i1_n = 0;
2516 /* Notes that must be added to REG_NOTES in I3 and I2. */
2517 rtx new_i3_notes, new_i2_notes;
2518 /* Notes that we substituted I3 into I2 instead of the normal case. */
2519 int i3_subst_into_i2 = 0;
2520 /* Notes that I1, I2 or I3 is a MULT operation. */
2521 int have_mult = 0;
2522 int swap_i2i3 = 0;
2523 int changed_i3_dest = 0;
2524
2525 int maxreg;
2526 rtx temp;
2527 rtx link;
2528 rtx other_pat = 0;
2529 rtx new_other_notes;
2530 int i;
2531
2532 /* Only try four-insn combinations when there's high likelihood of
2533 success. Look for simple insns, such as loads of constants or
2534 binary operations involving a constant. */
2535 if (i0)
2536 {
2537 int i;
2538 int ngood = 0;
2539 int nshift = 0;
2540
2541 if (!flag_expensive_optimizations)
2542 return 0;
2543
2544 for (i = 0; i < 4; i++)
2545 {
2546 rtx insn = i == 0 ? i0 : i == 1 ? i1 : i == 2 ? i2 : i3;
2547 rtx set = single_set (insn);
2548 rtx src;
2549 if (!set)
2550 continue;
2551 src = SET_SRC (set);
2552 if (CONSTANT_P (src))
2553 {
2554 ngood += 2;
2555 break;
2556 }
2557 else if (BINARY_P (src) && CONSTANT_P (XEXP (src, 1)))
2558 ngood++;
2559 else if (GET_CODE (src) == ASHIFT || GET_CODE (src) == ASHIFTRT
2560 || GET_CODE (src) == LSHIFTRT)
2561 nshift++;
2562 }
2563 if (ngood < 2 && nshift < 2)
2564 return 0;
2565 }
2566
2567 /* Exit early if one of the insns involved can't be used for
2568 combinations. */
2569 if (cant_combine_insn_p (i3)
2570 || cant_combine_insn_p (i2)
2571 || (i1 && cant_combine_insn_p (i1))
2572 || (i0 && cant_combine_insn_p (i0))
2573 || likely_spilled_retval_p (i3))
2574 return 0;
2575
2576 combine_attempts++;
2577 undobuf.other_insn = 0;
2578
2579 /* Reset the hard register usage information. */
2580 CLEAR_HARD_REG_SET (newpat_used_regs);
2581
2582 if (dump_file && (dump_flags & TDF_DETAILS))
2583 {
2584 if (i0)
2585 fprintf (dump_file, "\nTrying %d, %d, %d -> %d:\n",
2586 INSN_UID (i0), INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
2587 else if (i1)
2588 fprintf (dump_file, "\nTrying %d, %d -> %d:\n",
2589 INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
2590 else
2591 fprintf (dump_file, "\nTrying %d -> %d:\n",
2592 INSN_UID (i2), INSN_UID (i3));
2593 }
2594
2595 /* If multiple insns feed into one of I2 or I3, they can be in any
2596 order. To simplify the code below, reorder them in sequence. */
2597 if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i2))
2598 temp = i2, i2 = i0, i0 = temp;
2599 if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i1))
2600 temp = i1, i1 = i0, i0 = temp;
2601 if (i1 && DF_INSN_LUID (i1) > DF_INSN_LUID (i2))
2602 temp = i1, i1 = i2, i2 = temp;
2603
2604 added_links_insn = 0;
2605
2606 /* First check for one important special case that the code below will
2607 not handle. Namely, the case where I1 is zero, I2 is a PARALLEL
2608 and I3 is a SET whose SET_SRC is a SET_DEST in I2. In that case,
2609 we may be able to replace that destination with the destination of I3.
2610 This occurs in the common code where we compute both a quotient and
2611 remainder into a structure, in which case we want to do the computation
2612 directly into the structure to avoid register-register copies.
2613
2614 Note that this case handles both multiple sets in I2 and also cases
2615 where I2 has a number of CLOBBERs inside the PARALLEL.
2616
2617 We make very conservative checks below and only try to handle the
2618 most common cases of this. For example, we only handle the case
2619 where I2 and I3 are adjacent to avoid making difficult register
2620 usage tests. */
2621
2622 if (i1 == 0 && NONJUMP_INSN_P (i3) && GET_CODE (PATTERN (i3)) == SET
2623 && REG_P (SET_SRC (PATTERN (i3)))
2624 && REGNO (SET_SRC (PATTERN (i3))) >= FIRST_PSEUDO_REGISTER
2625 && find_reg_note (i3, REG_DEAD, SET_SRC (PATTERN (i3)))
2626 && GET_CODE (PATTERN (i2)) == PARALLEL
2627 && ! side_effects_p (SET_DEST (PATTERN (i3)))
2628 /* If the dest of I3 is a ZERO_EXTRACT or STRICT_LOW_PART, the code
2629 below would need to check what is inside (and reg_overlap_mentioned_p
2630 doesn't support those codes anyway). Don't allow those destinations;
2631 the resulting insn isn't likely to be recognized anyway. */
2632 && GET_CODE (SET_DEST (PATTERN (i3))) != ZERO_EXTRACT
2633 && GET_CODE (SET_DEST (PATTERN (i3))) != STRICT_LOW_PART
2634 && ! reg_overlap_mentioned_p (SET_SRC (PATTERN (i3)),
2635 SET_DEST (PATTERN (i3)))
2636 && next_active_insn (i2) == i3)
2637 {
2638 rtx p2 = PATTERN (i2);
2639
2640 /* Make sure that the destination of I3,
2641 which we are going to substitute into one output of I2,
2642 is not used within another output of I2. We must avoid making this:
2643 (parallel [(set (mem (reg 69)) ...)
2644 (set (reg 69) ...)])
2645 which is not well-defined as to order of actions.
2646 (Besides, reload can't handle output reloads for this.)
2647
2648 The problem can also happen if the dest of I3 is a memory ref,
2649 if another dest in I2 is an indirect memory ref. */
2650 for (i = 0; i < XVECLEN (p2, 0); i++)
2651 if ((GET_CODE (XVECEXP (p2, 0, i)) == SET
2652 || GET_CODE (XVECEXP (p2, 0, i)) == CLOBBER)
2653 && reg_overlap_mentioned_p (SET_DEST (PATTERN (i3)),
2654 SET_DEST (XVECEXP (p2, 0, i))))
2655 break;
2656
2657 if (i == XVECLEN (p2, 0))
2658 for (i = 0; i < XVECLEN (p2, 0); i++)
2659 if (GET_CODE (XVECEXP (p2, 0, i)) == SET
2660 && SET_DEST (XVECEXP (p2, 0, i)) == SET_SRC (PATTERN (i3)))
2661 {
2662 combine_merges++;
2663
2664 subst_insn = i3;
2665 subst_low_luid = DF_INSN_LUID (i2);
2666
2667 added_sets_2 = added_sets_1 = added_sets_0 = 0;
2668 i2src = SET_SRC (XVECEXP (p2, 0, i));
2669 i2dest = SET_DEST (XVECEXP (p2, 0, i));
2670 i2dest_killed = dead_or_set_p (i2, i2dest);
2671
2672 /* Replace the dest in I2 with our dest and make the resulting
2673 insn the new pattern for I3. Then skip to where we validate
2674 the pattern. Everything was set up above. */
2675 SUBST (SET_DEST (XVECEXP (p2, 0, i)), SET_DEST (PATTERN (i3)));
2676 newpat = p2;
2677 i3_subst_into_i2 = 1;
2678 goto validate_replacement;
2679 }
2680 }
2681
2682 /* If I2 is setting a pseudo to a constant and I3 is setting some
2683 sub-part of it to another constant, merge them by making a new
2684 constant. */
2685 if (i1 == 0
2686 && (temp = single_set (i2)) != 0
2687 && (CONST_INT_P (SET_SRC (temp))
2688 || GET_CODE (SET_SRC (temp)) == CONST_DOUBLE)
2689 && GET_CODE (PATTERN (i3)) == SET
2690 && (CONST_INT_P (SET_SRC (PATTERN (i3)))
2691 || GET_CODE (SET_SRC (PATTERN (i3))) == CONST_DOUBLE)
2692 && reg_subword_p (SET_DEST (PATTERN (i3)), SET_DEST (temp)))
2693 {
2694 rtx dest = SET_DEST (PATTERN (i3));
2695 int offset = -1;
2696 int width = 0;
2697
2698 if (GET_CODE (dest) == ZERO_EXTRACT)
2699 {
2700 if (CONST_INT_P (XEXP (dest, 1))
2701 && CONST_INT_P (XEXP (dest, 2)))
2702 {
2703 width = INTVAL (XEXP (dest, 1));
2704 offset = INTVAL (XEXP (dest, 2));
2705 dest = XEXP (dest, 0);
2706 if (BITS_BIG_ENDIAN)
2707 offset = GET_MODE_BITSIZE (GET_MODE (dest)) - width - offset;
2708 }
2709 }
2710 else
2711 {
2712 if (GET_CODE (dest) == STRICT_LOW_PART)
2713 dest = XEXP (dest, 0);
2714 width = GET_MODE_BITSIZE (GET_MODE (dest));
2715 offset = 0;
2716 }
2717
2718 if (offset >= 0)
2719 {
2720 /* If this is the low part, we're done. */
2721 if (subreg_lowpart_p (dest))
2722 ;
2723 /* Handle the case where inner is twice the size of outer. */
2724 else if (GET_MODE_BITSIZE (GET_MODE (SET_DEST (temp)))
2725 == 2 * GET_MODE_BITSIZE (GET_MODE (dest)))
2726 offset += GET_MODE_BITSIZE (GET_MODE (dest));
2727 /* Otherwise give up for now. */
2728 else
2729 offset = -1;
2730 }
2731
2732 if (offset >= 0
2733 && (GET_MODE_BITSIZE (GET_MODE (SET_DEST (temp)))
2734 <= HOST_BITS_PER_DOUBLE_INT))
2735 {
2736 double_int m, o, i;
2737 rtx inner = SET_SRC (PATTERN (i3));
2738 rtx outer = SET_SRC (temp);
2739
2740 o = rtx_to_double_int (outer);
2741 i = rtx_to_double_int (inner);
2742
2743 m = double_int_mask (width);
2744 i = double_int_and (i, m);
2745 m = double_int_lshift (m, offset, HOST_BITS_PER_DOUBLE_INT, false);
2746 i = double_int_lshift (i, offset, HOST_BITS_PER_DOUBLE_INT, false);
2747 o = double_int_ior (double_int_and_not (o, m), i);
2748
2749 combine_merges++;
2750 subst_insn = i3;
2751 subst_low_luid = DF_INSN_LUID (i2);
2752 added_sets_2 = added_sets_1 = added_sets_0 = 0;
2753 i2dest = SET_DEST (temp);
2754 i2dest_killed = dead_or_set_p (i2, i2dest);
2755
2756 /* Replace the source in I2 with the new constant and make the
2757 resulting insn the new pattern for I3. Then skip to where we
2758 validate the pattern. Everything was set up above. */
2759 SUBST (SET_SRC (temp),
2760 immed_double_int_const (o, GET_MODE (SET_DEST (temp))));
2761
2762 newpat = PATTERN (i2);
2763
2764 /* The dest of I3 has been replaced with the dest of I2. */
2765 changed_i3_dest = 1;
2766 goto validate_replacement;
2767 }
2768 }
2769
2770 #ifndef HAVE_cc0
2771 /* If we have no I1 and I2 looks like:
2772 (parallel [(set (reg:CC X) (compare:CC OP (const_int 0)))
2773 (set Y OP)])
2774 make up a dummy I1 that is
2775 (set Y OP)
2776 and change I2 to be
2777 (set (reg:CC X) (compare:CC Y (const_int 0)))
2778
2779 (We can ignore any trailing CLOBBERs.)
2780
2781 This undoes a previous combination and allows us to match a branch-and-
2782 decrement insn. */
2783
2784 if (i1 == 0 && GET_CODE (PATTERN (i2)) == PARALLEL
2785 && XVECLEN (PATTERN (i2), 0) >= 2
2786 && GET_CODE (XVECEXP (PATTERN (i2), 0, 0)) == SET
2787 && (GET_MODE_CLASS (GET_MODE (SET_DEST (XVECEXP (PATTERN (i2), 0, 0))))
2788 == MODE_CC)
2789 && GET_CODE (SET_SRC (XVECEXP (PATTERN (i2), 0, 0))) == COMPARE
2790 && XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 1) == const0_rtx
2791 && GET_CODE (XVECEXP (PATTERN (i2), 0, 1)) == SET
2792 && REG_P (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)))
2793 && rtx_equal_p (XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 0),
2794 SET_SRC (XVECEXP (PATTERN (i2), 0, 1))))
2795 {
2796 for (i = XVECLEN (PATTERN (i2), 0) - 1; i >= 2; i--)
2797 if (GET_CODE (XVECEXP (PATTERN (i2), 0, i)) != CLOBBER)
2798 break;
2799
2800 if (i == 1)
2801 {
2802 /* We make I1 with the same INSN_UID as I2. This gives it
2803 the same DF_INSN_LUID for value tracking. Our fake I1 will
2804 never appear in the insn stream so giving it the same INSN_UID
2805 as I2 will not cause a problem. */
2806
2807 i1 = gen_rtx_INSN (VOIDmode, INSN_UID (i2), NULL_RTX, i2,
2808 BLOCK_FOR_INSN (i2), XVECEXP (PATTERN (i2), 0, 1),
2809 INSN_LOCATOR (i2), -1, NULL_RTX);
2810
2811 SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 0));
2812 SUBST (XEXP (SET_SRC (PATTERN (i2)), 0),
2813 SET_DEST (PATTERN (i1)));
2814 }
2815 }
2816 #endif
2817
2818 /* Verify that I2 and I1 are valid for combining. */
2819 if (! can_combine_p (i2, i3, i0, i1, NULL_RTX, NULL_RTX, &i2dest, &i2src)
2820 || (i1 && ! can_combine_p (i1, i3, i0, NULL_RTX, i2, NULL_RTX,
2821 &i1dest, &i1src))
2822 || (i0 && ! can_combine_p (i0, i3, NULL_RTX, NULL_RTX, i1, i2,
2823 &i0dest, &i0src)))
2824 {
2825 undo_all ();
2826 return 0;
2827 }
2828
2829 /* Record whether I2DEST is used in I2SRC and similarly for the other
2830 cases. Knowing this will help in register status updating below. */
2831 i2dest_in_i2src = reg_overlap_mentioned_p (i2dest, i2src);
2832 i1dest_in_i1src = i1 && reg_overlap_mentioned_p (i1dest, i1src);
2833 i2dest_in_i1src = i1 && reg_overlap_mentioned_p (i2dest, i1src);
2834 i0dest_in_i0src = i0 && reg_overlap_mentioned_p (i0dest, i0src);
2835 i1dest_in_i0src = i0 && reg_overlap_mentioned_p (i1dest, i0src);
2836 i2dest_in_i0src = i0 && reg_overlap_mentioned_p (i2dest, i0src);
2837 i2dest_killed = dead_or_set_p (i2, i2dest);
2838 i1dest_killed = i1 && dead_or_set_p (i1, i1dest);
2839 i0dest_killed = i0 && dead_or_set_p (i0, i0dest);
2840
2841 /* For the earlier insns, determine which of the subsequent ones they
2842 feed. */
2843 i1_feeds_i2_n = i1 && insn_a_feeds_b (i1, i2);
2844 i0_feeds_i1_n = i0 && insn_a_feeds_b (i0, i1);
2845 i0_feeds_i2_n = (i0 && (!i0_feeds_i1_n ? insn_a_feeds_b (i0, i2)
2846 : (!reg_overlap_mentioned_p (i1dest, i0dest)
2847 && reg_overlap_mentioned_p (i0dest, i2src))));
2848
2849 /* Ensure that I3's pattern can be the destination of combines. */
2850 if (! combinable_i3pat (i3, &PATTERN (i3), i2dest, i1dest, i0dest,
2851 i1 && i2dest_in_i1src && !i1_feeds_i2_n,
2852 i0 && ((i2dest_in_i0src && !i0_feeds_i2_n)
2853 || (i1dest_in_i0src && !i0_feeds_i1_n)),
2854 &i3dest_killed))
2855 {
2856 undo_all ();
2857 return 0;
2858 }
2859
2860 /* See if any of the insns is a MULT operation. Unless one is, we will
2861 reject a combination that is, since it must be slower. Be conservative
2862 here. */
2863 if (GET_CODE (i2src) == MULT
2864 || (i1 != 0 && GET_CODE (i1src) == MULT)
2865 || (i0 != 0 && GET_CODE (i0src) == MULT)
2866 || (GET_CODE (PATTERN (i3)) == SET
2867 && GET_CODE (SET_SRC (PATTERN (i3))) == MULT))
2868 have_mult = 1;
2869
2870 /* If I3 has an inc, then give up if I1 or I2 uses the reg that is inc'd.
2871 We used to do this EXCEPT in one case: I3 has a post-inc in an
2872 output operand. However, that exception can give rise to insns like
2873 mov r3,(r3)+
2874 which is a famous insn on the PDP-11 where the value of r3 used as the
2875 source was model-dependent. Avoid this sort of thing. */
2876
2877 #if 0
2878 if (!(GET_CODE (PATTERN (i3)) == SET
2879 && REG_P (SET_SRC (PATTERN (i3)))
2880 && MEM_P (SET_DEST (PATTERN (i3)))
2881 && (GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_INC
2882 || GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_DEC)))
2883 /* It's not the exception. */
2884 #endif
2885 #ifdef AUTO_INC_DEC
2886 for (link = REG_NOTES (i3); link; link = XEXP (link, 1))
2887 if (REG_NOTE_KIND (link) == REG_INC
2888 && (reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i2))
2889 || (i1 != 0
2890 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i1)))))
2891 {
2892 undo_all ();
2893 return 0;
2894 }
2895 #endif
2896
2897 /* See if the SETs in I1 or I2 need to be kept around in the merged
2898 instruction: whenever the value set there is still needed past I3.
2899 For the SETs in I2, this is easy: we see if I2DEST dies or is set in I3.
2900
2901 For the SET in I1, we have two cases: If I1 and I2 independently
2902 feed into I3, the set in I1 needs to be kept around if I1DEST dies
2903 or is set in I3. Otherwise (if I1 feeds I2 which feeds I3), the set
2904 in I1 needs to be kept around unless I1DEST dies or is set in either
2905 I2 or I3. The same consideration applies to I0. */
2906
2907 added_sets_2 = !dead_or_set_p (i3, i2dest);
2908
2909 if (i1)
2910 added_sets_1 = !(dead_or_set_p (i3, i1dest)
2911 || (i1_feeds_i2_n && dead_or_set_p (i2, i1dest)));
2912 else
2913 added_sets_1 = 0;
2914
2915 if (i0)
2916 added_sets_0 = !(dead_or_set_p (i3, i0dest)
2917 || (i0_feeds_i2_n && dead_or_set_p (i2, i0dest))
2918 || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest)));
2919 else
2920 added_sets_0 = 0;
2921
2922 /* We are about to copy insns for the case where they need to be kept
2923 around. Check that they can be copied in the merged instruction. */
2924
2925 if (targetm.cannot_copy_insn_p
2926 && ((added_sets_2 && targetm.cannot_copy_insn_p (i2))
2927 || (i1 && added_sets_1 && targetm.cannot_copy_insn_p (i1))
2928 || (i0 && added_sets_0 && targetm.cannot_copy_insn_p (i0))))
2929 {
2930 undo_all ();
2931 return 0;
2932 }
2933
2934 /* If the set in I2 needs to be kept around, we must make a copy of
2935 PATTERN (I2), so that when we substitute I1SRC for I1DEST in
2936 PATTERN (I2), we are only substituting for the original I1DEST, not into
2937 an already-substituted copy. This also prevents making self-referential
2938 rtx. If I2 is a PARALLEL, we just need the piece that assigns I2SRC to
2939 I2DEST. */
2940
2941 if (added_sets_2)
2942 {
2943 if (GET_CODE (PATTERN (i2)) == PARALLEL)
2944 i2pat = gen_rtx_SET (VOIDmode, i2dest, copy_rtx (i2src));
2945 else
2946 i2pat = copy_rtx (PATTERN (i2));
2947 }
2948
2949 if (added_sets_1)
2950 {
2951 if (GET_CODE (PATTERN (i1)) == PARALLEL)
2952 i1pat = gen_rtx_SET (VOIDmode, i1dest, copy_rtx (i1src));
2953 else
2954 i1pat = copy_rtx (PATTERN (i1));
2955 }
2956
2957 if (added_sets_0)
2958 {
2959 if (GET_CODE (PATTERN (i0)) == PARALLEL)
2960 i0pat = gen_rtx_SET (VOIDmode, i0dest, copy_rtx (i0src));
2961 else
2962 i0pat = copy_rtx (PATTERN (i0));
2963 }
2964
2965 combine_merges++;
2966
2967 /* Substitute in the latest insn for the regs set by the earlier ones. */
2968
2969 maxreg = max_reg_num ();
2970
2971 subst_insn = i3;
2972
2973 #ifndef HAVE_cc0
2974 /* Many machines that don't use CC0 have insns that can both perform an
2975 arithmetic operation and set the condition code. These operations will
2976 be represented as a PARALLEL with the first element of the vector
2977 being a COMPARE of an arithmetic operation with the constant zero.
2978 The second element of the vector will set some pseudo to the result
2979 of the same arithmetic operation. If we simplify the COMPARE, we won't
2980 match such a pattern and so will generate an extra insn. Here we test
2981 for this case, where both the comparison and the operation result are
2982 needed, and make the PARALLEL by just replacing I2DEST in I3SRC with
2983 I2SRC. Later we will make the PARALLEL that contains I2. */
2984
2985 if (i1 == 0 && added_sets_2 && GET_CODE (PATTERN (i3)) == SET
2986 && GET_CODE (SET_SRC (PATTERN (i3))) == COMPARE
2987 && XEXP (SET_SRC (PATTERN (i3)), 1) == const0_rtx
2988 && rtx_equal_p (XEXP (SET_SRC (PATTERN (i3)), 0), i2dest))
2989 {
2990 #ifdef SELECT_CC_MODE
2991 rtx *cc_use;
2992 enum machine_mode compare_mode;
2993 #endif
2994
2995 newpat = PATTERN (i3);
2996 SUBST (XEXP (SET_SRC (newpat), 0), i2src);
2997
2998 i2_is_used = 1;
2999
3000 #ifdef SELECT_CC_MODE
3001 /* See if a COMPARE with the operand we substituted in should be done
3002 with the mode that is currently being used. If not, do the same
3003 processing we do in `subst' for a SET; namely, if the destination
3004 is used only once, try to replace it with a register of the proper
3005 mode and also replace the COMPARE. */
3006 if (undobuf.other_insn == 0
3007 && (cc_use = find_single_use (SET_DEST (newpat), i3,
3008 &undobuf.other_insn))
3009 && ((compare_mode = SELECT_CC_MODE (GET_CODE (*cc_use),
3010 i2src, const0_rtx))
3011 != GET_MODE (SET_DEST (newpat))))
3012 {
3013 if (can_change_dest_mode (SET_DEST (newpat), added_sets_2,
3014 compare_mode))
3015 {
3016 unsigned int regno = REGNO (SET_DEST (newpat));
3017 rtx new_dest;
3018
3019 if (regno < FIRST_PSEUDO_REGISTER)
3020 new_dest = gen_rtx_REG (compare_mode, regno);
3021 else
3022 {
3023 SUBST_MODE (regno_reg_rtx[regno], compare_mode);
3024 new_dest = regno_reg_rtx[regno];
3025 }
3026
3027 SUBST (SET_DEST (newpat), new_dest);
3028 SUBST (XEXP (*cc_use, 0), new_dest);
3029 SUBST (SET_SRC (newpat),
3030 gen_rtx_COMPARE (compare_mode, i2src, const0_rtx));
3031 }
3032 else
3033 undobuf.other_insn = 0;
3034 }
3035 #endif
3036 }
3037 else
3038 #endif
3039 {
3040 /* It is possible that the source of I2 or I1 may be performing
3041 an unneeded operation, such as a ZERO_EXTEND of something
3042 that is known to have the high part zero. Handle that case
3043 by letting subst look at the innermost one of them.
3044
3045 Another way to do this would be to have a function that tries
3046 to simplify a single insn instead of merging two or more
3047 insns. We don't do this because of the potential of infinite
3048 loops and because of the potential extra memory required.
3049 However, doing it the way we are is a bit of a kludge and
3050 doesn't catch all cases.
3051
3052 But only do this if -fexpensive-optimizations since it slows
3053 things down and doesn't usually win.
3054
3055 This is not done in the COMPARE case above because the
3056 unmodified I2PAT is used in the PARALLEL and so a pattern
3057 with a modified I2SRC would not match. */
3058
3059 if (flag_expensive_optimizations)
3060 {
3061 /* Pass pc_rtx so no substitutions are done, just
3062 simplifications. */
3063 if (i1)
3064 {
3065 subst_low_luid = DF_INSN_LUID (i1);
3066 i1src = subst (i1src, pc_rtx, pc_rtx, 0, 0);
3067 }
3068 else
3069 {
3070 subst_low_luid = DF_INSN_LUID (i2);
3071 i2src = subst (i2src, pc_rtx, pc_rtx, 0, 0);
3072 }
3073 }
3074
3075 n_occurrences = 0; /* `subst' counts here */
3076 subst_low_luid = DF_INSN_LUID (i2);
3077
3078 /* If I1 feeds into I2 and I1DEST is in I1SRC, we need to make a unique
3079 copy of I2SRC each time we substitute it, in order to avoid creating
3080 self-referential RTL when we will be substituting I1SRC for I1DEST
3081 later. Likewise if I0 feeds into I2 and I0DEST is in I0SRC. */
3082 newpat = subst (PATTERN (i3), i2dest, i2src, 0,
3083 (i1_feeds_i2_n && i1dest_in_i1src)
3084 || (i0_feeds_i2_n && i0dest_in_i0src));
3085 substed_i2 = 1;
3086
3087 /* Record whether I2's body now appears within I3's body. */
3088 i2_is_used = n_occurrences;
3089 }
3090
3091 /* If we already got a failure, don't try to do more. Otherwise, try to
3092 substitute I1 if we have it. */
3093
3094 if (i1 && GET_CODE (newpat) != CLOBBER)
3095 {
3096 /* Check that an autoincrement side-effect on I1 has not been lost.
3097 This happens if I1DEST is mentioned in I2 and dies there, and
3098 has disappeared from the new pattern. */
3099 if ((FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
3100 && i1_feeds_i2_n
3101 && dead_or_set_p (i2, i1dest)
3102 && !reg_overlap_mentioned_p (i1dest, newpat))
3103 /* Before we can do this substitution, we must redo the test done
3104 above (see detailed comments there) that ensures I1DEST isn't
3105 mentioned in any SETs in NEWPAT that are field assignments. */
3106 || !combinable_i3pat (NULL_RTX, &newpat, i1dest, NULL_RTX, NULL_RTX,
3107 0, 0, 0))
3108 {
3109 undo_all ();
3110 return 0;
3111 }
3112
3113 n_occurrences = 0;
3114 subst_low_luid = DF_INSN_LUID (i1);
3115
3116 /* If I0 feeds into I1 and I0DEST is in I0SRC, we need to make a unique
3117 copy of I1SRC each time we substitute it, in order to avoid creating
3118 self-referential RTL when we will be substituting I0SRC for I0DEST
3119 later. */
3120 newpat = subst (newpat, i1dest, i1src, 0,
3121 i0_feeds_i1_n && i0dest_in_i0src);
3122 substed_i1 = 1;
3123
3124 /* Record whether I1's body now appears within I3's body. */
3125 i1_is_used = n_occurrences;
3126 }
3127
3128 /* Likewise for I0 if we have it. */
3129
3130 if (i0 && GET_CODE (newpat) != CLOBBER)
3131 {
3132 if ((FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
3133 && ((i0_feeds_i2_n && dead_or_set_p (i2, i0dest))
3134 || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest)))
3135 && !reg_overlap_mentioned_p (i0dest, newpat))
3136 || !combinable_i3pat (NULL_RTX, &newpat, i0dest, NULL_RTX, NULL_RTX,
3137 0, 0, 0))
3138 {
3139 undo_all ();
3140 return 0;
3141 }
3142
3143 /* If the following substitution will modify I1SRC, make a copy of it
3144 for the case where it is substituted for I1DEST in I2PAT later. */
3145 if (i0_feeds_i1_n && added_sets_2 && i1_feeds_i2_n)
3146 i1src_copy = copy_rtx (i1src);
3147
3148 n_occurrences = 0;
3149 subst_low_luid = DF_INSN_LUID (i0);
3150 newpat = subst (newpat, i0dest, i0src, 0, 0);
3151 substed_i0 = 1;
3152 }
3153
3154 /* Fail if an autoincrement side-effect has been duplicated. Be careful
3155 to count all the ways that I2SRC and I1SRC can be used. */
3156 if ((FIND_REG_INC_NOTE (i2, NULL_RTX) != 0
3157 && i2_is_used + added_sets_2 > 1)
3158 || (i1 != 0 && FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
3159 && (i1_is_used + added_sets_1 + (added_sets_2 && i1_feeds_i2_n)
3160 > 1))
3161 || (i0 != 0 && FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
3162 && (n_occurrences + added_sets_0
3163 + (added_sets_1 && i0_feeds_i1_n)
3164 + (added_sets_2 && i0_feeds_i2_n)
3165 > 1))
3166 /* Fail if we tried to make a new register. */
3167 || max_reg_num () != maxreg
3168 /* Fail if we couldn't do something and have a CLOBBER. */
3169 || GET_CODE (newpat) == CLOBBER
3170 /* Fail if this new pattern is a MULT and we didn't have one before
3171 at the outer level. */
3172 || (GET_CODE (newpat) == SET && GET_CODE (SET_SRC (newpat)) == MULT
3173 && ! have_mult))
3174 {
3175 undo_all ();
3176 return 0;
3177 }
3178
3179 /* If the actions of the earlier insns must be kept
3180 in addition to substituting them into the latest one,
3181 we must make a new PARALLEL for the latest insn
3182 to hold additional the SETs. */
3183
3184 if (added_sets_0 || added_sets_1 || added_sets_2)
3185 {
3186 int extra_sets = added_sets_0 + added_sets_1 + added_sets_2;
3187 combine_extras++;
3188
3189 if (GET_CODE (newpat) == PARALLEL)
3190 {
3191 rtvec old = XVEC (newpat, 0);
3192 total_sets = XVECLEN (newpat, 0) + extra_sets;
3193 newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
3194 memcpy (XVEC (newpat, 0)->elem, &old->elem[0],
3195 sizeof (old->elem[0]) * old->num_elem);
3196 }
3197 else
3198 {
3199 rtx old = newpat;
3200 total_sets = 1 + extra_sets;
3201 newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
3202 XVECEXP (newpat, 0, 0) = old;
3203 }
3204
3205 if (added_sets_0)
3206 XVECEXP (newpat, 0, --total_sets) = i0pat;
3207
3208 if (added_sets_1)
3209 {
3210 rtx t = i1pat;
3211 if (i0_feeds_i1_n)
3212 t = subst (t, i0dest, i0src, 0, 0);
3213
3214 XVECEXP (newpat, 0, --total_sets) = t;
3215 }
3216 if (added_sets_2)
3217 {
3218 rtx t = i2pat;
3219 if (i1_feeds_i2_n)
3220 t = subst (t, i1dest, i1src_copy ? i1src_copy : i1src, 0,
3221 i0_feeds_i1_n && i0dest_in_i0src);
3222 if ((i0_feeds_i1_n && i1_feeds_i2_n) || i0_feeds_i2_n)
3223 t = subst (t, i0dest, i0src, 0, 0);
3224
3225 XVECEXP (newpat, 0, --total_sets) = t;
3226 }
3227 }
3228
3229 validate_replacement:
3230
3231 /* Note which hard regs this insn has as inputs. */
3232 mark_used_regs_combine (newpat);
3233
3234 /* If recog_for_combine fails, it strips existing clobbers. If we'll
3235 consider splitting this pattern, we might need these clobbers. */
3236 if (i1 && GET_CODE (newpat) == PARALLEL
3237 && GET_CODE (XVECEXP (newpat, 0, XVECLEN (newpat, 0) - 1)) == CLOBBER)
3238 {
3239 int len = XVECLEN (newpat, 0);
3240
3241 newpat_vec_with_clobbers = rtvec_alloc (len);
3242 for (i = 0; i < len; i++)
3243 RTVEC_ELT (newpat_vec_with_clobbers, i) = XVECEXP (newpat, 0, i);
3244 }
3245
3246 /* Is the result of combination a valid instruction? */
3247 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3248
3249 /* If the result isn't valid, see if it is a PARALLEL of two SETs where
3250 the second SET's destination is a register that is unused and isn't
3251 marked as an instruction that might trap in an EH region. In that case,
3252 we just need the first SET. This can occur when simplifying a divmod
3253 insn. We *must* test for this case here because the code below that
3254 splits two independent SETs doesn't handle this case correctly when it
3255 updates the register status.
3256
3257 It's pointless doing this if we originally had two sets, one from
3258 i3, and one from i2. Combining then splitting the parallel results
3259 in the original i2 again plus an invalid insn (which we delete).
3260 The net effect is only to move instructions around, which makes
3261 debug info less accurate.
3262
3263 Also check the case where the first SET's destination is unused.
3264 That would not cause incorrect code, but does cause an unneeded
3265 insn to remain. */
3266
3267 if (insn_code_number < 0
3268 && !(added_sets_2 && i1 == 0)
3269 && GET_CODE (newpat) == PARALLEL
3270 && XVECLEN (newpat, 0) == 2
3271 && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
3272 && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
3273 && asm_noperands (newpat) < 0)
3274 {
3275 rtx set0 = XVECEXP (newpat, 0, 0);
3276 rtx set1 = XVECEXP (newpat, 0, 1);
3277
3278 if (((REG_P (SET_DEST (set1))
3279 && find_reg_note (i3, REG_UNUSED, SET_DEST (set1)))
3280 || (GET_CODE (SET_DEST (set1)) == SUBREG
3281 && find_reg_note (i3, REG_UNUSED, SUBREG_REG (SET_DEST (set1)))))
3282 && insn_nothrow_p (i3)
3283 && !side_effects_p (SET_SRC (set1)))
3284 {
3285 newpat = set0;
3286 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3287 }
3288
3289 else if (((REG_P (SET_DEST (set0))
3290 && find_reg_note (i3, REG_UNUSED, SET_DEST (set0)))
3291 || (GET_CODE (SET_DEST (set0)) == SUBREG
3292 && find_reg_note (i3, REG_UNUSED,
3293 SUBREG_REG (SET_DEST (set0)))))
3294 && insn_nothrow_p (i3)
3295 && !side_effects_p (SET_SRC (set0)))
3296 {
3297 newpat = set1;
3298 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3299
3300 if (insn_code_number >= 0)
3301 changed_i3_dest = 1;
3302 }
3303 }
3304
3305 /* If we were combining three insns and the result is a simple SET
3306 with no ASM_OPERANDS that wasn't recognized, try to split it into two
3307 insns. There are two ways to do this. It can be split using a
3308 machine-specific method (like when you have an addition of a large
3309 constant) or by combine in the function find_split_point. */
3310
3311 if (i1 && insn_code_number < 0 && GET_CODE (newpat) == SET
3312 && asm_noperands (newpat) < 0)
3313 {
3314 rtx parallel, m_split, *split;
3315
3316 /* See if the MD file can split NEWPAT. If it can't, see if letting it
3317 use I2DEST as a scratch register will help. In the latter case,
3318 convert I2DEST to the mode of the source of NEWPAT if we can. */
3319
3320 m_split = combine_split_insns (newpat, i3);
3321
3322 /* We can only use I2DEST as a scratch reg if it doesn't overlap any
3323 inputs of NEWPAT. */
3324
3325 /* ??? If I2DEST is not safe, and I1DEST exists, then it would be
3326 possible to try that as a scratch reg. This would require adding
3327 more code to make it work though. */
3328
3329 if (m_split == 0 && ! reg_overlap_mentioned_p (i2dest, newpat))
3330 {
3331 enum machine_mode new_mode = GET_MODE (SET_DEST (newpat));
3332
3333 /* First try to split using the original register as a
3334 scratch register. */
3335 parallel = gen_rtx_PARALLEL (VOIDmode,
3336 gen_rtvec (2, newpat,
3337 gen_rtx_CLOBBER (VOIDmode,
3338 i2dest)));
3339 m_split = combine_split_insns (parallel, i3);
3340
3341 /* If that didn't work, try changing the mode of I2DEST if
3342 we can. */
3343 if (m_split == 0
3344 && new_mode != GET_MODE (i2dest)
3345 && new_mode != VOIDmode
3346 && can_change_dest_mode (i2dest, added_sets_2, new_mode))
3347 {
3348 enum machine_mode old_mode = GET_MODE (i2dest);
3349 rtx ni2dest;
3350
3351 if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
3352 ni2dest = gen_rtx_REG (new_mode, REGNO (i2dest));
3353 else
3354 {
3355 SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], new_mode);
3356 ni2dest = regno_reg_rtx[REGNO (i2dest)];
3357 }
3358
3359 parallel = (gen_rtx_PARALLEL
3360 (VOIDmode,
3361 gen_rtvec (2, newpat,
3362 gen_rtx_CLOBBER (VOIDmode,
3363 ni2dest))));
3364 m_split = combine_split_insns (parallel, i3);
3365
3366 if (m_split == 0
3367 && REGNO (i2dest) >= FIRST_PSEUDO_REGISTER)
3368 {
3369 struct undo *buf;
3370
3371 adjust_reg_mode (regno_reg_rtx[REGNO (i2dest)], old_mode);
3372 buf = undobuf.undos;
3373 undobuf.undos = buf->next;
3374 buf->next = undobuf.frees;
3375 undobuf.frees = buf;
3376 }
3377 }
3378
3379 i2scratch = m_split != 0;
3380 }
3381
3382 /* If recog_for_combine has discarded clobbers, try to use them
3383 again for the split. */
3384 if (m_split == 0 && newpat_vec_with_clobbers)
3385 {
3386 parallel = gen_rtx_PARALLEL (VOIDmode, newpat_vec_with_clobbers);
3387 m_split = combine_split_insns (parallel, i3);
3388 }
3389
3390 if (m_split && NEXT_INSN (m_split) == NULL_RTX)
3391 {
3392 m_split = PATTERN (m_split);
3393 insn_code_number = recog_for_combine (&m_split, i3, &new_i3_notes);
3394 if (insn_code_number >= 0)
3395 newpat = m_split;
3396 }
3397 else if (m_split && NEXT_INSN (NEXT_INSN (m_split)) == NULL_RTX
3398 && (next_real_insn (i2) == i3
3399 || ! use_crosses_set_p (PATTERN (m_split), DF_INSN_LUID (i2))))
3400 {
3401 rtx i2set, i3set;
3402 rtx newi3pat = PATTERN (NEXT_INSN (m_split));
3403 newi2pat = PATTERN (m_split);
3404
3405 i3set = single_set (NEXT_INSN (m_split));
3406 i2set = single_set (m_split);
3407
3408 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3409
3410 /* If I2 or I3 has multiple SETs, we won't know how to track
3411 register status, so don't use these insns. If I2's destination
3412 is used between I2 and I3, we also can't use these insns. */
3413
3414 if (i2_code_number >= 0 && i2set && i3set
3415 && (next_real_insn (i2) == i3
3416 || ! reg_used_between_p (SET_DEST (i2set), i2, i3)))
3417 insn_code_number = recog_for_combine (&newi3pat, i3,
3418 &new_i3_notes);
3419 if (insn_code_number >= 0)
3420 newpat = newi3pat;
3421
3422 /* It is possible that both insns now set the destination of I3.
3423 If so, we must show an extra use of it. */
3424
3425 if (insn_code_number >= 0)
3426 {
3427 rtx new_i3_dest = SET_DEST (i3set);
3428 rtx new_i2_dest = SET_DEST (i2set);
3429
3430 while (GET_CODE (new_i3_dest) == ZERO_EXTRACT
3431 || GET_CODE (new_i3_dest) == STRICT_LOW_PART
3432 || GET_CODE (new_i3_dest) == SUBREG)
3433 new_i3_dest = XEXP (new_i3_dest, 0);
3434
3435 while (GET_CODE (new_i2_dest) == ZERO_EXTRACT
3436 || GET_CODE (new_i2_dest) == STRICT_LOW_PART
3437 || GET_CODE (new_i2_dest) == SUBREG)
3438 new_i2_dest = XEXP (new_i2_dest, 0);
3439
3440 if (REG_P (new_i3_dest)
3441 && REG_P (new_i2_dest)
3442 && REGNO (new_i3_dest) == REGNO (new_i2_dest))
3443 INC_REG_N_SETS (REGNO (new_i2_dest), 1);
3444 }
3445 }
3446
3447 /* If we can split it and use I2DEST, go ahead and see if that
3448 helps things be recognized. Verify that none of the registers
3449 are set between I2 and I3. */
3450 if (insn_code_number < 0
3451 && (split = find_split_point (&newpat, i3, false)) != 0
3452 #ifdef HAVE_cc0
3453 && REG_P (i2dest)
3454 #endif
3455 /* We need I2DEST in the proper mode. If it is a hard register
3456 or the only use of a pseudo, we can change its mode.
3457 Make sure we don't change a hard register to have a mode that
3458 isn't valid for it, or change the number of registers. */
3459 && (GET_MODE (*split) == GET_MODE (i2dest)
3460 || GET_MODE (*split) == VOIDmode
3461 || can_change_dest_mode (i2dest, added_sets_2,
3462 GET_MODE (*split)))
3463 && (next_real_insn (i2) == i3
3464 || ! use_crosses_set_p (*split, DF_INSN_LUID (i2)))
3465 /* We can't overwrite I2DEST if its value is still used by
3466 NEWPAT. */
3467 && ! reg_referenced_p (i2dest, newpat))
3468 {
3469 rtx newdest = i2dest;
3470 enum rtx_code split_code = GET_CODE (*split);
3471 enum machine_mode split_mode = GET_MODE (*split);
3472 bool subst_done = false;
3473 newi2pat = NULL_RTX;
3474
3475 i2scratch = true;
3476
3477 /* *SPLIT may be part of I2SRC, so make sure we have the
3478 original expression around for later debug processing.
3479 We should not need I2SRC any more in other cases. */
3480 if (MAY_HAVE_DEBUG_INSNS)
3481 i2src = copy_rtx (i2src);
3482 else
3483 i2src = NULL;
3484
3485 /* Get NEWDEST as a register in the proper mode. We have already
3486 validated that we can do this. */
3487 if (GET_MODE (i2dest) != split_mode && split_mode != VOIDmode)
3488 {
3489 if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
3490 newdest = gen_rtx_REG (split_mode, REGNO (i2dest));
3491 else
3492 {
3493 SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], split_mode);
3494 newdest = regno_reg_rtx[REGNO (i2dest)];
3495 }
3496 }
3497
3498 /* If *SPLIT is a (mult FOO (const_int pow2)), convert it to
3499 an ASHIFT. This can occur if it was inside a PLUS and hence
3500 appeared to be a memory address. This is a kludge. */
3501 if (split_code == MULT
3502 && CONST_INT_P (XEXP (*split, 1))
3503 && INTVAL (XEXP (*split, 1)) > 0
3504 && (i = exact_log2 (UINTVAL (XEXP (*split, 1)))) >= 0)
3505 {
3506 SUBST (*split, gen_rtx_ASHIFT (split_mode,
3507 XEXP (*split, 0), GEN_INT (i)));
3508 /* Update split_code because we may not have a multiply
3509 anymore. */
3510 split_code = GET_CODE (*split);
3511 }
3512
3513 #ifdef INSN_SCHEDULING
3514 /* If *SPLIT is a paradoxical SUBREG, when we split it, it should
3515 be written as a ZERO_EXTEND. */
3516 if (split_code == SUBREG && MEM_P (SUBREG_REG (*split)))
3517 {
3518 #ifdef LOAD_EXTEND_OP
3519 /* Or as a SIGN_EXTEND if LOAD_EXTEND_OP says that that's
3520 what it really is. */
3521 if (LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (*split)))
3522 == SIGN_EXTEND)
3523 SUBST (*split, gen_rtx_SIGN_EXTEND (split_mode,
3524 SUBREG_REG (*split)));
3525 else
3526 #endif
3527 SUBST (*split, gen_rtx_ZERO_EXTEND (split_mode,
3528 SUBREG_REG (*split)));
3529 }
3530 #endif
3531
3532 /* Attempt to split binary operators using arithmetic identities. */
3533 if (BINARY_P (SET_SRC (newpat))
3534 && split_mode == GET_MODE (SET_SRC (newpat))
3535 && ! side_effects_p (SET_SRC (newpat)))
3536 {
3537 rtx setsrc = SET_SRC (newpat);
3538 enum machine_mode mode = GET_MODE (setsrc);
3539 enum rtx_code code = GET_CODE (setsrc);
3540 rtx src_op0 = XEXP (setsrc, 0);
3541 rtx src_op1 = XEXP (setsrc, 1);
3542
3543 /* Split "X = Y op Y" as "Z = Y; X = Z op Z". */
3544 if (rtx_equal_p (src_op0, src_op1))
3545 {
3546 newi2pat = gen_rtx_SET (VOIDmode, newdest, src_op0);
3547 SUBST (XEXP (setsrc, 0), newdest);
3548 SUBST (XEXP (setsrc, 1), newdest);
3549 subst_done = true;
3550 }
3551 /* Split "((P op Q) op R) op S" where op is PLUS or MULT. */
3552 else if ((code == PLUS || code == MULT)
3553 && GET_CODE (src_op0) == code
3554 && GET_CODE (XEXP (src_op0, 0)) == code
3555 && (INTEGRAL_MODE_P (mode)
3556 || (FLOAT_MODE_P (mode)
3557 && flag_unsafe_math_optimizations)))
3558 {
3559 rtx p = XEXP (XEXP (src_op0, 0), 0);
3560 rtx q = XEXP (XEXP (src_op0, 0), 1);
3561 rtx r = XEXP (src_op0, 1);
3562 rtx s = src_op1;
3563
3564 /* Split both "((X op Y) op X) op Y" and
3565 "((X op Y) op Y) op X" as "T op T" where T is
3566 "X op Y". */
3567 if ((rtx_equal_p (p,r) && rtx_equal_p (q,s))
3568 || (rtx_equal_p (p,s) && rtx_equal_p (q,r)))
3569 {
3570 newi2pat = gen_rtx_SET (VOIDmode, newdest,
3571 XEXP (src_op0, 0));
3572 SUBST (XEXP (setsrc, 0), newdest);
3573 SUBST (XEXP (setsrc, 1), newdest);
3574 subst_done = true;
3575 }
3576 /* Split "((X op X) op Y) op Y)" as "T op T" where
3577 T is "X op Y". */
3578 else if (rtx_equal_p (p,q) && rtx_equal_p (r,s))
3579 {
3580 rtx tmp = simplify_gen_binary (code, mode, p, r);
3581 newi2pat = gen_rtx_SET (VOIDmode, newdest, tmp);
3582 SUBST (XEXP (setsrc, 0), newdest);
3583 SUBST (XEXP (setsrc, 1), newdest);
3584 subst_done = true;
3585 }
3586 }
3587 }
3588
3589 if (!subst_done)
3590 {
3591 newi2pat = gen_rtx_SET (VOIDmode, newdest, *split);
3592 SUBST (*split, newdest);
3593 }
3594
3595 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3596
3597 /* recog_for_combine might have added CLOBBERs to newi2pat.
3598 Make sure NEWPAT does not depend on the clobbered regs. */
3599 if (GET_CODE (newi2pat) == PARALLEL)
3600 for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
3601 if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
3602 {
3603 rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
3604 if (reg_overlap_mentioned_p (reg, newpat))
3605 {
3606 undo_all ();
3607 return 0;
3608 }
3609 }
3610
3611 /* If the split point was a MULT and we didn't have one before,
3612 don't use one now. */
3613 if (i2_code_number >= 0 && ! (split_code == MULT && ! have_mult))
3614 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3615 }
3616 }
3617
3618 /* Check for a case where we loaded from memory in a narrow mode and
3619 then sign extended it, but we need both registers. In that case,
3620 we have a PARALLEL with both loads from the same memory location.
3621 We can split this into a load from memory followed by a register-register
3622 copy. This saves at least one insn, more if register allocation can
3623 eliminate the copy.
3624
3625 We cannot do this if the destination of the first assignment is a
3626 condition code register or cc0. We eliminate this case by making sure
3627 the SET_DEST and SET_SRC have the same mode.
3628
3629 We cannot do this if the destination of the second assignment is
3630 a register that we have already assumed is zero-extended. Similarly
3631 for a SUBREG of such a register. */
3632
3633 else if (i1 && insn_code_number < 0 && asm_noperands (newpat) < 0
3634 && GET_CODE (newpat) == PARALLEL
3635 && XVECLEN (newpat, 0) == 2
3636 && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
3637 && GET_CODE (SET_SRC (XVECEXP (newpat, 0, 0))) == SIGN_EXTEND
3638 && (GET_MODE (SET_DEST (XVECEXP (newpat, 0, 0)))
3639 == GET_MODE (SET_SRC (XVECEXP (newpat, 0, 0))))
3640 && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
3641 && rtx_equal_p (SET_SRC (XVECEXP (newpat, 0, 1)),
3642 XEXP (SET_SRC (XVECEXP (newpat, 0, 0)), 0))
3643 && ! use_crosses_set_p (SET_SRC (XVECEXP (newpat, 0, 1)),
3644 DF_INSN_LUID (i2))
3645 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
3646 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
3647 && ! (temp = SET_DEST (XVECEXP (newpat, 0, 1)),
3648 (REG_P (temp)
3649 && VEC_index (reg_stat_type, reg_stat,
3650 REGNO (temp))->nonzero_bits != 0
3651 && GET_MODE_BITSIZE (GET_MODE (temp)) < BITS_PER_WORD
3652 && GET_MODE_BITSIZE (GET_MODE (temp)) < HOST_BITS_PER_INT
3653 && (VEC_index (reg_stat_type, reg_stat,
3654 REGNO (temp))->nonzero_bits
3655 != GET_MODE_MASK (word_mode))))
3656 && ! (GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) == SUBREG
3657 && (temp = SUBREG_REG (SET_DEST (XVECEXP (newpat, 0, 1))),
3658 (REG_P (temp)
3659 && VEC_index (reg_stat_type, reg_stat,
3660 REGNO (temp))->nonzero_bits != 0
3661 && GET_MODE_BITSIZE (GET_MODE (temp)) < BITS_PER_WORD
3662 && GET_MODE_BITSIZE (GET_MODE (temp)) < HOST_BITS_PER_INT
3663 && (VEC_index (reg_stat_type, reg_stat,
3664 REGNO (temp))->nonzero_bits
3665 != GET_MODE_MASK (word_mode)))))
3666 && ! reg_overlap_mentioned_p (SET_DEST (XVECEXP (newpat, 0, 1)),
3667 SET_SRC (XVECEXP (newpat, 0, 1)))
3668 && ! find_reg_note (i3, REG_UNUSED,
3669 SET_DEST (XVECEXP (newpat, 0, 0))))
3670 {
3671 rtx ni2dest;
3672
3673 newi2pat = XVECEXP (newpat, 0, 0);
3674 ni2dest = SET_DEST (XVECEXP (newpat, 0, 0));
3675 newpat = XVECEXP (newpat, 0, 1);
3676 SUBST (SET_SRC (newpat),
3677 gen_lowpart (GET_MODE (SET_SRC (newpat)), ni2dest));
3678 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3679
3680 if (i2_code_number >= 0)
3681 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3682
3683 if (insn_code_number >= 0)
3684 swap_i2i3 = 1;
3685 }
3686
3687 /* Similarly, check for a case where we have a PARALLEL of two independent
3688 SETs but we started with three insns. In this case, we can do the sets
3689 as two separate insns. This case occurs when some SET allows two
3690 other insns to combine, but the destination of that SET is still live. */
3691
3692 else if (i1 && insn_code_number < 0 && asm_noperands (newpat) < 0
3693 && GET_CODE (newpat) == PARALLEL
3694 && XVECLEN (newpat, 0) == 2
3695 && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
3696 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != ZERO_EXTRACT
3697 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != STRICT_LOW_PART
3698 && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
3699 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
3700 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
3701 && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 1)),
3702 XVECEXP (newpat, 0, 0))
3703 && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 0)),
3704 XVECEXP (newpat, 0, 1))
3705 && ! (contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 0)))
3706 && contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 1)))))
3707 {
3708 /* Normally, it doesn't matter which of the two is done first,
3709 but the one that references cc0 can't be the second, and
3710 one which uses any regs/memory set in between i2 and i3 can't
3711 be first. */
3712 if (!use_crosses_set_p (SET_SRC (XVECEXP (newpat, 0, 1)),
3713 DF_INSN_LUID (i2))
3714 #ifdef HAVE_cc0
3715 && !reg_referenced_p (cc0_rtx, XVECEXP (newpat, 0, 0))
3716 #endif
3717 )
3718 {
3719 newi2pat = XVECEXP (newpat, 0, 1);
3720 newpat = XVECEXP (newpat, 0, 0);
3721 }
3722 else if (!use_crosses_set_p (SET_SRC (XVECEXP (newpat, 0, 0)),
3723 DF_INSN_LUID (i2))
3724 #ifdef HAVE_cc0
3725 && !reg_referenced_p (cc0_rtx, XVECEXP (newpat, 0, 1))
3726 #endif
3727 )
3728 {
3729 newi2pat = XVECEXP (newpat, 0, 0);
3730 newpat = XVECEXP (newpat, 0, 1);
3731 }
3732 else
3733 {
3734 undo_all ();
3735 return 0;
3736 }
3737
3738 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3739
3740 if (i2_code_number >= 0)
3741 {
3742 /* recog_for_combine might have added CLOBBERs to newi2pat.
3743 Make sure NEWPAT does not depend on the clobbered regs. */
3744 if (GET_CODE (newi2pat) == PARALLEL)
3745 {
3746 for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
3747 if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
3748 {
3749 rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
3750 if (reg_overlap_mentioned_p (reg, newpat))
3751 {
3752 undo_all ();
3753 return 0;
3754 }
3755 }
3756 }
3757
3758 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3759 }
3760 }
3761
3762 /* If it still isn't recognized, fail and change things back the way they
3763 were. */
3764 if ((insn_code_number < 0
3765 /* Is the result a reasonable ASM_OPERANDS? */
3766 && (! check_asm_operands (newpat) || added_sets_1 || added_sets_2)))
3767 {
3768 undo_all ();
3769 return 0;
3770 }
3771
3772 /* If we had to change another insn, make sure it is valid also. */
3773 if (undobuf.other_insn)
3774 {
3775 CLEAR_HARD_REG_SET (newpat_used_regs);
3776
3777 other_pat = PATTERN (undobuf.other_insn);
3778 other_code_number = recog_for_combine (&other_pat, undobuf.other_insn,
3779 &new_other_notes);
3780
3781 if (other_code_number < 0 && ! check_asm_operands (other_pat))
3782 {
3783 undo_all ();
3784 return 0;
3785 }
3786 }
3787
3788 #ifdef HAVE_cc0
3789 /* If I2 is the CC0 setter and I3 is the CC0 user then check whether
3790 they are adjacent to each other or not. */
3791 {
3792 rtx p = prev_nonnote_insn (i3);
3793 if (p && p != i2 && NONJUMP_INSN_P (p) && newi2pat
3794 && sets_cc0_p (newi2pat))
3795 {
3796 undo_all ();
3797 return 0;
3798 }
3799 }
3800 #endif
3801
3802 /* Only allow this combination if insn_rtx_costs reports that the
3803 replacement instructions are cheaper than the originals. */
3804 if (!combine_validate_cost (i0, i1, i2, i3, newpat, newi2pat, other_pat))
3805 {
3806 undo_all ();
3807 return 0;
3808 }
3809
3810 if (MAY_HAVE_DEBUG_INSNS)
3811 {
3812 struct undo *undo;
3813
3814 for (undo = undobuf.undos; undo; undo = undo->next)
3815 if (undo->kind == UNDO_MODE)
3816 {
3817 rtx reg = *undo->where.r;
3818 enum machine_mode new_mode = GET_MODE (reg);
3819 enum machine_mode old_mode = undo->old_contents.m;
3820
3821 /* Temporarily revert mode back. */
3822 adjust_reg_mode (reg, old_mode);
3823
3824 if (reg == i2dest && i2scratch)
3825 {
3826 /* If we used i2dest as a scratch register with a
3827 different mode, substitute it for the original
3828 i2src while its original mode is temporarily
3829 restored, and then clear i2scratch so that we don't
3830 do it again later. */
3831 propagate_for_debug (i2, i3, reg, i2src);
3832 i2scratch = false;
3833 /* Put back the new mode. */
3834 adjust_reg_mode (reg, new_mode);
3835 }
3836 else
3837 {
3838 rtx tempreg = gen_raw_REG (old_mode, REGNO (reg));
3839 rtx first, last;
3840
3841 if (reg == i2dest)
3842 {
3843 first = i2;
3844 last = i3;
3845 }
3846 else
3847 {
3848 first = i3;
3849 last = undobuf.other_insn;
3850 gcc_assert (last);
3851 }
3852
3853 /* We're dealing with a reg that changed mode but not
3854 meaning, so we want to turn it into a subreg for
3855 the new mode. However, because of REG sharing and
3856 because its mode had already changed, we have to do
3857 it in two steps. First, replace any debug uses of
3858 reg, with its original mode temporarily restored,
3859 with this copy we have created; then, replace the
3860 copy with the SUBREG of the original shared reg,
3861 once again changed to the new mode. */
3862 propagate_for_debug (first, last, reg, tempreg);
3863 adjust_reg_mode (reg, new_mode);
3864 propagate_for_debug (first, last, tempreg,
3865 lowpart_subreg (old_mode, reg, new_mode));
3866 }
3867 }
3868 }
3869
3870 /* If we will be able to accept this, we have made a
3871 change to the destination of I3. This requires us to
3872 do a few adjustments. */
3873
3874 if (changed_i3_dest)
3875 {
3876 PATTERN (i3) = newpat;
3877 adjust_for_new_dest (i3);
3878 }
3879
3880 /* We now know that we can do this combination. Merge the insns and
3881 update the status of registers and LOG_LINKS. */
3882
3883 if (undobuf.other_insn)
3884 {
3885 rtx note, next;
3886
3887 PATTERN (undobuf.other_insn) = other_pat;
3888
3889 /* If any of the notes in OTHER_INSN were REG_UNUSED, ensure that they
3890 are still valid. Then add any non-duplicate notes added by
3891 recog_for_combine. */
3892 for (note = REG_NOTES (undobuf.other_insn); note; note = next)
3893 {
3894 next = XEXP (note, 1);
3895
3896 if (REG_NOTE_KIND (note) == REG_UNUSED
3897 && ! reg_set_p (XEXP (note, 0), PATTERN (undobuf.other_insn)))
3898 remove_note (undobuf.other_insn, note);
3899 }
3900
3901 distribute_notes (new_other_notes, undobuf.other_insn,
3902 undobuf.other_insn, NULL_RTX, NULL_RTX, NULL_RTX,
3903 NULL_RTX);
3904 }
3905
3906 if (swap_i2i3)
3907 {
3908 rtx insn;
3909 rtx link;
3910 rtx ni2dest;
3911
3912 /* I3 now uses what used to be its destination and which is now
3913 I2's destination. This requires us to do a few adjustments. */
3914 PATTERN (i3) = newpat;
3915 adjust_for_new_dest (i3);
3916
3917 /* We need a LOG_LINK from I3 to I2. But we used to have one,
3918 so we still will.
3919
3920 However, some later insn might be using I2's dest and have
3921 a LOG_LINK pointing at I3. We must remove this link.
3922 The simplest way to remove the link is to point it at I1,
3923 which we know will be a NOTE. */
3924
3925 /* newi2pat is usually a SET here; however, recog_for_combine might
3926 have added some clobbers. */
3927 if (GET_CODE (newi2pat) == PARALLEL)
3928 ni2dest = SET_DEST (XVECEXP (newi2pat, 0, 0));
3929 else
3930 ni2dest = SET_DEST (newi2pat);
3931
3932 for (insn = NEXT_INSN (i3);
3933 insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR
3934 || insn != BB_HEAD (this_basic_block->next_bb));
3935 insn = NEXT_INSN (insn))
3936 {
3937 if (INSN_P (insn) && reg_referenced_p (ni2dest, PATTERN (insn)))
3938 {
3939 for (link = LOG_LINKS (insn); link;
3940 link = XEXP (link, 1))
3941 if (XEXP (link, 0) == i3)
3942 XEXP (link, 0) = i1;
3943
3944 break;
3945 }
3946 }
3947 }
3948
3949 {
3950 rtx i3notes, i2notes, i1notes = 0, i0notes = 0;
3951 rtx i3links, i2links, i1links = 0, i0links = 0;
3952 rtx midnotes = 0;
3953 int from_luid;
3954 unsigned int regno;
3955 /* Compute which registers we expect to eliminate. newi2pat may be setting
3956 either i3dest or i2dest, so we must check it. Also, i1dest may be the
3957 same as i3dest, in which case newi2pat may be setting i1dest. */
3958 rtx elim_i2 = ((newi2pat && reg_set_p (i2dest, newi2pat))
3959 || i2dest_in_i2src || i2dest_in_i1src || i2dest_in_i0src
3960 || !i2dest_killed
3961 ? 0 : i2dest);
3962 rtx elim_i1 = (i1 == 0 || i1dest_in_i1src || i1dest_in_i0src
3963 || (newi2pat && reg_set_p (i1dest, newi2pat))
3964 || !i1dest_killed
3965 ? 0 : i1dest);
3966 rtx elim_i0 = (i0 == 0 || i0dest_in_i0src
3967 || (newi2pat && reg_set_p (i0dest, newi2pat))
3968 || !i0dest_killed
3969 ? 0 : i0dest);
3970
3971 /* Get the old REG_NOTES and LOG_LINKS from all our insns and
3972 clear them. */
3973 i3notes = REG_NOTES (i3), i3links = LOG_LINKS (i3);
3974 i2notes = REG_NOTES (i2), i2links = LOG_LINKS (i2);
3975 if (i1)
3976 i1notes = REG_NOTES (i1), i1links = LOG_LINKS (i1);
3977 if (i0)
3978 i0notes = REG_NOTES (i0), i0links = LOG_LINKS (i0);
3979
3980 /* Ensure that we do not have something that should not be shared but
3981 occurs multiple times in the new insns. Check this by first
3982 resetting all the `used' flags and then copying anything is shared. */
3983
3984 reset_used_flags (i3notes);
3985 reset_used_flags (i2notes);
3986 reset_used_flags (i1notes);
3987 reset_used_flags (i0notes);
3988 reset_used_flags (newpat);
3989 reset_used_flags (newi2pat);
3990 if (undobuf.other_insn)
3991 reset_used_flags (PATTERN (undobuf.other_insn));
3992
3993 i3notes = copy_rtx_if_shared (i3notes);
3994 i2notes = copy_rtx_if_shared (i2notes);
3995 i1notes = copy_rtx_if_shared (i1notes);
3996 i0notes = copy_rtx_if_shared (i0notes);
3997 newpat = copy_rtx_if_shared (newpat);
3998 newi2pat = copy_rtx_if_shared (newi2pat);
3999 if (undobuf.other_insn)
4000 reset_used_flags (PATTERN (undobuf.other_insn));
4001
4002 INSN_CODE (i3) = insn_code_number;
4003 PATTERN (i3) = newpat;
4004
4005 if (CALL_P (i3) && CALL_INSN_FUNCTION_USAGE (i3))
4006 {
4007 rtx call_usage = CALL_INSN_FUNCTION_USAGE (i3);
4008
4009 reset_used_flags (call_usage);
4010 call_usage = copy_rtx (call_usage);
4011
4012 if (substed_i2)
4013 {
4014 /* I2SRC must still be meaningful at this point. Some splitting
4015 operations can invalidate I2SRC, but those operations do not
4016 apply to calls. */
4017 gcc_assert (i2src);
4018 replace_rtx (call_usage, i2dest, i2src);
4019 }
4020
4021 if (substed_i1)
4022 replace_rtx (call_usage, i1dest, i1src);
4023 if (substed_i0)
4024 replace_rtx (call_usage, i0dest, i0src);
4025
4026 CALL_INSN_FUNCTION_USAGE (i3) = call_usage;
4027 }
4028
4029 if (undobuf.other_insn)
4030 INSN_CODE (undobuf.other_insn) = other_code_number;
4031
4032 /* We had one special case above where I2 had more than one set and
4033 we replaced a destination of one of those sets with the destination
4034 of I3. In that case, we have to update LOG_LINKS of insns later
4035 in this basic block. Note that this (expensive) case is rare.
4036
4037 Also, in this case, we must pretend that all REG_NOTEs for I2
4038 actually came from I3, so that REG_UNUSED notes from I2 will be
4039 properly handled. */
4040
4041 if (i3_subst_into_i2)
4042 {
4043 for (i = 0; i < XVECLEN (PATTERN (i2), 0); i++)
4044 if ((GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == SET
4045 || GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == CLOBBER)
4046 && REG_P (SET_DEST (XVECEXP (PATTERN (i2), 0, i)))
4047 && SET_DEST (XVECEXP (PATTERN (i2), 0, i)) != i2dest
4048 && ! find_reg_note (i2, REG_UNUSED,
4049 SET_DEST (XVECEXP (PATTERN (i2), 0, i))))
4050 for (temp = NEXT_INSN (i2);
4051 temp && (this_basic_block->next_bb == EXIT_BLOCK_PTR
4052 || BB_HEAD (this_basic_block) != temp);
4053 temp = NEXT_INSN (temp))
4054 if (temp != i3 && INSN_P (temp))
4055 for (link = LOG_LINKS (temp); link; link = XEXP (link, 1))
4056 if (XEXP (link, 0) == i2)
4057 XEXP (link, 0) = i3;
4058
4059 if (i3notes)
4060 {
4061 rtx link = i3notes;
4062 while (XEXP (link, 1))
4063 link = XEXP (link, 1);
4064 XEXP (link, 1) = i2notes;
4065 }
4066 else
4067 i3notes = i2notes;
4068 i2notes = 0;
4069 }
4070
4071 LOG_LINKS (i3) = 0;
4072 REG_NOTES (i3) = 0;
4073 LOG_LINKS (i2) = 0;
4074 REG_NOTES (i2) = 0;
4075
4076 if (newi2pat)
4077 {
4078 if (MAY_HAVE_DEBUG_INSNS && i2scratch)
4079 propagate_for_debug (i2, i3, i2dest, i2src);
4080 INSN_CODE (i2) = i2_code_number;
4081 PATTERN (i2) = newi2pat;
4082 }
4083 else
4084 {
4085 if (MAY_HAVE_DEBUG_INSNS && i2src)
4086 propagate_for_debug (i2, i3, i2dest, i2src);
4087 SET_INSN_DELETED (i2);
4088 }
4089
4090 if (i1)
4091 {
4092 LOG_LINKS (i1) = 0;
4093 REG_NOTES (i1) = 0;
4094 if (MAY_HAVE_DEBUG_INSNS)
4095 propagate_for_debug (i1, i3, i1dest, i1src);
4096 SET_INSN_DELETED (i1);
4097 }
4098
4099 if (i0)
4100 {
4101 LOG_LINKS (i0) = 0;
4102 REG_NOTES (i0) = 0;
4103 if (MAY_HAVE_DEBUG_INSNS)
4104 propagate_for_debug (i0, i3, i0dest, i0src);
4105 SET_INSN_DELETED (i0);
4106 }
4107
4108 /* Get death notes for everything that is now used in either I3 or
4109 I2 and used to die in a previous insn. If we built two new
4110 patterns, move from I1 to I2 then I2 to I3 so that we get the
4111 proper movement on registers that I2 modifies. */
4112
4113 if (i0)
4114 from_luid = DF_INSN_LUID (i0);
4115 else if (i1)
4116 from_luid = DF_INSN_LUID (i1);
4117 else
4118 from_luid = DF_INSN_LUID (i2);
4119 if (newi2pat)
4120 move_deaths (newi2pat, NULL_RTX, from_luid, i2, &midnotes);
4121 move_deaths (newpat, newi2pat, from_luid, i3, &midnotes);
4122
4123 /* Distribute all the LOG_LINKS and REG_NOTES from I1, I2, and I3. */
4124 if (i3notes)
4125 distribute_notes (i3notes, i3, i3, newi2pat ? i2 : NULL_RTX,
4126 elim_i2, elim_i1, elim_i0);
4127 if (i2notes)
4128 distribute_notes (i2notes, i2, i3, newi2pat ? i2 : NULL_RTX,
4129 elim_i2, elim_i1, elim_i0);
4130 if (i1notes)
4131 distribute_notes (i1notes, i1, i3, newi2pat ? i2 : NULL_RTX,
4132 elim_i2, elim_i1, elim_i0);
4133 if (i0notes)
4134 distribute_notes (i0notes, i0, i3, newi2pat ? i2 : NULL_RTX,
4135 elim_i2, elim_i1, elim_i0);
4136 if (midnotes)
4137 distribute_notes (midnotes, NULL_RTX, i3, newi2pat ? i2 : NULL_RTX,
4138 elim_i2, elim_i1, elim_i0);
4139
4140 /* Distribute any notes added to I2 or I3 by recog_for_combine. We
4141 know these are REG_UNUSED and want them to go to the desired insn,
4142 so we always pass it as i3. */
4143
4144 if (newi2pat && new_i2_notes)
4145 distribute_notes (new_i2_notes, i2, i2, NULL_RTX, NULL_RTX, NULL_RTX,
4146 NULL_RTX);
4147
4148 if (new_i3_notes)
4149 distribute_notes (new_i3_notes, i3, i3, NULL_RTX, NULL_RTX, NULL_RTX,
4150 NULL_RTX);
4151
4152 /* If I3DEST was used in I3SRC, it really died in I3. We may need to
4153 put a REG_DEAD note for it somewhere. If NEWI2PAT exists and sets
4154 I3DEST, the death must be somewhere before I2, not I3. If we passed I3
4155 in that case, it might delete I2. Similarly for I2 and I1.
4156 Show an additional death due to the REG_DEAD note we make here. If
4157 we discard it in distribute_notes, we will decrement it again. */
4158
4159 if (i3dest_killed)
4160 {
4161 if (newi2pat && reg_set_p (i3dest_killed, newi2pat))
4162 distribute_notes (alloc_reg_note (REG_DEAD, i3dest_killed,
4163 NULL_RTX),
4164 NULL_RTX, i2, NULL_RTX, elim_i2, elim_i1, elim_i0);
4165 else
4166 distribute_notes (alloc_reg_note (REG_DEAD, i3dest_killed,
4167 NULL_RTX),
4168 NULL_RTX, i3, newi2pat ? i2 : NULL_RTX,
4169 elim_i2, elim_i1, elim_i0);
4170 }
4171
4172 if (i2dest_in_i2src)
4173 {
4174 rtx new_note = alloc_reg_note (REG_DEAD, i2dest, NULL_RTX);
4175 if (newi2pat && reg_set_p (i2dest, newi2pat))
4176 distribute_notes (new_note, NULL_RTX, i2, NULL_RTX, NULL_RTX,
4177 NULL_RTX, NULL_RTX);
4178 else
4179 distribute_notes (new_note, NULL_RTX, i3, newi2pat ? i2 : NULL_RTX,
4180 NULL_RTX, NULL_RTX, NULL_RTX);
4181 }
4182
4183 if (i1dest_in_i1src)
4184 {
4185 rtx new_note = alloc_reg_note (REG_DEAD, i1dest, NULL_RTX);
4186 if (newi2pat && reg_set_p (i1dest, newi2pat))
4187 distribute_notes (new_note, NULL_RTX, i2, NULL_RTX, NULL_RTX,
4188 NULL_RTX, NULL_RTX);
4189 else
4190 distribute_notes (new_note, NULL_RTX, i3, newi2pat ? i2 : NULL_RTX,
4191 NULL_RTX, NULL_RTX, NULL_RTX);
4192 }
4193
4194 if (i0dest_in_i0src)
4195 {
4196 rtx new_note = alloc_reg_note (REG_DEAD, i0dest, NULL_RTX);
4197 if (newi2pat && reg_set_p (i0dest, newi2pat))
4198 distribute_notes (new_note, NULL_RTX, i2, NULL_RTX, NULL_RTX,
4199 NULL_RTX, NULL_RTX);
4200 else
4201 distribute_notes (new_note, NULL_RTX, i3, newi2pat ? i2 : NULL_RTX,
4202 NULL_RTX, NULL_RTX, NULL_RTX);
4203 }
4204
4205 distribute_links (i3links);
4206 distribute_links (i2links);
4207 distribute_links (i1links);
4208 distribute_links (i0links);
4209
4210 if (REG_P (i2dest))
4211 {
4212 rtx link;
4213 rtx i2_insn = 0, i2_val = 0, set;
4214
4215 /* The insn that used to set this register doesn't exist, and
4216 this life of the register may not exist either. See if one of
4217 I3's links points to an insn that sets I2DEST. If it does,
4218 that is now the last known value for I2DEST. If we don't update
4219 this and I2 set the register to a value that depended on its old
4220 contents, we will get confused. If this insn is used, thing
4221 will be set correctly in combine_instructions. */
4222
4223 for (link = LOG_LINKS (i3); link; link = XEXP (link, 1))
4224 if ((set = single_set (XEXP (link, 0))) != 0
4225 && rtx_equal_p (i2dest, SET_DEST (set)))
4226 i2_insn = XEXP (link, 0), i2_val = SET_SRC (set);
4227
4228 record_value_for_reg (i2dest, i2_insn, i2_val);
4229
4230 /* If the reg formerly set in I2 died only once and that was in I3,
4231 zero its use count so it won't make `reload' do any work. */
4232 if (! added_sets_2
4233 && (newi2pat == 0 || ! reg_mentioned_p (i2dest, newi2pat))
4234 && ! i2dest_in_i2src)
4235 {
4236 regno = REGNO (i2dest);
4237 INC_REG_N_SETS (regno, -1);
4238 }
4239 }
4240
4241 if (i1 && REG_P (i1dest))
4242 {
4243 rtx link;
4244 rtx i1_insn = 0, i1_val = 0, set;
4245
4246 for (link = LOG_LINKS (i3); link; link = XEXP (link, 1))
4247 if ((set = single_set (XEXP (link, 0))) != 0
4248 && rtx_equal_p (i1dest, SET_DEST (set)))
4249 i1_insn = XEXP (link, 0), i1_val = SET_SRC (set);
4250
4251 record_value_for_reg (i1dest, i1_insn, i1_val);
4252
4253 regno = REGNO (i1dest);
4254 if (! added_sets_1 && ! i1dest_in_i1src)
4255 INC_REG_N_SETS (regno, -1);
4256 }
4257
4258 if (i0 && REG_P (i0dest))
4259 {
4260 rtx link;
4261 rtx i0_insn = 0, i0_val = 0, set;
4262
4263 for (link = LOG_LINKS (i3); link; link = XEXP (link, 1))
4264 if ((set = single_set (XEXP (link, 0))) != 0
4265 && rtx_equal_p (i0dest, SET_DEST (set)))
4266 i0_insn = XEXP (link, 0), i0_val = SET_SRC (set);
4267
4268 record_value_for_reg (i0dest, i0_insn, i0_val);
4269
4270 regno = REGNO (i0dest);
4271 if (! added_sets_0 && ! i0dest_in_i0src)
4272 INC_REG_N_SETS (regno, -1);
4273 }
4274
4275 /* Update reg_stat[].nonzero_bits et al for any changes that may have
4276 been made to this insn. The order of
4277 set_nonzero_bits_and_sign_copies() is important. Because newi2pat
4278 can affect nonzero_bits of newpat */
4279 if (newi2pat)
4280 note_stores (newi2pat, set_nonzero_bits_and_sign_copies, NULL);
4281 note_stores (newpat, set_nonzero_bits_and_sign_copies, NULL);
4282 }
4283
4284 if (undobuf.other_insn != NULL_RTX)
4285 {
4286 if (dump_file)
4287 {
4288 fprintf (dump_file, "modifying other_insn ");
4289 dump_insn_slim (dump_file, undobuf.other_insn);
4290 }
4291 df_insn_rescan (undobuf.other_insn);
4292 }
4293
4294 if (i0 && !(NOTE_P(i0) && (NOTE_KIND (i0) == NOTE_INSN_DELETED)))
4295 {
4296 if (dump_file)
4297 {
4298 fprintf (dump_file, "modifying insn i1 ");
4299 dump_insn_slim (dump_file, i0);
4300 }
4301 df_insn_rescan (i0);
4302 }
4303
4304 if (i1 && !(NOTE_P(i1) && (NOTE_KIND (i1) == NOTE_INSN_DELETED)))
4305 {
4306 if (dump_file)
4307 {
4308 fprintf (dump_file, "modifying insn i1 ");
4309 dump_insn_slim (dump_file, i1);
4310 }
4311 df_insn_rescan (i1);
4312 }
4313
4314 if (i2 && !(NOTE_P(i2) && (NOTE_KIND (i2) == NOTE_INSN_DELETED)))
4315 {
4316 if (dump_file)
4317 {
4318 fprintf (dump_file, "modifying insn i2 ");
4319 dump_insn_slim (dump_file, i2);
4320 }
4321 df_insn_rescan (i2);
4322 }
4323
4324 if (i3 && !(NOTE_P(i3) && (NOTE_KIND (i3) == NOTE_INSN_DELETED)))
4325 {
4326 if (dump_file)
4327 {
4328 fprintf (dump_file, "modifying insn i3 ");
4329 dump_insn_slim (dump_file, i3);
4330 }
4331 df_insn_rescan (i3);
4332 }
4333
4334 /* Set new_direct_jump_p if a new return or simple jump instruction
4335 has been created. Adjust the CFG accordingly. */
4336
4337 if (returnjump_p (i3) || any_uncondjump_p (i3))
4338 {
4339 *new_direct_jump_p = 1;
4340 mark_jump_label (PATTERN (i3), i3, 0);
4341 update_cfg_for_uncondjump (i3);
4342 }
4343
4344 if (undobuf.other_insn != NULL_RTX
4345 && (returnjump_p (undobuf.other_insn)
4346 || any_uncondjump_p (undobuf.other_insn)))
4347 {
4348 *new_direct_jump_p = 1;
4349 update_cfg_for_uncondjump (undobuf.other_insn);
4350 }
4351
4352 /* A noop might also need cleaning up of CFG, if it comes from the
4353 simplification of a jump. */
4354 if (GET_CODE (newpat) == SET
4355 && SET_SRC (newpat) == pc_rtx
4356 && SET_DEST (newpat) == pc_rtx)
4357 {
4358 *new_direct_jump_p = 1;
4359 update_cfg_for_uncondjump (i3);
4360 }
4361
4362 combine_successes++;
4363 undo_commit ();
4364
4365 if (added_links_insn
4366 && (newi2pat == 0 || DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (i2))
4367 && DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (i3))
4368 return added_links_insn;
4369 else
4370 return newi2pat ? i2 : i3;
4371 }
4372 \f
4373 /* Undo all the modifications recorded in undobuf. */
4374
4375 static void
4376 undo_all (void)
4377 {
4378 struct undo *undo, *next;
4379
4380 for (undo = undobuf.undos; undo; undo = next)
4381 {
4382 next = undo->next;
4383 switch (undo->kind)
4384 {
4385 case UNDO_RTX:
4386 *undo->where.r = undo->old_contents.r;
4387 break;
4388 case UNDO_INT:
4389 *undo->where.i = undo->old_contents.i;
4390 break;
4391 case UNDO_MODE:
4392 adjust_reg_mode (*undo->where.r, undo->old_contents.m);
4393 break;
4394 default:
4395 gcc_unreachable ();
4396 }
4397
4398 undo->next = undobuf.frees;
4399 undobuf.frees = undo;
4400 }
4401
4402 undobuf.undos = 0;
4403 }
4404
4405 /* We've committed to accepting the changes we made. Move all
4406 of the undos to the free list. */
4407
4408 static void
4409 undo_commit (void)
4410 {
4411 struct undo *undo, *next;
4412
4413 for (undo = undobuf.undos; undo; undo = next)
4414 {
4415 next = undo->next;
4416 undo->next = undobuf.frees;
4417 undobuf.frees = undo;
4418 }
4419 undobuf.undos = 0;
4420 }
4421 \f
4422 /* Find the innermost point within the rtx at LOC, possibly LOC itself,
4423 where we have an arithmetic expression and return that point. LOC will
4424 be inside INSN.
4425
4426 try_combine will call this function to see if an insn can be split into
4427 two insns. */
4428
4429 static rtx *
4430 find_split_point (rtx *loc, rtx insn, bool set_src)
4431 {
4432 rtx x = *loc;
4433 enum rtx_code code = GET_CODE (x);
4434 rtx *split;
4435 unsigned HOST_WIDE_INT len = 0;
4436 HOST_WIDE_INT pos = 0;
4437 int unsignedp = 0;
4438 rtx inner = NULL_RTX;
4439
4440 /* First special-case some codes. */
4441 switch (code)
4442 {
4443 case SUBREG:
4444 #ifdef INSN_SCHEDULING
4445 /* If we are making a paradoxical SUBREG invalid, it becomes a split
4446 point. */
4447 if (MEM_P (SUBREG_REG (x)))
4448 return loc;
4449 #endif
4450 return find_split_point (&SUBREG_REG (x), insn, false);
4451
4452 case MEM:
4453 #ifdef HAVE_lo_sum
4454 /* If we have (mem (const ..)) or (mem (symbol_ref ...)), split it
4455 using LO_SUM and HIGH. */
4456 if (GET_CODE (XEXP (x, 0)) == CONST
4457 || GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
4458 {
4459 enum machine_mode address_mode
4460 = targetm.addr_space.address_mode (MEM_ADDR_SPACE (x));
4461
4462 SUBST (XEXP (x, 0),
4463 gen_rtx_LO_SUM (address_mode,
4464 gen_rtx_HIGH (address_mode, XEXP (x, 0)),
4465 XEXP (x, 0)));
4466 return &XEXP (XEXP (x, 0), 0);
4467 }
4468 #endif
4469
4470 /* If we have a PLUS whose second operand is a constant and the
4471 address is not valid, perhaps will can split it up using
4472 the machine-specific way to split large constants. We use
4473 the first pseudo-reg (one of the virtual regs) as a placeholder;
4474 it will not remain in the result. */
4475 if (GET_CODE (XEXP (x, 0)) == PLUS
4476 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
4477 && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
4478 MEM_ADDR_SPACE (x)))
4479 {
4480 rtx reg = regno_reg_rtx[FIRST_PSEUDO_REGISTER];
4481 rtx seq = combine_split_insns (gen_rtx_SET (VOIDmode, reg,
4482 XEXP (x, 0)),
4483 subst_insn);
4484
4485 /* This should have produced two insns, each of which sets our
4486 placeholder. If the source of the second is a valid address,
4487 we can make put both sources together and make a split point
4488 in the middle. */
4489
4490 if (seq
4491 && NEXT_INSN (seq) != NULL_RTX
4492 && NEXT_INSN (NEXT_INSN (seq)) == NULL_RTX
4493 && NONJUMP_INSN_P (seq)
4494 && GET_CODE (PATTERN (seq)) == SET
4495 && SET_DEST (PATTERN (seq)) == reg
4496 && ! reg_mentioned_p (reg,
4497 SET_SRC (PATTERN (seq)))
4498 && NONJUMP_INSN_P (NEXT_INSN (seq))
4499 && GET_CODE (PATTERN (NEXT_INSN (seq))) == SET
4500 && SET_DEST (PATTERN (NEXT_INSN (seq))) == reg
4501 && memory_address_addr_space_p
4502 (GET_MODE (x), SET_SRC (PATTERN (NEXT_INSN (seq))),
4503 MEM_ADDR_SPACE (x)))
4504 {
4505 rtx src1 = SET_SRC (PATTERN (seq));
4506 rtx src2 = SET_SRC (PATTERN (NEXT_INSN (seq)));
4507
4508 /* Replace the placeholder in SRC2 with SRC1. If we can
4509 find where in SRC2 it was placed, that can become our
4510 split point and we can replace this address with SRC2.
4511 Just try two obvious places. */
4512
4513 src2 = replace_rtx (src2, reg, src1);
4514 split = 0;
4515 if (XEXP (src2, 0) == src1)
4516 split = &XEXP (src2, 0);
4517 else if (GET_RTX_FORMAT (GET_CODE (XEXP (src2, 0)))[0] == 'e'
4518 && XEXP (XEXP (src2, 0), 0) == src1)
4519 split = &XEXP (XEXP (src2, 0), 0);
4520
4521 if (split)
4522 {
4523 SUBST (XEXP (x, 0), src2);
4524 return split;
4525 }
4526 }
4527
4528 /* If that didn't work, perhaps the first operand is complex and
4529 needs to be computed separately, so make a split point there.
4530 This will occur on machines that just support REG + CONST
4531 and have a constant moved through some previous computation. */
4532
4533 else if (!OBJECT_P (XEXP (XEXP (x, 0), 0))
4534 && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
4535 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
4536 return &XEXP (XEXP (x, 0), 0);
4537 }
4538
4539 /* If we have a PLUS whose first operand is complex, try computing it
4540 separately by making a split there. */
4541 if (GET_CODE (XEXP (x, 0)) == PLUS
4542 && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
4543 MEM_ADDR_SPACE (x))
4544 && ! OBJECT_P (XEXP (XEXP (x, 0), 0))
4545 && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
4546 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
4547 return &XEXP (XEXP (x, 0), 0);
4548 break;
4549
4550 case SET:
4551 #ifdef HAVE_cc0
4552 /* If SET_DEST is CC0 and SET_SRC is not an operand, a COMPARE, or a
4553 ZERO_EXTRACT, the most likely reason why this doesn't match is that
4554 we need to put the operand into a register. So split at that
4555 point. */
4556
4557 if (SET_DEST (x) == cc0_rtx
4558 && GET_CODE (SET_SRC (x)) != COMPARE
4559 && GET_CODE (SET_SRC (x)) != ZERO_EXTRACT
4560 && !OBJECT_P (SET_SRC (x))
4561 && ! (GET_CODE (SET_SRC (x)) == SUBREG
4562 && OBJECT_P (SUBREG_REG (SET_SRC (x)))))
4563 return &SET_SRC (x);
4564 #endif
4565
4566 /* See if we can split SET_SRC as it stands. */
4567 split = find_split_point (&SET_SRC (x), insn, true);
4568 if (split && split != &SET_SRC (x))
4569 return split;
4570
4571 /* See if we can split SET_DEST as it stands. */
4572 split = find_split_point (&SET_DEST (x), insn, false);
4573 if (split && split != &SET_DEST (x))
4574 return split;
4575
4576 /* See if this is a bitfield assignment with everything constant. If
4577 so, this is an IOR of an AND, so split it into that. */
4578 if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
4579 && (GET_MODE_BITSIZE (GET_MODE (XEXP (SET_DEST (x), 0)))
4580 <= HOST_BITS_PER_WIDE_INT)
4581 && CONST_INT_P (XEXP (SET_DEST (x), 1))
4582 && CONST_INT_P (XEXP (SET_DEST (x), 2))
4583 && CONST_INT_P (SET_SRC (x))
4584 && ((INTVAL (XEXP (SET_DEST (x), 1))
4585 + INTVAL (XEXP (SET_DEST (x), 2)))
4586 <= GET_MODE_BITSIZE (GET_MODE (XEXP (SET_DEST (x), 0))))
4587 && ! side_effects_p (XEXP (SET_DEST (x), 0)))
4588 {
4589 HOST_WIDE_INT pos = INTVAL (XEXP (SET_DEST (x), 2));
4590 unsigned HOST_WIDE_INT len = INTVAL (XEXP (SET_DEST (x), 1));
4591 unsigned HOST_WIDE_INT src = INTVAL (SET_SRC (x));
4592 rtx dest = XEXP (SET_DEST (x), 0);
4593 enum machine_mode mode = GET_MODE (dest);
4594 unsigned HOST_WIDE_INT mask
4595 = ((unsigned HOST_WIDE_INT) 1 << len) - 1;
4596 rtx or_mask;
4597
4598 if (BITS_BIG_ENDIAN)
4599 pos = GET_MODE_BITSIZE (mode) - len - pos;
4600
4601 or_mask = gen_int_mode (src << pos, mode);
4602 if (src == mask)
4603 SUBST (SET_SRC (x),
4604 simplify_gen_binary (IOR, mode, dest, or_mask));
4605 else
4606 {
4607 rtx negmask = gen_int_mode (~(mask << pos), mode);
4608 SUBST (SET_SRC (x),
4609 simplify_gen_binary (IOR, mode,
4610 simplify_gen_binary (AND, mode,
4611 dest, negmask),
4612 or_mask));
4613 }
4614
4615 SUBST (SET_DEST (x), dest);
4616
4617 split = find_split_point (&SET_SRC (x), insn, true);
4618 if (split && split != &SET_SRC (x))
4619 return split;
4620 }
4621
4622 /* Otherwise, see if this is an operation that we can split into two.
4623 If so, try to split that. */
4624 code = GET_CODE (SET_SRC (x));
4625
4626 switch (code)
4627 {
4628 case AND:
4629 /* If we are AND'ing with a large constant that is only a single
4630 bit and the result is only being used in a context where we
4631 need to know if it is zero or nonzero, replace it with a bit
4632 extraction. This will avoid the large constant, which might
4633 have taken more than one insn to make. If the constant were
4634 not a valid argument to the AND but took only one insn to make,
4635 this is no worse, but if it took more than one insn, it will
4636 be better. */
4637
4638 if (CONST_INT_P (XEXP (SET_SRC (x), 1))
4639 && REG_P (XEXP (SET_SRC (x), 0))
4640 && (pos = exact_log2 (UINTVAL (XEXP (SET_SRC (x), 1)))) >= 7
4641 && REG_P (SET_DEST (x))
4642 && (split = find_single_use (SET_DEST (x), insn, (rtx*) 0)) != 0
4643 && (GET_CODE (*split) == EQ || GET_CODE (*split) == NE)
4644 && XEXP (*split, 0) == SET_DEST (x)
4645 && XEXP (*split, 1) == const0_rtx)
4646 {
4647 rtx extraction = make_extraction (GET_MODE (SET_DEST (x)),
4648 XEXP (SET_SRC (x), 0),
4649 pos, NULL_RTX, 1, 1, 0, 0);
4650 if (extraction != 0)
4651 {
4652 SUBST (SET_SRC (x), extraction);
4653 return find_split_point (loc, insn, false);
4654 }
4655 }
4656 break;
4657
4658 case NE:
4659 /* If STORE_FLAG_VALUE is -1, this is (NE X 0) and only one bit of X
4660 is known to be on, this can be converted into a NEG of a shift. */
4661 if (STORE_FLAG_VALUE == -1 && XEXP (SET_SRC (x), 1) == const0_rtx
4662 && GET_MODE (SET_SRC (x)) == GET_MODE (XEXP (SET_SRC (x), 0))
4663 && 1 <= (pos = exact_log2
4664 (nonzero_bits (XEXP (SET_SRC (x), 0),
4665 GET_MODE (XEXP (SET_SRC (x), 0))))))
4666 {
4667 enum machine_mode mode = GET_MODE (XEXP (SET_SRC (x), 0));
4668
4669 SUBST (SET_SRC (x),
4670 gen_rtx_NEG (mode,
4671 gen_rtx_LSHIFTRT (mode,
4672 XEXP (SET_SRC (x), 0),
4673 GEN_INT (pos))));
4674
4675 split = find_split_point (&SET_SRC (x), insn, true);
4676 if (split && split != &SET_SRC (x))
4677 return split;
4678 }
4679 break;
4680
4681 case SIGN_EXTEND:
4682 inner = XEXP (SET_SRC (x), 0);
4683
4684 /* We can't optimize if either mode is a partial integer
4685 mode as we don't know how many bits are significant
4686 in those modes. */
4687 if (GET_MODE_CLASS (GET_MODE (inner)) == MODE_PARTIAL_INT
4688 || GET_MODE_CLASS (GET_MODE (SET_SRC (x))) == MODE_PARTIAL_INT)
4689 break;
4690
4691 pos = 0;
4692 len = GET_MODE_BITSIZE (GET_MODE (inner));
4693 unsignedp = 0;
4694 break;
4695
4696 case SIGN_EXTRACT:
4697 case ZERO_EXTRACT:
4698 if (CONST_INT_P (XEXP (SET_SRC (x), 1))
4699 && CONST_INT_P (XEXP (SET_SRC (x), 2)))
4700 {
4701 inner = XEXP (SET_SRC (x), 0);
4702 len = INTVAL (XEXP (SET_SRC (x), 1));
4703 pos = INTVAL (XEXP (SET_SRC (x), 2));
4704
4705 if (BITS_BIG_ENDIAN)
4706 pos = GET_MODE_BITSIZE (GET_MODE (inner)) - len - pos;
4707 unsignedp = (code == ZERO_EXTRACT);
4708 }
4709 break;
4710
4711 default:
4712 break;
4713 }
4714
4715 if (len && pos >= 0 && pos + len <= GET_MODE_BITSIZE (GET_MODE (inner)))
4716 {
4717 enum machine_mode mode = GET_MODE (SET_SRC (x));
4718
4719 /* For unsigned, we have a choice of a shift followed by an
4720 AND or two shifts. Use two shifts for field sizes where the
4721 constant might be too large. We assume here that we can
4722 always at least get 8-bit constants in an AND insn, which is
4723 true for every current RISC. */
4724
4725 if (unsignedp && len <= 8)
4726 {
4727 SUBST (SET_SRC (x),
4728 gen_rtx_AND (mode,
4729 gen_rtx_LSHIFTRT
4730 (mode, gen_lowpart (mode, inner),
4731 GEN_INT (pos)),
4732 GEN_INT (((unsigned HOST_WIDE_INT) 1 << len)
4733 - 1)));
4734
4735 split = find_split_point (&SET_SRC (x), insn, true);
4736 if (split && split != &SET_SRC (x))
4737 return split;
4738 }
4739 else
4740 {
4741 SUBST (SET_SRC (x),
4742 gen_rtx_fmt_ee
4743 (unsignedp ? LSHIFTRT : ASHIFTRT, mode,
4744 gen_rtx_ASHIFT (mode,
4745 gen_lowpart (mode, inner),
4746 GEN_INT (GET_MODE_BITSIZE (mode)
4747 - len - pos)),
4748 GEN_INT (GET_MODE_BITSIZE (mode) - len)));
4749
4750 split = find_split_point (&SET_SRC (x), insn, true);
4751 if (split && split != &SET_SRC (x))
4752 return split;
4753 }
4754 }
4755
4756 /* See if this is a simple operation with a constant as the second
4757 operand. It might be that this constant is out of range and hence
4758 could be used as a split point. */
4759 if (BINARY_P (SET_SRC (x))
4760 && CONSTANT_P (XEXP (SET_SRC (x), 1))
4761 && (OBJECT_P (XEXP (SET_SRC (x), 0))
4762 || (GET_CODE (XEXP (SET_SRC (x), 0)) == SUBREG
4763 && OBJECT_P (SUBREG_REG (XEXP (SET_SRC (x), 0))))))
4764 return &XEXP (SET_SRC (x), 1);
4765
4766 /* Finally, see if this is a simple operation with its first operand
4767 not in a register. The operation might require this operand in a
4768 register, so return it as a split point. We can always do this
4769 because if the first operand were another operation, we would have
4770 already found it as a split point. */
4771 if ((BINARY_P (SET_SRC (x)) || UNARY_P (SET_SRC (x)))
4772 && ! register_operand (XEXP (SET_SRC (x), 0), VOIDmode))
4773 return &XEXP (SET_SRC (x), 0);
4774
4775 return 0;
4776
4777 case AND:
4778 case IOR:
4779 /* We write NOR as (and (not A) (not B)), but if we don't have a NOR,
4780 it is better to write this as (not (ior A B)) so we can split it.
4781 Similarly for IOR. */
4782 if (GET_CODE (XEXP (x, 0)) == NOT && GET_CODE (XEXP (x, 1)) == NOT)
4783 {
4784 SUBST (*loc,
4785 gen_rtx_NOT (GET_MODE (x),
4786 gen_rtx_fmt_ee (code == IOR ? AND : IOR,
4787 GET_MODE (x),
4788 XEXP (XEXP (x, 0), 0),
4789 XEXP (XEXP (x, 1), 0))));
4790 return find_split_point (loc, insn, set_src);
4791 }
4792
4793 /* Many RISC machines have a large set of logical insns. If the
4794 second operand is a NOT, put it first so we will try to split the
4795 other operand first. */
4796 if (GET_CODE (XEXP (x, 1)) == NOT)
4797 {
4798 rtx tem = XEXP (x, 0);
4799 SUBST (XEXP (x, 0), XEXP (x, 1));
4800 SUBST (XEXP (x, 1), tem);
4801 }
4802 break;
4803
4804 case PLUS:
4805 case MINUS:
4806 /* Canonicalization can produce (minus A (mult B C)), where C is a
4807 constant. It may be better to try splitting (plus (mult B -C) A)
4808 instead if this isn't a multiply by a power of two. */
4809 if (set_src && code == MINUS && GET_CODE (XEXP (x, 1)) == MULT
4810 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
4811 && exact_log2 (INTVAL (XEXP (XEXP (x, 1), 1))) < 0)
4812 {
4813 enum machine_mode mode = GET_MODE (x);
4814 unsigned HOST_WIDE_INT this_int = INTVAL (XEXP (XEXP (x, 1), 1));
4815 HOST_WIDE_INT other_int = trunc_int_for_mode (-this_int, mode);
4816 SUBST (*loc, gen_rtx_PLUS (mode, gen_rtx_MULT (mode,
4817 XEXP (XEXP (x, 1), 0),
4818 GEN_INT (other_int)),
4819 XEXP (x, 0)));
4820 return find_split_point (loc, insn, set_src);
4821 }
4822
4823 /* Split at a multiply-accumulate instruction. However if this is
4824 the SET_SRC, we likely do not have such an instruction and it's
4825 worthless to try this split. */
4826 if (!set_src && GET_CODE (XEXP (x, 0)) == MULT)
4827 return loc;
4828
4829 default:
4830 break;
4831 }
4832
4833 /* Otherwise, select our actions depending on our rtx class. */
4834 switch (GET_RTX_CLASS (code))
4835 {
4836 case RTX_BITFIELD_OPS: /* This is ZERO_EXTRACT and SIGN_EXTRACT. */
4837 case RTX_TERNARY:
4838 split = find_split_point (&XEXP (x, 2), insn, false);
4839 if (split)
4840 return split;
4841 /* ... fall through ... */
4842 case RTX_BIN_ARITH:
4843 case RTX_COMM_ARITH:
4844 case RTX_COMPARE:
4845 case RTX_COMM_COMPARE:
4846 split = find_split_point (&XEXP (x, 1), insn, false);
4847 if (split)
4848 return split;
4849 /* ... fall through ... */
4850 case RTX_UNARY:
4851 /* Some machines have (and (shift ...) ...) insns. If X is not
4852 an AND, but XEXP (X, 0) is, use it as our split point. */
4853 if (GET_CODE (x) != AND && GET_CODE (XEXP (x, 0)) == AND)
4854 return &XEXP (x, 0);
4855
4856 split = find_split_point (&XEXP (x, 0), insn, false);
4857 if (split)
4858 return split;
4859 return loc;
4860
4861 default:
4862 /* Otherwise, we don't have a split point. */
4863 return 0;
4864 }
4865 }
4866 \f
4867 /* Throughout X, replace FROM with TO, and return the result.
4868 The result is TO if X is FROM;
4869 otherwise the result is X, but its contents may have been modified.
4870 If they were modified, a record was made in undobuf so that
4871 undo_all will (among other things) return X to its original state.
4872
4873 If the number of changes necessary is too much to record to undo,
4874 the excess changes are not made, so the result is invalid.
4875 The changes already made can still be undone.
4876 undobuf.num_undo is incremented for such changes, so by testing that
4877 the caller can tell whether the result is valid.
4878
4879 `n_occurrences' is incremented each time FROM is replaced.
4880
4881 IN_DEST is nonzero if we are processing the SET_DEST of a SET.
4882
4883 UNIQUE_COPY is nonzero if each substitution must be unique. We do this
4884 by copying if `n_occurrences' is nonzero. */
4885
4886 static rtx
4887 subst (rtx x, rtx from, rtx to, int in_dest, int unique_copy)
4888 {
4889 enum rtx_code code = GET_CODE (x);
4890 enum machine_mode op0_mode = VOIDmode;
4891 const char *fmt;
4892 int len, i;
4893 rtx new_rtx;
4894
4895 /* Two expressions are equal if they are identical copies of a shared
4896 RTX or if they are both registers with the same register number
4897 and mode. */
4898
4899 #define COMBINE_RTX_EQUAL_P(X,Y) \
4900 ((X) == (Y) \
4901 || (REG_P (X) && REG_P (Y) \
4902 && REGNO (X) == REGNO (Y) && GET_MODE (X) == GET_MODE (Y)))
4903
4904 if (! in_dest && COMBINE_RTX_EQUAL_P (x, from))
4905 {
4906 n_occurrences++;
4907 return (unique_copy && n_occurrences > 1 ? copy_rtx (to) : to);
4908 }
4909
4910 /* If X and FROM are the same register but different modes, they
4911 will not have been seen as equal above. However, the log links code
4912 will make a LOG_LINKS entry for that case. If we do nothing, we
4913 will try to rerecognize our original insn and, when it succeeds,
4914 we will delete the feeding insn, which is incorrect.
4915
4916 So force this insn not to match in this (rare) case. */
4917 if (! in_dest && code == REG && REG_P (from)
4918 && reg_overlap_mentioned_p (x, from))
4919 return gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
4920
4921 /* If this is an object, we are done unless it is a MEM or LO_SUM, both
4922 of which may contain things that can be combined. */
4923 if (code != MEM && code != LO_SUM && OBJECT_P (x))
4924 return x;
4925
4926 /* It is possible to have a subexpression appear twice in the insn.
4927 Suppose that FROM is a register that appears within TO.
4928 Then, after that subexpression has been scanned once by `subst',
4929 the second time it is scanned, TO may be found. If we were
4930 to scan TO here, we would find FROM within it and create a
4931 self-referent rtl structure which is completely wrong. */
4932 if (COMBINE_RTX_EQUAL_P (x, to))
4933 return to;
4934
4935 /* Parallel asm_operands need special attention because all of the
4936 inputs are shared across the arms. Furthermore, unsharing the
4937 rtl results in recognition failures. Failure to handle this case
4938 specially can result in circular rtl.
4939
4940 Solve this by doing a normal pass across the first entry of the
4941 parallel, and only processing the SET_DESTs of the subsequent
4942 entries. Ug. */
4943
4944 if (code == PARALLEL
4945 && GET_CODE (XVECEXP (x, 0, 0)) == SET
4946 && GET_CODE (SET_SRC (XVECEXP (x, 0, 0))) == ASM_OPERANDS)
4947 {
4948 new_rtx = subst (XVECEXP (x, 0, 0), from, to, 0, unique_copy);
4949
4950 /* If this substitution failed, this whole thing fails. */
4951 if (GET_CODE (new_rtx) == CLOBBER
4952 && XEXP (new_rtx, 0) == const0_rtx)
4953 return new_rtx;
4954
4955 SUBST (XVECEXP (x, 0, 0), new_rtx);
4956
4957 for (i = XVECLEN (x, 0) - 1; i >= 1; i--)
4958 {
4959 rtx dest = SET_DEST (XVECEXP (x, 0, i));
4960
4961 if (!REG_P (dest)
4962 && GET_CODE (dest) != CC0
4963 && GET_CODE (dest) != PC)
4964 {
4965 new_rtx = subst (dest, from, to, 0, unique_copy);
4966
4967 /* If this substitution failed, this whole thing fails. */
4968 if (GET_CODE (new_rtx) == CLOBBER
4969 && XEXP (new_rtx, 0) == const0_rtx)
4970 return new_rtx;
4971
4972 SUBST (SET_DEST (XVECEXP (x, 0, i)), new_rtx);
4973 }
4974 }
4975 }
4976 else
4977 {
4978 len = GET_RTX_LENGTH (code);
4979 fmt = GET_RTX_FORMAT (code);
4980
4981 /* We don't need to process a SET_DEST that is a register, CC0,
4982 or PC, so set up to skip this common case. All other cases
4983 where we want to suppress replacing something inside a
4984 SET_SRC are handled via the IN_DEST operand. */
4985 if (code == SET
4986 && (REG_P (SET_DEST (x))
4987 || GET_CODE (SET_DEST (x)) == CC0
4988 || GET_CODE (SET_DEST (x)) == PC))
4989 fmt = "ie";
4990
4991 /* Get the mode of operand 0 in case X is now a SIGN_EXTEND of a
4992 constant. */
4993 if (fmt[0] == 'e')
4994 op0_mode = GET_MODE (XEXP (x, 0));
4995
4996 for (i = 0; i < len; i++)
4997 {
4998 if (fmt[i] == 'E')
4999 {
5000 int j;
5001 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5002 {
5003 if (COMBINE_RTX_EQUAL_P (XVECEXP (x, i, j), from))
5004 {
5005 new_rtx = (unique_copy && n_occurrences
5006 ? copy_rtx (to) : to);
5007 n_occurrences++;
5008 }
5009 else
5010 {
5011 new_rtx = subst (XVECEXP (x, i, j), from, to, 0,
5012 unique_copy);
5013
5014 /* If this substitution failed, this whole thing
5015 fails. */
5016 if (GET_CODE (new_rtx) == CLOBBER
5017 && XEXP (new_rtx, 0) == const0_rtx)
5018 return new_rtx;
5019 }
5020
5021 SUBST (XVECEXP (x, i, j), new_rtx);
5022 }
5023 }
5024 else if (fmt[i] == 'e')
5025 {
5026 /* If this is a register being set, ignore it. */
5027 new_rtx = XEXP (x, i);
5028 if (in_dest
5029 && i == 0
5030 && (((code == SUBREG || code == ZERO_EXTRACT)
5031 && REG_P (new_rtx))
5032 || code == STRICT_LOW_PART))
5033 ;
5034
5035 else if (COMBINE_RTX_EQUAL_P (XEXP (x, i), from))
5036 {
5037 /* In general, don't install a subreg involving two
5038 modes not tieable. It can worsen register
5039 allocation, and can even make invalid reload
5040 insns, since the reg inside may need to be copied
5041 from in the outside mode, and that may be invalid
5042 if it is an fp reg copied in integer mode.
5043
5044 We allow two exceptions to this: It is valid if
5045 it is inside another SUBREG and the mode of that
5046 SUBREG and the mode of the inside of TO is
5047 tieable and it is valid if X is a SET that copies
5048 FROM to CC0. */
5049
5050 if (GET_CODE (to) == SUBREG
5051 && ! MODES_TIEABLE_P (GET_MODE (to),
5052 GET_MODE (SUBREG_REG (to)))
5053 && ! (code == SUBREG
5054 && MODES_TIEABLE_P (GET_MODE (x),
5055 GET_MODE (SUBREG_REG (to))))
5056 #ifdef HAVE_cc0
5057 && ! (code == SET && i == 1 && XEXP (x, 0) == cc0_rtx)
5058 #endif
5059 )
5060 return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5061
5062 #ifdef CANNOT_CHANGE_MODE_CLASS
5063 if (code == SUBREG
5064 && REG_P (to)
5065 && REGNO (to) < FIRST_PSEUDO_REGISTER
5066 && REG_CANNOT_CHANGE_MODE_P (REGNO (to),
5067 GET_MODE (to),
5068 GET_MODE (x)))
5069 return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5070 #endif
5071
5072 new_rtx = (unique_copy && n_occurrences ? copy_rtx (to) : to);
5073 n_occurrences++;
5074 }
5075 else
5076 /* If we are in a SET_DEST, suppress most cases unless we
5077 have gone inside a MEM, in which case we want to
5078 simplify the address. We assume here that things that
5079 are actually part of the destination have their inner
5080 parts in the first expression. This is true for SUBREG,
5081 STRICT_LOW_PART, and ZERO_EXTRACT, which are the only
5082 things aside from REG and MEM that should appear in a
5083 SET_DEST. */
5084 new_rtx = subst (XEXP (x, i), from, to,
5085 (((in_dest
5086 && (code == SUBREG || code == STRICT_LOW_PART
5087 || code == ZERO_EXTRACT))
5088 || code == SET)
5089 && i == 0), unique_copy);
5090
5091 /* If we found that we will have to reject this combination,
5092 indicate that by returning the CLOBBER ourselves, rather than
5093 an expression containing it. This will speed things up as
5094 well as prevent accidents where two CLOBBERs are considered
5095 to be equal, thus producing an incorrect simplification. */
5096
5097 if (GET_CODE (new_rtx) == CLOBBER && XEXP (new_rtx, 0) == const0_rtx)
5098 return new_rtx;
5099
5100 if (GET_CODE (x) == SUBREG
5101 && (CONST_INT_P (new_rtx)
5102 || GET_CODE (new_rtx) == CONST_DOUBLE))
5103 {
5104 enum machine_mode mode = GET_MODE (x);
5105
5106 x = simplify_subreg (GET_MODE (x), new_rtx,
5107 GET_MODE (SUBREG_REG (x)),
5108 SUBREG_BYTE (x));
5109 if (! x)
5110 x = gen_rtx_CLOBBER (mode, const0_rtx);
5111 }
5112 else if (CONST_INT_P (new_rtx)
5113 && GET_CODE (x) == ZERO_EXTEND)
5114 {
5115 x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
5116 new_rtx, GET_MODE (XEXP (x, 0)));
5117 gcc_assert (x);
5118 }
5119 else
5120 SUBST (XEXP (x, i), new_rtx);
5121 }
5122 }
5123 }
5124
5125 /* Check if we are loading something from the constant pool via float
5126 extension; in this case we would undo compress_float_constant
5127 optimization and degenerate constant load to an immediate value. */
5128 if (GET_CODE (x) == FLOAT_EXTEND
5129 && MEM_P (XEXP (x, 0))
5130 && MEM_READONLY_P (XEXP (x, 0)))
5131 {
5132 rtx tmp = avoid_constant_pool_reference (x);
5133 if (x != tmp)
5134 return x;
5135 }
5136
5137 /* Try to simplify X. If the simplification changed the code, it is likely
5138 that further simplification will help, so loop, but limit the number
5139 of repetitions that will be performed. */
5140
5141 for (i = 0; i < 4; i++)
5142 {
5143 /* If X is sufficiently simple, don't bother trying to do anything
5144 with it. */
5145 if (code != CONST_INT && code != REG && code != CLOBBER)
5146 x = combine_simplify_rtx (x, op0_mode, in_dest);
5147
5148 if (GET_CODE (x) == code)
5149 break;
5150
5151 code = GET_CODE (x);
5152
5153 /* We no longer know the original mode of operand 0 since we
5154 have changed the form of X) */
5155 op0_mode = VOIDmode;
5156 }
5157
5158 return x;
5159 }
5160 \f
5161 /* Simplify X, a piece of RTL. We just operate on the expression at the
5162 outer level; call `subst' to simplify recursively. Return the new
5163 expression.
5164
5165 OP0_MODE is the original mode of XEXP (x, 0). IN_DEST is nonzero
5166 if we are inside a SET_DEST. */
5167
5168 static rtx
5169 combine_simplify_rtx (rtx x, enum machine_mode op0_mode, int in_dest)
5170 {
5171 enum rtx_code code = GET_CODE (x);
5172 enum machine_mode mode = GET_MODE (x);
5173 rtx temp;
5174 int i;
5175
5176 /* If this is a commutative operation, put a constant last and a complex
5177 expression first. We don't need to do this for comparisons here. */
5178 if (COMMUTATIVE_ARITH_P (x)
5179 && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5180 {
5181 temp = XEXP (x, 0);
5182 SUBST (XEXP (x, 0), XEXP (x, 1));
5183 SUBST (XEXP (x, 1), temp);
5184 }
5185
5186 /* If this is a simple operation applied to an IF_THEN_ELSE, try
5187 applying it to the arms of the IF_THEN_ELSE. This often simplifies
5188 things. Check for cases where both arms are testing the same
5189 condition.
5190
5191 Don't do anything if all operands are very simple. */
5192
5193 if ((BINARY_P (x)
5194 && ((!OBJECT_P (XEXP (x, 0))
5195 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5196 && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))
5197 || (!OBJECT_P (XEXP (x, 1))
5198 && ! (GET_CODE (XEXP (x, 1)) == SUBREG
5199 && OBJECT_P (SUBREG_REG (XEXP (x, 1)))))))
5200 || (UNARY_P (x)
5201 && (!OBJECT_P (XEXP (x, 0))
5202 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5203 && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))))
5204 {
5205 rtx cond, true_rtx, false_rtx;
5206
5207 cond = if_then_else_cond (x, &true_rtx, &false_rtx);
5208 if (cond != 0
5209 /* If everything is a comparison, what we have is highly unlikely
5210 to be simpler, so don't use it. */
5211 && ! (COMPARISON_P (x)
5212 && (COMPARISON_P (true_rtx) || COMPARISON_P (false_rtx))))
5213 {
5214 rtx cop1 = const0_rtx;
5215 enum rtx_code cond_code = simplify_comparison (NE, &cond, &cop1);
5216
5217 if (cond_code == NE && COMPARISON_P (cond))
5218 return x;
5219
5220 /* Simplify the alternative arms; this may collapse the true and
5221 false arms to store-flag values. Be careful to use copy_rtx
5222 here since true_rtx or false_rtx might share RTL with x as a
5223 result of the if_then_else_cond call above. */
5224 true_rtx = subst (copy_rtx (true_rtx), pc_rtx, pc_rtx, 0, 0);
5225 false_rtx = subst (copy_rtx (false_rtx), pc_rtx, pc_rtx, 0, 0);
5226
5227 /* If true_rtx and false_rtx are not general_operands, an if_then_else
5228 is unlikely to be simpler. */
5229 if (general_operand (true_rtx, VOIDmode)
5230 && general_operand (false_rtx, VOIDmode))
5231 {
5232 enum rtx_code reversed;
5233
5234 /* Restarting if we generate a store-flag expression will cause
5235 us to loop. Just drop through in this case. */
5236
5237 /* If the result values are STORE_FLAG_VALUE and zero, we can
5238 just make the comparison operation. */
5239 if (true_rtx == const_true_rtx && false_rtx == const0_rtx)
5240 x = simplify_gen_relational (cond_code, mode, VOIDmode,
5241 cond, cop1);
5242 else if (true_rtx == const0_rtx && false_rtx == const_true_rtx
5243 && ((reversed = reversed_comparison_code_parts
5244 (cond_code, cond, cop1, NULL))
5245 != UNKNOWN))
5246 x = simplify_gen_relational (reversed, mode, VOIDmode,
5247 cond, cop1);
5248
5249 /* Likewise, we can make the negate of a comparison operation
5250 if the result values are - STORE_FLAG_VALUE and zero. */
5251 else if (CONST_INT_P (true_rtx)
5252 && INTVAL (true_rtx) == - STORE_FLAG_VALUE
5253 && false_rtx == const0_rtx)
5254 x = simplify_gen_unary (NEG, mode,
5255 simplify_gen_relational (cond_code,
5256 mode, VOIDmode,
5257 cond, cop1),
5258 mode);
5259 else if (CONST_INT_P (false_rtx)
5260 && INTVAL (false_rtx) == - STORE_FLAG_VALUE
5261 && true_rtx == const0_rtx
5262 && ((reversed = reversed_comparison_code_parts
5263 (cond_code, cond, cop1, NULL))
5264 != UNKNOWN))
5265 x = simplify_gen_unary (NEG, mode,
5266 simplify_gen_relational (reversed,
5267 mode, VOIDmode,
5268 cond, cop1),
5269 mode);
5270 else
5271 return gen_rtx_IF_THEN_ELSE (mode,
5272 simplify_gen_relational (cond_code,
5273 mode,
5274 VOIDmode,
5275 cond,
5276 cop1),
5277 true_rtx, false_rtx);
5278
5279 code = GET_CODE (x);
5280 op0_mode = VOIDmode;
5281 }
5282 }
5283 }
5284
5285 /* Try to fold this expression in case we have constants that weren't
5286 present before. */
5287 temp = 0;
5288 switch (GET_RTX_CLASS (code))
5289 {
5290 case RTX_UNARY:
5291 if (op0_mode == VOIDmode)
5292 op0_mode = GET_MODE (XEXP (x, 0));
5293 temp = simplify_unary_operation (code, mode, XEXP (x, 0), op0_mode);
5294 break;
5295 case RTX_COMPARE:
5296 case RTX_COMM_COMPARE:
5297 {
5298 enum machine_mode cmp_mode = GET_MODE (XEXP (x, 0));
5299 if (cmp_mode == VOIDmode)
5300 {
5301 cmp_mode = GET_MODE (XEXP (x, 1));
5302 if (cmp_mode == VOIDmode)
5303 cmp_mode = op0_mode;
5304 }
5305 temp = simplify_relational_operation (code, mode, cmp_mode,
5306 XEXP (x, 0), XEXP (x, 1));
5307 }
5308 break;
5309 case RTX_COMM_ARITH:
5310 case RTX_BIN_ARITH:
5311 temp = simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5312 break;
5313 case RTX_BITFIELD_OPS:
5314 case RTX_TERNARY:
5315 temp = simplify_ternary_operation (code, mode, op0_mode, XEXP (x, 0),
5316 XEXP (x, 1), XEXP (x, 2));
5317 break;
5318 default:
5319 break;
5320 }
5321
5322 if (temp)
5323 {
5324 x = temp;
5325 code = GET_CODE (temp);
5326 op0_mode = VOIDmode;
5327 mode = GET_MODE (temp);
5328 }
5329
5330 /* First see if we can apply the inverse distributive law. */
5331 if (code == PLUS || code == MINUS
5332 || code == AND || code == IOR || code == XOR)
5333 {
5334 x = apply_distributive_law (x);
5335 code = GET_CODE (x);
5336 op0_mode = VOIDmode;
5337 }
5338
5339 /* If CODE is an associative operation not otherwise handled, see if we
5340 can associate some operands. This can win if they are constants or
5341 if they are logically related (i.e. (a & b) & a). */
5342 if ((code == PLUS || code == MINUS || code == MULT || code == DIV
5343 || code == AND || code == IOR || code == XOR
5344 || code == SMAX || code == SMIN || code == UMAX || code == UMIN)
5345 && ((INTEGRAL_MODE_P (mode) && code != DIV)
5346 || (flag_associative_math && FLOAT_MODE_P (mode))))
5347 {
5348 if (GET_CODE (XEXP (x, 0)) == code)
5349 {
5350 rtx other = XEXP (XEXP (x, 0), 0);
5351 rtx inner_op0 = XEXP (XEXP (x, 0), 1);
5352 rtx inner_op1 = XEXP (x, 1);
5353 rtx inner;
5354
5355 /* Make sure we pass the constant operand if any as the second
5356 one if this is a commutative operation. */
5357 if (CONSTANT_P (inner_op0) && COMMUTATIVE_ARITH_P (x))
5358 {
5359 rtx tem = inner_op0;
5360 inner_op0 = inner_op1;
5361 inner_op1 = tem;
5362 }
5363 inner = simplify_binary_operation (code == MINUS ? PLUS
5364 : code == DIV ? MULT
5365 : code,
5366 mode, inner_op0, inner_op1);
5367
5368 /* For commutative operations, try the other pair if that one
5369 didn't simplify. */
5370 if (inner == 0 && COMMUTATIVE_ARITH_P (x))
5371 {
5372 other = XEXP (XEXP (x, 0), 1);
5373 inner = simplify_binary_operation (code, mode,
5374 XEXP (XEXP (x, 0), 0),
5375 XEXP (x, 1));
5376 }
5377
5378 if (inner)
5379 return simplify_gen_binary (code, mode, other, inner);
5380 }
5381 }
5382
5383 /* A little bit of algebraic simplification here. */
5384 switch (code)
5385 {
5386 case MEM:
5387 /* Ensure that our address has any ASHIFTs converted to MULT in case
5388 address-recognizing predicates are called later. */
5389 temp = make_compound_operation (XEXP (x, 0), MEM);
5390 SUBST (XEXP (x, 0), temp);
5391 break;
5392
5393 case SUBREG:
5394 if (op0_mode == VOIDmode)
5395 op0_mode = GET_MODE (SUBREG_REG (x));
5396
5397 /* See if this can be moved to simplify_subreg. */
5398 if (CONSTANT_P (SUBREG_REG (x))
5399 && subreg_lowpart_offset (mode, op0_mode) == SUBREG_BYTE (x)
5400 /* Don't call gen_lowpart if the inner mode
5401 is VOIDmode and we cannot simplify it, as SUBREG without
5402 inner mode is invalid. */
5403 && (GET_MODE (SUBREG_REG (x)) != VOIDmode
5404 || gen_lowpart_common (mode, SUBREG_REG (x))))
5405 return gen_lowpart (mode, SUBREG_REG (x));
5406
5407 if (GET_MODE_CLASS (GET_MODE (SUBREG_REG (x))) == MODE_CC)
5408 break;
5409 {
5410 rtx temp;
5411 temp = simplify_subreg (mode, SUBREG_REG (x), op0_mode,
5412 SUBREG_BYTE (x));
5413 if (temp)
5414 return temp;
5415 }
5416
5417 /* Don't change the mode of the MEM if that would change the meaning
5418 of the address. */
5419 if (MEM_P (SUBREG_REG (x))
5420 && (MEM_VOLATILE_P (SUBREG_REG (x))
5421 || mode_dependent_address_p (XEXP (SUBREG_REG (x), 0))))
5422 return gen_rtx_CLOBBER (mode, const0_rtx);
5423
5424 /* Note that we cannot do any narrowing for non-constants since
5425 we might have been counting on using the fact that some bits were
5426 zero. We now do this in the SET. */
5427
5428 break;
5429
5430 case NEG:
5431 temp = expand_compound_operation (XEXP (x, 0));
5432
5433 /* For C equal to the width of MODE minus 1, (neg (ashiftrt X C)) can be
5434 replaced by (lshiftrt X C). This will convert
5435 (neg (sign_extract X 1 Y)) to (zero_extract X 1 Y). */
5436
5437 if (GET_CODE (temp) == ASHIFTRT
5438 && CONST_INT_P (XEXP (temp, 1))
5439 && INTVAL (XEXP (temp, 1)) == GET_MODE_BITSIZE (mode) - 1)
5440 return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (temp, 0),
5441 INTVAL (XEXP (temp, 1)));
5442
5443 /* If X has only a single bit that might be nonzero, say, bit I, convert
5444 (neg X) to (ashiftrt (ashift X C-I) C-I) where C is the bitsize of
5445 MODE minus 1. This will convert (neg (zero_extract X 1 Y)) to
5446 (sign_extract X 1 Y). But only do this if TEMP isn't a register
5447 or a SUBREG of one since we'd be making the expression more
5448 complex if it was just a register. */
5449
5450 if (!REG_P (temp)
5451 && ! (GET_CODE (temp) == SUBREG
5452 && REG_P (SUBREG_REG (temp)))
5453 && (i = exact_log2 (nonzero_bits (temp, mode))) >= 0)
5454 {
5455 rtx temp1 = simplify_shift_const
5456 (NULL_RTX, ASHIFTRT, mode,
5457 simplify_shift_const (NULL_RTX, ASHIFT, mode, temp,
5458 GET_MODE_BITSIZE (mode) - 1 - i),
5459 GET_MODE_BITSIZE (mode) - 1 - i);
5460
5461 /* If all we did was surround TEMP with the two shifts, we
5462 haven't improved anything, so don't use it. Otherwise,
5463 we are better off with TEMP1. */
5464 if (GET_CODE (temp1) != ASHIFTRT
5465 || GET_CODE (XEXP (temp1, 0)) != ASHIFT
5466 || XEXP (XEXP (temp1, 0), 0) != temp)
5467 return temp1;
5468 }
5469 break;
5470
5471 case TRUNCATE:
5472 /* We can't handle truncation to a partial integer mode here
5473 because we don't know the real bitsize of the partial
5474 integer mode. */
5475 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
5476 break;
5477
5478 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
5479 SUBST (XEXP (x, 0),
5480 force_to_mode (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
5481 GET_MODE_MASK (mode), 0));
5482
5483 /* We can truncate a constant value and return it. */
5484 if (CONST_INT_P (XEXP (x, 0)))
5485 return gen_int_mode (INTVAL (XEXP (x, 0)), mode);
5486
5487 /* Similarly to what we do in simplify-rtx.c, a truncate of a register
5488 whose value is a comparison can be replaced with a subreg if
5489 STORE_FLAG_VALUE permits. */
5490 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
5491 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0
5492 && (temp = get_last_value (XEXP (x, 0)))
5493 && COMPARISON_P (temp))
5494 return gen_lowpart (mode, XEXP (x, 0));
5495 break;
5496
5497 case CONST:
5498 /* (const (const X)) can become (const X). Do it this way rather than
5499 returning the inner CONST since CONST can be shared with a
5500 REG_EQUAL note. */
5501 if (GET_CODE (XEXP (x, 0)) == CONST)
5502 SUBST (XEXP (x, 0), XEXP (XEXP (x, 0), 0));
5503 break;
5504
5505 #ifdef HAVE_lo_sum
5506 case LO_SUM:
5507 /* Convert (lo_sum (high FOO) FOO) to FOO. This is necessary so we
5508 can add in an offset. find_split_point will split this address up
5509 again if it doesn't match. */
5510 if (GET_CODE (XEXP (x, 0)) == HIGH
5511 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5512 return XEXP (x, 1);
5513 break;
5514 #endif
5515
5516 case PLUS:
5517 /* (plus (xor (and <foo> (const_int pow2 - 1)) <c>) <-c>)
5518 when c is (const_int (pow2 + 1) / 2) is a sign extension of a
5519 bit-field and can be replaced by either a sign_extend or a
5520 sign_extract. The `and' may be a zero_extend and the two
5521 <c>, -<c> constants may be reversed. */
5522 if (GET_CODE (XEXP (x, 0)) == XOR
5523 && CONST_INT_P (XEXP (x, 1))
5524 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
5525 && INTVAL (XEXP (x, 1)) == -INTVAL (XEXP (XEXP (x, 0), 1))
5526 && ((i = exact_log2 (UINTVAL (XEXP (XEXP (x, 0), 1)))) >= 0
5527 || (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0)
5528 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
5529 && ((GET_CODE (XEXP (XEXP (x, 0), 0)) == AND
5530 && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
5531 && (UINTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1))
5532 == ((unsigned HOST_WIDE_INT) 1 << (i + 1)) - 1))
5533 || (GET_CODE (XEXP (XEXP (x, 0), 0)) == ZERO_EXTEND
5534 && (GET_MODE_BITSIZE (GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0)))
5535 == (unsigned int) i + 1))))
5536 return simplify_shift_const
5537 (NULL_RTX, ASHIFTRT, mode,
5538 simplify_shift_const (NULL_RTX, ASHIFT, mode,
5539 XEXP (XEXP (XEXP (x, 0), 0), 0),
5540 GET_MODE_BITSIZE (mode) - (i + 1)),
5541 GET_MODE_BITSIZE (mode) - (i + 1));
5542
5543 /* If only the low-order bit of X is possibly nonzero, (plus x -1)
5544 can become (ashiftrt (ashift (xor x 1) C) C) where C is
5545 the bitsize of the mode - 1. This allows simplification of
5546 "a = (b & 8) == 0;" */
5547 if (XEXP (x, 1) == constm1_rtx
5548 && !REG_P (XEXP (x, 0))
5549 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5550 && REG_P (SUBREG_REG (XEXP (x, 0))))
5551 && nonzero_bits (XEXP (x, 0), mode) == 1)
5552 return simplify_shift_const (NULL_RTX, ASHIFTRT, mode,
5553 simplify_shift_const (NULL_RTX, ASHIFT, mode,
5554 gen_rtx_XOR (mode, XEXP (x, 0), const1_rtx),
5555 GET_MODE_BITSIZE (mode) - 1),
5556 GET_MODE_BITSIZE (mode) - 1);
5557
5558 /* If we are adding two things that have no bits in common, convert
5559 the addition into an IOR. This will often be further simplified,
5560 for example in cases like ((a & 1) + (a & 2)), which can
5561 become a & 3. */
5562
5563 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
5564 && (nonzero_bits (XEXP (x, 0), mode)
5565 & nonzero_bits (XEXP (x, 1), mode)) == 0)
5566 {
5567 /* Try to simplify the expression further. */
5568 rtx tor = simplify_gen_binary (IOR, mode, XEXP (x, 0), XEXP (x, 1));
5569 temp = combine_simplify_rtx (tor, mode, in_dest);
5570
5571 /* If we could, great. If not, do not go ahead with the IOR
5572 replacement, since PLUS appears in many special purpose
5573 address arithmetic instructions. */
5574 if (GET_CODE (temp) != CLOBBER && temp != tor)
5575 return temp;
5576 }
5577 break;
5578
5579 case MINUS:
5580 /* (minus <foo> (and <foo> (const_int -pow2))) becomes
5581 (and <foo> (const_int pow2-1)) */
5582 if (GET_CODE (XEXP (x, 1)) == AND
5583 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
5584 && exact_log2 (-UINTVAL (XEXP (XEXP (x, 1), 1))) >= 0
5585 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
5586 return simplify_and_const_int (NULL_RTX, mode, XEXP (x, 0),
5587 -INTVAL (XEXP (XEXP (x, 1), 1)) - 1);
5588 break;
5589
5590 case MULT:
5591 /* If we have (mult (plus A B) C), apply the distributive law and then
5592 the inverse distributive law to see if things simplify. This
5593 occurs mostly in addresses, often when unrolling loops. */
5594
5595 if (GET_CODE (XEXP (x, 0)) == PLUS)
5596 {
5597 rtx result = distribute_and_simplify_rtx (x, 0);
5598 if (result)
5599 return result;
5600 }
5601
5602 /* Try simplify a*(b/c) as (a*b)/c. */
5603 if (FLOAT_MODE_P (mode) && flag_associative_math
5604 && GET_CODE (XEXP (x, 0)) == DIV)
5605 {
5606 rtx tem = simplify_binary_operation (MULT, mode,
5607 XEXP (XEXP (x, 0), 0),
5608 XEXP (x, 1));
5609 if (tem)
5610 return simplify_gen_binary (DIV, mode, tem, XEXP (XEXP (x, 0), 1));
5611 }
5612 break;
5613
5614 case UDIV:
5615 /* If this is a divide by a power of two, treat it as a shift if
5616 its first operand is a shift. */
5617 if (CONST_INT_P (XEXP (x, 1))
5618 && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
5619 && (GET_CODE (XEXP (x, 0)) == ASHIFT
5620 || GET_CODE (XEXP (x, 0)) == LSHIFTRT
5621 || GET_CODE (XEXP (x, 0)) == ASHIFTRT
5622 || GET_CODE (XEXP (x, 0)) == ROTATE
5623 || GET_CODE (XEXP (x, 0)) == ROTATERT))
5624 return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (x, 0), i);
5625 break;
5626
5627 case EQ: case NE:
5628 case GT: case GTU: case GE: case GEU:
5629 case LT: case LTU: case LE: case LEU:
5630 case UNEQ: case LTGT:
5631 case UNGT: case UNGE:
5632 case UNLT: case UNLE:
5633 case UNORDERED: case ORDERED:
5634 /* If the first operand is a condition code, we can't do anything
5635 with it. */
5636 if (GET_CODE (XEXP (x, 0)) == COMPARE
5637 || (GET_MODE_CLASS (GET_MODE (XEXP (x, 0))) != MODE_CC
5638 && ! CC0_P (XEXP (x, 0))))
5639 {
5640 rtx op0 = XEXP (x, 0);
5641 rtx op1 = XEXP (x, 1);
5642 enum rtx_code new_code;
5643
5644 if (GET_CODE (op0) == COMPARE)
5645 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
5646
5647 /* Simplify our comparison, if possible. */
5648 new_code = simplify_comparison (code, &op0, &op1);
5649
5650 /* If STORE_FLAG_VALUE is 1, we can convert (ne x 0) to simply X
5651 if only the low-order bit is possibly nonzero in X (such as when
5652 X is a ZERO_EXTRACT of one bit). Similarly, we can convert EQ to
5653 (xor X 1) or (minus 1 X); we use the former. Finally, if X is
5654 known to be either 0 or -1, NE becomes a NEG and EQ becomes
5655 (plus X 1).
5656
5657 Remove any ZERO_EXTRACT we made when thinking this was a
5658 comparison. It may now be simpler to use, e.g., an AND. If a
5659 ZERO_EXTRACT is indeed appropriate, it will be placed back by
5660 the call to make_compound_operation in the SET case. */
5661
5662 if (STORE_FLAG_VALUE == 1
5663 && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
5664 && op1 == const0_rtx
5665 && mode == GET_MODE (op0)
5666 && nonzero_bits (op0, mode) == 1)
5667 return gen_lowpart (mode,
5668 expand_compound_operation (op0));
5669
5670 else if (STORE_FLAG_VALUE == 1
5671 && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
5672 && op1 == const0_rtx
5673 && mode == GET_MODE (op0)
5674 && (num_sign_bit_copies (op0, mode)
5675 == GET_MODE_BITSIZE (mode)))
5676 {
5677 op0 = expand_compound_operation (op0);
5678 return simplify_gen_unary (NEG, mode,
5679 gen_lowpart (mode, op0),
5680 mode);
5681 }
5682
5683 else if (STORE_FLAG_VALUE == 1
5684 && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
5685 && op1 == const0_rtx
5686 && mode == GET_MODE (op0)
5687 && nonzero_bits (op0, mode) == 1)
5688 {
5689 op0 = expand_compound_operation (op0);
5690 return simplify_gen_binary (XOR, mode,
5691 gen_lowpart (mode, op0),
5692 const1_rtx);
5693 }
5694
5695 else if (STORE_FLAG_VALUE == 1
5696 && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
5697 && op1 == const0_rtx
5698 && mode == GET_MODE (op0)
5699 && (num_sign_bit_copies (op0, mode)
5700 == GET_MODE_BITSIZE (mode)))
5701 {
5702 op0 = expand_compound_operation (op0);
5703 return plus_constant (gen_lowpart (mode, op0), 1);
5704 }
5705
5706 /* If STORE_FLAG_VALUE is -1, we have cases similar to
5707 those above. */
5708 if (STORE_FLAG_VALUE == -1
5709 && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
5710 && op1 == const0_rtx
5711 && (num_sign_bit_copies (op0, mode)
5712 == GET_MODE_BITSIZE (mode)))
5713 return gen_lowpart (mode,
5714 expand_compound_operation (op0));
5715
5716 else if (STORE_FLAG_VALUE == -1
5717 && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
5718 && op1 == const0_rtx
5719 && mode == GET_MODE (op0)
5720 && nonzero_bits (op0, mode) == 1)
5721 {
5722 op0 = expand_compound_operation (op0);
5723 return simplify_gen_unary (NEG, mode,
5724 gen_lowpart (mode, op0),
5725 mode);
5726 }
5727
5728 else if (STORE_FLAG_VALUE == -1
5729 && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
5730 && op1 == const0_rtx
5731 && mode == GET_MODE (op0)
5732 && (num_sign_bit_copies (op0, mode)
5733 == GET_MODE_BITSIZE (mode)))
5734 {
5735 op0 = expand_compound_operation (op0);
5736 return simplify_gen_unary (NOT, mode,
5737 gen_lowpart (mode, op0),
5738 mode);
5739 }
5740
5741 /* If X is 0/1, (eq X 0) is X-1. */
5742 else if (STORE_FLAG_VALUE == -1
5743 && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
5744 && op1 == const0_rtx
5745 && mode == GET_MODE (op0)
5746 && nonzero_bits (op0, mode) == 1)
5747 {
5748 op0 = expand_compound_operation (op0);
5749 return plus_constant (gen_lowpart (mode, op0), -1);
5750 }
5751
5752 /* If STORE_FLAG_VALUE says to just test the sign bit and X has just
5753 one bit that might be nonzero, we can convert (ne x 0) to
5754 (ashift x c) where C puts the bit in the sign bit. Remove any
5755 AND with STORE_FLAG_VALUE when we are done, since we are only
5756 going to test the sign bit. */
5757 if (new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
5758 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
5759 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
5760 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
5761 && op1 == const0_rtx
5762 && mode == GET_MODE (op0)
5763 && (i = exact_log2 (nonzero_bits (op0, mode))) >= 0)
5764 {
5765 x = simplify_shift_const (NULL_RTX, ASHIFT, mode,
5766 expand_compound_operation (op0),
5767 GET_MODE_BITSIZE (mode) - 1 - i);
5768 if (GET_CODE (x) == AND && XEXP (x, 1) == const_true_rtx)
5769 return XEXP (x, 0);
5770 else
5771 return x;
5772 }
5773
5774 /* If the code changed, return a whole new comparison. */
5775 if (new_code != code)
5776 return gen_rtx_fmt_ee (new_code, mode, op0, op1);
5777
5778 /* Otherwise, keep this operation, but maybe change its operands.
5779 This also converts (ne (compare FOO BAR) 0) to (ne FOO BAR). */
5780 SUBST (XEXP (x, 0), op0);
5781 SUBST (XEXP (x, 1), op1);
5782 }
5783 break;
5784
5785 case IF_THEN_ELSE:
5786 return simplify_if_then_else (x);
5787
5788 case ZERO_EXTRACT:
5789 case SIGN_EXTRACT:
5790 case ZERO_EXTEND:
5791 case SIGN_EXTEND:
5792 /* If we are processing SET_DEST, we are done. */
5793 if (in_dest)
5794 return x;
5795
5796 return expand_compound_operation (x);
5797
5798 case SET:
5799 return simplify_set (x);
5800
5801 case AND:
5802 case IOR:
5803 return simplify_logical (x);
5804
5805 case ASHIFT:
5806 case LSHIFTRT:
5807 case ASHIFTRT:
5808 case ROTATE:
5809 case ROTATERT:
5810 /* If this is a shift by a constant amount, simplify it. */
5811 if (CONST_INT_P (XEXP (x, 1)))
5812 return simplify_shift_const (x, code, mode, XEXP (x, 0),
5813 INTVAL (XEXP (x, 1)));
5814
5815 else if (SHIFT_COUNT_TRUNCATED && !REG_P (XEXP (x, 1)))
5816 SUBST (XEXP (x, 1),
5817 force_to_mode (XEXP (x, 1), GET_MODE (XEXP (x, 1)),
5818 ((unsigned HOST_WIDE_INT) 1
5819 << exact_log2 (GET_MODE_BITSIZE (GET_MODE (x))))
5820 - 1,
5821 0));
5822 break;
5823
5824 default:
5825 break;
5826 }
5827
5828 return x;
5829 }
5830 \f
5831 /* Simplify X, an IF_THEN_ELSE expression. Return the new expression. */
5832
5833 static rtx
5834 simplify_if_then_else (rtx x)
5835 {
5836 enum machine_mode mode = GET_MODE (x);
5837 rtx cond = XEXP (x, 0);
5838 rtx true_rtx = XEXP (x, 1);
5839 rtx false_rtx = XEXP (x, 2);
5840 enum rtx_code true_code = GET_CODE (cond);
5841 int comparison_p = COMPARISON_P (cond);
5842 rtx temp;
5843 int i;
5844 enum rtx_code false_code;
5845 rtx reversed;
5846
5847 /* Simplify storing of the truth value. */
5848 if (comparison_p && true_rtx == const_true_rtx && false_rtx == const0_rtx)
5849 return simplify_gen_relational (true_code, mode, VOIDmode,
5850 XEXP (cond, 0), XEXP (cond, 1));
5851
5852 /* Also when the truth value has to be reversed. */
5853 if (comparison_p
5854 && true_rtx == const0_rtx && false_rtx == const_true_rtx
5855 && (reversed = reversed_comparison (cond, mode)))
5856 return reversed;
5857
5858 /* Sometimes we can simplify the arm of an IF_THEN_ELSE if a register used
5859 in it is being compared against certain values. Get the true and false
5860 comparisons and see if that says anything about the value of each arm. */
5861
5862 if (comparison_p
5863 && ((false_code = reversed_comparison_code (cond, NULL))
5864 != UNKNOWN)
5865 && REG_P (XEXP (cond, 0)))
5866 {
5867 HOST_WIDE_INT nzb;
5868 rtx from = XEXP (cond, 0);
5869 rtx true_val = XEXP (cond, 1);
5870 rtx false_val = true_val;
5871 int swapped = 0;
5872
5873 /* If FALSE_CODE is EQ, swap the codes and arms. */
5874
5875 if (false_code == EQ)
5876 {
5877 swapped = 1, true_code = EQ, false_code = NE;
5878 temp = true_rtx, true_rtx = false_rtx, false_rtx = temp;
5879 }
5880
5881 /* If we are comparing against zero and the expression being tested has
5882 only a single bit that might be nonzero, that is its value when it is
5883 not equal to zero. Similarly if it is known to be -1 or 0. */
5884
5885 if (true_code == EQ && true_val == const0_rtx
5886 && exact_log2 (nzb = nonzero_bits (from, GET_MODE (from))) >= 0)
5887 {
5888 false_code = EQ;
5889 false_val = GEN_INT (trunc_int_for_mode (nzb, GET_MODE (from)));
5890 }
5891 else if (true_code == EQ && true_val == const0_rtx
5892 && (num_sign_bit_copies (from, GET_MODE (from))
5893 == GET_MODE_BITSIZE (GET_MODE (from))))
5894 {
5895 false_code = EQ;
5896 false_val = constm1_rtx;
5897 }
5898
5899 /* Now simplify an arm if we know the value of the register in the
5900 branch and it is used in the arm. Be careful due to the potential
5901 of locally-shared RTL. */
5902
5903 if (reg_mentioned_p (from, true_rtx))
5904 true_rtx = subst (known_cond (copy_rtx (true_rtx), true_code,
5905 from, true_val),
5906 pc_rtx, pc_rtx, 0, 0);
5907 if (reg_mentioned_p (from, false_rtx))
5908 false_rtx = subst (known_cond (copy_rtx (false_rtx), false_code,
5909 from, false_val),
5910 pc_rtx, pc_rtx, 0, 0);
5911
5912 SUBST (XEXP (x, 1), swapped ? false_rtx : true_rtx);
5913 SUBST (XEXP (x, 2), swapped ? true_rtx : false_rtx);
5914
5915 true_rtx = XEXP (x, 1);
5916 false_rtx = XEXP (x, 2);
5917 true_code = GET_CODE (cond);
5918 }
5919
5920 /* If we have (if_then_else FOO (pc) (label_ref BAR)) and FOO can be
5921 reversed, do so to avoid needing two sets of patterns for
5922 subtract-and-branch insns. Similarly if we have a constant in the true
5923 arm, the false arm is the same as the first operand of the comparison, or
5924 the false arm is more complicated than the true arm. */
5925
5926 if (comparison_p
5927 && reversed_comparison_code (cond, NULL) != UNKNOWN
5928 && (true_rtx == pc_rtx
5929 || (CONSTANT_P (true_rtx)
5930 && !CONST_INT_P (false_rtx) && false_rtx != pc_rtx)
5931 || true_rtx == const0_rtx
5932 || (OBJECT_P (true_rtx) && !OBJECT_P (false_rtx))
5933 || (GET_CODE (true_rtx) == SUBREG && OBJECT_P (SUBREG_REG (true_rtx))
5934 && !OBJECT_P (false_rtx))
5935 || reg_mentioned_p (true_rtx, false_rtx)
5936 || rtx_equal_p (false_rtx, XEXP (cond, 0))))
5937 {
5938 true_code = reversed_comparison_code (cond, NULL);
5939 SUBST (XEXP (x, 0), reversed_comparison (cond, GET_MODE (cond)));
5940 SUBST (XEXP (x, 1), false_rtx);
5941 SUBST (XEXP (x, 2), true_rtx);
5942
5943 temp = true_rtx, true_rtx = false_rtx, false_rtx = temp;
5944 cond = XEXP (x, 0);
5945
5946 /* It is possible that the conditional has been simplified out. */
5947 true_code = GET_CODE (cond);
5948 comparison_p = COMPARISON_P (cond);
5949 }
5950
5951 /* If the two arms are identical, we don't need the comparison. */
5952
5953 if (rtx_equal_p (true_rtx, false_rtx) && ! side_effects_p (cond))
5954 return true_rtx;
5955
5956 /* Convert a == b ? b : a to "a". */
5957 if (true_code == EQ && ! side_effects_p (cond)
5958 && !HONOR_NANS (mode)
5959 && rtx_equal_p (XEXP (cond, 0), false_rtx)
5960 && rtx_equal_p (XEXP (cond, 1), true_rtx))
5961 return false_rtx;
5962 else if (true_code == NE && ! side_effects_p (cond)
5963 && !HONOR_NANS (mode)
5964 && rtx_equal_p (XEXP (cond, 0), true_rtx)
5965 && rtx_equal_p (XEXP (cond, 1), false_rtx))
5966 return true_rtx;
5967
5968 /* Look for cases where we have (abs x) or (neg (abs X)). */
5969
5970 if (GET_MODE_CLASS (mode) == MODE_INT
5971 && comparison_p
5972 && XEXP (cond, 1) == const0_rtx
5973 && GET_CODE (false_rtx) == NEG
5974 && rtx_equal_p (true_rtx, XEXP (false_rtx, 0))
5975 && rtx_equal_p (true_rtx, XEXP (cond, 0))
5976 && ! side_effects_p (true_rtx))
5977 switch (true_code)
5978 {
5979 case GT:
5980 case GE:
5981 return simplify_gen_unary (ABS, mode, true_rtx, mode);
5982 case LT:
5983 case LE:
5984 return
5985 simplify_gen_unary (NEG, mode,
5986 simplify_gen_unary (ABS, mode, true_rtx, mode),
5987 mode);
5988 default:
5989 break;
5990 }
5991
5992 /* Look for MIN or MAX. */
5993
5994 if ((! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
5995 && comparison_p
5996 && rtx_equal_p (XEXP (cond, 0), true_rtx)
5997 && rtx_equal_p (XEXP (cond, 1), false_rtx)
5998 && ! side_effects_p (cond))
5999 switch (true_code)
6000 {
6001 case GE:
6002 case GT:
6003 return simplify_gen_binary (SMAX, mode, true_rtx, false_rtx);
6004 case LE:
6005 case LT:
6006 return simplify_gen_binary (SMIN, mode, true_rtx, false_rtx);
6007 case GEU:
6008 case GTU:
6009 return simplify_gen_binary (UMAX, mode, true_rtx, false_rtx);
6010 case LEU:
6011 case LTU:
6012 return simplify_gen_binary (UMIN, mode, true_rtx, false_rtx);
6013 default:
6014 break;
6015 }
6016
6017 /* If we have (if_then_else COND (OP Z C1) Z) and OP is an identity when its
6018 second operand is zero, this can be done as (OP Z (mult COND C2)) where
6019 C2 = C1 * STORE_FLAG_VALUE. Similarly if OP has an outer ZERO_EXTEND or
6020 SIGN_EXTEND as long as Z is already extended (so we don't destroy it).
6021 We can do this kind of thing in some cases when STORE_FLAG_VALUE is
6022 neither 1 or -1, but it isn't worth checking for. */
6023
6024 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
6025 && comparison_p
6026 && GET_MODE_CLASS (mode) == MODE_INT
6027 && ! side_effects_p (x))
6028 {
6029 rtx t = make_compound_operation (true_rtx, SET);
6030 rtx f = make_compound_operation (false_rtx, SET);
6031 rtx cond_op0 = XEXP (cond, 0);
6032 rtx cond_op1 = XEXP (cond, 1);
6033 enum rtx_code op = UNKNOWN, extend_op = UNKNOWN;
6034 enum machine_mode m = mode;
6035 rtx z = 0, c1 = NULL_RTX;
6036
6037 if ((GET_CODE (t) == PLUS || GET_CODE (t) == MINUS
6038 || GET_CODE (t) == IOR || GET_CODE (t) == XOR
6039 || GET_CODE (t) == ASHIFT
6040 || GET_CODE (t) == LSHIFTRT || GET_CODE (t) == ASHIFTRT)
6041 && rtx_equal_p (XEXP (t, 0), f))
6042 c1 = XEXP (t, 1), op = GET_CODE (t), z = f;
6043
6044 /* If an identity-zero op is commutative, check whether there
6045 would be a match if we swapped the operands. */
6046 else if ((GET_CODE (t) == PLUS || GET_CODE (t) == IOR
6047 || GET_CODE (t) == XOR)
6048 && rtx_equal_p (XEXP (t, 1), f))
6049 c1 = XEXP (t, 0), op = GET_CODE (t), z = f;
6050 else if (GET_CODE (t) == SIGN_EXTEND
6051 && (GET_CODE (XEXP (t, 0)) == PLUS
6052 || GET_CODE (XEXP (t, 0)) == MINUS
6053 || GET_CODE (XEXP (t, 0)) == IOR
6054 || GET_CODE (XEXP (t, 0)) == XOR
6055 || GET_CODE (XEXP (t, 0)) == ASHIFT
6056 || GET_CODE (XEXP (t, 0)) == LSHIFTRT
6057 || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
6058 && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
6059 && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
6060 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
6061 && (num_sign_bit_copies (f, GET_MODE (f))
6062 > (unsigned int)
6063 (GET_MODE_BITSIZE (mode)
6064 - GET_MODE_BITSIZE (GET_MODE (XEXP (XEXP (t, 0), 0))))))
6065 {
6066 c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
6067 extend_op = SIGN_EXTEND;
6068 m = GET_MODE (XEXP (t, 0));
6069 }
6070 else if (GET_CODE (t) == SIGN_EXTEND
6071 && (GET_CODE (XEXP (t, 0)) == PLUS
6072 || GET_CODE (XEXP (t, 0)) == IOR
6073 || GET_CODE (XEXP (t, 0)) == XOR)
6074 && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
6075 && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
6076 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
6077 && (num_sign_bit_copies (f, GET_MODE (f))
6078 > (unsigned int)
6079 (GET_MODE_BITSIZE (mode)
6080 - GET_MODE_BITSIZE (GET_MODE (XEXP (XEXP (t, 0), 1))))))
6081 {
6082 c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
6083 extend_op = SIGN_EXTEND;
6084 m = GET_MODE (XEXP (t, 0));
6085 }
6086 else if (GET_CODE (t) == ZERO_EXTEND
6087 && (GET_CODE (XEXP (t, 0)) == PLUS
6088 || GET_CODE (XEXP (t, 0)) == MINUS
6089 || GET_CODE (XEXP (t, 0)) == IOR
6090 || GET_CODE (XEXP (t, 0)) == XOR
6091 || GET_CODE (XEXP (t, 0)) == ASHIFT
6092 || GET_CODE (XEXP (t, 0)) == LSHIFTRT
6093 || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
6094 && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
6095 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
6096 && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
6097 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
6098 && ((nonzero_bits (f, GET_MODE (f))
6099 & ~GET_MODE_MASK (GET_MODE (XEXP (XEXP (t, 0), 0))))
6100 == 0))
6101 {
6102 c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
6103 extend_op = ZERO_EXTEND;
6104 m = GET_MODE (XEXP (t, 0));
6105 }
6106 else if (GET_CODE (t) == ZERO_EXTEND
6107 && (GET_CODE (XEXP (t, 0)) == PLUS
6108 || GET_CODE (XEXP (t, 0)) == IOR
6109 || GET_CODE (XEXP (t, 0)) == XOR)
6110 && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
6111 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
6112 && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
6113 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
6114 && ((nonzero_bits (f, GET_MODE (f))
6115 & ~GET_MODE_MASK (GET_MODE (XEXP (XEXP (t, 0), 1))))
6116 == 0))
6117 {
6118 c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
6119 extend_op = ZERO_EXTEND;
6120 m = GET_MODE (XEXP (t, 0));
6121 }
6122
6123 if (z)
6124 {
6125 temp = subst (simplify_gen_relational (true_code, m, VOIDmode,
6126 cond_op0, cond_op1),
6127 pc_rtx, pc_rtx, 0, 0);
6128 temp = simplify_gen_binary (MULT, m, temp,
6129 simplify_gen_binary (MULT, m, c1,
6130 const_true_rtx));
6131 temp = subst (temp, pc_rtx, pc_rtx, 0, 0);
6132 temp = simplify_gen_binary (op, m, gen_lowpart (m, z), temp);
6133
6134 if (extend_op != UNKNOWN)
6135 temp = simplify_gen_unary (extend_op, mode, temp, m);
6136
6137 return temp;
6138 }
6139 }
6140
6141 /* If we have (if_then_else (ne A 0) C1 0) and either A is known to be 0 or
6142 1 and C1 is a single bit or A is known to be 0 or -1 and C1 is the
6143 negation of a single bit, we can convert this operation to a shift. We
6144 can actually do this more generally, but it doesn't seem worth it. */
6145
6146 if (true_code == NE && XEXP (cond, 1) == const0_rtx
6147 && false_rtx == const0_rtx && CONST_INT_P (true_rtx)
6148 && ((1 == nonzero_bits (XEXP (cond, 0), mode)
6149 && (i = exact_log2 (UINTVAL (true_rtx))) >= 0)
6150 || ((num_sign_bit_copies (XEXP (cond, 0), mode)
6151 == GET_MODE_BITSIZE (mode))
6152 && (i = exact_log2 (-UINTVAL (true_rtx))) >= 0)))
6153 return
6154 simplify_shift_const (NULL_RTX, ASHIFT, mode,
6155 gen_lowpart (mode, XEXP (cond, 0)), i);
6156
6157 /* (IF_THEN_ELSE (NE REG 0) (0) (8)) is REG for nonzero_bits (REG) == 8. */
6158 if (true_code == NE && XEXP (cond, 1) == const0_rtx
6159 && false_rtx == const0_rtx && CONST_INT_P (true_rtx)
6160 && GET_MODE (XEXP (cond, 0)) == mode
6161 && (UINTVAL (true_rtx) & GET_MODE_MASK (mode))
6162 == nonzero_bits (XEXP (cond, 0), mode)
6163 && (i = exact_log2 (UINTVAL (true_rtx) & GET_MODE_MASK (mode))) >= 0)
6164 return XEXP (cond, 0);
6165
6166 return x;
6167 }
6168 \f
6169 /* Simplify X, a SET expression. Return the new expression. */
6170
6171 static rtx
6172 simplify_set (rtx x)
6173 {
6174 rtx src = SET_SRC (x);
6175 rtx dest = SET_DEST (x);
6176 enum machine_mode mode
6177 = GET_MODE (src) != VOIDmode ? GET_MODE (src) : GET_MODE (dest);
6178 rtx other_insn;
6179 rtx *cc_use;
6180
6181 /* (set (pc) (return)) gets written as (return). */
6182 if (GET_CODE (dest) == PC && GET_CODE (src) == RETURN)
6183 return src;
6184
6185 /* Now that we know for sure which bits of SRC we are using, see if we can
6186 simplify the expression for the object knowing that we only need the
6187 low-order bits. */
6188
6189 if (GET_MODE_CLASS (mode) == MODE_INT
6190 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
6191 {
6192 src = force_to_mode (src, mode, ~(unsigned HOST_WIDE_INT) 0, 0);
6193 SUBST (SET_SRC (x), src);
6194 }
6195
6196 /* If we are setting CC0 or if the source is a COMPARE, look for the use of
6197 the comparison result and try to simplify it unless we already have used
6198 undobuf.other_insn. */
6199 if ((GET_MODE_CLASS (mode) == MODE_CC
6200 || GET_CODE (src) == COMPARE
6201 || CC0_P (dest))
6202 && (cc_use = find_single_use (dest, subst_insn, &other_insn)) != 0
6203 && (undobuf.other_insn == 0 || other_insn == undobuf.other_insn)
6204 && COMPARISON_P (*cc_use)
6205 && rtx_equal_p (XEXP (*cc_use, 0), dest))
6206 {
6207 enum rtx_code old_code = GET_CODE (*cc_use);
6208 enum rtx_code new_code;
6209 rtx op0, op1, tmp;
6210 int other_changed = 0;
6211 enum machine_mode compare_mode = GET_MODE (dest);
6212
6213 if (GET_CODE (src) == COMPARE)
6214 op0 = XEXP (src, 0), op1 = XEXP (src, 1);
6215 else
6216 op0 = src, op1 = CONST0_RTX (GET_MODE (src));
6217
6218 tmp = simplify_relational_operation (old_code, compare_mode, VOIDmode,
6219 op0, op1);
6220 if (!tmp)
6221 new_code = old_code;
6222 else if (!CONSTANT_P (tmp))
6223 {
6224 new_code = GET_CODE (tmp);
6225 op0 = XEXP (tmp, 0);
6226 op1 = XEXP (tmp, 1);
6227 }
6228 else
6229 {
6230 rtx pat = PATTERN (other_insn);
6231 undobuf.other_insn = other_insn;
6232 SUBST (*cc_use, tmp);
6233
6234 /* Attempt to simplify CC user. */
6235 if (GET_CODE (pat) == SET)
6236 {
6237 rtx new_rtx = simplify_rtx (SET_SRC (pat));
6238 if (new_rtx != NULL_RTX)
6239 SUBST (SET_SRC (pat), new_rtx);
6240 }
6241
6242 /* Convert X into a no-op move. */
6243 SUBST (SET_DEST (x), pc_rtx);
6244 SUBST (SET_SRC (x), pc_rtx);
6245 return x;
6246 }
6247
6248 /* Simplify our comparison, if possible. */
6249 new_code = simplify_comparison (new_code, &op0, &op1);
6250
6251 #ifdef SELECT_CC_MODE
6252 /* If this machine has CC modes other than CCmode, check to see if we
6253 need to use a different CC mode here. */
6254 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
6255 compare_mode = GET_MODE (op0);
6256 else
6257 compare_mode = SELECT_CC_MODE (new_code, op0, op1);
6258
6259 #ifndef HAVE_cc0
6260 /* If the mode changed, we have to change SET_DEST, the mode in the
6261 compare, and the mode in the place SET_DEST is used. If SET_DEST is
6262 a hard register, just build new versions with the proper mode. If it
6263 is a pseudo, we lose unless it is only time we set the pseudo, in
6264 which case we can safely change its mode. */
6265 if (compare_mode != GET_MODE (dest))
6266 {
6267 if (can_change_dest_mode (dest, 0, compare_mode))
6268 {
6269 unsigned int regno = REGNO (dest);
6270 rtx new_dest;
6271
6272 if (regno < FIRST_PSEUDO_REGISTER)
6273 new_dest = gen_rtx_REG (compare_mode, regno);
6274 else
6275 {
6276 SUBST_MODE (regno_reg_rtx[regno], compare_mode);
6277 new_dest = regno_reg_rtx[regno];
6278 }
6279
6280 SUBST (SET_DEST (x), new_dest);
6281 SUBST (XEXP (*cc_use, 0), new_dest);
6282 other_changed = 1;
6283
6284 dest = new_dest;
6285 }
6286 }
6287 #endif /* cc0 */
6288 #endif /* SELECT_CC_MODE */
6289
6290 /* If the code changed, we have to build a new comparison in
6291 undobuf.other_insn. */
6292 if (new_code != old_code)
6293 {
6294 int other_changed_previously = other_changed;
6295 unsigned HOST_WIDE_INT mask;
6296 rtx old_cc_use = *cc_use;
6297
6298 SUBST (*cc_use, gen_rtx_fmt_ee (new_code, GET_MODE (*cc_use),
6299 dest, const0_rtx));
6300 other_changed = 1;
6301
6302 /* If the only change we made was to change an EQ into an NE or
6303 vice versa, OP0 has only one bit that might be nonzero, and OP1
6304 is zero, check if changing the user of the condition code will
6305 produce a valid insn. If it won't, we can keep the original code
6306 in that insn by surrounding our operation with an XOR. */
6307
6308 if (((old_code == NE && new_code == EQ)
6309 || (old_code == EQ && new_code == NE))
6310 && ! other_changed_previously && op1 == const0_rtx
6311 && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT
6312 && exact_log2 (mask = nonzero_bits (op0, GET_MODE (op0))) >= 0)
6313 {
6314 rtx pat = PATTERN (other_insn), note = 0;
6315
6316 if ((recog_for_combine (&pat, other_insn, &note) < 0
6317 && ! check_asm_operands (pat)))
6318 {
6319 *cc_use = old_cc_use;
6320 other_changed = 0;
6321
6322 op0 = simplify_gen_binary (XOR, GET_MODE (op0),
6323 op0, GEN_INT (mask));
6324 }
6325 }
6326 }
6327
6328 if (other_changed)
6329 undobuf.other_insn = other_insn;
6330
6331 /* Otherwise, if we didn't previously have a COMPARE in the
6332 correct mode, we need one. */
6333 if (GET_CODE (src) != COMPARE || GET_MODE (src) != compare_mode)
6334 {
6335 SUBST (SET_SRC (x), gen_rtx_COMPARE (compare_mode, op0, op1));
6336 src = SET_SRC (x);
6337 }
6338 else if (GET_MODE (op0) == compare_mode && op1 == const0_rtx)
6339 {
6340 SUBST (SET_SRC (x), op0);
6341 src = SET_SRC (x);
6342 }
6343 /* Otherwise, update the COMPARE if needed. */
6344 else if (XEXP (src, 0) != op0 || XEXP (src, 1) != op1)
6345 {
6346 SUBST (SET_SRC (x), gen_rtx_COMPARE (compare_mode, op0, op1));
6347 src = SET_SRC (x);
6348 }
6349 }
6350 else
6351 {
6352 /* Get SET_SRC in a form where we have placed back any
6353 compound expressions. Then do the checks below. */
6354 src = make_compound_operation (src, SET);
6355 SUBST (SET_SRC (x), src);
6356 }
6357
6358 /* If we have (set x (subreg:m1 (op:m2 ...) 0)) with OP being some operation,
6359 and X being a REG or (subreg (reg)), we may be able to convert this to
6360 (set (subreg:m2 x) (op)).
6361
6362 We can always do this if M1 is narrower than M2 because that means that
6363 we only care about the low bits of the result.
6364
6365 However, on machines without WORD_REGISTER_OPERATIONS defined, we cannot
6366 perform a narrower operation than requested since the high-order bits will
6367 be undefined. On machine where it is defined, this transformation is safe
6368 as long as M1 and M2 have the same number of words. */
6369
6370 if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
6371 && !OBJECT_P (SUBREG_REG (src))
6372 && (((GET_MODE_SIZE (GET_MODE (src)) + (UNITS_PER_WORD - 1))
6373 / UNITS_PER_WORD)
6374 == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))
6375 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))
6376 #ifndef WORD_REGISTER_OPERATIONS
6377 && (GET_MODE_SIZE (GET_MODE (src))
6378 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (src))))
6379 #endif
6380 #ifdef CANNOT_CHANGE_MODE_CLASS
6381 && ! (REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER
6382 && REG_CANNOT_CHANGE_MODE_P (REGNO (dest),
6383 GET_MODE (SUBREG_REG (src)),
6384 GET_MODE (src)))
6385 #endif
6386 && (REG_P (dest)
6387 || (GET_CODE (dest) == SUBREG
6388 && REG_P (SUBREG_REG (dest)))))
6389 {
6390 SUBST (SET_DEST (x),
6391 gen_lowpart (GET_MODE (SUBREG_REG (src)),
6392 dest));
6393 SUBST (SET_SRC (x), SUBREG_REG (src));
6394
6395 src = SET_SRC (x), dest = SET_DEST (x);
6396 }
6397
6398 #ifdef HAVE_cc0
6399 /* If we have (set (cc0) (subreg ...)), we try to remove the subreg
6400 in SRC. */
6401 if (dest == cc0_rtx
6402 && GET_CODE (src) == SUBREG
6403 && subreg_lowpart_p (src)
6404 && (GET_MODE_BITSIZE (GET_MODE (src))
6405 < GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (src)))))
6406 {
6407 rtx inner = SUBREG_REG (src);
6408 enum machine_mode inner_mode = GET_MODE (inner);
6409
6410 /* Here we make sure that we don't have a sign bit on. */
6411 if (GET_MODE_BITSIZE (inner_mode) <= HOST_BITS_PER_WIDE_INT
6412 && (nonzero_bits (inner, inner_mode)
6413 < ((unsigned HOST_WIDE_INT) 1
6414 << (GET_MODE_BITSIZE (GET_MODE (src)) - 1))))
6415 {
6416 SUBST (SET_SRC (x), inner);
6417 src = SET_SRC (x);
6418 }
6419 }
6420 #endif
6421
6422 #ifdef LOAD_EXTEND_OP
6423 /* If we have (set FOO (subreg:M (mem:N BAR) 0)) with M wider than N, this
6424 would require a paradoxical subreg. Replace the subreg with a
6425 zero_extend to avoid the reload that would otherwise be required. */
6426
6427 if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
6428 && INTEGRAL_MODE_P (GET_MODE (SUBREG_REG (src)))
6429 && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (src))) != UNKNOWN
6430 && SUBREG_BYTE (src) == 0
6431 && (GET_MODE_SIZE (GET_MODE (src))
6432 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (src))))
6433 && MEM_P (SUBREG_REG (src)))
6434 {
6435 SUBST (SET_SRC (x),
6436 gen_rtx_fmt_e (LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (src))),
6437 GET_MODE (src), SUBREG_REG (src)));
6438
6439 src = SET_SRC (x);
6440 }
6441 #endif
6442
6443 /* If we don't have a conditional move, SET_SRC is an IF_THEN_ELSE, and we
6444 are comparing an item known to be 0 or -1 against 0, use a logical
6445 operation instead. Check for one of the arms being an IOR of the other
6446 arm with some value. We compute three terms to be IOR'ed together. In
6447 practice, at most two will be nonzero. Then we do the IOR's. */
6448
6449 if (GET_CODE (dest) != PC
6450 && GET_CODE (src) == IF_THEN_ELSE
6451 && GET_MODE_CLASS (GET_MODE (src)) == MODE_INT
6452 && (GET_CODE (XEXP (src, 0)) == EQ || GET_CODE (XEXP (src, 0)) == NE)
6453 && XEXP (XEXP (src, 0), 1) == const0_rtx
6454 && GET_MODE (src) == GET_MODE (XEXP (XEXP (src, 0), 0))
6455 #ifdef HAVE_conditional_move
6456 && ! can_conditionally_move_p (GET_MODE (src))
6457 #endif
6458 && (num_sign_bit_copies (XEXP (XEXP (src, 0), 0),
6459 GET_MODE (XEXP (XEXP (src, 0), 0)))
6460 == GET_MODE_BITSIZE (GET_MODE (XEXP (XEXP (src, 0), 0))))
6461 && ! side_effects_p (src))
6462 {
6463 rtx true_rtx = (GET_CODE (XEXP (src, 0)) == NE
6464 ? XEXP (src, 1) : XEXP (src, 2));
6465 rtx false_rtx = (GET_CODE (XEXP (src, 0)) == NE
6466 ? XEXP (src, 2) : XEXP (src, 1));
6467 rtx term1 = const0_rtx, term2, term3;
6468
6469 if (GET_CODE (true_rtx) == IOR
6470 && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
6471 term1 = false_rtx, true_rtx = XEXP (true_rtx, 1), false_rtx = const0_rtx;
6472 else if (GET_CODE (true_rtx) == IOR
6473 && rtx_equal_p (XEXP (true_rtx, 1), false_rtx))
6474 term1 = false_rtx, true_rtx = XEXP (true_rtx, 0), false_rtx = const0_rtx;
6475 else if (GET_CODE (false_rtx) == IOR
6476 && rtx_equal_p (XEXP (false_rtx, 0), true_rtx))
6477 term1 = true_rtx, false_rtx = XEXP (false_rtx, 1), true_rtx = const0_rtx;
6478 else if (GET_CODE (false_rtx) == IOR
6479 && rtx_equal_p (XEXP (false_rtx, 1), true_rtx))
6480 term1 = true_rtx, false_rtx = XEXP (false_rtx, 0), true_rtx = const0_rtx;
6481
6482 term2 = simplify_gen_binary (AND, GET_MODE (src),
6483 XEXP (XEXP (src, 0), 0), true_rtx);
6484 term3 = simplify_gen_binary (AND, GET_MODE (src),
6485 simplify_gen_unary (NOT, GET_MODE (src),
6486 XEXP (XEXP (src, 0), 0),
6487 GET_MODE (src)),
6488 false_rtx);
6489
6490 SUBST (SET_SRC (x),
6491 simplify_gen_binary (IOR, GET_MODE (src),
6492 simplify_gen_binary (IOR, GET_MODE (src),
6493 term1, term2),
6494 term3));
6495
6496 src = SET_SRC (x);
6497 }
6498
6499 /* If either SRC or DEST is a CLOBBER of (const_int 0), make this
6500 whole thing fail. */
6501 if (GET_CODE (src) == CLOBBER && XEXP (src, 0) == const0_rtx)
6502 return src;
6503 else if (GET_CODE (dest) == CLOBBER && XEXP (dest, 0) == const0_rtx)
6504 return dest;
6505 else
6506 /* Convert this into a field assignment operation, if possible. */
6507 return make_field_assignment (x);
6508 }
6509 \f
6510 /* Simplify, X, and AND, IOR, or XOR operation, and return the simplified
6511 result. */
6512
6513 static rtx
6514 simplify_logical (rtx x)
6515 {
6516 enum machine_mode mode = GET_MODE (x);
6517 rtx op0 = XEXP (x, 0);
6518 rtx op1 = XEXP (x, 1);
6519
6520 switch (GET_CODE (x))
6521 {
6522 case AND:
6523 /* We can call simplify_and_const_int only if we don't lose
6524 any (sign) bits when converting INTVAL (op1) to
6525 "unsigned HOST_WIDE_INT". */
6526 if (CONST_INT_P (op1)
6527 && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
6528 || INTVAL (op1) > 0))
6529 {
6530 x = simplify_and_const_int (x, mode, op0, INTVAL (op1));
6531 if (GET_CODE (x) != AND)
6532 return x;
6533
6534 op0 = XEXP (x, 0);
6535 op1 = XEXP (x, 1);
6536 }
6537
6538 /* If we have any of (and (ior A B) C) or (and (xor A B) C),
6539 apply the distributive law and then the inverse distributive
6540 law to see if things simplify. */
6541 if (GET_CODE (op0) == IOR || GET_CODE (op0) == XOR)
6542 {
6543 rtx result = distribute_and_simplify_rtx (x, 0);
6544 if (result)
6545 return result;
6546 }
6547 if (GET_CODE (op1) == IOR || GET_CODE (op1) == XOR)
6548 {
6549 rtx result = distribute_and_simplify_rtx (x, 1);
6550 if (result)
6551 return result;
6552 }
6553 break;
6554
6555 case IOR:
6556 /* If we have (ior (and A B) C), apply the distributive law and then
6557 the inverse distributive law to see if things simplify. */
6558
6559 if (GET_CODE (op0) == AND)
6560 {
6561 rtx result = distribute_and_simplify_rtx (x, 0);
6562 if (result)
6563 return result;
6564 }
6565
6566 if (GET_CODE (op1) == AND)
6567 {
6568 rtx result = distribute_and_simplify_rtx (x, 1);
6569 if (result)
6570 return result;
6571 }
6572 break;
6573
6574 default:
6575 gcc_unreachable ();
6576 }
6577
6578 return x;
6579 }
6580 \f
6581 /* We consider ZERO_EXTRACT, SIGN_EXTRACT, and SIGN_EXTEND as "compound
6582 operations" because they can be replaced with two more basic operations.
6583 ZERO_EXTEND is also considered "compound" because it can be replaced with
6584 an AND operation, which is simpler, though only one operation.
6585
6586 The function expand_compound_operation is called with an rtx expression
6587 and will convert it to the appropriate shifts and AND operations,
6588 simplifying at each stage.
6589
6590 The function make_compound_operation is called to convert an expression
6591 consisting of shifts and ANDs into the equivalent compound expression.
6592 It is the inverse of this function, loosely speaking. */
6593
6594 static rtx
6595 expand_compound_operation (rtx x)
6596 {
6597 unsigned HOST_WIDE_INT pos = 0, len;
6598 int unsignedp = 0;
6599 unsigned int modewidth;
6600 rtx tem;
6601
6602 switch (GET_CODE (x))
6603 {
6604 case ZERO_EXTEND:
6605 unsignedp = 1;
6606 case SIGN_EXTEND:
6607 /* We can't necessarily use a const_int for a multiword mode;
6608 it depends on implicitly extending the value.
6609 Since we don't know the right way to extend it,
6610 we can't tell whether the implicit way is right.
6611
6612 Even for a mode that is no wider than a const_int,
6613 we can't win, because we need to sign extend one of its bits through
6614 the rest of it, and we don't know which bit. */
6615 if (CONST_INT_P (XEXP (x, 0)))
6616 return x;
6617
6618 /* Return if (subreg:MODE FROM 0) is not a safe replacement for
6619 (zero_extend:MODE FROM) or (sign_extend:MODE FROM). It is for any MEM
6620 because (SUBREG (MEM...)) is guaranteed to cause the MEM to be
6621 reloaded. If not for that, MEM's would very rarely be safe.
6622
6623 Reject MODEs bigger than a word, because we might not be able
6624 to reference a two-register group starting with an arbitrary register
6625 (and currently gen_lowpart might crash for a SUBREG). */
6626
6627 if (GET_MODE_SIZE (GET_MODE (XEXP (x, 0))) > UNITS_PER_WORD)
6628 return x;
6629
6630 /* Reject MODEs that aren't scalar integers because turning vector
6631 or complex modes into shifts causes problems. */
6632
6633 if (! SCALAR_INT_MODE_P (GET_MODE (XEXP (x, 0))))
6634 return x;
6635
6636 len = GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)));
6637 /* If the inner object has VOIDmode (the only way this can happen
6638 is if it is an ASM_OPERANDS), we can't do anything since we don't
6639 know how much masking to do. */
6640 if (len == 0)
6641 return x;
6642
6643 break;
6644
6645 case ZERO_EXTRACT:
6646 unsignedp = 1;
6647
6648 /* ... fall through ... */
6649
6650 case SIGN_EXTRACT:
6651 /* If the operand is a CLOBBER, just return it. */
6652 if (GET_CODE (XEXP (x, 0)) == CLOBBER)
6653 return XEXP (x, 0);
6654
6655 if (!CONST_INT_P (XEXP (x, 1))
6656 || !CONST_INT_P (XEXP (x, 2))
6657 || GET_MODE (XEXP (x, 0)) == VOIDmode)
6658 return x;
6659
6660 /* Reject MODEs that aren't scalar integers because turning vector
6661 or complex modes into shifts causes problems. */
6662
6663 if (! SCALAR_INT_MODE_P (GET_MODE (XEXP (x, 0))))
6664 return x;
6665
6666 len = INTVAL (XEXP (x, 1));
6667 pos = INTVAL (XEXP (x, 2));
6668
6669 /* This should stay within the object being extracted, fail otherwise. */
6670 if (len + pos > GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0))))
6671 return x;
6672
6673 if (BITS_BIG_ENDIAN)
6674 pos = GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0))) - len - pos;
6675
6676 break;
6677
6678 default:
6679 return x;
6680 }
6681 /* Convert sign extension to zero extension, if we know that the high
6682 bit is not set, as this is easier to optimize. It will be converted
6683 back to cheaper alternative in make_extraction. */
6684 if (GET_CODE (x) == SIGN_EXTEND
6685 && (GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
6686 && ((nonzero_bits (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
6687 & ~(((unsigned HOST_WIDE_INT)
6688 GET_MODE_MASK (GET_MODE (XEXP (x, 0))))
6689 >> 1))
6690 == 0)))
6691 {
6692 rtx temp = gen_rtx_ZERO_EXTEND (GET_MODE (x), XEXP (x, 0));
6693 rtx temp2 = expand_compound_operation (temp);
6694
6695 /* Make sure this is a profitable operation. */
6696 if (rtx_cost (x, SET, optimize_this_for_speed_p)
6697 > rtx_cost (temp2, SET, optimize_this_for_speed_p))
6698 return temp2;
6699 else if (rtx_cost (x, SET, optimize_this_for_speed_p)
6700 > rtx_cost (temp, SET, optimize_this_for_speed_p))
6701 return temp;
6702 else
6703 return x;
6704 }
6705
6706 /* We can optimize some special cases of ZERO_EXTEND. */
6707 if (GET_CODE (x) == ZERO_EXTEND)
6708 {
6709 /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI if we
6710 know that the last value didn't have any inappropriate bits
6711 set. */
6712 if (GET_CODE (XEXP (x, 0)) == TRUNCATE
6713 && GET_MODE (XEXP (XEXP (x, 0), 0)) == GET_MODE (x)
6714 && GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
6715 && (nonzero_bits (XEXP (XEXP (x, 0), 0), GET_MODE (x))
6716 & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
6717 return XEXP (XEXP (x, 0), 0);
6718
6719 /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)). */
6720 if (GET_CODE (XEXP (x, 0)) == SUBREG
6721 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == GET_MODE (x)
6722 && subreg_lowpart_p (XEXP (x, 0))
6723 && GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
6724 && (nonzero_bits (SUBREG_REG (XEXP (x, 0)), GET_MODE (x))
6725 & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
6726 return SUBREG_REG (XEXP (x, 0));
6727
6728 /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI when foo
6729 is a comparison and STORE_FLAG_VALUE permits. This is like
6730 the first case, but it works even when GET_MODE (x) is larger
6731 than HOST_WIDE_INT. */
6732 if (GET_CODE (XEXP (x, 0)) == TRUNCATE
6733 && GET_MODE (XEXP (XEXP (x, 0), 0)) == GET_MODE (x)
6734 && COMPARISON_P (XEXP (XEXP (x, 0), 0))
6735 && (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))
6736 <= HOST_BITS_PER_WIDE_INT)
6737 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
6738 return XEXP (XEXP (x, 0), 0);
6739
6740 /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)). */
6741 if (GET_CODE (XEXP (x, 0)) == SUBREG
6742 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == GET_MODE (x)
6743 && subreg_lowpart_p (XEXP (x, 0))
6744 && COMPARISON_P (SUBREG_REG (XEXP (x, 0)))
6745 && (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))
6746 <= HOST_BITS_PER_WIDE_INT)
6747 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
6748 return SUBREG_REG (XEXP (x, 0));
6749
6750 }
6751
6752 /* If we reach here, we want to return a pair of shifts. The inner
6753 shift is a left shift of BITSIZE - POS - LEN bits. The outer
6754 shift is a right shift of BITSIZE - LEN bits. It is arithmetic or
6755 logical depending on the value of UNSIGNEDP.
6756
6757 If this was a ZERO_EXTEND or ZERO_EXTRACT, this pair of shifts will be
6758 converted into an AND of a shift.
6759
6760 We must check for the case where the left shift would have a negative
6761 count. This can happen in a case like (x >> 31) & 255 on machines
6762 that can't shift by a constant. On those machines, we would first
6763 combine the shift with the AND to produce a variable-position
6764 extraction. Then the constant of 31 would be substituted in
6765 to produce such a position. */
6766
6767 modewidth = GET_MODE_BITSIZE (GET_MODE (x));
6768 if (modewidth >= pos + len)
6769 {
6770 enum machine_mode mode = GET_MODE (x);
6771 tem = gen_lowpart (mode, XEXP (x, 0));
6772 if (!tem || GET_CODE (tem) == CLOBBER)
6773 return x;
6774 tem = simplify_shift_const (NULL_RTX, ASHIFT, mode,
6775 tem, modewidth - pos - len);
6776 tem = simplify_shift_const (NULL_RTX, unsignedp ? LSHIFTRT : ASHIFTRT,
6777 mode, tem, modewidth - len);
6778 }
6779 else if (unsignedp && len < HOST_BITS_PER_WIDE_INT)
6780 tem = simplify_and_const_int (NULL_RTX, GET_MODE (x),
6781 simplify_shift_const (NULL_RTX, LSHIFTRT,
6782 GET_MODE (x),
6783 XEXP (x, 0), pos),
6784 ((unsigned HOST_WIDE_INT) 1 << len) - 1);
6785 else
6786 /* Any other cases we can't handle. */
6787 return x;
6788
6789 /* If we couldn't do this for some reason, return the original
6790 expression. */
6791 if (GET_CODE (tem) == CLOBBER)
6792 return x;
6793
6794 return tem;
6795 }
6796 \f
6797 /* X is a SET which contains an assignment of one object into
6798 a part of another (such as a bit-field assignment, STRICT_LOW_PART,
6799 or certain SUBREGS). If possible, convert it into a series of
6800 logical operations.
6801
6802 We half-heartedly support variable positions, but do not at all
6803 support variable lengths. */
6804
6805 static const_rtx
6806 expand_field_assignment (const_rtx x)
6807 {
6808 rtx inner;
6809 rtx pos; /* Always counts from low bit. */
6810 int len;
6811 rtx mask, cleared, masked;
6812 enum machine_mode compute_mode;
6813
6814 /* Loop until we find something we can't simplify. */
6815 while (1)
6816 {
6817 if (GET_CODE (SET_DEST (x)) == STRICT_LOW_PART
6818 && GET_CODE (XEXP (SET_DEST (x), 0)) == SUBREG)
6819 {
6820 inner = SUBREG_REG (XEXP (SET_DEST (x), 0));
6821 len = GET_MODE_BITSIZE (GET_MODE (XEXP (SET_DEST (x), 0)));
6822 pos = GEN_INT (subreg_lsb (XEXP (SET_DEST (x), 0)));
6823 }
6824 else if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
6825 && CONST_INT_P (XEXP (SET_DEST (x), 1)))
6826 {
6827 inner = XEXP (SET_DEST (x), 0);
6828 len = INTVAL (XEXP (SET_DEST (x), 1));
6829 pos = XEXP (SET_DEST (x), 2);
6830
6831 /* A constant position should stay within the width of INNER. */
6832 if (CONST_INT_P (pos)
6833 && INTVAL (pos) + len > GET_MODE_BITSIZE (GET_MODE (inner)))
6834 break;
6835
6836 if (BITS_BIG_ENDIAN)
6837 {
6838 if (CONST_INT_P (pos))
6839 pos = GEN_INT (GET_MODE_BITSIZE (GET_MODE (inner)) - len
6840 - INTVAL (pos));
6841 else if (GET_CODE (pos) == MINUS
6842 && CONST_INT_P (XEXP (pos, 1))
6843 && (INTVAL (XEXP (pos, 1))
6844 == GET_MODE_BITSIZE (GET_MODE (inner)) - len))
6845 /* If position is ADJUST - X, new position is X. */
6846 pos = XEXP (pos, 0);
6847 else
6848 pos = simplify_gen_binary (MINUS, GET_MODE (pos),
6849 GEN_INT (GET_MODE_BITSIZE (
6850 GET_MODE (inner))
6851 - len),
6852 pos);
6853 }
6854 }
6855
6856 /* A SUBREG between two modes that occupy the same numbers of words
6857 can be done by moving the SUBREG to the source. */
6858 else if (GET_CODE (SET_DEST (x)) == SUBREG
6859 /* We need SUBREGs to compute nonzero_bits properly. */
6860 && nonzero_sign_valid
6861 && (((GET_MODE_SIZE (GET_MODE (SET_DEST (x)))
6862 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
6863 == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (x))))
6864 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
6865 {
6866 x = gen_rtx_SET (VOIDmode, SUBREG_REG (SET_DEST (x)),
6867 gen_lowpart
6868 (GET_MODE (SUBREG_REG (SET_DEST (x))),
6869 SET_SRC (x)));
6870 continue;
6871 }
6872 else
6873 break;
6874
6875 while (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner))
6876 inner = SUBREG_REG (inner);
6877
6878 compute_mode = GET_MODE (inner);
6879
6880 /* Don't attempt bitwise arithmetic on non scalar integer modes. */
6881 if (! SCALAR_INT_MODE_P (compute_mode))
6882 {
6883 enum machine_mode imode;
6884
6885 /* Don't do anything for vector or complex integral types. */
6886 if (! FLOAT_MODE_P (compute_mode))
6887 break;
6888
6889 /* Try to find an integral mode to pun with. */
6890 imode = mode_for_size (GET_MODE_BITSIZE (compute_mode), MODE_INT, 0);
6891 if (imode == BLKmode)
6892 break;
6893
6894 compute_mode = imode;
6895 inner = gen_lowpart (imode, inner);
6896 }
6897
6898 /* Compute a mask of LEN bits, if we can do this on the host machine. */
6899 if (len >= HOST_BITS_PER_WIDE_INT)
6900 break;
6901
6902 /* Now compute the equivalent expression. Make a copy of INNER
6903 for the SET_DEST in case it is a MEM into which we will substitute;
6904 we don't want shared RTL in that case. */
6905 mask = GEN_INT (((unsigned HOST_WIDE_INT) 1 << len) - 1);
6906 cleared = simplify_gen_binary (AND, compute_mode,
6907 simplify_gen_unary (NOT, compute_mode,
6908 simplify_gen_binary (ASHIFT,
6909 compute_mode,
6910 mask, pos),
6911 compute_mode),
6912 inner);
6913 masked = simplify_gen_binary (ASHIFT, compute_mode,
6914 simplify_gen_binary (
6915 AND, compute_mode,
6916 gen_lowpart (compute_mode, SET_SRC (x)),
6917 mask),
6918 pos);
6919
6920 x = gen_rtx_SET (VOIDmode, copy_rtx (inner),
6921 simplify_gen_binary (IOR, compute_mode,
6922 cleared, masked));
6923 }
6924
6925 return x;
6926 }
6927 \f
6928 /* Return an RTX for a reference to LEN bits of INNER. If POS_RTX is nonzero,
6929 it is an RTX that represents a variable starting position; otherwise,
6930 POS is the (constant) starting bit position (counted from the LSB).
6931
6932 UNSIGNEDP is nonzero for an unsigned reference and zero for a
6933 signed reference.
6934
6935 IN_DEST is nonzero if this is a reference in the destination of a
6936 SET. This is used when a ZERO_ or SIGN_EXTRACT isn't needed. If nonzero,
6937 a STRICT_LOW_PART will be used, if zero, ZERO_EXTEND or SIGN_EXTEND will
6938 be used.
6939
6940 IN_COMPARE is nonzero if we are in a COMPARE. This means that a
6941 ZERO_EXTRACT should be built even for bits starting at bit 0.
6942
6943 MODE is the desired mode of the result (if IN_DEST == 0).
6944
6945 The result is an RTX for the extraction or NULL_RTX if the target
6946 can't handle it. */
6947
6948 static rtx
6949 make_extraction (enum machine_mode mode, rtx inner, HOST_WIDE_INT pos,
6950 rtx pos_rtx, unsigned HOST_WIDE_INT len, int unsignedp,
6951 int in_dest, int in_compare)
6952 {
6953 /* This mode describes the size of the storage area
6954 to fetch the overall value from. Within that, we
6955 ignore the POS lowest bits, etc. */
6956 enum machine_mode is_mode = GET_MODE (inner);
6957 enum machine_mode inner_mode;
6958 enum machine_mode wanted_inner_mode;
6959 enum machine_mode wanted_inner_reg_mode = word_mode;
6960 enum machine_mode pos_mode = word_mode;
6961 enum machine_mode extraction_mode = word_mode;
6962 enum machine_mode tmode = mode_for_size (len, MODE_INT, 1);
6963 rtx new_rtx = 0;
6964 rtx orig_pos_rtx = pos_rtx;
6965 HOST_WIDE_INT orig_pos;
6966
6967 if (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner))
6968 {
6969 /* If going from (subreg:SI (mem:QI ...)) to (mem:QI ...),
6970 consider just the QI as the memory to extract from.
6971 The subreg adds or removes high bits; its mode is
6972 irrelevant to the meaning of this extraction,
6973 since POS and LEN count from the lsb. */
6974 if (MEM_P (SUBREG_REG (inner)))
6975 is_mode = GET_MODE (SUBREG_REG (inner));
6976 inner = SUBREG_REG (inner);
6977 }
6978 else if (GET_CODE (inner) == ASHIFT
6979 && CONST_INT_P (XEXP (inner, 1))
6980 && pos_rtx == 0 && pos == 0
6981 && len > UINTVAL (XEXP (inner, 1)))
6982 {
6983 /* We're extracting the least significant bits of an rtx
6984 (ashift X (const_int C)), where LEN > C. Extract the
6985 least significant (LEN - C) bits of X, giving an rtx
6986 whose mode is MODE, then shift it left C times. */
6987 new_rtx = make_extraction (mode, XEXP (inner, 0),
6988 0, 0, len - INTVAL (XEXP (inner, 1)),
6989 unsignedp, in_dest, in_compare);
6990 if (new_rtx != 0)
6991 return gen_rtx_ASHIFT (mode, new_rtx, XEXP (inner, 1));
6992 }
6993
6994 inner_mode = GET_MODE (inner);
6995
6996 if (pos_rtx && CONST_INT_P (pos_rtx))
6997 pos = INTVAL (pos_rtx), pos_rtx = 0;
6998
6999 /* See if this can be done without an extraction. We never can if the
7000 width of the field is not the same as that of some integer mode. For
7001 registers, we can only avoid the extraction if the position is at the
7002 low-order bit and this is either not in the destination or we have the
7003 appropriate STRICT_LOW_PART operation available.
7004
7005 For MEM, we can avoid an extract if the field starts on an appropriate
7006 boundary and we can change the mode of the memory reference. */
7007
7008 if (tmode != BLKmode
7009 && ((pos_rtx == 0 && (pos % BITS_PER_WORD) == 0
7010 && !MEM_P (inner)
7011 && (inner_mode == tmode
7012 || !REG_P (inner)
7013 || TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (tmode),
7014 GET_MODE_BITSIZE (inner_mode))
7015 || reg_truncated_to_mode (tmode, inner))
7016 && (! in_dest
7017 || (REG_P (inner)
7018 && have_insn_for (STRICT_LOW_PART, tmode))))
7019 || (MEM_P (inner) && pos_rtx == 0
7020 && (pos
7021 % (STRICT_ALIGNMENT ? GET_MODE_ALIGNMENT (tmode)
7022 : BITS_PER_UNIT)) == 0
7023 /* We can't do this if we are widening INNER_MODE (it
7024 may not be aligned, for one thing). */
7025 && GET_MODE_BITSIZE (inner_mode) >= GET_MODE_BITSIZE (tmode)
7026 && (inner_mode == tmode
7027 || (! mode_dependent_address_p (XEXP (inner, 0))
7028 && ! MEM_VOLATILE_P (inner))))))
7029 {
7030 /* If INNER is a MEM, make a new MEM that encompasses just the desired
7031 field. If the original and current mode are the same, we need not
7032 adjust the offset. Otherwise, we do if bytes big endian.
7033
7034 If INNER is not a MEM, get a piece consisting of just the field
7035 of interest (in this case POS % BITS_PER_WORD must be 0). */
7036
7037 if (MEM_P (inner))
7038 {
7039 HOST_WIDE_INT offset;
7040
7041 /* POS counts from lsb, but make OFFSET count in memory order. */
7042 if (BYTES_BIG_ENDIAN)
7043 offset = (GET_MODE_BITSIZE (is_mode) - len - pos) / BITS_PER_UNIT;
7044 else
7045 offset = pos / BITS_PER_UNIT;
7046
7047 new_rtx = adjust_address_nv (inner, tmode, offset);
7048 }
7049 else if (REG_P (inner))
7050 {
7051 if (tmode != inner_mode)
7052 {
7053 /* We can't call gen_lowpart in a DEST since we
7054 always want a SUBREG (see below) and it would sometimes
7055 return a new hard register. */
7056 if (pos || in_dest)
7057 {
7058 HOST_WIDE_INT final_word = pos / BITS_PER_WORD;
7059
7060 if (WORDS_BIG_ENDIAN
7061 && GET_MODE_SIZE (inner_mode) > UNITS_PER_WORD)
7062 final_word = ((GET_MODE_SIZE (inner_mode)
7063 - GET_MODE_SIZE (tmode))
7064 / UNITS_PER_WORD) - final_word;
7065
7066 final_word *= UNITS_PER_WORD;
7067 if (BYTES_BIG_ENDIAN &&
7068 GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (tmode))
7069 final_word += (GET_MODE_SIZE (inner_mode)
7070 - GET_MODE_SIZE (tmode)) % UNITS_PER_WORD;
7071
7072 /* Avoid creating invalid subregs, for example when
7073 simplifying (x>>32)&255. */
7074 if (!validate_subreg (tmode, inner_mode, inner, final_word))
7075 return NULL_RTX;
7076
7077 new_rtx = gen_rtx_SUBREG (tmode, inner, final_word);
7078 }
7079 else
7080 new_rtx = gen_lowpart (tmode, inner);
7081 }
7082 else
7083 new_rtx = inner;
7084 }
7085 else
7086 new_rtx = force_to_mode (inner, tmode,
7087 len >= HOST_BITS_PER_WIDE_INT
7088 ? ~(unsigned HOST_WIDE_INT) 0
7089 : ((unsigned HOST_WIDE_INT) 1 << len) - 1,
7090 0);
7091
7092 /* If this extraction is going into the destination of a SET,
7093 make a STRICT_LOW_PART unless we made a MEM. */
7094
7095 if (in_dest)
7096 return (MEM_P (new_rtx) ? new_rtx
7097 : (GET_CODE (new_rtx) != SUBREG
7098 ? gen_rtx_CLOBBER (tmode, const0_rtx)
7099 : gen_rtx_STRICT_LOW_PART (VOIDmode, new_rtx)));
7100
7101 if (mode == tmode)
7102 return new_rtx;
7103
7104 if (CONST_INT_P (new_rtx)
7105 || GET_CODE (new_rtx) == CONST_DOUBLE)
7106 return simplify_unary_operation (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
7107 mode, new_rtx, tmode);
7108
7109 /* If we know that no extraneous bits are set, and that the high
7110 bit is not set, convert the extraction to the cheaper of
7111 sign and zero extension, that are equivalent in these cases. */
7112 if (flag_expensive_optimizations
7113 && (GET_MODE_BITSIZE (tmode) <= HOST_BITS_PER_WIDE_INT
7114 && ((nonzero_bits (new_rtx, tmode)
7115 & ~(((unsigned HOST_WIDE_INT)
7116 GET_MODE_MASK (tmode))
7117 >> 1))
7118 == 0)))
7119 {
7120 rtx temp = gen_rtx_ZERO_EXTEND (mode, new_rtx);
7121 rtx temp1 = gen_rtx_SIGN_EXTEND (mode, new_rtx);
7122
7123 /* Prefer ZERO_EXTENSION, since it gives more information to
7124 backends. */
7125 if (rtx_cost (temp, SET, optimize_this_for_speed_p)
7126 <= rtx_cost (temp1, SET, optimize_this_for_speed_p))
7127 return temp;
7128 return temp1;
7129 }
7130
7131 /* Otherwise, sign- or zero-extend unless we already are in the
7132 proper mode. */
7133
7134 return (gen_rtx_fmt_e (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
7135 mode, new_rtx));
7136 }
7137
7138 /* Unless this is a COMPARE or we have a funny memory reference,
7139 don't do anything with zero-extending field extracts starting at
7140 the low-order bit since they are simple AND operations. */
7141 if (pos_rtx == 0 && pos == 0 && ! in_dest
7142 && ! in_compare && unsignedp)
7143 return 0;
7144
7145 /* Unless INNER is not MEM, reject this if we would be spanning bytes or
7146 if the position is not a constant and the length is not 1. In all
7147 other cases, we would only be going outside our object in cases when
7148 an original shift would have been undefined. */
7149 if (MEM_P (inner)
7150 && ((pos_rtx == 0 && pos + len > GET_MODE_BITSIZE (is_mode))
7151 || (pos_rtx != 0 && len != 1)))
7152 return 0;
7153
7154 /* Get the mode to use should INNER not be a MEM, the mode for the position,
7155 and the mode for the result. */
7156 if (in_dest && mode_for_extraction (EP_insv, -1) != MAX_MACHINE_MODE)
7157 {
7158 wanted_inner_reg_mode = mode_for_extraction (EP_insv, 0);
7159 pos_mode = mode_for_extraction (EP_insv, 2);
7160 extraction_mode = mode_for_extraction (EP_insv, 3);
7161 }
7162
7163 if (! in_dest && unsignedp
7164 && mode_for_extraction (EP_extzv, -1) != MAX_MACHINE_MODE)
7165 {
7166 wanted_inner_reg_mode = mode_for_extraction (EP_extzv, 1);
7167 pos_mode = mode_for_extraction (EP_extzv, 3);
7168 extraction_mode = mode_for_extraction (EP_extzv, 0);
7169 }
7170
7171 if (! in_dest && ! unsignedp
7172 && mode_for_extraction (EP_extv, -1) != MAX_MACHINE_MODE)
7173 {
7174 wanted_inner_reg_mode = mode_for_extraction (EP_extv, 1);
7175 pos_mode = mode_for_extraction (EP_extv, 3);
7176 extraction_mode = mode_for_extraction (EP_extv, 0);
7177 }
7178
7179 /* Never narrow an object, since that might not be safe. */
7180
7181 if (mode != VOIDmode
7182 && GET_MODE_SIZE (extraction_mode) < GET_MODE_SIZE (mode))
7183 extraction_mode = mode;
7184
7185 if (pos_rtx && GET_MODE (pos_rtx) != VOIDmode
7186 && GET_MODE_SIZE (pos_mode) < GET_MODE_SIZE (GET_MODE (pos_rtx)))
7187 pos_mode = GET_MODE (pos_rtx);
7188
7189 /* If this is not from memory, the desired mode is the preferred mode
7190 for an extraction pattern's first input operand, or word_mode if there
7191 is none. */
7192 if (!MEM_P (inner))
7193 wanted_inner_mode = wanted_inner_reg_mode;
7194 else
7195 {
7196 /* Be careful not to go beyond the extracted object and maintain the
7197 natural alignment of the memory. */
7198 wanted_inner_mode = smallest_mode_for_size (len, MODE_INT);
7199 while (pos % GET_MODE_BITSIZE (wanted_inner_mode) + len
7200 > GET_MODE_BITSIZE (wanted_inner_mode))
7201 {
7202 wanted_inner_mode = GET_MODE_WIDER_MODE (wanted_inner_mode);
7203 gcc_assert (wanted_inner_mode != VOIDmode);
7204 }
7205
7206 /* If we have to change the mode of memory and cannot, the desired mode
7207 is EXTRACTION_MODE. */
7208 if (inner_mode != wanted_inner_mode
7209 && (mode_dependent_address_p (XEXP (inner, 0))
7210 || MEM_VOLATILE_P (inner)
7211 || pos_rtx))
7212 wanted_inner_mode = extraction_mode;
7213 }
7214
7215 orig_pos = pos;
7216
7217 if (BITS_BIG_ENDIAN)
7218 {
7219 /* POS is passed as if BITS_BIG_ENDIAN == 0, so we need to convert it to
7220 BITS_BIG_ENDIAN style. If position is constant, compute new
7221 position. Otherwise, build subtraction.
7222 Note that POS is relative to the mode of the original argument.
7223 If it's a MEM we need to recompute POS relative to that.
7224 However, if we're extracting from (or inserting into) a register,
7225 we want to recompute POS relative to wanted_inner_mode. */
7226 int width = (MEM_P (inner)
7227 ? GET_MODE_BITSIZE (is_mode)
7228 : GET_MODE_BITSIZE (wanted_inner_mode));
7229
7230 if (pos_rtx == 0)
7231 pos = width - len - pos;
7232 else
7233 pos_rtx
7234 = gen_rtx_MINUS (GET_MODE (pos_rtx), GEN_INT (width - len), pos_rtx);
7235 /* POS may be less than 0 now, but we check for that below.
7236 Note that it can only be less than 0 if !MEM_P (inner). */
7237 }
7238
7239 /* If INNER has a wider mode, and this is a constant extraction, try to
7240 make it smaller and adjust the byte to point to the byte containing
7241 the value. */
7242 if (wanted_inner_mode != VOIDmode
7243 && inner_mode != wanted_inner_mode
7244 && ! pos_rtx
7245 && GET_MODE_SIZE (wanted_inner_mode) < GET_MODE_SIZE (is_mode)
7246 && MEM_P (inner)
7247 && ! mode_dependent_address_p (XEXP (inner, 0))
7248 && ! MEM_VOLATILE_P (inner))
7249 {
7250 int offset = 0;
7251
7252 /* The computations below will be correct if the machine is big
7253 endian in both bits and bytes or little endian in bits and bytes.
7254 If it is mixed, we must adjust. */
7255
7256 /* If bytes are big endian and we had a paradoxical SUBREG, we must
7257 adjust OFFSET to compensate. */
7258 if (BYTES_BIG_ENDIAN
7259 && GET_MODE_SIZE (inner_mode) < GET_MODE_SIZE (is_mode))
7260 offset -= GET_MODE_SIZE (is_mode) - GET_MODE_SIZE (inner_mode);
7261
7262 /* We can now move to the desired byte. */
7263 offset += (pos / GET_MODE_BITSIZE (wanted_inner_mode))
7264 * GET_MODE_SIZE (wanted_inner_mode);
7265 pos %= GET_MODE_BITSIZE (wanted_inner_mode);
7266
7267 if (BYTES_BIG_ENDIAN != BITS_BIG_ENDIAN
7268 && is_mode != wanted_inner_mode)
7269 offset = (GET_MODE_SIZE (is_mode)
7270 - GET_MODE_SIZE (wanted_inner_mode) - offset);
7271
7272 inner = adjust_address_nv (inner, wanted_inner_mode, offset);
7273 }
7274
7275 /* If INNER is not memory, get it into the proper mode. If we are changing
7276 its mode, POS must be a constant and smaller than the size of the new
7277 mode. */
7278 else if (!MEM_P (inner))
7279 {
7280 /* On the LHS, don't create paradoxical subregs implicitely truncating
7281 the register unless TRULY_NOOP_TRUNCATION. */
7282 if (in_dest
7283 && !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (GET_MODE (inner)),
7284 GET_MODE_BITSIZE (wanted_inner_mode)))
7285 return NULL_RTX;
7286
7287 if (GET_MODE (inner) != wanted_inner_mode
7288 && (pos_rtx != 0
7289 || orig_pos + len > GET_MODE_BITSIZE (wanted_inner_mode)))
7290 return NULL_RTX;
7291
7292 if (orig_pos < 0)
7293 return NULL_RTX;
7294
7295 inner = force_to_mode (inner, wanted_inner_mode,
7296 pos_rtx
7297 || len + orig_pos >= HOST_BITS_PER_WIDE_INT
7298 ? ~(unsigned HOST_WIDE_INT) 0
7299 : ((((unsigned HOST_WIDE_INT) 1 << len) - 1)
7300 << orig_pos),
7301 0);
7302 }
7303
7304 /* Adjust mode of POS_RTX, if needed. If we want a wider mode, we
7305 have to zero extend. Otherwise, we can just use a SUBREG. */
7306 if (pos_rtx != 0
7307 && GET_MODE_SIZE (pos_mode) > GET_MODE_SIZE (GET_MODE (pos_rtx)))
7308 {
7309 rtx temp = gen_rtx_ZERO_EXTEND (pos_mode, pos_rtx);
7310
7311 /* If we know that no extraneous bits are set, and that the high
7312 bit is not set, convert extraction to cheaper one - either
7313 SIGN_EXTENSION or ZERO_EXTENSION, that are equivalent in these
7314 cases. */
7315 if (flag_expensive_optimizations
7316 && (GET_MODE_BITSIZE (GET_MODE (pos_rtx)) <= HOST_BITS_PER_WIDE_INT
7317 && ((nonzero_bits (pos_rtx, GET_MODE (pos_rtx))
7318 & ~(((unsigned HOST_WIDE_INT)
7319 GET_MODE_MASK (GET_MODE (pos_rtx)))
7320 >> 1))
7321 == 0)))
7322 {
7323 rtx temp1 = gen_rtx_SIGN_EXTEND (pos_mode, pos_rtx);
7324
7325 /* Prefer ZERO_EXTENSION, since it gives more information to
7326 backends. */
7327 if (rtx_cost (temp1, SET, optimize_this_for_speed_p)
7328 < rtx_cost (temp, SET, optimize_this_for_speed_p))
7329 temp = temp1;
7330 }
7331 pos_rtx = temp;
7332 }
7333 else if (pos_rtx != 0
7334 && GET_MODE_SIZE (pos_mode) < GET_MODE_SIZE (GET_MODE (pos_rtx)))
7335 pos_rtx = gen_lowpart (pos_mode, pos_rtx);
7336
7337 /* Make POS_RTX unless we already have it and it is correct. If we don't
7338 have a POS_RTX but we do have an ORIG_POS_RTX, the latter must
7339 be a CONST_INT. */
7340 if (pos_rtx == 0 && orig_pos_rtx != 0 && INTVAL (orig_pos_rtx) == pos)
7341 pos_rtx = orig_pos_rtx;
7342
7343 else if (pos_rtx == 0)
7344 pos_rtx = GEN_INT (pos);
7345
7346 /* Make the required operation. See if we can use existing rtx. */
7347 new_rtx = gen_rtx_fmt_eee (unsignedp ? ZERO_EXTRACT : SIGN_EXTRACT,
7348 extraction_mode, inner, GEN_INT (len), pos_rtx);
7349 if (! in_dest)
7350 new_rtx = gen_lowpart (mode, new_rtx);
7351
7352 return new_rtx;
7353 }
7354 \f
7355 /* See if X contains an ASHIFT of COUNT or more bits that can be commuted
7356 with any other operations in X. Return X without that shift if so. */
7357
7358 static rtx
7359 extract_left_shift (rtx x, int count)
7360 {
7361 enum rtx_code code = GET_CODE (x);
7362 enum machine_mode mode = GET_MODE (x);
7363 rtx tem;
7364
7365 switch (code)
7366 {
7367 case ASHIFT:
7368 /* This is the shift itself. If it is wide enough, we will return
7369 either the value being shifted if the shift count is equal to
7370 COUNT or a shift for the difference. */
7371 if (CONST_INT_P (XEXP (x, 1))
7372 && INTVAL (XEXP (x, 1)) >= count)
7373 return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (x, 0),
7374 INTVAL (XEXP (x, 1)) - count);
7375 break;
7376
7377 case NEG: case NOT:
7378 if ((tem = extract_left_shift (XEXP (x, 0), count)) != 0)
7379 return simplify_gen_unary (code, mode, tem, mode);
7380
7381 break;
7382
7383 case PLUS: case IOR: case XOR: case AND:
7384 /* If we can safely shift this constant and we find the inner shift,
7385 make a new operation. */
7386 if (CONST_INT_P (XEXP (x, 1))
7387 && (UINTVAL (XEXP (x, 1))
7388 & ((((unsigned HOST_WIDE_INT) 1 << count)) - 1)) == 0
7389 && (tem = extract_left_shift (XEXP (x, 0), count)) != 0)
7390 return simplify_gen_binary (code, mode, tem,
7391 GEN_INT (INTVAL (XEXP (x, 1)) >> count));
7392
7393 break;
7394
7395 default:
7396 break;
7397 }
7398
7399 return 0;
7400 }
7401 \f
7402 /* Look at the expression rooted at X. Look for expressions
7403 equivalent to ZERO_EXTRACT, SIGN_EXTRACT, ZERO_EXTEND, SIGN_EXTEND.
7404 Form these expressions.
7405
7406 Return the new rtx, usually just X.
7407
7408 Also, for machines like the VAX that don't have logical shift insns,
7409 try to convert logical to arithmetic shift operations in cases where
7410 they are equivalent. This undoes the canonicalizations to logical
7411 shifts done elsewhere.
7412
7413 We try, as much as possible, to re-use rtl expressions to save memory.
7414
7415 IN_CODE says what kind of expression we are processing. Normally, it is
7416 SET. In a memory address (inside a MEM, PLUS or minus, the latter two
7417 being kludges), it is MEM. When processing the arguments of a comparison
7418 or a COMPARE against zero, it is COMPARE. */
7419
7420 static rtx
7421 make_compound_operation (rtx x, enum rtx_code in_code)
7422 {
7423 enum rtx_code code = GET_CODE (x);
7424 enum machine_mode mode = GET_MODE (x);
7425 int mode_width = GET_MODE_BITSIZE (mode);
7426 rtx rhs, lhs;
7427 enum rtx_code next_code;
7428 int i, j;
7429 rtx new_rtx = 0;
7430 rtx tem;
7431 const char *fmt;
7432
7433 /* Select the code to be used in recursive calls. Once we are inside an
7434 address, we stay there. If we have a comparison, set to COMPARE,
7435 but once inside, go back to our default of SET. */
7436
7437 next_code = (code == MEM ? MEM
7438 : ((code == PLUS || code == MINUS)
7439 && SCALAR_INT_MODE_P (mode)) ? MEM
7440 : ((code == COMPARE || COMPARISON_P (x))
7441 && XEXP (x, 1) == const0_rtx) ? COMPARE
7442 : in_code == COMPARE ? SET : in_code);
7443
7444 /* Process depending on the code of this operation. If NEW is set
7445 nonzero, it will be returned. */
7446
7447 switch (code)
7448 {
7449 case ASHIFT:
7450 /* Convert shifts by constants into multiplications if inside
7451 an address. */
7452 if (in_code == MEM && CONST_INT_P (XEXP (x, 1))
7453 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
7454 && INTVAL (XEXP (x, 1)) >= 0)
7455 {
7456 HOST_WIDE_INT count = INTVAL (XEXP (x, 1));
7457 HOST_WIDE_INT multval = (HOST_WIDE_INT) 1 << count;
7458
7459 new_rtx = make_compound_operation (XEXP (x, 0), next_code);
7460 if (GET_CODE (new_rtx) == NEG)
7461 {
7462 new_rtx = XEXP (new_rtx, 0);
7463 multval = -multval;
7464 }
7465 multval = trunc_int_for_mode (multval, mode);
7466 new_rtx = gen_rtx_MULT (mode, new_rtx, GEN_INT (multval));
7467 }
7468 break;
7469
7470 case PLUS:
7471 lhs = XEXP (x, 0);
7472 rhs = XEXP (x, 1);
7473 lhs = make_compound_operation (lhs, next_code);
7474 rhs = make_compound_operation (rhs, next_code);
7475 if (GET_CODE (lhs) == MULT && GET_CODE (XEXP (lhs, 0)) == NEG
7476 && SCALAR_INT_MODE_P (mode))
7477 {
7478 tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (lhs, 0), 0),
7479 XEXP (lhs, 1));
7480 new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
7481 }
7482 else if (GET_CODE (lhs) == MULT
7483 && (CONST_INT_P (XEXP (lhs, 1)) && INTVAL (XEXP (lhs, 1)) < 0))
7484 {
7485 tem = simplify_gen_binary (MULT, mode, XEXP (lhs, 0),
7486 simplify_gen_unary (NEG, mode,
7487 XEXP (lhs, 1),
7488 mode));
7489 new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
7490 }
7491 else
7492 {
7493 SUBST (XEXP (x, 0), lhs);
7494 SUBST (XEXP (x, 1), rhs);
7495 goto maybe_swap;
7496 }
7497 x = gen_lowpart (mode, new_rtx);
7498 goto maybe_swap;
7499
7500 case MINUS:
7501 lhs = XEXP (x, 0);
7502 rhs = XEXP (x, 1);
7503 lhs = make_compound_operation (lhs, next_code);
7504 rhs = make_compound_operation (rhs, next_code);
7505 if (GET_CODE (rhs) == MULT && GET_CODE (XEXP (rhs, 0)) == NEG
7506 && SCALAR_INT_MODE_P (mode))
7507 {
7508 tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (rhs, 0), 0),
7509 XEXP (rhs, 1));
7510 new_rtx = simplify_gen_binary (PLUS, mode, tem, lhs);
7511 }
7512 else if (GET_CODE (rhs) == MULT
7513 && (CONST_INT_P (XEXP (rhs, 1)) && INTVAL (XEXP (rhs, 1)) < 0))
7514 {
7515 tem = simplify_gen_binary (MULT, mode, XEXP (rhs, 0),
7516 simplify_gen_unary (NEG, mode,
7517 XEXP (rhs, 1),
7518 mode));
7519 new_rtx = simplify_gen_binary (PLUS, mode, tem, lhs);
7520 }
7521 else
7522 {
7523 SUBST (XEXP (x, 0), lhs);
7524 SUBST (XEXP (x, 1), rhs);
7525 return x;
7526 }
7527 return gen_lowpart (mode, new_rtx);
7528
7529 case AND:
7530 /* If the second operand is not a constant, we can't do anything
7531 with it. */
7532 if (!CONST_INT_P (XEXP (x, 1)))
7533 break;
7534
7535 /* If the constant is a power of two minus one and the first operand
7536 is a logical right shift, make an extraction. */
7537 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
7538 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
7539 {
7540 new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
7541 new_rtx = make_extraction (mode, new_rtx, 0, XEXP (XEXP (x, 0), 1), i, 1,
7542 0, in_code == COMPARE);
7543 }
7544
7545 /* Same as previous, but for (subreg (lshiftrt ...)) in first op. */
7546 else if (GET_CODE (XEXP (x, 0)) == SUBREG
7547 && subreg_lowpart_p (XEXP (x, 0))
7548 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == LSHIFTRT
7549 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
7550 {
7551 new_rtx = make_compound_operation (XEXP (SUBREG_REG (XEXP (x, 0)), 0),
7552 next_code);
7553 new_rtx = make_extraction (GET_MODE (SUBREG_REG (XEXP (x, 0))), new_rtx, 0,
7554 XEXP (SUBREG_REG (XEXP (x, 0)), 1), i, 1,
7555 0, in_code == COMPARE);
7556 }
7557 /* Same as previous, but for (xor/ior (lshiftrt...) (lshiftrt...)). */
7558 else if ((GET_CODE (XEXP (x, 0)) == XOR
7559 || GET_CODE (XEXP (x, 0)) == IOR)
7560 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LSHIFTRT
7561 && GET_CODE (XEXP (XEXP (x, 0), 1)) == LSHIFTRT
7562 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
7563 {
7564 /* Apply the distributive law, and then try to make extractions. */
7565 new_rtx = gen_rtx_fmt_ee (GET_CODE (XEXP (x, 0)), mode,
7566 gen_rtx_AND (mode, XEXP (XEXP (x, 0), 0),
7567 XEXP (x, 1)),
7568 gen_rtx_AND (mode, XEXP (XEXP (x, 0), 1),
7569 XEXP (x, 1)));
7570 new_rtx = make_compound_operation (new_rtx, in_code);
7571 }
7572
7573 /* If we are have (and (rotate X C) M) and C is larger than the number
7574 of bits in M, this is an extraction. */
7575
7576 else if (GET_CODE (XEXP (x, 0)) == ROTATE
7577 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
7578 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0
7579 && i <= INTVAL (XEXP (XEXP (x, 0), 1)))
7580 {
7581 new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
7582 new_rtx = make_extraction (mode, new_rtx,
7583 (GET_MODE_BITSIZE (mode)
7584 - INTVAL (XEXP (XEXP (x, 0), 1))),
7585 NULL_RTX, i, 1, 0, in_code == COMPARE);
7586 }
7587
7588 /* On machines without logical shifts, if the operand of the AND is
7589 a logical shift and our mask turns off all the propagated sign
7590 bits, we can replace the logical shift with an arithmetic shift. */
7591 else if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
7592 && !have_insn_for (LSHIFTRT, mode)
7593 && have_insn_for (ASHIFTRT, mode)
7594 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
7595 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
7596 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
7597 && mode_width <= HOST_BITS_PER_WIDE_INT)
7598 {
7599 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
7600
7601 mask >>= INTVAL (XEXP (XEXP (x, 0), 1));
7602 if ((INTVAL (XEXP (x, 1)) & ~mask) == 0)
7603 SUBST (XEXP (x, 0),
7604 gen_rtx_ASHIFTRT (mode,
7605 make_compound_operation
7606 (XEXP (XEXP (x, 0), 0), next_code),
7607 XEXP (XEXP (x, 0), 1)));
7608 }
7609
7610 /* If the constant is one less than a power of two, this might be
7611 representable by an extraction even if no shift is present.
7612 If it doesn't end up being a ZERO_EXTEND, we will ignore it unless
7613 we are in a COMPARE. */
7614 else if ((i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
7615 new_rtx = make_extraction (mode,
7616 make_compound_operation (XEXP (x, 0),
7617 next_code),
7618 0, NULL_RTX, i, 1, 0, in_code == COMPARE);
7619
7620 /* If we are in a comparison and this is an AND with a power of two,
7621 convert this into the appropriate bit extract. */
7622 else if (in_code == COMPARE
7623 && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0)
7624 new_rtx = make_extraction (mode,
7625 make_compound_operation (XEXP (x, 0),
7626 next_code),
7627 i, NULL_RTX, 1, 1, 0, 1);
7628
7629 break;
7630
7631 case LSHIFTRT:
7632 /* If the sign bit is known to be zero, replace this with an
7633 arithmetic shift. */
7634 if (have_insn_for (ASHIFTRT, mode)
7635 && ! have_insn_for (LSHIFTRT, mode)
7636 && mode_width <= HOST_BITS_PER_WIDE_INT
7637 && (nonzero_bits (XEXP (x, 0), mode) & (1 << (mode_width - 1))) == 0)
7638 {
7639 new_rtx = gen_rtx_ASHIFTRT (mode,
7640 make_compound_operation (XEXP (x, 0),
7641 next_code),
7642 XEXP (x, 1));
7643 break;
7644 }
7645
7646 /* ... fall through ... */
7647
7648 case ASHIFTRT:
7649 lhs = XEXP (x, 0);
7650 rhs = XEXP (x, 1);
7651
7652 /* If we have (ashiftrt (ashift foo C1) C2) with C2 >= C1,
7653 this is a SIGN_EXTRACT. */
7654 if (CONST_INT_P (rhs)
7655 && GET_CODE (lhs) == ASHIFT
7656 && CONST_INT_P (XEXP (lhs, 1))
7657 && INTVAL (rhs) >= INTVAL (XEXP (lhs, 1))
7658 && INTVAL (rhs) < mode_width)
7659 {
7660 new_rtx = make_compound_operation (XEXP (lhs, 0), next_code);
7661 new_rtx = make_extraction (mode, new_rtx,
7662 INTVAL (rhs) - INTVAL (XEXP (lhs, 1)),
7663 NULL_RTX, mode_width - INTVAL (rhs),
7664 code == LSHIFTRT, 0, in_code == COMPARE);
7665 break;
7666 }
7667
7668 /* See if we have operations between an ASHIFTRT and an ASHIFT.
7669 If so, try to merge the shifts into a SIGN_EXTEND. We could
7670 also do this for some cases of SIGN_EXTRACT, but it doesn't
7671 seem worth the effort; the case checked for occurs on Alpha. */
7672
7673 if (!OBJECT_P (lhs)
7674 && ! (GET_CODE (lhs) == SUBREG
7675 && (OBJECT_P (SUBREG_REG (lhs))))
7676 && CONST_INT_P (rhs)
7677 && INTVAL (rhs) < HOST_BITS_PER_WIDE_INT
7678 && INTVAL (rhs) < mode_width
7679 && (new_rtx = extract_left_shift (lhs, INTVAL (rhs))) != 0)
7680 new_rtx = make_extraction (mode, make_compound_operation (new_rtx, next_code),
7681 0, NULL_RTX, mode_width - INTVAL (rhs),
7682 code == LSHIFTRT, 0, in_code == COMPARE);
7683
7684 break;
7685
7686 case SUBREG:
7687 /* Call ourselves recursively on the inner expression. If we are
7688 narrowing the object and it has a different RTL code from
7689 what it originally did, do this SUBREG as a force_to_mode. */
7690 {
7691 rtx inner = SUBREG_REG (x), simplified;
7692
7693 tem = make_compound_operation (inner, in_code);
7694
7695 simplified
7696 = simplify_subreg (mode, tem, GET_MODE (inner), SUBREG_BYTE (x));
7697 if (simplified)
7698 tem = simplified;
7699
7700 if (GET_CODE (tem) != GET_CODE (inner)
7701 && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (inner))
7702 && subreg_lowpart_p (x))
7703 {
7704 rtx newer
7705 = force_to_mode (tem, mode, ~(unsigned HOST_WIDE_INT) 0, 0);
7706
7707 /* If we have something other than a SUBREG, we might have
7708 done an expansion, so rerun ourselves. */
7709 if (GET_CODE (newer) != SUBREG)
7710 newer = make_compound_operation (newer, in_code);
7711
7712 /* force_to_mode can expand compounds. If it just re-expanded the
7713 compound, use gen_lowpart to convert to the desired mode. */
7714 if (rtx_equal_p (newer, x)
7715 /* Likewise if it re-expanded the compound only partially.
7716 This happens for SUBREG of ZERO_EXTRACT if they extract
7717 the same number of bits. */
7718 || (GET_CODE (newer) == SUBREG
7719 && (GET_CODE (SUBREG_REG (newer)) == LSHIFTRT
7720 || GET_CODE (SUBREG_REG (newer)) == ASHIFTRT)
7721 && GET_CODE (inner) == AND
7722 && rtx_equal_p (SUBREG_REG (newer), XEXP (inner, 0))))
7723 return gen_lowpart (GET_MODE (x), tem);
7724
7725 return newer;
7726 }
7727
7728 if (simplified)
7729 return tem;
7730 }
7731 break;
7732
7733 default:
7734 break;
7735 }
7736
7737 if (new_rtx)
7738 {
7739 x = gen_lowpart (mode, new_rtx);
7740 code = GET_CODE (x);
7741 }
7742
7743 /* Now recursively process each operand of this operation. */
7744 fmt = GET_RTX_FORMAT (code);
7745 for (i = 0; i < GET_RTX_LENGTH (code); i++)
7746 if (fmt[i] == 'e')
7747 {
7748 new_rtx = make_compound_operation (XEXP (x, i), next_code);
7749 SUBST (XEXP (x, i), new_rtx);
7750 }
7751 else if (fmt[i] == 'E')
7752 for (j = 0; j < XVECLEN (x, i); j++)
7753 {
7754 new_rtx = make_compound_operation (XVECEXP (x, i, j), next_code);
7755 SUBST (XVECEXP (x, i, j), new_rtx);
7756 }
7757
7758 maybe_swap:
7759 /* If this is a commutative operation, the changes to the operands
7760 may have made it noncanonical. */
7761 if (COMMUTATIVE_ARITH_P (x)
7762 && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
7763 {
7764 tem = XEXP (x, 0);
7765 SUBST (XEXP (x, 0), XEXP (x, 1));
7766 SUBST (XEXP (x, 1), tem);
7767 }
7768
7769 return x;
7770 }
7771 \f
7772 /* Given M see if it is a value that would select a field of bits
7773 within an item, but not the entire word. Return -1 if not.
7774 Otherwise, return the starting position of the field, where 0 is the
7775 low-order bit.
7776
7777 *PLEN is set to the length of the field. */
7778
7779 static int
7780 get_pos_from_mask (unsigned HOST_WIDE_INT m, unsigned HOST_WIDE_INT *plen)
7781 {
7782 /* Get the bit number of the first 1 bit from the right, -1 if none. */
7783 int pos = m ? ctz_hwi (m) : -1;
7784 int len = 0;
7785
7786 if (pos >= 0)
7787 /* Now shift off the low-order zero bits and see if we have a
7788 power of two minus 1. */
7789 len = exact_log2 ((m >> pos) + 1);
7790
7791 if (len <= 0)
7792 pos = -1;
7793
7794 *plen = len;
7795 return pos;
7796 }
7797 \f
7798 /* If X refers to a register that equals REG in value, replace these
7799 references with REG. */
7800 static rtx
7801 canon_reg_for_combine (rtx x, rtx reg)
7802 {
7803 rtx op0, op1, op2;
7804 const char *fmt;
7805 int i;
7806 bool copied;
7807
7808 enum rtx_code code = GET_CODE (x);
7809 switch (GET_RTX_CLASS (code))
7810 {
7811 case RTX_UNARY:
7812 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
7813 if (op0 != XEXP (x, 0))
7814 return simplify_gen_unary (GET_CODE (x), GET_MODE (x), op0,
7815 GET_MODE (reg));
7816 break;
7817
7818 case RTX_BIN_ARITH:
7819 case RTX_COMM_ARITH:
7820 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
7821 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
7822 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
7823 return simplify_gen_binary (GET_CODE (x), GET_MODE (x), op0, op1);
7824 break;
7825
7826 case RTX_COMPARE:
7827 case RTX_COMM_COMPARE:
7828 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
7829 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
7830 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
7831 return simplify_gen_relational (GET_CODE (x), GET_MODE (x),
7832 GET_MODE (op0), op0, op1);
7833 break;
7834
7835 case RTX_TERNARY:
7836 case RTX_BITFIELD_OPS:
7837 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
7838 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
7839 op2 = canon_reg_for_combine (XEXP (x, 2), reg);
7840 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1) || op2 != XEXP (x, 2))
7841 return simplify_gen_ternary (GET_CODE (x), GET_MODE (x),
7842 GET_MODE (op0), op0, op1, op2);
7843
7844 case RTX_OBJ:
7845 if (REG_P (x))
7846 {
7847 if (rtx_equal_p (get_last_value (reg), x)
7848 || rtx_equal_p (reg, get_last_value (x)))
7849 return reg;
7850 else
7851 break;
7852 }
7853
7854 /* fall through */
7855
7856 default:
7857 fmt = GET_RTX_FORMAT (code);
7858 copied = false;
7859 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
7860 if (fmt[i] == 'e')
7861 {
7862 rtx op = canon_reg_for_combine (XEXP (x, i), reg);
7863 if (op != XEXP (x, i))
7864 {
7865 if (!copied)
7866 {
7867 copied = true;
7868 x = copy_rtx (x);
7869 }
7870 XEXP (x, i) = op;
7871 }
7872 }
7873 else if (fmt[i] == 'E')
7874 {
7875 int j;
7876 for (j = 0; j < XVECLEN (x, i); j++)
7877 {
7878 rtx op = canon_reg_for_combine (XVECEXP (x, i, j), reg);
7879 if (op != XVECEXP (x, i, j))
7880 {
7881 if (!copied)
7882 {
7883 copied = true;
7884 x = copy_rtx (x);
7885 }
7886 XVECEXP (x, i, j) = op;
7887 }
7888 }
7889 }
7890
7891 break;
7892 }
7893
7894 return x;
7895 }
7896
7897 /* Return X converted to MODE. If the value is already truncated to
7898 MODE we can just return a subreg even though in the general case we
7899 would need an explicit truncation. */
7900
7901 static rtx
7902 gen_lowpart_or_truncate (enum machine_mode mode, rtx x)
7903 {
7904 if (!CONST_INT_P (x)
7905 && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (x))
7906 && !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
7907 GET_MODE_BITSIZE (GET_MODE (x)))
7908 && !(REG_P (x) && reg_truncated_to_mode (mode, x)))
7909 {
7910 /* Bit-cast X into an integer mode. */
7911 if (!SCALAR_INT_MODE_P (GET_MODE (x)))
7912 x = gen_lowpart (int_mode_for_mode (GET_MODE (x)), x);
7913 x = simplify_gen_unary (TRUNCATE, int_mode_for_mode (mode),
7914 x, GET_MODE (x));
7915 }
7916
7917 return gen_lowpart (mode, x);
7918 }
7919
7920 /* See if X can be simplified knowing that we will only refer to it in
7921 MODE and will only refer to those bits that are nonzero in MASK.
7922 If other bits are being computed or if masking operations are done
7923 that select a superset of the bits in MASK, they can sometimes be
7924 ignored.
7925
7926 Return a possibly simplified expression, but always convert X to
7927 MODE. If X is a CONST_INT, AND the CONST_INT with MASK.
7928
7929 If JUST_SELECT is nonzero, don't optimize by noticing that bits in MASK
7930 are all off in X. This is used when X will be complemented, by either
7931 NOT, NEG, or XOR. */
7932
7933 static rtx
7934 force_to_mode (rtx x, enum machine_mode mode, unsigned HOST_WIDE_INT mask,
7935 int just_select)
7936 {
7937 enum rtx_code code = GET_CODE (x);
7938 int next_select = just_select || code == XOR || code == NOT || code == NEG;
7939 enum machine_mode op_mode;
7940 unsigned HOST_WIDE_INT fuller_mask, nonzero;
7941 rtx op0, op1, temp;
7942
7943 /* If this is a CALL or ASM_OPERANDS, don't do anything. Some of the
7944 code below will do the wrong thing since the mode of such an
7945 expression is VOIDmode.
7946
7947 Also do nothing if X is a CLOBBER; this can happen if X was
7948 the return value from a call to gen_lowpart. */
7949 if (code == CALL || code == ASM_OPERANDS || code == CLOBBER)
7950 return x;
7951
7952 /* We want to perform the operation is its present mode unless we know
7953 that the operation is valid in MODE, in which case we do the operation
7954 in MODE. */
7955 op_mode = ((GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (x))
7956 && have_insn_for (code, mode))
7957 ? mode : GET_MODE (x));
7958
7959 /* It is not valid to do a right-shift in a narrower mode
7960 than the one it came in with. */
7961 if ((code == LSHIFTRT || code == ASHIFTRT)
7962 && GET_MODE_BITSIZE (mode) < GET_MODE_BITSIZE (GET_MODE (x)))
7963 op_mode = GET_MODE (x);
7964
7965 /* Truncate MASK to fit OP_MODE. */
7966 if (op_mode)
7967 mask &= GET_MODE_MASK (op_mode);
7968
7969 /* When we have an arithmetic operation, or a shift whose count we
7970 do not know, we need to assume that all bits up to the highest-order
7971 bit in MASK will be needed. This is how we form such a mask. */
7972 if (mask & ((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)))
7973 fuller_mask = ~(unsigned HOST_WIDE_INT) 0;
7974 else
7975 fuller_mask = (((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mask) + 1))
7976 - 1);
7977
7978 /* Determine what bits of X are guaranteed to be (non)zero. */
7979 nonzero = nonzero_bits (x, mode);
7980
7981 /* If none of the bits in X are needed, return a zero. */
7982 if (!just_select && (nonzero & mask) == 0 && !side_effects_p (x))
7983 x = const0_rtx;
7984
7985 /* If X is a CONST_INT, return a new one. Do this here since the
7986 test below will fail. */
7987 if (CONST_INT_P (x))
7988 {
7989 if (SCALAR_INT_MODE_P (mode))
7990 return gen_int_mode (INTVAL (x) & mask, mode);
7991 else
7992 {
7993 x = GEN_INT (INTVAL (x) & mask);
7994 return gen_lowpart_common (mode, x);
7995 }
7996 }
7997
7998 /* If X is narrower than MODE and we want all the bits in X's mode, just
7999 get X in the proper mode. */
8000 if (GET_MODE_SIZE (GET_MODE (x)) < GET_MODE_SIZE (mode)
8001 && (GET_MODE_MASK (GET_MODE (x)) & ~mask) == 0)
8002 return gen_lowpart (mode, x);
8003
8004 /* We can ignore the effect of a SUBREG if it narrows the mode or
8005 if the constant masks to zero all the bits the mode doesn't have. */
8006 if (GET_CODE (x) == SUBREG
8007 && subreg_lowpart_p (x)
8008 && ((GET_MODE_SIZE (GET_MODE (x))
8009 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
8010 || (0 == (mask
8011 & GET_MODE_MASK (GET_MODE (x))
8012 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x)))))))
8013 return force_to_mode (SUBREG_REG (x), mode, mask, next_select);
8014
8015 /* The arithmetic simplifications here only work for scalar integer modes. */
8016 if (!SCALAR_INT_MODE_P (mode) || !SCALAR_INT_MODE_P (GET_MODE (x)))
8017 return gen_lowpart_or_truncate (mode, x);
8018
8019 switch (code)
8020 {
8021 case CLOBBER:
8022 /* If X is a (clobber (const_int)), return it since we know we are
8023 generating something that won't match. */
8024 return x;
8025
8026 case SIGN_EXTEND:
8027 case ZERO_EXTEND:
8028 case ZERO_EXTRACT:
8029 case SIGN_EXTRACT:
8030 x = expand_compound_operation (x);
8031 if (GET_CODE (x) != code)
8032 return force_to_mode (x, mode, mask, next_select);
8033 break;
8034
8035 case TRUNCATE:
8036 /* Similarly for a truncate. */
8037 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
8038
8039 case AND:
8040 /* If this is an AND with a constant, convert it into an AND
8041 whose constant is the AND of that constant with MASK. If it
8042 remains an AND of MASK, delete it since it is redundant. */
8043
8044 if (CONST_INT_P (XEXP (x, 1)))
8045 {
8046 x = simplify_and_const_int (x, op_mode, XEXP (x, 0),
8047 mask & INTVAL (XEXP (x, 1)));
8048
8049 /* If X is still an AND, see if it is an AND with a mask that
8050 is just some low-order bits. If so, and it is MASK, we don't
8051 need it. */
8052
8053 if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
8054 && ((INTVAL (XEXP (x, 1)) & GET_MODE_MASK (GET_MODE (x)))
8055 == mask))
8056 x = XEXP (x, 0);
8057
8058 /* If it remains an AND, try making another AND with the bits
8059 in the mode mask that aren't in MASK turned on. If the
8060 constant in the AND is wide enough, this might make a
8061 cheaper constant. */
8062
8063 if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
8064 && GET_MODE_MASK (GET_MODE (x)) != mask
8065 && GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT)
8066 {
8067 unsigned HOST_WIDE_INT cval
8068 = UINTVAL (XEXP (x, 1))
8069 | (GET_MODE_MASK (GET_MODE (x)) & ~mask);
8070 int width = GET_MODE_BITSIZE (GET_MODE (x));
8071 rtx y;
8072
8073 /* If MODE is narrower than HOST_WIDE_INT and CVAL is a negative
8074 number, sign extend it. */
8075 if (width > 0 && width < HOST_BITS_PER_WIDE_INT
8076 && (cval & ((unsigned HOST_WIDE_INT) 1 << (width - 1))) != 0)
8077 cval |= (unsigned HOST_WIDE_INT) -1 << width;
8078
8079 y = simplify_gen_binary (AND, GET_MODE (x),
8080 XEXP (x, 0), GEN_INT (cval));
8081 if (rtx_cost (y, SET, optimize_this_for_speed_p)
8082 < rtx_cost (x, SET, optimize_this_for_speed_p))
8083 x = y;
8084 }
8085
8086 break;
8087 }
8088
8089 goto binop;
8090
8091 case PLUS:
8092 /* In (and (plus FOO C1) M), if M is a mask that just turns off
8093 low-order bits (as in an alignment operation) and FOO is already
8094 aligned to that boundary, mask C1 to that boundary as well.
8095 This may eliminate that PLUS and, later, the AND. */
8096
8097 {
8098 unsigned int width = GET_MODE_BITSIZE (mode);
8099 unsigned HOST_WIDE_INT smask = mask;
8100
8101 /* If MODE is narrower than HOST_WIDE_INT and mask is a negative
8102 number, sign extend it. */
8103
8104 if (width < HOST_BITS_PER_WIDE_INT
8105 && (smask & ((unsigned HOST_WIDE_INT) 1 << (width - 1))) != 0)
8106 smask |= (unsigned HOST_WIDE_INT) (-1) << width;
8107
8108 if (CONST_INT_P (XEXP (x, 1))
8109 && exact_log2 (- smask) >= 0
8110 && (nonzero_bits (XEXP (x, 0), mode) & ~smask) == 0
8111 && (INTVAL (XEXP (x, 1)) & ~smask) != 0)
8112 return force_to_mode (plus_constant (XEXP (x, 0),
8113 (INTVAL (XEXP (x, 1)) & smask)),
8114 mode, smask, next_select);
8115 }
8116
8117 /* ... fall through ... */
8118
8119 case MULT:
8120 /* For PLUS, MINUS and MULT, we need any bits less significant than the
8121 most significant bit in MASK since carries from those bits will
8122 affect the bits we are interested in. */
8123 mask = fuller_mask;
8124 goto binop;
8125
8126 case MINUS:
8127 /* If X is (minus C Y) where C's least set bit is larger than any bit
8128 in the mask, then we may replace with (neg Y). */
8129 if (CONST_INT_P (XEXP (x, 0))
8130 && (((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 0))
8131 & -INTVAL (XEXP (x, 0))))
8132 > mask))
8133 {
8134 x = simplify_gen_unary (NEG, GET_MODE (x), XEXP (x, 1),
8135 GET_MODE (x));
8136 return force_to_mode (x, mode, mask, next_select);
8137 }
8138
8139 /* Similarly, if C contains every bit in the fuller_mask, then we may
8140 replace with (not Y). */
8141 if (CONST_INT_P (XEXP (x, 0))
8142 && ((UINTVAL (XEXP (x, 0)) | fuller_mask) == UINTVAL (XEXP (x, 0))))
8143 {
8144 x = simplify_gen_unary (NOT, GET_MODE (x),
8145 XEXP (x, 1), GET_MODE (x));
8146 return force_to_mode (x, mode, mask, next_select);
8147 }
8148
8149 mask = fuller_mask;
8150 goto binop;
8151
8152 case IOR:
8153 case XOR:
8154 /* If X is (ior (lshiftrt FOO C1) C2), try to commute the IOR and
8155 LSHIFTRT so we end up with an (and (lshiftrt (ior ...) ...) ...)
8156 operation which may be a bitfield extraction. Ensure that the
8157 constant we form is not wider than the mode of X. */
8158
8159 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8160 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8161 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
8162 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
8163 && CONST_INT_P (XEXP (x, 1))
8164 && ((INTVAL (XEXP (XEXP (x, 0), 1))
8165 + floor_log2 (INTVAL (XEXP (x, 1))))
8166 < GET_MODE_BITSIZE (GET_MODE (x)))
8167 && (UINTVAL (XEXP (x, 1))
8168 & ~nonzero_bits (XEXP (x, 0), GET_MODE (x))) == 0)
8169 {
8170 temp = GEN_INT ((INTVAL (XEXP (x, 1)) & mask)
8171 << INTVAL (XEXP (XEXP (x, 0), 1)));
8172 temp = simplify_gen_binary (GET_CODE (x), GET_MODE (x),
8173 XEXP (XEXP (x, 0), 0), temp);
8174 x = simplify_gen_binary (LSHIFTRT, GET_MODE (x), temp,
8175 XEXP (XEXP (x, 0), 1));
8176 return force_to_mode (x, mode, mask, next_select);
8177 }
8178
8179 binop:
8180 /* For most binary operations, just propagate into the operation and
8181 change the mode if we have an operation of that mode. */
8182
8183 op0 = force_to_mode (XEXP (x, 0), mode, mask, next_select);
8184 op1 = force_to_mode (XEXP (x, 1), mode, mask, next_select);
8185
8186 /* If we ended up truncating both operands, truncate the result of the
8187 operation instead. */
8188 if (GET_CODE (op0) == TRUNCATE
8189 && GET_CODE (op1) == TRUNCATE)
8190 {
8191 op0 = XEXP (op0, 0);
8192 op1 = XEXP (op1, 0);
8193 }
8194
8195 op0 = gen_lowpart_or_truncate (op_mode, op0);
8196 op1 = gen_lowpart_or_truncate (op_mode, op1);
8197
8198 if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8199 x = simplify_gen_binary (code, op_mode, op0, op1);
8200 break;
8201
8202 case ASHIFT:
8203 /* For left shifts, do the same, but just for the first operand.
8204 However, we cannot do anything with shifts where we cannot
8205 guarantee that the counts are smaller than the size of the mode
8206 because such a count will have a different meaning in a
8207 wider mode. */
8208
8209 if (! (CONST_INT_P (XEXP (x, 1))
8210 && INTVAL (XEXP (x, 1)) >= 0
8211 && INTVAL (XEXP (x, 1)) < GET_MODE_BITSIZE (mode))
8212 && ! (GET_MODE (XEXP (x, 1)) != VOIDmode
8213 && (nonzero_bits (XEXP (x, 1), GET_MODE (XEXP (x, 1)))
8214 < (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode))))
8215 break;
8216
8217 /* If the shift count is a constant and we can do arithmetic in
8218 the mode of the shift, refine which bits we need. Otherwise, use the
8219 conservative form of the mask. */
8220 if (CONST_INT_P (XEXP (x, 1))
8221 && INTVAL (XEXP (x, 1)) >= 0
8222 && INTVAL (XEXP (x, 1)) < GET_MODE_BITSIZE (op_mode)
8223 && GET_MODE_BITSIZE (op_mode) <= HOST_BITS_PER_WIDE_INT)
8224 mask >>= INTVAL (XEXP (x, 1));
8225 else
8226 mask = fuller_mask;
8227
8228 op0 = gen_lowpart_or_truncate (op_mode,
8229 force_to_mode (XEXP (x, 0), op_mode,
8230 mask, next_select));
8231
8232 if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0))
8233 x = simplify_gen_binary (code, op_mode, op0, XEXP (x, 1));
8234 break;
8235
8236 case LSHIFTRT:
8237 /* Here we can only do something if the shift count is a constant,
8238 this shift constant is valid for the host, and we can do arithmetic
8239 in OP_MODE. */
8240
8241 if (CONST_INT_P (XEXP (x, 1))
8242 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
8243 && GET_MODE_BITSIZE (op_mode) <= HOST_BITS_PER_WIDE_INT)
8244 {
8245 rtx inner = XEXP (x, 0);
8246 unsigned HOST_WIDE_INT inner_mask;
8247
8248 /* Select the mask of the bits we need for the shift operand. */
8249 inner_mask = mask << INTVAL (XEXP (x, 1));
8250
8251 /* We can only change the mode of the shift if we can do arithmetic
8252 in the mode of the shift and INNER_MASK is no wider than the
8253 width of X's mode. */
8254 if ((inner_mask & ~GET_MODE_MASK (GET_MODE (x))) != 0)
8255 op_mode = GET_MODE (x);
8256
8257 inner = force_to_mode (inner, op_mode, inner_mask, next_select);
8258
8259 if (GET_MODE (x) != op_mode || inner != XEXP (x, 0))
8260 x = simplify_gen_binary (LSHIFTRT, op_mode, inner, XEXP (x, 1));
8261 }
8262
8263 /* If we have (and (lshiftrt FOO C1) C2) where the combination of the
8264 shift and AND produces only copies of the sign bit (C2 is one less
8265 than a power of two), we can do this with just a shift. */
8266
8267 if (GET_CODE (x) == LSHIFTRT
8268 && CONST_INT_P (XEXP (x, 1))
8269 /* The shift puts one of the sign bit copies in the least significant
8270 bit. */
8271 && ((INTVAL (XEXP (x, 1))
8272 + num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
8273 >= GET_MODE_BITSIZE (GET_MODE (x)))
8274 && exact_log2 (mask + 1) >= 0
8275 /* Number of bits left after the shift must be more than the mask
8276 needs. */
8277 && ((INTVAL (XEXP (x, 1)) + exact_log2 (mask + 1))
8278 <= GET_MODE_BITSIZE (GET_MODE (x)))
8279 /* Must be more sign bit copies than the mask needs. */
8280 && ((int) num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
8281 >= exact_log2 (mask + 1)))
8282 x = simplify_gen_binary (LSHIFTRT, GET_MODE (x), XEXP (x, 0),
8283 GEN_INT (GET_MODE_BITSIZE (GET_MODE (x))
8284 - exact_log2 (mask + 1)));
8285
8286 goto shiftrt;
8287
8288 case ASHIFTRT:
8289 /* If we are just looking for the sign bit, we don't need this shift at
8290 all, even if it has a variable count. */
8291 if (GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
8292 && (mask == ((unsigned HOST_WIDE_INT) 1
8293 << (GET_MODE_BITSIZE (GET_MODE (x)) - 1))))
8294 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
8295
8296 /* If this is a shift by a constant, get a mask that contains those bits
8297 that are not copies of the sign bit. We then have two cases: If
8298 MASK only includes those bits, this can be a logical shift, which may
8299 allow simplifications. If MASK is a single-bit field not within
8300 those bits, we are requesting a copy of the sign bit and hence can
8301 shift the sign bit to the appropriate location. */
8302
8303 if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) >= 0
8304 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
8305 {
8306 int i;
8307
8308 /* If the considered data is wider than HOST_WIDE_INT, we can't
8309 represent a mask for all its bits in a single scalar.
8310 But we only care about the lower bits, so calculate these. */
8311
8312 if (GET_MODE_BITSIZE (GET_MODE (x)) > HOST_BITS_PER_WIDE_INT)
8313 {
8314 nonzero = ~(unsigned HOST_WIDE_INT) 0;
8315
8316 /* GET_MODE_BITSIZE (GET_MODE (x)) - INTVAL (XEXP (x, 1))
8317 is the number of bits a full-width mask would have set.
8318 We need only shift if these are fewer than nonzero can
8319 hold. If not, we must keep all bits set in nonzero. */
8320
8321 if (GET_MODE_BITSIZE (GET_MODE (x)) - INTVAL (XEXP (x, 1))
8322 < HOST_BITS_PER_WIDE_INT)
8323 nonzero >>= INTVAL (XEXP (x, 1))
8324 + HOST_BITS_PER_WIDE_INT
8325 - GET_MODE_BITSIZE (GET_MODE (x)) ;
8326 }
8327 else
8328 {
8329 nonzero = GET_MODE_MASK (GET_MODE (x));
8330 nonzero >>= INTVAL (XEXP (x, 1));
8331 }
8332
8333 if ((mask & ~nonzero) == 0)
8334 {
8335 x = simplify_shift_const (NULL_RTX, LSHIFTRT, GET_MODE (x),
8336 XEXP (x, 0), INTVAL (XEXP (x, 1)));
8337 if (GET_CODE (x) != ASHIFTRT)
8338 return force_to_mode (x, mode, mask, next_select);
8339 }
8340
8341 else if ((i = exact_log2 (mask)) >= 0)
8342 {
8343 x = simplify_shift_const
8344 (NULL_RTX, LSHIFTRT, GET_MODE (x), XEXP (x, 0),
8345 GET_MODE_BITSIZE (GET_MODE (x)) - 1 - i);
8346
8347 if (GET_CODE (x) != ASHIFTRT)
8348 return force_to_mode (x, mode, mask, next_select);
8349 }
8350 }
8351
8352 /* If MASK is 1, convert this to an LSHIFTRT. This can be done
8353 even if the shift count isn't a constant. */
8354 if (mask == 1)
8355 x = simplify_gen_binary (LSHIFTRT, GET_MODE (x),
8356 XEXP (x, 0), XEXP (x, 1));
8357
8358 shiftrt:
8359
8360 /* If this is a zero- or sign-extension operation that just affects bits
8361 we don't care about, remove it. Be sure the call above returned
8362 something that is still a shift. */
8363
8364 if ((GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ASHIFTRT)
8365 && CONST_INT_P (XEXP (x, 1))
8366 && INTVAL (XEXP (x, 1)) >= 0
8367 && (INTVAL (XEXP (x, 1))
8368 <= GET_MODE_BITSIZE (GET_MODE (x)) - (floor_log2 (mask) + 1))
8369 && GET_CODE (XEXP (x, 0)) == ASHIFT
8370 && XEXP (XEXP (x, 0), 1) == XEXP (x, 1))
8371 return force_to_mode (XEXP (XEXP (x, 0), 0), mode, mask,
8372 next_select);
8373
8374 break;
8375
8376 case ROTATE:
8377 case ROTATERT:
8378 /* If the shift count is constant and we can do computations
8379 in the mode of X, compute where the bits we care about are.
8380 Otherwise, we can't do anything. Don't change the mode of
8381 the shift or propagate MODE into the shift, though. */
8382 if (CONST_INT_P (XEXP (x, 1))
8383 && INTVAL (XEXP (x, 1)) >= 0)
8384 {
8385 temp = simplify_binary_operation (code == ROTATE ? ROTATERT : ROTATE,
8386 GET_MODE (x), GEN_INT (mask),
8387 XEXP (x, 1));
8388 if (temp && CONST_INT_P (temp))
8389 SUBST (XEXP (x, 0),
8390 force_to_mode (XEXP (x, 0), GET_MODE (x),
8391 INTVAL (temp), next_select));
8392 }
8393 break;
8394
8395 case NEG:
8396 /* If we just want the low-order bit, the NEG isn't needed since it
8397 won't change the low-order bit. */
8398 if (mask == 1)
8399 return force_to_mode (XEXP (x, 0), mode, mask, just_select);
8400
8401 /* We need any bits less significant than the most significant bit in
8402 MASK since carries from those bits will affect the bits we are
8403 interested in. */
8404 mask = fuller_mask;
8405 goto unop;
8406
8407 case NOT:
8408 /* (not FOO) is (xor FOO CONST), so if FOO is an LSHIFTRT, we can do the
8409 same as the XOR case above. Ensure that the constant we form is not
8410 wider than the mode of X. */
8411
8412 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8413 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8414 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
8415 && (INTVAL (XEXP (XEXP (x, 0), 1)) + floor_log2 (mask)
8416 < GET_MODE_BITSIZE (GET_MODE (x)))
8417 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT)
8418 {
8419 temp = gen_int_mode (mask << INTVAL (XEXP (XEXP (x, 0), 1)),
8420 GET_MODE (x));
8421 temp = simplify_gen_binary (XOR, GET_MODE (x),
8422 XEXP (XEXP (x, 0), 0), temp);
8423 x = simplify_gen_binary (LSHIFTRT, GET_MODE (x),
8424 temp, XEXP (XEXP (x, 0), 1));
8425
8426 return force_to_mode (x, mode, mask, next_select);
8427 }
8428
8429 /* (and (not FOO) CONST) is (not (or FOO (not CONST))), so we must
8430 use the full mask inside the NOT. */
8431 mask = fuller_mask;
8432
8433 unop:
8434 op0 = gen_lowpart_or_truncate (op_mode,
8435 force_to_mode (XEXP (x, 0), mode, mask,
8436 next_select));
8437 if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0))
8438 x = simplify_gen_unary (code, op_mode, op0, op_mode);
8439 break;
8440
8441 case NE:
8442 /* (and (ne FOO 0) CONST) can be (and FOO CONST) if CONST is included
8443 in STORE_FLAG_VALUE and FOO has a single bit that might be nonzero,
8444 which is equal to STORE_FLAG_VALUE. */
8445 if ((mask & ~STORE_FLAG_VALUE) == 0
8446 && XEXP (x, 1) == const0_rtx
8447 && GET_MODE (XEXP (x, 0)) == mode
8448 && exact_log2 (nonzero_bits (XEXP (x, 0), mode)) >= 0
8449 && (nonzero_bits (XEXP (x, 0), mode)
8450 == (unsigned HOST_WIDE_INT) STORE_FLAG_VALUE))
8451 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
8452
8453 break;
8454
8455 case IF_THEN_ELSE:
8456 /* We have no way of knowing if the IF_THEN_ELSE can itself be
8457 written in a narrower mode. We play it safe and do not do so. */
8458
8459 SUBST (XEXP (x, 1),
8460 gen_lowpart_or_truncate (GET_MODE (x),
8461 force_to_mode (XEXP (x, 1), mode,
8462 mask, next_select)));
8463 SUBST (XEXP (x, 2),
8464 gen_lowpart_or_truncate (GET_MODE (x),
8465 force_to_mode (XEXP (x, 2), mode,
8466 mask, next_select)));
8467 break;
8468
8469 default:
8470 break;
8471 }
8472
8473 /* Ensure we return a value of the proper mode. */
8474 return gen_lowpart_or_truncate (mode, x);
8475 }
8476 \f
8477 /* Return nonzero if X is an expression that has one of two values depending on
8478 whether some other value is zero or nonzero. In that case, we return the
8479 value that is being tested, *PTRUE is set to the value if the rtx being
8480 returned has a nonzero value, and *PFALSE is set to the other alternative.
8481
8482 If we return zero, we set *PTRUE and *PFALSE to X. */
8483
8484 static rtx
8485 if_then_else_cond (rtx x, rtx *ptrue, rtx *pfalse)
8486 {
8487 enum machine_mode mode = GET_MODE (x);
8488 enum rtx_code code = GET_CODE (x);
8489 rtx cond0, cond1, true0, true1, false0, false1;
8490 unsigned HOST_WIDE_INT nz;
8491
8492 /* If we are comparing a value against zero, we are done. */
8493 if ((code == NE || code == EQ)
8494 && XEXP (x, 1) == const0_rtx)
8495 {
8496 *ptrue = (code == NE) ? const_true_rtx : const0_rtx;
8497 *pfalse = (code == NE) ? const0_rtx : const_true_rtx;
8498 return XEXP (x, 0);
8499 }
8500
8501 /* If this is a unary operation whose operand has one of two values, apply
8502 our opcode to compute those values. */
8503 else if (UNARY_P (x)
8504 && (cond0 = if_then_else_cond (XEXP (x, 0), &true0, &false0)) != 0)
8505 {
8506 *ptrue = simplify_gen_unary (code, mode, true0, GET_MODE (XEXP (x, 0)));
8507 *pfalse = simplify_gen_unary (code, mode, false0,
8508 GET_MODE (XEXP (x, 0)));
8509 return cond0;
8510 }
8511
8512 /* If this is a COMPARE, do nothing, since the IF_THEN_ELSE we would
8513 make can't possibly match and would suppress other optimizations. */
8514 else if (code == COMPARE)
8515 ;
8516
8517 /* If this is a binary operation, see if either side has only one of two
8518 values. If either one does or if both do and they are conditional on
8519 the same value, compute the new true and false values. */
8520 else if (BINARY_P (x))
8521 {
8522 cond0 = if_then_else_cond (XEXP (x, 0), &true0, &false0);
8523 cond1 = if_then_else_cond (XEXP (x, 1), &true1, &false1);
8524
8525 if ((cond0 != 0 || cond1 != 0)
8526 && ! (cond0 != 0 && cond1 != 0 && ! rtx_equal_p (cond0, cond1)))
8527 {
8528 /* If if_then_else_cond returned zero, then true/false are the
8529 same rtl. We must copy one of them to prevent invalid rtl
8530 sharing. */
8531 if (cond0 == 0)
8532 true0 = copy_rtx (true0);
8533 else if (cond1 == 0)
8534 true1 = copy_rtx (true1);
8535
8536 if (COMPARISON_P (x))
8537 {
8538 *ptrue = simplify_gen_relational (code, mode, VOIDmode,
8539 true0, true1);
8540 *pfalse = simplify_gen_relational (code, mode, VOIDmode,
8541 false0, false1);
8542 }
8543 else
8544 {
8545 *ptrue = simplify_gen_binary (code, mode, true0, true1);
8546 *pfalse = simplify_gen_binary (code, mode, false0, false1);
8547 }
8548
8549 return cond0 ? cond0 : cond1;
8550 }
8551
8552 /* See if we have PLUS, IOR, XOR, MINUS or UMAX, where one of the
8553 operands is zero when the other is nonzero, and vice-versa,
8554 and STORE_FLAG_VALUE is 1 or -1. */
8555
8556 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
8557 && (code == PLUS || code == IOR || code == XOR || code == MINUS
8558 || code == UMAX)
8559 && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
8560 {
8561 rtx op0 = XEXP (XEXP (x, 0), 1);
8562 rtx op1 = XEXP (XEXP (x, 1), 1);
8563
8564 cond0 = XEXP (XEXP (x, 0), 0);
8565 cond1 = XEXP (XEXP (x, 1), 0);
8566
8567 if (COMPARISON_P (cond0)
8568 && COMPARISON_P (cond1)
8569 && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
8570 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
8571 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
8572 || ((swap_condition (GET_CODE (cond0))
8573 == reversed_comparison_code (cond1, NULL))
8574 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
8575 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
8576 && ! side_effects_p (x))
8577 {
8578 *ptrue = simplify_gen_binary (MULT, mode, op0, const_true_rtx);
8579 *pfalse = simplify_gen_binary (MULT, mode,
8580 (code == MINUS
8581 ? simplify_gen_unary (NEG, mode,
8582 op1, mode)
8583 : op1),
8584 const_true_rtx);
8585 return cond0;
8586 }
8587 }
8588
8589 /* Similarly for MULT, AND and UMIN, except that for these the result
8590 is always zero. */
8591 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
8592 && (code == MULT || code == AND || code == UMIN)
8593 && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
8594 {
8595 cond0 = XEXP (XEXP (x, 0), 0);
8596 cond1 = XEXP (XEXP (x, 1), 0);
8597
8598 if (COMPARISON_P (cond0)
8599 && COMPARISON_P (cond1)
8600 && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
8601 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
8602 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
8603 || ((swap_condition (GET_CODE (cond0))
8604 == reversed_comparison_code (cond1, NULL))
8605 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
8606 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
8607 && ! side_effects_p (x))
8608 {
8609 *ptrue = *pfalse = const0_rtx;
8610 return cond0;
8611 }
8612 }
8613 }
8614
8615 else if (code == IF_THEN_ELSE)
8616 {
8617 /* If we have IF_THEN_ELSE already, extract the condition and
8618 canonicalize it if it is NE or EQ. */
8619 cond0 = XEXP (x, 0);
8620 *ptrue = XEXP (x, 1), *pfalse = XEXP (x, 2);
8621 if (GET_CODE (cond0) == NE && XEXP (cond0, 1) == const0_rtx)
8622 return XEXP (cond0, 0);
8623 else if (GET_CODE (cond0) == EQ && XEXP (cond0, 1) == const0_rtx)
8624 {
8625 *ptrue = XEXP (x, 2), *pfalse = XEXP (x, 1);
8626 return XEXP (cond0, 0);
8627 }
8628 else
8629 return cond0;
8630 }
8631
8632 /* If X is a SUBREG, we can narrow both the true and false values
8633 if the inner expression, if there is a condition. */
8634 else if (code == SUBREG
8635 && 0 != (cond0 = if_then_else_cond (SUBREG_REG (x),
8636 &true0, &false0)))
8637 {
8638 true0 = simplify_gen_subreg (mode, true0,
8639 GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
8640 false0 = simplify_gen_subreg (mode, false0,
8641 GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
8642 if (true0 && false0)
8643 {
8644 *ptrue = true0;
8645 *pfalse = false0;
8646 return cond0;
8647 }
8648 }
8649
8650 /* If X is a constant, this isn't special and will cause confusions
8651 if we treat it as such. Likewise if it is equivalent to a constant. */
8652 else if (CONSTANT_P (x)
8653 || ((cond0 = get_last_value (x)) != 0 && CONSTANT_P (cond0)))
8654 ;
8655
8656 /* If we're in BImode, canonicalize on 0 and STORE_FLAG_VALUE, as that
8657 will be least confusing to the rest of the compiler. */
8658 else if (mode == BImode)
8659 {
8660 *ptrue = GEN_INT (STORE_FLAG_VALUE), *pfalse = const0_rtx;
8661 return x;
8662 }
8663
8664 /* If X is known to be either 0 or -1, those are the true and
8665 false values when testing X. */
8666 else if (x == constm1_rtx || x == const0_rtx
8667 || (mode != VOIDmode
8668 && num_sign_bit_copies (x, mode) == GET_MODE_BITSIZE (mode)))
8669 {
8670 *ptrue = constm1_rtx, *pfalse = const0_rtx;
8671 return x;
8672 }
8673
8674 /* Likewise for 0 or a single bit. */
8675 else if (SCALAR_INT_MODE_P (mode)
8676 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
8677 && exact_log2 (nz = nonzero_bits (x, mode)) >= 0)
8678 {
8679 *ptrue = gen_int_mode (nz, mode), *pfalse = const0_rtx;
8680 return x;
8681 }
8682
8683 /* Otherwise fail; show no condition with true and false values the same. */
8684 *ptrue = *pfalse = x;
8685 return 0;
8686 }
8687 \f
8688 /* Return the value of expression X given the fact that condition COND
8689 is known to be true when applied to REG as its first operand and VAL
8690 as its second. X is known to not be shared and so can be modified in
8691 place.
8692
8693 We only handle the simplest cases, and specifically those cases that
8694 arise with IF_THEN_ELSE expressions. */
8695
8696 static rtx
8697 known_cond (rtx x, enum rtx_code cond, rtx reg, rtx val)
8698 {
8699 enum rtx_code code = GET_CODE (x);
8700 rtx temp;
8701 const char *fmt;
8702 int i, j;
8703
8704 if (side_effects_p (x))
8705 return x;
8706
8707 /* If either operand of the condition is a floating point value,
8708 then we have to avoid collapsing an EQ comparison. */
8709 if (cond == EQ
8710 && rtx_equal_p (x, reg)
8711 && ! FLOAT_MODE_P (GET_MODE (x))
8712 && ! FLOAT_MODE_P (GET_MODE (val)))
8713 return val;
8714
8715 if (cond == UNEQ && rtx_equal_p (x, reg))
8716 return val;
8717
8718 /* If X is (abs REG) and we know something about REG's relationship
8719 with zero, we may be able to simplify this. */
8720
8721 if (code == ABS && rtx_equal_p (XEXP (x, 0), reg) && val == const0_rtx)
8722 switch (cond)
8723 {
8724 case GE: case GT: case EQ:
8725 return XEXP (x, 0);
8726 case LT: case LE:
8727 return simplify_gen_unary (NEG, GET_MODE (XEXP (x, 0)),
8728 XEXP (x, 0),
8729 GET_MODE (XEXP (x, 0)));
8730 default:
8731 break;
8732 }
8733
8734 /* The only other cases we handle are MIN, MAX, and comparisons if the
8735 operands are the same as REG and VAL. */
8736
8737 else if (COMPARISON_P (x) || COMMUTATIVE_ARITH_P (x))
8738 {
8739 if (rtx_equal_p (XEXP (x, 0), val))
8740 cond = swap_condition (cond), temp = val, val = reg, reg = temp;
8741
8742 if (rtx_equal_p (XEXP (x, 0), reg) && rtx_equal_p (XEXP (x, 1), val))
8743 {
8744 if (COMPARISON_P (x))
8745 {
8746 if (comparison_dominates_p (cond, code))
8747 return const_true_rtx;
8748
8749 code = reversed_comparison_code (x, NULL);
8750 if (code != UNKNOWN
8751 && comparison_dominates_p (cond, code))
8752 return const0_rtx;
8753 else
8754 return x;
8755 }
8756 else if (code == SMAX || code == SMIN
8757 || code == UMIN || code == UMAX)
8758 {
8759 int unsignedp = (code == UMIN || code == UMAX);
8760
8761 /* Do not reverse the condition when it is NE or EQ.
8762 This is because we cannot conclude anything about
8763 the value of 'SMAX (x, y)' when x is not equal to y,
8764 but we can when x equals y. */
8765 if ((code == SMAX || code == UMAX)
8766 && ! (cond == EQ || cond == NE))
8767 cond = reverse_condition (cond);
8768
8769 switch (cond)
8770 {
8771 case GE: case GT:
8772 return unsignedp ? x : XEXP (x, 1);
8773 case LE: case LT:
8774 return unsignedp ? x : XEXP (x, 0);
8775 case GEU: case GTU:
8776 return unsignedp ? XEXP (x, 1) : x;
8777 case LEU: case LTU:
8778 return unsignedp ? XEXP (x, 0) : x;
8779 default:
8780 break;
8781 }
8782 }
8783 }
8784 }
8785 else if (code == SUBREG)
8786 {
8787 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (x));
8788 rtx new_rtx, r = known_cond (SUBREG_REG (x), cond, reg, val);
8789
8790 if (SUBREG_REG (x) != r)
8791 {
8792 /* We must simplify subreg here, before we lose track of the
8793 original inner_mode. */
8794 new_rtx = simplify_subreg (GET_MODE (x), r,
8795 inner_mode, SUBREG_BYTE (x));
8796 if (new_rtx)
8797 return new_rtx;
8798 else
8799 SUBST (SUBREG_REG (x), r);
8800 }
8801
8802 return x;
8803 }
8804 /* We don't have to handle SIGN_EXTEND here, because even in the
8805 case of replacing something with a modeless CONST_INT, a
8806 CONST_INT is already (supposed to be) a valid sign extension for
8807 its narrower mode, which implies it's already properly
8808 sign-extended for the wider mode. Now, for ZERO_EXTEND, the
8809 story is different. */
8810 else if (code == ZERO_EXTEND)
8811 {
8812 enum machine_mode inner_mode = GET_MODE (XEXP (x, 0));
8813 rtx new_rtx, r = known_cond (XEXP (x, 0), cond, reg, val);
8814
8815 if (XEXP (x, 0) != r)
8816 {
8817 /* We must simplify the zero_extend here, before we lose
8818 track of the original inner_mode. */
8819 new_rtx = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
8820 r, inner_mode);
8821 if (new_rtx)
8822 return new_rtx;
8823 else
8824 SUBST (XEXP (x, 0), r);
8825 }
8826
8827 return x;
8828 }
8829
8830 fmt = GET_RTX_FORMAT (code);
8831 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
8832 {
8833 if (fmt[i] == 'e')
8834 SUBST (XEXP (x, i), known_cond (XEXP (x, i), cond, reg, val));
8835 else if (fmt[i] == 'E')
8836 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8837 SUBST (XVECEXP (x, i, j), known_cond (XVECEXP (x, i, j),
8838 cond, reg, val));
8839 }
8840
8841 return x;
8842 }
8843 \f
8844 /* See if X and Y are equal for the purposes of seeing if we can rewrite an
8845 assignment as a field assignment. */
8846
8847 static int
8848 rtx_equal_for_field_assignment_p (rtx x, rtx y)
8849 {
8850 if (x == y || rtx_equal_p (x, y))
8851 return 1;
8852
8853 if (x == 0 || y == 0 || GET_MODE (x) != GET_MODE (y))
8854 return 0;
8855
8856 /* Check for a paradoxical SUBREG of a MEM compared with the MEM.
8857 Note that all SUBREGs of MEM are paradoxical; otherwise they
8858 would have been rewritten. */
8859 if (MEM_P (x) && GET_CODE (y) == SUBREG
8860 && MEM_P (SUBREG_REG (y))
8861 && rtx_equal_p (SUBREG_REG (y),
8862 gen_lowpart (GET_MODE (SUBREG_REG (y)), x)))
8863 return 1;
8864
8865 if (MEM_P (y) && GET_CODE (x) == SUBREG
8866 && MEM_P (SUBREG_REG (x))
8867 && rtx_equal_p (SUBREG_REG (x),
8868 gen_lowpart (GET_MODE (SUBREG_REG (x)), y)))
8869 return 1;
8870
8871 /* We used to see if get_last_value of X and Y were the same but that's
8872 not correct. In one direction, we'll cause the assignment to have
8873 the wrong destination and in the case, we'll import a register into this
8874 insn that might have already have been dead. So fail if none of the
8875 above cases are true. */
8876 return 0;
8877 }
8878 \f
8879 /* See if X, a SET operation, can be rewritten as a bit-field assignment.
8880 Return that assignment if so.
8881
8882 We only handle the most common cases. */
8883
8884 static rtx
8885 make_field_assignment (rtx x)
8886 {
8887 rtx dest = SET_DEST (x);
8888 rtx src = SET_SRC (x);
8889 rtx assign;
8890 rtx rhs, lhs;
8891 HOST_WIDE_INT c1;
8892 HOST_WIDE_INT pos;
8893 unsigned HOST_WIDE_INT len;
8894 rtx other;
8895 enum machine_mode mode;
8896
8897 /* If SRC was (and (not (ashift (const_int 1) POS)) DEST), this is
8898 a clear of a one-bit field. We will have changed it to
8899 (and (rotate (const_int -2) POS) DEST), so check for that. Also check
8900 for a SUBREG. */
8901
8902 if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == ROTATE
8903 && CONST_INT_P (XEXP (XEXP (src, 0), 0))
8904 && INTVAL (XEXP (XEXP (src, 0), 0)) == -2
8905 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
8906 {
8907 assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
8908 1, 1, 1, 0);
8909 if (assign != 0)
8910 return gen_rtx_SET (VOIDmode, assign, const0_rtx);
8911 return x;
8912 }
8913
8914 if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == SUBREG
8915 && subreg_lowpart_p (XEXP (src, 0))
8916 && (GET_MODE_SIZE (GET_MODE (XEXP (src, 0)))
8917 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (XEXP (src, 0)))))
8918 && GET_CODE (SUBREG_REG (XEXP (src, 0))) == ROTATE
8919 && CONST_INT_P (XEXP (SUBREG_REG (XEXP (src, 0)), 0))
8920 && INTVAL (XEXP (SUBREG_REG (XEXP (src, 0)), 0)) == -2
8921 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
8922 {
8923 assign = make_extraction (VOIDmode, dest, 0,
8924 XEXP (SUBREG_REG (XEXP (src, 0)), 1),
8925 1, 1, 1, 0);
8926 if (assign != 0)
8927 return gen_rtx_SET (VOIDmode, assign, const0_rtx);
8928 return x;
8929 }
8930
8931 /* If SRC is (ior (ashift (const_int 1) POS) DEST), this is a set of a
8932 one-bit field. */
8933 if (GET_CODE (src) == IOR && GET_CODE (XEXP (src, 0)) == ASHIFT
8934 && XEXP (XEXP (src, 0), 0) == const1_rtx
8935 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
8936 {
8937 assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
8938 1, 1, 1, 0);
8939 if (assign != 0)
8940 return gen_rtx_SET (VOIDmode, assign, const1_rtx);
8941 return x;
8942 }
8943
8944 /* If DEST is already a field assignment, i.e. ZERO_EXTRACT, and the
8945 SRC is an AND with all bits of that field set, then we can discard
8946 the AND. */
8947 if (GET_CODE (dest) == ZERO_EXTRACT
8948 && CONST_INT_P (XEXP (dest, 1))
8949 && GET_CODE (src) == AND
8950 && CONST_INT_P (XEXP (src, 1)))
8951 {
8952 HOST_WIDE_INT width = INTVAL (XEXP (dest, 1));
8953 unsigned HOST_WIDE_INT and_mask = INTVAL (XEXP (src, 1));
8954 unsigned HOST_WIDE_INT ze_mask;
8955
8956 if (width >= HOST_BITS_PER_WIDE_INT)
8957 ze_mask = -1;
8958 else
8959 ze_mask = ((unsigned HOST_WIDE_INT)1 << width) - 1;
8960
8961 /* Complete overlap. We can remove the source AND. */
8962 if ((and_mask & ze_mask) == ze_mask)
8963 return gen_rtx_SET (VOIDmode, dest, XEXP (src, 0));
8964
8965 /* Partial overlap. We can reduce the source AND. */
8966 if ((and_mask & ze_mask) != and_mask)
8967 {
8968 mode = GET_MODE (src);
8969 src = gen_rtx_AND (mode, XEXP (src, 0),
8970 gen_int_mode (and_mask & ze_mask, mode));
8971 return gen_rtx_SET (VOIDmode, dest, src);
8972 }
8973 }
8974
8975 /* The other case we handle is assignments into a constant-position
8976 field. They look like (ior/xor (and DEST C1) OTHER). If C1 represents
8977 a mask that has all one bits except for a group of zero bits and
8978 OTHER is known to have zeros where C1 has ones, this is such an
8979 assignment. Compute the position and length from C1. Shift OTHER
8980 to the appropriate position, force it to the required mode, and
8981 make the extraction. Check for the AND in both operands. */
8982
8983 if (GET_CODE (src) != IOR && GET_CODE (src) != XOR)
8984 return x;
8985
8986 rhs = expand_compound_operation (XEXP (src, 0));
8987 lhs = expand_compound_operation (XEXP (src, 1));
8988
8989 if (GET_CODE (rhs) == AND
8990 && CONST_INT_P (XEXP (rhs, 1))
8991 && rtx_equal_for_field_assignment_p (XEXP (rhs, 0), dest))
8992 c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
8993 else if (GET_CODE (lhs) == AND
8994 && CONST_INT_P (XEXP (lhs, 1))
8995 && rtx_equal_for_field_assignment_p (XEXP (lhs, 0), dest))
8996 c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
8997 else
8998 return x;
8999
9000 pos = get_pos_from_mask ((~c1) & GET_MODE_MASK (GET_MODE (dest)), &len);
9001 if (pos < 0 || pos + len > GET_MODE_BITSIZE (GET_MODE (dest))
9002 || GET_MODE_BITSIZE (GET_MODE (dest)) > HOST_BITS_PER_WIDE_INT
9003 || (c1 & nonzero_bits (other, GET_MODE (dest))) != 0)
9004 return x;
9005
9006 assign = make_extraction (VOIDmode, dest, pos, NULL_RTX, len, 1, 1, 0);
9007 if (assign == 0)
9008 return x;
9009
9010 /* The mode to use for the source is the mode of the assignment, or of
9011 what is inside a possible STRICT_LOW_PART. */
9012 mode = (GET_CODE (assign) == STRICT_LOW_PART
9013 ? GET_MODE (XEXP (assign, 0)) : GET_MODE (assign));
9014
9015 /* Shift OTHER right POS places and make it the source, restricting it
9016 to the proper length and mode. */
9017
9018 src = canon_reg_for_combine (simplify_shift_const (NULL_RTX, LSHIFTRT,
9019 GET_MODE (src),
9020 other, pos),
9021 dest);
9022 src = force_to_mode (src, mode,
9023 GET_MODE_BITSIZE (mode) >= HOST_BITS_PER_WIDE_INT
9024 ? ~(unsigned HOST_WIDE_INT) 0
9025 : ((unsigned HOST_WIDE_INT) 1 << len) - 1,
9026 0);
9027
9028 /* If SRC is masked by an AND that does not make a difference in
9029 the value being stored, strip it. */
9030 if (GET_CODE (assign) == ZERO_EXTRACT
9031 && CONST_INT_P (XEXP (assign, 1))
9032 && INTVAL (XEXP (assign, 1)) < HOST_BITS_PER_WIDE_INT
9033 && GET_CODE (src) == AND
9034 && CONST_INT_P (XEXP (src, 1))
9035 && UINTVAL (XEXP (src, 1))
9036 == ((unsigned HOST_WIDE_INT) 1 << INTVAL (XEXP (assign, 1))) - 1)
9037 src = XEXP (src, 0);
9038
9039 return gen_rtx_SET (VOIDmode, assign, src);
9040 }
9041 \f
9042 /* See if X is of the form (+ (* a c) (* b c)) and convert to (* (+ a b) c)
9043 if so. */
9044
9045 static rtx
9046 apply_distributive_law (rtx x)
9047 {
9048 enum rtx_code code = GET_CODE (x);
9049 enum rtx_code inner_code;
9050 rtx lhs, rhs, other;
9051 rtx tem;
9052
9053 /* Distributivity is not true for floating point as it can change the
9054 value. So we don't do it unless -funsafe-math-optimizations. */
9055 if (FLOAT_MODE_P (GET_MODE (x))
9056 && ! flag_unsafe_math_optimizations)
9057 return x;
9058
9059 /* The outer operation can only be one of the following: */
9060 if (code != IOR && code != AND && code != XOR
9061 && code != PLUS && code != MINUS)
9062 return x;
9063
9064 lhs = XEXP (x, 0);
9065 rhs = XEXP (x, 1);
9066
9067 /* If either operand is a primitive we can't do anything, so get out
9068 fast. */
9069 if (OBJECT_P (lhs) || OBJECT_P (rhs))
9070 return x;
9071
9072 lhs = expand_compound_operation (lhs);
9073 rhs = expand_compound_operation (rhs);
9074 inner_code = GET_CODE (lhs);
9075 if (inner_code != GET_CODE (rhs))
9076 return x;
9077
9078 /* See if the inner and outer operations distribute. */
9079 switch (inner_code)
9080 {
9081 case LSHIFTRT:
9082 case ASHIFTRT:
9083 case AND:
9084 case IOR:
9085 /* These all distribute except over PLUS. */
9086 if (code == PLUS || code == MINUS)
9087 return x;
9088 break;
9089
9090 case MULT:
9091 if (code != PLUS && code != MINUS)
9092 return x;
9093 break;
9094
9095 case ASHIFT:
9096 /* This is also a multiply, so it distributes over everything. */
9097 break;
9098
9099 case SUBREG:
9100 /* Non-paradoxical SUBREGs distributes over all operations,
9101 provided the inner modes and byte offsets are the same, this
9102 is an extraction of a low-order part, we don't convert an fp
9103 operation to int or vice versa, this is not a vector mode,
9104 and we would not be converting a single-word operation into a
9105 multi-word operation. The latter test is not required, but
9106 it prevents generating unneeded multi-word operations. Some
9107 of the previous tests are redundant given the latter test,
9108 but are retained because they are required for correctness.
9109
9110 We produce the result slightly differently in this case. */
9111
9112 if (GET_MODE (SUBREG_REG (lhs)) != GET_MODE (SUBREG_REG (rhs))
9113 || SUBREG_BYTE (lhs) != SUBREG_BYTE (rhs)
9114 || ! subreg_lowpart_p (lhs)
9115 || (GET_MODE_CLASS (GET_MODE (lhs))
9116 != GET_MODE_CLASS (GET_MODE (SUBREG_REG (lhs))))
9117 || (GET_MODE_SIZE (GET_MODE (lhs))
9118 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (lhs))))
9119 || VECTOR_MODE_P (GET_MODE (lhs))
9120 || GET_MODE_SIZE (GET_MODE (SUBREG_REG (lhs))) > UNITS_PER_WORD
9121 /* Result might need to be truncated. Don't change mode if
9122 explicit truncation is needed. */
9123 || !TRULY_NOOP_TRUNCATION
9124 (GET_MODE_BITSIZE (GET_MODE (x)),
9125 GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (lhs)))))
9126 return x;
9127
9128 tem = simplify_gen_binary (code, GET_MODE (SUBREG_REG (lhs)),
9129 SUBREG_REG (lhs), SUBREG_REG (rhs));
9130 return gen_lowpart (GET_MODE (x), tem);
9131
9132 default:
9133 return x;
9134 }
9135
9136 /* Set LHS and RHS to the inner operands (A and B in the example
9137 above) and set OTHER to the common operand (C in the example).
9138 There is only one way to do this unless the inner operation is
9139 commutative. */
9140 if (COMMUTATIVE_ARITH_P (lhs)
9141 && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 0)))
9142 other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 1);
9143 else if (COMMUTATIVE_ARITH_P (lhs)
9144 && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 1)))
9145 other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 0);
9146 else if (COMMUTATIVE_ARITH_P (lhs)
9147 && rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 0)))
9148 other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 1);
9149 else if (rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 1)))
9150 other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 0);
9151 else
9152 return x;
9153
9154 /* Form the new inner operation, seeing if it simplifies first. */
9155 tem = simplify_gen_binary (code, GET_MODE (x), lhs, rhs);
9156
9157 /* There is one exception to the general way of distributing:
9158 (a | c) ^ (b | c) -> (a ^ b) & ~c */
9159 if (code == XOR && inner_code == IOR)
9160 {
9161 inner_code = AND;
9162 other = simplify_gen_unary (NOT, GET_MODE (x), other, GET_MODE (x));
9163 }
9164
9165 /* We may be able to continuing distributing the result, so call
9166 ourselves recursively on the inner operation before forming the
9167 outer operation, which we return. */
9168 return simplify_gen_binary (inner_code, GET_MODE (x),
9169 apply_distributive_law (tem), other);
9170 }
9171
9172 /* See if X is of the form (* (+ A B) C), and if so convert to
9173 (+ (* A C) (* B C)) and try to simplify.
9174
9175 Most of the time, this results in no change. However, if some of
9176 the operands are the same or inverses of each other, simplifications
9177 will result.
9178
9179 For example, (and (ior A B) (not B)) can occur as the result of
9180 expanding a bit field assignment. When we apply the distributive
9181 law to this, we get (ior (and (A (not B))) (and (B (not B)))),
9182 which then simplifies to (and (A (not B))).
9183
9184 Note that no checks happen on the validity of applying the inverse
9185 distributive law. This is pointless since we can do it in the
9186 few places where this routine is called.
9187
9188 N is the index of the term that is decomposed (the arithmetic operation,
9189 i.e. (+ A B) in the first example above). !N is the index of the term that
9190 is distributed, i.e. of C in the first example above. */
9191 static rtx
9192 distribute_and_simplify_rtx (rtx x, int n)
9193 {
9194 enum machine_mode mode;
9195 enum rtx_code outer_code, inner_code;
9196 rtx decomposed, distributed, inner_op0, inner_op1, new_op0, new_op1, tmp;
9197
9198 /* Distributivity is not true for floating point as it can change the
9199 value. So we don't do it unless -funsafe-math-optimizations. */
9200 if (FLOAT_MODE_P (GET_MODE (x))
9201 && ! flag_unsafe_math_optimizations)
9202 return NULL_RTX;
9203
9204 decomposed = XEXP (x, n);
9205 if (!ARITHMETIC_P (decomposed))
9206 return NULL_RTX;
9207
9208 mode = GET_MODE (x);
9209 outer_code = GET_CODE (x);
9210 distributed = XEXP (x, !n);
9211
9212 inner_code = GET_CODE (decomposed);
9213 inner_op0 = XEXP (decomposed, 0);
9214 inner_op1 = XEXP (decomposed, 1);
9215
9216 /* Special case (and (xor B C) (not A)), which is equivalent to
9217 (xor (ior A B) (ior A C)) */
9218 if (outer_code == AND && inner_code == XOR && GET_CODE (distributed) == NOT)
9219 {
9220 distributed = XEXP (distributed, 0);
9221 outer_code = IOR;
9222 }
9223
9224 if (n == 0)
9225 {
9226 /* Distribute the second term. */
9227 new_op0 = simplify_gen_binary (outer_code, mode, inner_op0, distributed);
9228 new_op1 = simplify_gen_binary (outer_code, mode, inner_op1, distributed);
9229 }
9230 else
9231 {
9232 /* Distribute the first term. */
9233 new_op0 = simplify_gen_binary (outer_code, mode, distributed, inner_op0);
9234 new_op1 = simplify_gen_binary (outer_code, mode, distributed, inner_op1);
9235 }
9236
9237 tmp = apply_distributive_law (simplify_gen_binary (inner_code, mode,
9238 new_op0, new_op1));
9239 if (GET_CODE (tmp) != outer_code
9240 && rtx_cost (tmp, SET, optimize_this_for_speed_p)
9241 < rtx_cost (x, SET, optimize_this_for_speed_p))
9242 return tmp;
9243
9244 return NULL_RTX;
9245 }
9246 \f
9247 /* Simplify a logical `and' of VAROP with the constant CONSTOP, to be done
9248 in MODE. Return an equivalent form, if different from (and VAROP
9249 (const_int CONSTOP)). Otherwise, return NULL_RTX. */
9250
9251 static rtx
9252 simplify_and_const_int_1 (enum machine_mode mode, rtx varop,
9253 unsigned HOST_WIDE_INT constop)
9254 {
9255 unsigned HOST_WIDE_INT nonzero;
9256 unsigned HOST_WIDE_INT orig_constop;
9257 rtx orig_varop;
9258 int i;
9259
9260 orig_varop = varop;
9261 orig_constop = constop;
9262 if (GET_CODE (varop) == CLOBBER)
9263 return NULL_RTX;
9264
9265 /* Simplify VAROP knowing that we will be only looking at some of the
9266 bits in it.
9267
9268 Note by passing in CONSTOP, we guarantee that the bits not set in
9269 CONSTOP are not significant and will never be examined. We must
9270 ensure that is the case by explicitly masking out those bits
9271 before returning. */
9272 varop = force_to_mode (varop, mode, constop, 0);
9273
9274 /* If VAROP is a CLOBBER, we will fail so return it. */
9275 if (GET_CODE (varop) == CLOBBER)
9276 return varop;
9277
9278 /* If VAROP is a CONST_INT, then we need to apply the mask in CONSTOP
9279 to VAROP and return the new constant. */
9280 if (CONST_INT_P (varop))
9281 return gen_int_mode (INTVAL (varop) & constop, mode);
9282
9283 /* See what bits may be nonzero in VAROP. Unlike the general case of
9284 a call to nonzero_bits, here we don't care about bits outside
9285 MODE. */
9286
9287 nonzero = nonzero_bits (varop, mode) & GET_MODE_MASK (mode);
9288
9289 /* Turn off all bits in the constant that are known to already be zero.
9290 Thus, if the AND isn't needed at all, we will have CONSTOP == NONZERO_BITS
9291 which is tested below. */
9292
9293 constop &= nonzero;
9294
9295 /* If we don't have any bits left, return zero. */
9296 if (constop == 0)
9297 return const0_rtx;
9298
9299 /* If VAROP is a NEG of something known to be zero or 1 and CONSTOP is
9300 a power of two, we can replace this with an ASHIFT. */
9301 if (GET_CODE (varop) == NEG && nonzero_bits (XEXP (varop, 0), mode) == 1
9302 && (i = exact_log2 (constop)) >= 0)
9303 return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (varop, 0), i);
9304
9305 /* If VAROP is an IOR or XOR, apply the AND to both branches of the IOR
9306 or XOR, then try to apply the distributive law. This may eliminate
9307 operations if either branch can be simplified because of the AND.
9308 It may also make some cases more complex, but those cases probably
9309 won't match a pattern either with or without this. */
9310
9311 if (GET_CODE (varop) == IOR || GET_CODE (varop) == XOR)
9312 return
9313 gen_lowpart
9314 (mode,
9315 apply_distributive_law
9316 (simplify_gen_binary (GET_CODE (varop), GET_MODE (varop),
9317 simplify_and_const_int (NULL_RTX,
9318 GET_MODE (varop),
9319 XEXP (varop, 0),
9320 constop),
9321 simplify_and_const_int (NULL_RTX,
9322 GET_MODE (varop),
9323 XEXP (varop, 1),
9324 constop))));
9325
9326 /* If VAROP is PLUS, and the constant is a mask of low bits, distribute
9327 the AND and see if one of the operands simplifies to zero. If so, we
9328 may eliminate it. */
9329
9330 if (GET_CODE (varop) == PLUS
9331 && exact_log2 (constop + 1) >= 0)
9332 {
9333 rtx o0, o1;
9334
9335 o0 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 0), constop);
9336 o1 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 1), constop);
9337 if (o0 == const0_rtx)
9338 return o1;
9339 if (o1 == const0_rtx)
9340 return o0;
9341 }
9342
9343 /* Make a SUBREG if necessary. If we can't make it, fail. */
9344 varop = gen_lowpart (mode, varop);
9345 if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
9346 return NULL_RTX;
9347
9348 /* If we are only masking insignificant bits, return VAROP. */
9349 if (constop == nonzero)
9350 return varop;
9351
9352 if (varop == orig_varop && constop == orig_constop)
9353 return NULL_RTX;
9354
9355 /* Otherwise, return an AND. */
9356 return simplify_gen_binary (AND, mode, varop, gen_int_mode (constop, mode));
9357 }
9358
9359
9360 /* We have X, a logical `and' of VAROP with the constant CONSTOP, to be done
9361 in MODE.
9362
9363 Return an equivalent form, if different from X. Otherwise, return X. If
9364 X is zero, we are to always construct the equivalent form. */
9365
9366 static rtx
9367 simplify_and_const_int (rtx x, enum machine_mode mode, rtx varop,
9368 unsigned HOST_WIDE_INT constop)
9369 {
9370 rtx tem = simplify_and_const_int_1 (mode, varop, constop);
9371 if (tem)
9372 return tem;
9373
9374 if (!x)
9375 x = simplify_gen_binary (AND, GET_MODE (varop), varop,
9376 gen_int_mode (constop, mode));
9377 if (GET_MODE (x) != mode)
9378 x = gen_lowpart (mode, x);
9379 return x;
9380 }
9381 \f
9382 /* Given a REG, X, compute which bits in X can be nonzero.
9383 We don't care about bits outside of those defined in MODE.
9384
9385 For most X this is simply GET_MODE_MASK (GET_MODE (MODE)), but if X is
9386 a shift, AND, or zero_extract, we can do better. */
9387
9388 static rtx
9389 reg_nonzero_bits_for_combine (const_rtx x, enum machine_mode mode,
9390 const_rtx known_x ATTRIBUTE_UNUSED,
9391 enum machine_mode known_mode ATTRIBUTE_UNUSED,
9392 unsigned HOST_WIDE_INT known_ret ATTRIBUTE_UNUSED,
9393 unsigned HOST_WIDE_INT *nonzero)
9394 {
9395 rtx tem;
9396 reg_stat_type *rsp;
9397
9398 /* If X is a register whose nonzero bits value is current, use it.
9399 Otherwise, if X is a register whose value we can find, use that
9400 value. Otherwise, use the previously-computed global nonzero bits
9401 for this register. */
9402
9403 rsp = VEC_index (reg_stat_type, reg_stat, REGNO (x));
9404 if (rsp->last_set_value != 0
9405 && (rsp->last_set_mode == mode
9406 || (GET_MODE_CLASS (rsp->last_set_mode) == MODE_INT
9407 && GET_MODE_CLASS (mode) == MODE_INT))
9408 && ((rsp->last_set_label >= label_tick_ebb_start
9409 && rsp->last_set_label < label_tick)
9410 || (rsp->last_set_label == label_tick
9411 && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
9412 || (REGNO (x) >= FIRST_PSEUDO_REGISTER
9413 && REG_N_SETS (REGNO (x)) == 1
9414 && !REGNO_REG_SET_P
9415 (DF_LR_IN (ENTRY_BLOCK_PTR->next_bb), REGNO (x)))))
9416 {
9417 *nonzero &= rsp->last_set_nonzero_bits;
9418 return NULL;
9419 }
9420
9421 tem = get_last_value (x);
9422
9423 if (tem)
9424 {
9425 #ifdef SHORT_IMMEDIATES_SIGN_EXTEND
9426 /* If X is narrower than MODE and TEM is a non-negative
9427 constant that would appear negative in the mode of X,
9428 sign-extend it for use in reg_nonzero_bits because some
9429 machines (maybe most) will actually do the sign-extension
9430 and this is the conservative approach.
9431
9432 ??? For 2.5, try to tighten up the MD files in this regard
9433 instead of this kludge. */
9434
9435 if (GET_MODE_BITSIZE (GET_MODE (x)) < GET_MODE_BITSIZE (mode)
9436 && CONST_INT_P (tem)
9437 && INTVAL (tem) > 0
9438 && 0 != (UINTVAL (tem)
9439 & ((unsigned HOST_WIDE_INT) 1
9440 << (GET_MODE_BITSIZE (GET_MODE (x)) - 1))))
9441 tem = GEN_INT (UINTVAL (tem)
9442 | ((unsigned HOST_WIDE_INT) (-1)
9443 << GET_MODE_BITSIZE (GET_MODE (x))));
9444 #endif
9445 return tem;
9446 }
9447 else if (nonzero_sign_valid && rsp->nonzero_bits)
9448 {
9449 unsigned HOST_WIDE_INT mask = rsp->nonzero_bits;
9450
9451 if (GET_MODE_BITSIZE (GET_MODE (x)) < GET_MODE_BITSIZE (mode))
9452 /* We don't know anything about the upper bits. */
9453 mask |= GET_MODE_MASK (mode) ^ GET_MODE_MASK (GET_MODE (x));
9454 *nonzero &= mask;
9455 }
9456
9457 return NULL;
9458 }
9459
9460 /* Return the number of bits at the high-order end of X that are known to
9461 be equal to the sign bit. X will be used in mode MODE; if MODE is
9462 VOIDmode, X will be used in its own mode. The returned value will always
9463 be between 1 and the number of bits in MODE. */
9464
9465 static rtx
9466 reg_num_sign_bit_copies_for_combine (const_rtx x, enum machine_mode mode,
9467 const_rtx known_x ATTRIBUTE_UNUSED,
9468 enum machine_mode known_mode
9469 ATTRIBUTE_UNUSED,
9470 unsigned int known_ret ATTRIBUTE_UNUSED,
9471 unsigned int *result)
9472 {
9473 rtx tem;
9474 reg_stat_type *rsp;
9475
9476 rsp = VEC_index (reg_stat_type, reg_stat, REGNO (x));
9477 if (rsp->last_set_value != 0
9478 && rsp->last_set_mode == mode
9479 && ((rsp->last_set_label >= label_tick_ebb_start
9480 && rsp->last_set_label < label_tick)
9481 || (rsp->last_set_label == label_tick
9482 && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
9483 || (REGNO (x) >= FIRST_PSEUDO_REGISTER
9484 && REG_N_SETS (REGNO (x)) == 1
9485 && !REGNO_REG_SET_P
9486 (DF_LR_IN (ENTRY_BLOCK_PTR->next_bb), REGNO (x)))))
9487 {
9488 *result = rsp->last_set_sign_bit_copies;
9489 return NULL;
9490 }
9491
9492 tem = get_last_value (x);
9493 if (tem != 0)
9494 return tem;
9495
9496 if (nonzero_sign_valid && rsp->sign_bit_copies != 0
9497 && GET_MODE_BITSIZE (GET_MODE (x)) == GET_MODE_BITSIZE (mode))
9498 *result = rsp->sign_bit_copies;
9499
9500 return NULL;
9501 }
9502 \f
9503 /* Return the number of "extended" bits there are in X, when interpreted
9504 as a quantity in MODE whose signedness is indicated by UNSIGNEDP. For
9505 unsigned quantities, this is the number of high-order zero bits.
9506 For signed quantities, this is the number of copies of the sign bit
9507 minus 1. In both case, this function returns the number of "spare"
9508 bits. For example, if two quantities for which this function returns
9509 at least 1 are added, the addition is known not to overflow.
9510
9511 This function will always return 0 unless called during combine, which
9512 implies that it must be called from a define_split. */
9513
9514 unsigned int
9515 extended_count (const_rtx x, enum machine_mode mode, int unsignedp)
9516 {
9517 if (nonzero_sign_valid == 0)
9518 return 0;
9519
9520 return (unsignedp
9521 ? (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
9522 ? (unsigned int) (GET_MODE_BITSIZE (mode) - 1
9523 - floor_log2 (nonzero_bits (x, mode)))
9524 : 0)
9525 : num_sign_bit_copies (x, mode) - 1);
9526 }
9527 \f
9528 /* This function is called from `simplify_shift_const' to merge two
9529 outer operations. Specifically, we have already found that we need
9530 to perform operation *POP0 with constant *PCONST0 at the outermost
9531 position. We would now like to also perform OP1 with constant CONST1
9532 (with *POP0 being done last).
9533
9534 Return 1 if we can do the operation and update *POP0 and *PCONST0 with
9535 the resulting operation. *PCOMP_P is set to 1 if we would need to
9536 complement the innermost operand, otherwise it is unchanged.
9537
9538 MODE is the mode in which the operation will be done. No bits outside
9539 the width of this mode matter. It is assumed that the width of this mode
9540 is smaller than or equal to HOST_BITS_PER_WIDE_INT.
9541
9542 If *POP0 or OP1 are UNKNOWN, it means no operation is required. Only NEG, PLUS,
9543 IOR, XOR, and AND are supported. We may set *POP0 to SET if the proper
9544 result is simply *PCONST0.
9545
9546 If the resulting operation cannot be expressed as one operation, we
9547 return 0 and do not change *POP0, *PCONST0, and *PCOMP_P. */
9548
9549 static int
9550 merge_outer_ops (enum rtx_code *pop0, HOST_WIDE_INT *pconst0, enum rtx_code op1, HOST_WIDE_INT const1, enum machine_mode mode, int *pcomp_p)
9551 {
9552 enum rtx_code op0 = *pop0;
9553 HOST_WIDE_INT const0 = *pconst0;
9554
9555 const0 &= GET_MODE_MASK (mode);
9556 const1 &= GET_MODE_MASK (mode);
9557
9558 /* If OP0 is an AND, clear unimportant bits in CONST1. */
9559 if (op0 == AND)
9560 const1 &= const0;
9561
9562 /* If OP0 or OP1 is UNKNOWN, this is easy. Similarly if they are the same or
9563 if OP0 is SET. */
9564
9565 if (op1 == UNKNOWN || op0 == SET)
9566 return 1;
9567
9568 else if (op0 == UNKNOWN)
9569 op0 = op1, const0 = const1;
9570
9571 else if (op0 == op1)
9572 {
9573 switch (op0)
9574 {
9575 case AND:
9576 const0 &= const1;
9577 break;
9578 case IOR:
9579 const0 |= const1;
9580 break;
9581 case XOR:
9582 const0 ^= const1;
9583 break;
9584 case PLUS:
9585 const0 += const1;
9586 break;
9587 case NEG:
9588 op0 = UNKNOWN;
9589 break;
9590 default:
9591 break;
9592 }
9593 }
9594
9595 /* Otherwise, if either is a PLUS or NEG, we can't do anything. */
9596 else if (op0 == PLUS || op1 == PLUS || op0 == NEG || op1 == NEG)
9597 return 0;
9598
9599 /* If the two constants aren't the same, we can't do anything. The
9600 remaining six cases can all be done. */
9601 else if (const0 != const1)
9602 return 0;
9603
9604 else
9605 switch (op0)
9606 {
9607 case IOR:
9608 if (op1 == AND)
9609 /* (a & b) | b == b */
9610 op0 = SET;
9611 else /* op1 == XOR */
9612 /* (a ^ b) | b == a | b */
9613 {;}
9614 break;
9615
9616 case XOR:
9617 if (op1 == AND)
9618 /* (a & b) ^ b == (~a) & b */
9619 op0 = AND, *pcomp_p = 1;
9620 else /* op1 == IOR */
9621 /* (a | b) ^ b == a & ~b */
9622 op0 = AND, const0 = ~const0;
9623 break;
9624
9625 case AND:
9626 if (op1 == IOR)
9627 /* (a | b) & b == b */
9628 op0 = SET;
9629 else /* op1 == XOR */
9630 /* (a ^ b) & b) == (~a) & b */
9631 *pcomp_p = 1;
9632 break;
9633 default:
9634 break;
9635 }
9636
9637 /* Check for NO-OP cases. */
9638 const0 &= GET_MODE_MASK (mode);
9639 if (const0 == 0
9640 && (op0 == IOR || op0 == XOR || op0 == PLUS))
9641 op0 = UNKNOWN;
9642 else if (const0 == 0 && op0 == AND)
9643 op0 = SET;
9644 else if ((unsigned HOST_WIDE_INT) const0 == GET_MODE_MASK (mode)
9645 && op0 == AND)
9646 op0 = UNKNOWN;
9647
9648 *pop0 = op0;
9649
9650 /* ??? Slightly redundant with the above mask, but not entirely.
9651 Moving this above means we'd have to sign-extend the mode mask
9652 for the final test. */
9653 if (op0 != UNKNOWN && op0 != NEG)
9654 *pconst0 = trunc_int_for_mode (const0, mode);
9655
9656 return 1;
9657 }
9658 \f
9659 /* A helper to simplify_shift_const_1 to determine the mode we can perform
9660 the shift in. The original shift operation CODE is performed on OP in
9661 ORIG_MODE. Return the wider mode MODE if we can perform the operation
9662 in that mode. Return ORIG_MODE otherwise. We can also assume that the
9663 result of the shift is subject to operation OUTER_CODE with operand
9664 OUTER_CONST. */
9665
9666 static enum machine_mode
9667 try_widen_shift_mode (enum rtx_code code, rtx op, int count,
9668 enum machine_mode orig_mode, enum machine_mode mode,
9669 enum rtx_code outer_code, HOST_WIDE_INT outer_const)
9670 {
9671 if (orig_mode == mode)
9672 return mode;
9673 gcc_assert (GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (orig_mode));
9674
9675 /* In general we can't perform in wider mode for right shift and rotate. */
9676 switch (code)
9677 {
9678 case ASHIFTRT:
9679 /* We can still widen if the bits brought in from the left are identical
9680 to the sign bit of ORIG_MODE. */
9681 if (num_sign_bit_copies (op, mode)
9682 > (unsigned) (GET_MODE_BITSIZE (mode)
9683 - GET_MODE_BITSIZE (orig_mode)))
9684 return mode;
9685 return orig_mode;
9686
9687 case LSHIFTRT:
9688 /* Similarly here but with zero bits. */
9689 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
9690 && (nonzero_bits (op, mode) & ~GET_MODE_MASK (orig_mode)) == 0)
9691 return mode;
9692
9693 /* We can also widen if the bits brought in will be masked off. This
9694 operation is performed in ORIG_MODE. */
9695 if (outer_code == AND)
9696 {
9697 int care_bits = low_bitmask_len (orig_mode, outer_const);
9698
9699 if (care_bits >= 0
9700 && GET_MODE_BITSIZE (orig_mode) - care_bits >= count)
9701 return mode;
9702 }
9703 /* fall through */
9704
9705 case ROTATE:
9706 return orig_mode;
9707
9708 case ROTATERT:
9709 gcc_unreachable ();
9710
9711 default:
9712 return mode;
9713 }
9714 }
9715
9716 /* Simplify a shift of VAROP by COUNT bits. CODE says what kind of shift.
9717 The result of the shift is RESULT_MODE. Return NULL_RTX if we cannot
9718 simplify it. Otherwise, return a simplified value.
9719
9720 The shift is normally computed in the widest mode we find in VAROP, as
9721 long as it isn't a different number of words than RESULT_MODE. Exceptions
9722 are ASHIFTRT and ROTATE, which are always done in their original mode. */
9723
9724 static rtx
9725 simplify_shift_const_1 (enum rtx_code code, enum machine_mode result_mode,
9726 rtx varop, int orig_count)
9727 {
9728 enum rtx_code orig_code = code;
9729 rtx orig_varop = varop;
9730 int count;
9731 enum machine_mode mode = result_mode;
9732 enum machine_mode shift_mode, tmode;
9733 unsigned int mode_words
9734 = (GET_MODE_SIZE (mode) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD;
9735 /* We form (outer_op (code varop count) (outer_const)). */
9736 enum rtx_code outer_op = UNKNOWN;
9737 HOST_WIDE_INT outer_const = 0;
9738 int complement_p = 0;
9739 rtx new_rtx, x;
9740
9741 /* Make sure and truncate the "natural" shift on the way in. We don't
9742 want to do this inside the loop as it makes it more difficult to
9743 combine shifts. */
9744 if (SHIFT_COUNT_TRUNCATED)
9745 orig_count &= GET_MODE_BITSIZE (mode) - 1;
9746
9747 /* If we were given an invalid count, don't do anything except exactly
9748 what was requested. */
9749
9750 if (orig_count < 0 || orig_count >= (int) GET_MODE_BITSIZE (mode))
9751 return NULL_RTX;
9752
9753 count = orig_count;
9754
9755 /* Unless one of the branches of the `if' in this loop does a `continue',
9756 we will `break' the loop after the `if'. */
9757
9758 while (count != 0)
9759 {
9760 /* If we have an operand of (clobber (const_int 0)), fail. */
9761 if (GET_CODE (varop) == CLOBBER)
9762 return NULL_RTX;
9763
9764 /* Convert ROTATERT to ROTATE. */
9765 if (code == ROTATERT)
9766 {
9767 unsigned int bitsize = GET_MODE_BITSIZE (result_mode);;
9768 code = ROTATE;
9769 if (VECTOR_MODE_P (result_mode))
9770 count = bitsize / GET_MODE_NUNITS (result_mode) - count;
9771 else
9772 count = bitsize - count;
9773 }
9774
9775 shift_mode = try_widen_shift_mode (code, varop, count, result_mode,
9776 mode, outer_op, outer_const);
9777
9778 /* Handle cases where the count is greater than the size of the mode
9779 minus 1. For ASHIFT, use the size minus one as the count (this can
9780 occur when simplifying (lshiftrt (ashiftrt ..))). For rotates,
9781 take the count modulo the size. For other shifts, the result is
9782 zero.
9783
9784 Since these shifts are being produced by the compiler by combining
9785 multiple operations, each of which are defined, we know what the
9786 result is supposed to be. */
9787
9788 if (count > (GET_MODE_BITSIZE (shift_mode) - 1))
9789 {
9790 if (code == ASHIFTRT)
9791 count = GET_MODE_BITSIZE (shift_mode) - 1;
9792 else if (code == ROTATE || code == ROTATERT)
9793 count %= GET_MODE_BITSIZE (shift_mode);
9794 else
9795 {
9796 /* We can't simply return zero because there may be an
9797 outer op. */
9798 varop = const0_rtx;
9799 count = 0;
9800 break;
9801 }
9802 }
9803
9804 /* If we discovered we had to complement VAROP, leave. Making a NOT
9805 here would cause an infinite loop. */
9806 if (complement_p)
9807 break;
9808
9809 /* An arithmetic right shift of a quantity known to be -1 or 0
9810 is a no-op. */
9811 if (code == ASHIFTRT
9812 && (num_sign_bit_copies (varop, shift_mode)
9813 == GET_MODE_BITSIZE (shift_mode)))
9814 {
9815 count = 0;
9816 break;
9817 }
9818
9819 /* If we are doing an arithmetic right shift and discarding all but
9820 the sign bit copies, this is equivalent to doing a shift by the
9821 bitsize minus one. Convert it into that shift because it will often
9822 allow other simplifications. */
9823
9824 if (code == ASHIFTRT
9825 && (count + num_sign_bit_copies (varop, shift_mode)
9826 >= GET_MODE_BITSIZE (shift_mode)))
9827 count = GET_MODE_BITSIZE (shift_mode) - 1;
9828
9829 /* We simplify the tests below and elsewhere by converting
9830 ASHIFTRT to LSHIFTRT if we know the sign bit is clear.
9831 `make_compound_operation' will convert it to an ASHIFTRT for
9832 those machines (such as VAX) that don't have an LSHIFTRT. */
9833 if (GET_MODE_BITSIZE (shift_mode) <= HOST_BITS_PER_WIDE_INT
9834 && code == ASHIFTRT
9835 && ((nonzero_bits (varop, shift_mode)
9836 & ((unsigned HOST_WIDE_INT) 1
9837 << (GET_MODE_BITSIZE (shift_mode) - 1))) == 0))
9838 code = LSHIFTRT;
9839
9840 if (((code == LSHIFTRT
9841 && GET_MODE_BITSIZE (shift_mode) <= HOST_BITS_PER_WIDE_INT
9842 && !(nonzero_bits (varop, shift_mode) >> count))
9843 || (code == ASHIFT
9844 && GET_MODE_BITSIZE (shift_mode) <= HOST_BITS_PER_WIDE_INT
9845 && !((nonzero_bits (varop, shift_mode) << count)
9846 & GET_MODE_MASK (shift_mode))))
9847 && !side_effects_p (varop))
9848 varop = const0_rtx;
9849
9850 switch (GET_CODE (varop))
9851 {
9852 case SIGN_EXTEND:
9853 case ZERO_EXTEND:
9854 case SIGN_EXTRACT:
9855 case ZERO_EXTRACT:
9856 new_rtx = expand_compound_operation (varop);
9857 if (new_rtx != varop)
9858 {
9859 varop = new_rtx;
9860 continue;
9861 }
9862 break;
9863
9864 case MEM:
9865 /* If we have (xshiftrt (mem ...) C) and C is MODE_WIDTH
9866 minus the width of a smaller mode, we can do this with a
9867 SIGN_EXTEND or ZERO_EXTEND from the narrower memory location. */
9868 if ((code == ASHIFTRT || code == LSHIFTRT)
9869 && ! mode_dependent_address_p (XEXP (varop, 0))
9870 && ! MEM_VOLATILE_P (varop)
9871 && (tmode = mode_for_size (GET_MODE_BITSIZE (mode) - count,
9872 MODE_INT, 1)) != BLKmode)
9873 {
9874 new_rtx = adjust_address_nv (varop, tmode,
9875 BYTES_BIG_ENDIAN ? 0
9876 : count / BITS_PER_UNIT);
9877
9878 varop = gen_rtx_fmt_e (code == ASHIFTRT ? SIGN_EXTEND
9879 : ZERO_EXTEND, mode, new_rtx);
9880 count = 0;
9881 continue;
9882 }
9883 break;
9884
9885 case SUBREG:
9886 /* If VAROP is a SUBREG, strip it as long as the inner operand has
9887 the same number of words as what we've seen so far. Then store
9888 the widest mode in MODE. */
9889 if (subreg_lowpart_p (varop)
9890 && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (varop)))
9891 > GET_MODE_SIZE (GET_MODE (varop)))
9892 && (unsigned int) ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (varop)))
9893 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
9894 == mode_words
9895 && GET_MODE_CLASS (GET_MODE (varop)) == MODE_INT
9896 && GET_MODE_CLASS (GET_MODE (SUBREG_REG (varop))) == MODE_INT)
9897 {
9898 varop = SUBREG_REG (varop);
9899 if (GET_MODE_SIZE (GET_MODE (varop)) > GET_MODE_SIZE (mode))
9900 mode = GET_MODE (varop);
9901 continue;
9902 }
9903 break;
9904
9905 case MULT:
9906 /* Some machines use MULT instead of ASHIFT because MULT
9907 is cheaper. But it is still better on those machines to
9908 merge two shifts into one. */
9909 if (CONST_INT_P (XEXP (varop, 1))
9910 && exact_log2 (UINTVAL (XEXP (varop, 1))) >= 0)
9911 {
9912 varop
9913 = simplify_gen_binary (ASHIFT, GET_MODE (varop),
9914 XEXP (varop, 0),
9915 GEN_INT (exact_log2 (
9916 UINTVAL (XEXP (varop, 1)))));
9917 continue;
9918 }
9919 break;
9920
9921 case UDIV:
9922 /* Similar, for when divides are cheaper. */
9923 if (CONST_INT_P (XEXP (varop, 1))
9924 && exact_log2 (UINTVAL (XEXP (varop, 1))) >= 0)
9925 {
9926 varop
9927 = simplify_gen_binary (LSHIFTRT, GET_MODE (varop),
9928 XEXP (varop, 0),
9929 GEN_INT (exact_log2 (
9930 UINTVAL (XEXP (varop, 1)))));
9931 continue;
9932 }
9933 break;
9934
9935 case ASHIFTRT:
9936 /* If we are extracting just the sign bit of an arithmetic
9937 right shift, that shift is not needed. However, the sign
9938 bit of a wider mode may be different from what would be
9939 interpreted as the sign bit in a narrower mode, so, if
9940 the result is narrower, don't discard the shift. */
9941 if (code == LSHIFTRT
9942 && count == (GET_MODE_BITSIZE (result_mode) - 1)
9943 && (GET_MODE_BITSIZE (result_mode)
9944 >= GET_MODE_BITSIZE (GET_MODE (varop))))
9945 {
9946 varop = XEXP (varop, 0);
9947 continue;
9948 }
9949
9950 /* ... fall through ... */
9951
9952 case LSHIFTRT:
9953 case ASHIFT:
9954 case ROTATE:
9955 /* Here we have two nested shifts. The result is usually the
9956 AND of a new shift with a mask. We compute the result below. */
9957 if (CONST_INT_P (XEXP (varop, 1))
9958 && INTVAL (XEXP (varop, 1)) >= 0
9959 && INTVAL (XEXP (varop, 1)) < GET_MODE_BITSIZE (GET_MODE (varop))
9960 && GET_MODE_BITSIZE (result_mode) <= HOST_BITS_PER_WIDE_INT
9961 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
9962 && !VECTOR_MODE_P (result_mode))
9963 {
9964 enum rtx_code first_code = GET_CODE (varop);
9965 unsigned int first_count = INTVAL (XEXP (varop, 1));
9966 unsigned HOST_WIDE_INT mask;
9967 rtx mask_rtx;
9968
9969 /* We have one common special case. We can't do any merging if
9970 the inner code is an ASHIFTRT of a smaller mode. However, if
9971 we have (ashift:M1 (subreg:M1 (ashiftrt:M2 FOO C1) 0) C2)
9972 with C2 == GET_MODE_BITSIZE (M1) - GET_MODE_BITSIZE (M2),
9973 we can convert it to
9974 (ashiftrt:M1 (ashift:M1 (and:M1 (subreg:M1 FOO 0 C2) C3) C1).
9975 This simplifies certain SIGN_EXTEND operations. */
9976 if (code == ASHIFT && first_code == ASHIFTRT
9977 && count == (GET_MODE_BITSIZE (result_mode)
9978 - GET_MODE_BITSIZE (GET_MODE (varop))))
9979 {
9980 /* C3 has the low-order C1 bits zero. */
9981
9982 mask = GET_MODE_MASK (mode)
9983 & ~(((unsigned HOST_WIDE_INT) 1 << first_count) - 1);
9984
9985 varop = simplify_and_const_int (NULL_RTX, result_mode,
9986 XEXP (varop, 0), mask);
9987 varop = simplify_shift_const (NULL_RTX, ASHIFT, result_mode,
9988 varop, count);
9989 count = first_count;
9990 code = ASHIFTRT;
9991 continue;
9992 }
9993
9994 /* If this was (ashiftrt (ashift foo C1) C2) and FOO has more
9995 than C1 high-order bits equal to the sign bit, we can convert
9996 this to either an ASHIFT or an ASHIFTRT depending on the
9997 two counts.
9998
9999 We cannot do this if VAROP's mode is not SHIFT_MODE. */
10000
10001 if (code == ASHIFTRT && first_code == ASHIFT
10002 && GET_MODE (varop) == shift_mode
10003 && (num_sign_bit_copies (XEXP (varop, 0), shift_mode)
10004 > first_count))
10005 {
10006 varop = XEXP (varop, 0);
10007 count -= first_count;
10008 if (count < 0)
10009 {
10010 count = -count;
10011 code = ASHIFT;
10012 }
10013
10014 continue;
10015 }
10016
10017 /* There are some cases we can't do. If CODE is ASHIFTRT,
10018 we can only do this if FIRST_CODE is also ASHIFTRT.
10019
10020 We can't do the case when CODE is ROTATE and FIRST_CODE is
10021 ASHIFTRT.
10022
10023 If the mode of this shift is not the mode of the outer shift,
10024 we can't do this if either shift is a right shift or ROTATE.
10025
10026 Finally, we can't do any of these if the mode is too wide
10027 unless the codes are the same.
10028
10029 Handle the case where the shift codes are the same
10030 first. */
10031
10032 if (code == first_code)
10033 {
10034 if (GET_MODE (varop) != result_mode
10035 && (code == ASHIFTRT || code == LSHIFTRT
10036 || code == ROTATE))
10037 break;
10038
10039 count += first_count;
10040 varop = XEXP (varop, 0);
10041 continue;
10042 }
10043
10044 if (code == ASHIFTRT
10045 || (code == ROTATE && first_code == ASHIFTRT)
10046 || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT
10047 || (GET_MODE (varop) != result_mode
10048 && (first_code == ASHIFTRT || first_code == LSHIFTRT
10049 || first_code == ROTATE
10050 || code == ROTATE)))
10051 break;
10052
10053 /* To compute the mask to apply after the shift, shift the
10054 nonzero bits of the inner shift the same way the
10055 outer shift will. */
10056
10057 mask_rtx = GEN_INT (nonzero_bits (varop, GET_MODE (varop)));
10058
10059 mask_rtx
10060 = simplify_const_binary_operation (code, result_mode, mask_rtx,
10061 GEN_INT (count));
10062
10063 /* Give up if we can't compute an outer operation to use. */
10064 if (mask_rtx == 0
10065 || !CONST_INT_P (mask_rtx)
10066 || ! merge_outer_ops (&outer_op, &outer_const, AND,
10067 INTVAL (mask_rtx),
10068 result_mode, &complement_p))
10069 break;
10070
10071 /* If the shifts are in the same direction, we add the
10072 counts. Otherwise, we subtract them. */
10073 if ((code == ASHIFTRT || code == LSHIFTRT)
10074 == (first_code == ASHIFTRT || first_code == LSHIFTRT))
10075 count += first_count;
10076 else
10077 count -= first_count;
10078
10079 /* If COUNT is positive, the new shift is usually CODE,
10080 except for the two exceptions below, in which case it is
10081 FIRST_CODE. If the count is negative, FIRST_CODE should
10082 always be used */
10083 if (count > 0
10084 && ((first_code == ROTATE && code == ASHIFT)
10085 || (first_code == ASHIFTRT && code == LSHIFTRT)))
10086 code = first_code;
10087 else if (count < 0)
10088 code = first_code, count = -count;
10089
10090 varop = XEXP (varop, 0);
10091 continue;
10092 }
10093
10094 /* If we have (A << B << C) for any shift, we can convert this to
10095 (A << C << B). This wins if A is a constant. Only try this if
10096 B is not a constant. */
10097
10098 else if (GET_CODE (varop) == code
10099 && CONST_INT_P (XEXP (varop, 0))
10100 && !CONST_INT_P (XEXP (varop, 1)))
10101 {
10102 rtx new_rtx = simplify_const_binary_operation (code, mode,
10103 XEXP (varop, 0),
10104 GEN_INT (count));
10105 varop = gen_rtx_fmt_ee (code, mode, new_rtx, XEXP (varop, 1));
10106 count = 0;
10107 continue;
10108 }
10109 break;
10110
10111 case NOT:
10112 if (VECTOR_MODE_P (mode))
10113 break;
10114
10115 /* Make this fit the case below. */
10116 varop = gen_rtx_XOR (mode, XEXP (varop, 0),
10117 GEN_INT (GET_MODE_MASK (mode)));
10118 continue;
10119
10120 case IOR:
10121 case AND:
10122 case XOR:
10123 /* If we have (xshiftrt (ior (plus X (const_int -1)) X) C)
10124 with C the size of VAROP - 1 and the shift is logical if
10125 STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
10126 we have an (le X 0) operation. If we have an arithmetic shift
10127 and STORE_FLAG_VALUE is 1 or we have a logical shift with
10128 STORE_FLAG_VALUE of -1, we have a (neg (le X 0)) operation. */
10129
10130 if (GET_CODE (varop) == IOR && GET_CODE (XEXP (varop, 0)) == PLUS
10131 && XEXP (XEXP (varop, 0), 1) == constm1_rtx
10132 && (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
10133 && (code == LSHIFTRT || code == ASHIFTRT)
10134 && count == (GET_MODE_BITSIZE (GET_MODE (varop)) - 1)
10135 && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
10136 {
10137 count = 0;
10138 varop = gen_rtx_LE (GET_MODE (varop), XEXP (varop, 1),
10139 const0_rtx);
10140
10141 if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
10142 varop = gen_rtx_NEG (GET_MODE (varop), varop);
10143
10144 continue;
10145 }
10146
10147 /* If we have (shift (logical)), move the logical to the outside
10148 to allow it to possibly combine with another logical and the
10149 shift to combine with another shift. This also canonicalizes to
10150 what a ZERO_EXTRACT looks like. Also, some machines have
10151 (and (shift)) insns. */
10152
10153 if (CONST_INT_P (XEXP (varop, 1))
10154 /* We can't do this if we have (ashiftrt (xor)) and the
10155 constant has its sign bit set in shift_mode. */
10156 && !(code == ASHIFTRT && GET_CODE (varop) == XOR
10157 && 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
10158 shift_mode))
10159 && (new_rtx = simplify_const_binary_operation (code, result_mode,
10160 XEXP (varop, 1),
10161 GEN_INT (count))) != 0
10162 && CONST_INT_P (new_rtx)
10163 && merge_outer_ops (&outer_op, &outer_const, GET_CODE (varop),
10164 INTVAL (new_rtx), result_mode, &complement_p))
10165 {
10166 varop = XEXP (varop, 0);
10167 continue;
10168 }
10169
10170 /* If we can't do that, try to simplify the shift in each arm of the
10171 logical expression, make a new logical expression, and apply
10172 the inverse distributive law. This also can't be done
10173 for some (ashiftrt (xor)). */
10174 if (CONST_INT_P (XEXP (varop, 1))
10175 && !(code == ASHIFTRT && GET_CODE (varop) == XOR
10176 && 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
10177 shift_mode)))
10178 {
10179 rtx lhs = simplify_shift_const (NULL_RTX, code, shift_mode,
10180 XEXP (varop, 0), count);
10181 rtx rhs = simplify_shift_const (NULL_RTX, code, shift_mode,
10182 XEXP (varop, 1), count);
10183
10184 varop = simplify_gen_binary (GET_CODE (varop), shift_mode,
10185 lhs, rhs);
10186 varop = apply_distributive_law (varop);
10187
10188 count = 0;
10189 continue;
10190 }
10191 break;
10192
10193 case EQ:
10194 /* Convert (lshiftrt (eq FOO 0) C) to (xor FOO 1) if STORE_FLAG_VALUE
10195 says that the sign bit can be tested, FOO has mode MODE, C is
10196 GET_MODE_BITSIZE (MODE) - 1, and FOO has only its low-order bit
10197 that may be nonzero. */
10198 if (code == LSHIFTRT
10199 && XEXP (varop, 1) == const0_rtx
10200 && GET_MODE (XEXP (varop, 0)) == result_mode
10201 && count == (GET_MODE_BITSIZE (result_mode) - 1)
10202 && GET_MODE_BITSIZE (result_mode) <= HOST_BITS_PER_WIDE_INT
10203 && STORE_FLAG_VALUE == -1
10204 && nonzero_bits (XEXP (varop, 0), result_mode) == 1
10205 && merge_outer_ops (&outer_op, &outer_const, XOR, 1, result_mode,
10206 &complement_p))
10207 {
10208 varop = XEXP (varop, 0);
10209 count = 0;
10210 continue;
10211 }
10212 break;
10213
10214 case NEG:
10215 /* (lshiftrt (neg A) C) where A is either 0 or 1 and C is one less
10216 than the number of bits in the mode is equivalent to A. */
10217 if (code == LSHIFTRT
10218 && count == (GET_MODE_BITSIZE (result_mode) - 1)
10219 && nonzero_bits (XEXP (varop, 0), result_mode) == 1)
10220 {
10221 varop = XEXP (varop, 0);
10222 count = 0;
10223 continue;
10224 }
10225
10226 /* NEG commutes with ASHIFT since it is multiplication. Move the
10227 NEG outside to allow shifts to combine. */
10228 if (code == ASHIFT
10229 && merge_outer_ops (&outer_op, &outer_const, NEG, 0, result_mode,
10230 &complement_p))
10231 {
10232 varop = XEXP (varop, 0);
10233 continue;
10234 }
10235 break;
10236
10237 case PLUS:
10238 /* (lshiftrt (plus A -1) C) where A is either 0 or 1 and C
10239 is one less than the number of bits in the mode is
10240 equivalent to (xor A 1). */
10241 if (code == LSHIFTRT
10242 && count == (GET_MODE_BITSIZE (result_mode) - 1)
10243 && XEXP (varop, 1) == constm1_rtx
10244 && nonzero_bits (XEXP (varop, 0), result_mode) == 1
10245 && merge_outer_ops (&outer_op, &outer_const, XOR, 1, result_mode,
10246 &complement_p))
10247 {
10248 count = 0;
10249 varop = XEXP (varop, 0);
10250 continue;
10251 }
10252
10253 /* If we have (xshiftrt (plus FOO BAR) C), and the only bits
10254 that might be nonzero in BAR are those being shifted out and those
10255 bits are known zero in FOO, we can replace the PLUS with FOO.
10256 Similarly in the other operand order. This code occurs when
10257 we are computing the size of a variable-size array. */
10258
10259 if ((code == ASHIFTRT || code == LSHIFTRT)
10260 && count < HOST_BITS_PER_WIDE_INT
10261 && nonzero_bits (XEXP (varop, 1), result_mode) >> count == 0
10262 && (nonzero_bits (XEXP (varop, 1), result_mode)
10263 & nonzero_bits (XEXP (varop, 0), result_mode)) == 0)
10264 {
10265 varop = XEXP (varop, 0);
10266 continue;
10267 }
10268 else if ((code == ASHIFTRT || code == LSHIFTRT)
10269 && count < HOST_BITS_PER_WIDE_INT
10270 && GET_MODE_BITSIZE (result_mode) <= HOST_BITS_PER_WIDE_INT
10271 && 0 == (nonzero_bits (XEXP (varop, 0), result_mode)
10272 >> count)
10273 && 0 == (nonzero_bits (XEXP (varop, 0), result_mode)
10274 & nonzero_bits (XEXP (varop, 1),
10275 result_mode)))
10276 {
10277 varop = XEXP (varop, 1);
10278 continue;
10279 }
10280
10281 /* (ashift (plus foo C) N) is (plus (ashift foo N) C'). */
10282 if (code == ASHIFT
10283 && CONST_INT_P (XEXP (varop, 1))
10284 && (new_rtx = simplify_const_binary_operation (ASHIFT, result_mode,
10285 XEXP (varop, 1),
10286 GEN_INT (count))) != 0
10287 && CONST_INT_P (new_rtx)
10288 && merge_outer_ops (&outer_op, &outer_const, PLUS,
10289 INTVAL (new_rtx), result_mode, &complement_p))
10290 {
10291 varop = XEXP (varop, 0);
10292 continue;
10293 }
10294
10295 /* Check for 'PLUS signbit', which is the canonical form of 'XOR
10296 signbit', and attempt to change the PLUS to an XOR and move it to
10297 the outer operation as is done above in the AND/IOR/XOR case
10298 leg for shift(logical). See details in logical handling above
10299 for reasoning in doing so. */
10300 if (code == LSHIFTRT
10301 && CONST_INT_P (XEXP (varop, 1))
10302 && mode_signbit_p (result_mode, XEXP (varop, 1))
10303 && (new_rtx = simplify_const_binary_operation (code, result_mode,
10304 XEXP (varop, 1),
10305 GEN_INT (count))) != 0
10306 && CONST_INT_P (new_rtx)
10307 && merge_outer_ops (&outer_op, &outer_const, XOR,
10308 INTVAL (new_rtx), result_mode, &complement_p))
10309 {
10310 varop = XEXP (varop, 0);
10311 continue;
10312 }
10313
10314 break;
10315
10316 case MINUS:
10317 /* If we have (xshiftrt (minus (ashiftrt X C)) X) C)
10318 with C the size of VAROP - 1 and the shift is logical if
10319 STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
10320 we have a (gt X 0) operation. If the shift is arithmetic with
10321 STORE_FLAG_VALUE of 1 or logical with STORE_FLAG_VALUE == -1,
10322 we have a (neg (gt X 0)) operation. */
10323
10324 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
10325 && GET_CODE (XEXP (varop, 0)) == ASHIFTRT
10326 && count == (GET_MODE_BITSIZE (GET_MODE (varop)) - 1)
10327 && (code == LSHIFTRT || code == ASHIFTRT)
10328 && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
10329 && INTVAL (XEXP (XEXP (varop, 0), 1)) == count
10330 && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
10331 {
10332 count = 0;
10333 varop = gen_rtx_GT (GET_MODE (varop), XEXP (varop, 1),
10334 const0_rtx);
10335
10336 if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
10337 varop = gen_rtx_NEG (GET_MODE (varop), varop);
10338
10339 continue;
10340 }
10341 break;
10342
10343 case TRUNCATE:
10344 /* Change (lshiftrt (truncate (lshiftrt))) to (truncate (lshiftrt))
10345 if the truncate does not affect the value. */
10346 if (code == LSHIFTRT
10347 && GET_CODE (XEXP (varop, 0)) == LSHIFTRT
10348 && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
10349 && (INTVAL (XEXP (XEXP (varop, 0), 1))
10350 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (varop, 0)))
10351 - GET_MODE_BITSIZE (GET_MODE (varop)))))
10352 {
10353 rtx varop_inner = XEXP (varop, 0);
10354
10355 varop_inner
10356 = gen_rtx_LSHIFTRT (GET_MODE (varop_inner),
10357 XEXP (varop_inner, 0),
10358 GEN_INT
10359 (count + INTVAL (XEXP (varop_inner, 1))));
10360 varop = gen_rtx_TRUNCATE (GET_MODE (varop), varop_inner);
10361 count = 0;
10362 continue;
10363 }
10364 break;
10365
10366 default:
10367 break;
10368 }
10369
10370 break;
10371 }
10372
10373 shift_mode = try_widen_shift_mode (code, varop, count, result_mode, mode,
10374 outer_op, outer_const);
10375
10376 /* We have now finished analyzing the shift. The result should be
10377 a shift of type CODE with SHIFT_MODE shifting VAROP COUNT places. If
10378 OUTER_OP is non-UNKNOWN, it is an operation that needs to be applied
10379 to the result of the shift. OUTER_CONST is the relevant constant,
10380 but we must turn off all bits turned off in the shift. */
10381
10382 if (outer_op == UNKNOWN
10383 && orig_code == code && orig_count == count
10384 && varop == orig_varop
10385 && shift_mode == GET_MODE (varop))
10386 return NULL_RTX;
10387
10388 /* Make a SUBREG if necessary. If we can't make it, fail. */
10389 varop = gen_lowpart (shift_mode, varop);
10390 if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
10391 return NULL_RTX;
10392
10393 /* If we have an outer operation and we just made a shift, it is
10394 possible that we could have simplified the shift were it not
10395 for the outer operation. So try to do the simplification
10396 recursively. */
10397
10398 if (outer_op != UNKNOWN)
10399 x = simplify_shift_const_1 (code, shift_mode, varop, count);
10400 else
10401 x = NULL_RTX;
10402
10403 if (x == NULL_RTX)
10404 x = simplify_gen_binary (code, shift_mode, varop, GEN_INT (count));
10405
10406 /* If we were doing an LSHIFTRT in a wider mode than it was originally,
10407 turn off all the bits that the shift would have turned off. */
10408 if (orig_code == LSHIFTRT && result_mode != shift_mode)
10409 x = simplify_and_const_int (NULL_RTX, shift_mode, x,
10410 GET_MODE_MASK (result_mode) >> orig_count);
10411
10412 /* Do the remainder of the processing in RESULT_MODE. */
10413 x = gen_lowpart_or_truncate (result_mode, x);
10414
10415 /* If COMPLEMENT_P is set, we have to complement X before doing the outer
10416 operation. */
10417 if (complement_p)
10418 x = simplify_gen_unary (NOT, result_mode, x, result_mode);
10419
10420 if (outer_op != UNKNOWN)
10421 {
10422 if (GET_RTX_CLASS (outer_op) != RTX_UNARY
10423 && GET_MODE_BITSIZE (result_mode) < HOST_BITS_PER_WIDE_INT)
10424 outer_const = trunc_int_for_mode (outer_const, result_mode);
10425
10426 if (outer_op == AND)
10427 x = simplify_and_const_int (NULL_RTX, result_mode, x, outer_const);
10428 else if (outer_op == SET)
10429 {
10430 /* This means that we have determined that the result is
10431 equivalent to a constant. This should be rare. */
10432 if (!side_effects_p (x))
10433 x = GEN_INT (outer_const);
10434 }
10435 else if (GET_RTX_CLASS (outer_op) == RTX_UNARY)
10436 x = simplify_gen_unary (outer_op, result_mode, x, result_mode);
10437 else
10438 x = simplify_gen_binary (outer_op, result_mode, x,
10439 GEN_INT (outer_const));
10440 }
10441
10442 return x;
10443 }
10444
10445 /* Simplify a shift of VAROP by COUNT bits. CODE says what kind of shift.
10446 The result of the shift is RESULT_MODE. If we cannot simplify it,
10447 return X or, if it is NULL, synthesize the expression with
10448 simplify_gen_binary. Otherwise, return a simplified value.
10449
10450 The shift is normally computed in the widest mode we find in VAROP, as
10451 long as it isn't a different number of words than RESULT_MODE. Exceptions
10452 are ASHIFTRT and ROTATE, which are always done in their original mode. */
10453
10454 static rtx
10455 simplify_shift_const (rtx x, enum rtx_code code, enum machine_mode result_mode,
10456 rtx varop, int count)
10457 {
10458 rtx tem = simplify_shift_const_1 (code, result_mode, varop, count);
10459 if (tem)
10460 return tem;
10461
10462 if (!x)
10463 x = simplify_gen_binary (code, GET_MODE (varop), varop, GEN_INT (count));
10464 if (GET_MODE (x) != result_mode)
10465 x = gen_lowpart (result_mode, x);
10466 return x;
10467 }
10468
10469 \f
10470 /* Like recog, but we receive the address of a pointer to a new pattern.
10471 We try to match the rtx that the pointer points to.
10472 If that fails, we may try to modify or replace the pattern,
10473 storing the replacement into the same pointer object.
10474
10475 Modifications include deletion or addition of CLOBBERs.
10476
10477 PNOTES is a pointer to a location where any REG_UNUSED notes added for
10478 the CLOBBERs are placed.
10479
10480 The value is the final insn code from the pattern ultimately matched,
10481 or -1. */
10482
10483 static int
10484 recog_for_combine (rtx *pnewpat, rtx insn, rtx *pnotes)
10485 {
10486 rtx pat = *pnewpat;
10487 int insn_code_number;
10488 int num_clobbers_to_add = 0;
10489 int i;
10490 rtx notes = 0;
10491 rtx old_notes, old_pat;
10492
10493 /* If PAT is a PARALLEL, check to see if it contains the CLOBBER
10494 we use to indicate that something didn't match. If we find such a
10495 thing, force rejection. */
10496 if (GET_CODE (pat) == PARALLEL)
10497 for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
10498 if (GET_CODE (XVECEXP (pat, 0, i)) == CLOBBER
10499 && XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
10500 return -1;
10501
10502 old_pat = PATTERN (insn);
10503 old_notes = REG_NOTES (insn);
10504 PATTERN (insn) = pat;
10505 REG_NOTES (insn) = 0;
10506
10507 insn_code_number = recog (pat, insn, &num_clobbers_to_add);
10508 if (dump_file && (dump_flags & TDF_DETAILS))
10509 {
10510 if (insn_code_number < 0)
10511 fputs ("Failed to match this instruction:\n", dump_file);
10512 else
10513 fputs ("Successfully matched this instruction:\n", dump_file);
10514 print_rtl_single (dump_file, pat);
10515 }
10516
10517 /* If it isn't, there is the possibility that we previously had an insn
10518 that clobbered some register as a side effect, but the combined
10519 insn doesn't need to do that. So try once more without the clobbers
10520 unless this represents an ASM insn. */
10521
10522 if (insn_code_number < 0 && ! check_asm_operands (pat)
10523 && GET_CODE (pat) == PARALLEL)
10524 {
10525 int pos;
10526
10527 for (pos = 0, i = 0; i < XVECLEN (pat, 0); i++)
10528 if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER)
10529 {
10530 if (i != pos)
10531 SUBST (XVECEXP (pat, 0, pos), XVECEXP (pat, 0, i));
10532 pos++;
10533 }
10534
10535 SUBST_INT (XVECLEN (pat, 0), pos);
10536
10537 if (pos == 1)
10538 pat = XVECEXP (pat, 0, 0);
10539
10540 PATTERN (insn) = pat;
10541 insn_code_number = recog (pat, insn, &num_clobbers_to_add);
10542 if (dump_file && (dump_flags & TDF_DETAILS))
10543 {
10544 if (insn_code_number < 0)
10545 fputs ("Failed to match this instruction:\n", dump_file);
10546 else
10547 fputs ("Successfully matched this instruction:\n", dump_file);
10548 print_rtl_single (dump_file, pat);
10549 }
10550 }
10551 PATTERN (insn) = old_pat;
10552 REG_NOTES (insn) = old_notes;
10553
10554 /* Recognize all noop sets, these will be killed by followup pass. */
10555 if (insn_code_number < 0 && GET_CODE (pat) == SET && set_noop_p (pat))
10556 insn_code_number = NOOP_MOVE_INSN_CODE, num_clobbers_to_add = 0;
10557
10558 /* If we had any clobbers to add, make a new pattern than contains
10559 them. Then check to make sure that all of them are dead. */
10560 if (num_clobbers_to_add)
10561 {
10562 rtx newpat = gen_rtx_PARALLEL (VOIDmode,
10563 rtvec_alloc (GET_CODE (pat) == PARALLEL
10564 ? (XVECLEN (pat, 0)
10565 + num_clobbers_to_add)
10566 : num_clobbers_to_add + 1));
10567
10568 if (GET_CODE (pat) == PARALLEL)
10569 for (i = 0; i < XVECLEN (pat, 0); i++)
10570 XVECEXP (newpat, 0, i) = XVECEXP (pat, 0, i);
10571 else
10572 XVECEXP (newpat, 0, 0) = pat;
10573
10574 add_clobbers (newpat, insn_code_number);
10575
10576 for (i = XVECLEN (newpat, 0) - num_clobbers_to_add;
10577 i < XVECLEN (newpat, 0); i++)
10578 {
10579 if (REG_P (XEXP (XVECEXP (newpat, 0, i), 0))
10580 && ! reg_dead_at_p (XEXP (XVECEXP (newpat, 0, i), 0), insn))
10581 return -1;
10582 if (GET_CODE (XEXP (XVECEXP (newpat, 0, i), 0)) != SCRATCH)
10583 {
10584 gcc_assert (REG_P (XEXP (XVECEXP (newpat, 0, i), 0)));
10585 notes = alloc_reg_note (REG_UNUSED,
10586 XEXP (XVECEXP (newpat, 0, i), 0), notes);
10587 }
10588 }
10589 pat = newpat;
10590 }
10591
10592 *pnewpat = pat;
10593 *pnotes = notes;
10594
10595 return insn_code_number;
10596 }
10597 \f
10598 /* Like gen_lowpart_general but for use by combine. In combine it
10599 is not possible to create any new pseudoregs. However, it is
10600 safe to create invalid memory addresses, because combine will
10601 try to recognize them and all they will do is make the combine
10602 attempt fail.
10603
10604 If for some reason this cannot do its job, an rtx
10605 (clobber (const_int 0)) is returned.
10606 An insn containing that will not be recognized. */
10607
10608 static rtx
10609 gen_lowpart_for_combine (enum machine_mode omode, rtx x)
10610 {
10611 enum machine_mode imode = GET_MODE (x);
10612 unsigned int osize = GET_MODE_SIZE (omode);
10613 unsigned int isize = GET_MODE_SIZE (imode);
10614 rtx result;
10615
10616 if (omode == imode)
10617 return x;
10618
10619 /* Return identity if this is a CONST or symbolic reference. */
10620 if (omode == Pmode
10621 && (GET_CODE (x) == CONST
10622 || GET_CODE (x) == SYMBOL_REF
10623 || GET_CODE (x) == LABEL_REF))
10624 return x;
10625
10626 /* We can only support MODE being wider than a word if X is a
10627 constant integer or has a mode the same size. */
10628 if (GET_MODE_SIZE (omode) > UNITS_PER_WORD
10629 && ! ((imode == VOIDmode
10630 && (CONST_INT_P (x)
10631 || GET_CODE (x) == CONST_DOUBLE))
10632 || isize == osize))
10633 goto fail;
10634
10635 /* X might be a paradoxical (subreg (mem)). In that case, gen_lowpart
10636 won't know what to do. So we will strip off the SUBREG here and
10637 process normally. */
10638 if (GET_CODE (x) == SUBREG && MEM_P (SUBREG_REG (x)))
10639 {
10640 x = SUBREG_REG (x);
10641
10642 /* For use in case we fall down into the address adjustments
10643 further below, we need to adjust the known mode and size of
10644 x; imode and isize, since we just adjusted x. */
10645 imode = GET_MODE (x);
10646
10647 if (imode == omode)
10648 return x;
10649
10650 isize = GET_MODE_SIZE (imode);
10651 }
10652
10653 result = gen_lowpart_common (omode, x);
10654
10655 if (result)
10656 return result;
10657
10658 if (MEM_P (x))
10659 {
10660 int offset = 0;
10661
10662 /* Refuse to work on a volatile memory ref or one with a mode-dependent
10663 address. */
10664 if (MEM_VOLATILE_P (x) || mode_dependent_address_p (XEXP (x, 0)))
10665 goto fail;
10666
10667 /* If we want to refer to something bigger than the original memref,
10668 generate a paradoxical subreg instead. That will force a reload
10669 of the original memref X. */
10670 if (isize < osize)
10671 return gen_rtx_SUBREG (omode, x, 0);
10672
10673 if (WORDS_BIG_ENDIAN)
10674 offset = MAX (isize, UNITS_PER_WORD) - MAX (osize, UNITS_PER_WORD);
10675
10676 /* Adjust the address so that the address-after-the-data is
10677 unchanged. */
10678 if (BYTES_BIG_ENDIAN)
10679 offset -= MIN (UNITS_PER_WORD, osize) - MIN (UNITS_PER_WORD, isize);
10680
10681 return adjust_address_nv (x, omode, offset);
10682 }
10683
10684 /* If X is a comparison operator, rewrite it in a new mode. This
10685 probably won't match, but may allow further simplifications. */
10686 else if (COMPARISON_P (x))
10687 return gen_rtx_fmt_ee (GET_CODE (x), omode, XEXP (x, 0), XEXP (x, 1));
10688
10689 /* If we couldn't simplify X any other way, just enclose it in a
10690 SUBREG. Normally, this SUBREG won't match, but some patterns may
10691 include an explicit SUBREG or we may simplify it further in combine. */
10692 else
10693 {
10694 int offset = 0;
10695 rtx res;
10696
10697 offset = subreg_lowpart_offset (omode, imode);
10698 if (imode == VOIDmode)
10699 {
10700 imode = int_mode_for_mode (omode);
10701 x = gen_lowpart_common (imode, x);
10702 if (x == NULL)
10703 goto fail;
10704 }
10705 res = simplify_gen_subreg (omode, x, imode, offset);
10706 if (res)
10707 return res;
10708 }
10709
10710 fail:
10711 return gen_rtx_CLOBBER (omode, const0_rtx);
10712 }
10713 \f
10714 /* Simplify a comparison between *POP0 and *POP1 where CODE is the
10715 comparison code that will be tested.
10716
10717 The result is a possibly different comparison code to use. *POP0 and
10718 *POP1 may be updated.
10719
10720 It is possible that we might detect that a comparison is either always
10721 true or always false. However, we do not perform general constant
10722 folding in combine, so this knowledge isn't useful. Such tautologies
10723 should have been detected earlier. Hence we ignore all such cases. */
10724
10725 static enum rtx_code
10726 simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
10727 {
10728 rtx op0 = *pop0;
10729 rtx op1 = *pop1;
10730 rtx tem, tem1;
10731 int i;
10732 enum machine_mode mode, tmode;
10733
10734 /* Try a few ways of applying the same transformation to both operands. */
10735 while (1)
10736 {
10737 #ifndef WORD_REGISTER_OPERATIONS
10738 /* The test below this one won't handle SIGN_EXTENDs on these machines,
10739 so check specially. */
10740 if (code != GTU && code != GEU && code != LTU && code != LEU
10741 && GET_CODE (op0) == ASHIFTRT && GET_CODE (op1) == ASHIFTRT
10742 && GET_CODE (XEXP (op0, 0)) == ASHIFT
10743 && GET_CODE (XEXP (op1, 0)) == ASHIFT
10744 && GET_CODE (XEXP (XEXP (op0, 0), 0)) == SUBREG
10745 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SUBREG
10746 && (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0)))
10747 == GET_MODE (SUBREG_REG (XEXP (XEXP (op1, 0), 0))))
10748 && CONST_INT_P (XEXP (op0, 1))
10749 && XEXP (op0, 1) == XEXP (op1, 1)
10750 && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
10751 && XEXP (op0, 1) == XEXP (XEXP (op1, 0), 1)
10752 && (INTVAL (XEXP (op0, 1))
10753 == (GET_MODE_BITSIZE (GET_MODE (op0))
10754 - (GET_MODE_BITSIZE
10755 (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0))))))))
10756 {
10757 op0 = SUBREG_REG (XEXP (XEXP (op0, 0), 0));
10758 op1 = SUBREG_REG (XEXP (XEXP (op1, 0), 0));
10759 }
10760 #endif
10761
10762 /* If both operands are the same constant shift, see if we can ignore the
10763 shift. We can if the shift is a rotate or if the bits shifted out of
10764 this shift are known to be zero for both inputs and if the type of
10765 comparison is compatible with the shift. */
10766 if (GET_CODE (op0) == GET_CODE (op1)
10767 && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT
10768 && ((GET_CODE (op0) == ROTATE && (code == NE || code == EQ))
10769 || ((GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFT)
10770 && (code != GT && code != LT && code != GE && code != LE))
10771 || (GET_CODE (op0) == ASHIFTRT
10772 && (code != GTU && code != LTU
10773 && code != GEU && code != LEU)))
10774 && CONST_INT_P (XEXP (op0, 1))
10775 && INTVAL (XEXP (op0, 1)) >= 0
10776 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
10777 && XEXP (op0, 1) == XEXP (op1, 1))
10778 {
10779 enum machine_mode mode = GET_MODE (op0);
10780 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
10781 int shift_count = INTVAL (XEXP (op0, 1));
10782
10783 if (GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFTRT)
10784 mask &= (mask >> shift_count) << shift_count;
10785 else if (GET_CODE (op0) == ASHIFT)
10786 mask = (mask & (mask << shift_count)) >> shift_count;
10787
10788 if ((nonzero_bits (XEXP (op0, 0), mode) & ~mask) == 0
10789 && (nonzero_bits (XEXP (op1, 0), mode) & ~mask) == 0)
10790 op0 = XEXP (op0, 0), op1 = XEXP (op1, 0);
10791 else
10792 break;
10793 }
10794
10795 /* If both operands are AND's of a paradoxical SUBREG by constant, the
10796 SUBREGs are of the same mode, and, in both cases, the AND would
10797 be redundant if the comparison was done in the narrower mode,
10798 do the comparison in the narrower mode (e.g., we are AND'ing with 1
10799 and the operand's possibly nonzero bits are 0xffffff01; in that case
10800 if we only care about QImode, we don't need the AND). This case
10801 occurs if the output mode of an scc insn is not SImode and
10802 STORE_FLAG_VALUE == 1 (e.g., the 386).
10803
10804 Similarly, check for a case where the AND's are ZERO_EXTEND
10805 operations from some narrower mode even though a SUBREG is not
10806 present. */
10807
10808 else if (GET_CODE (op0) == AND && GET_CODE (op1) == AND
10809 && CONST_INT_P (XEXP (op0, 1))
10810 && CONST_INT_P (XEXP (op1, 1)))
10811 {
10812 rtx inner_op0 = XEXP (op0, 0);
10813 rtx inner_op1 = XEXP (op1, 0);
10814 HOST_WIDE_INT c0 = INTVAL (XEXP (op0, 1));
10815 HOST_WIDE_INT c1 = INTVAL (XEXP (op1, 1));
10816 int changed = 0;
10817
10818 if (GET_CODE (inner_op0) == SUBREG && GET_CODE (inner_op1) == SUBREG
10819 && (GET_MODE_SIZE (GET_MODE (inner_op0))
10820 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner_op0))))
10821 && (GET_MODE (SUBREG_REG (inner_op0))
10822 == GET_MODE (SUBREG_REG (inner_op1)))
10823 && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (inner_op0)))
10824 <= HOST_BITS_PER_WIDE_INT)
10825 && (0 == ((~c0) & nonzero_bits (SUBREG_REG (inner_op0),
10826 GET_MODE (SUBREG_REG (inner_op0)))))
10827 && (0 == ((~c1) & nonzero_bits (SUBREG_REG (inner_op1),
10828 GET_MODE (SUBREG_REG (inner_op1))))))
10829 {
10830 op0 = SUBREG_REG (inner_op0);
10831 op1 = SUBREG_REG (inner_op1);
10832
10833 /* The resulting comparison is always unsigned since we masked
10834 off the original sign bit. */
10835 code = unsigned_condition (code);
10836
10837 changed = 1;
10838 }
10839
10840 else if (c0 == c1)
10841 for (tmode = GET_CLASS_NARROWEST_MODE
10842 (GET_MODE_CLASS (GET_MODE (op0)));
10843 tmode != GET_MODE (op0); tmode = GET_MODE_WIDER_MODE (tmode))
10844 if ((unsigned HOST_WIDE_INT) c0 == GET_MODE_MASK (tmode))
10845 {
10846 op0 = gen_lowpart (tmode, inner_op0);
10847 op1 = gen_lowpart (tmode, inner_op1);
10848 code = unsigned_condition (code);
10849 changed = 1;
10850 break;
10851 }
10852
10853 if (! changed)
10854 break;
10855 }
10856
10857 /* If both operands are NOT, we can strip off the outer operation
10858 and adjust the comparison code for swapped operands; similarly for
10859 NEG, except that this must be an equality comparison. */
10860 else if ((GET_CODE (op0) == NOT && GET_CODE (op1) == NOT)
10861 || (GET_CODE (op0) == NEG && GET_CODE (op1) == NEG
10862 && (code == EQ || code == NE)))
10863 op0 = XEXP (op0, 0), op1 = XEXP (op1, 0), code = swap_condition (code);
10864
10865 else
10866 break;
10867 }
10868
10869 /* If the first operand is a constant, swap the operands and adjust the
10870 comparison code appropriately, but don't do this if the second operand
10871 is already a constant integer. */
10872 if (swap_commutative_operands_p (op0, op1))
10873 {
10874 tem = op0, op0 = op1, op1 = tem;
10875 code = swap_condition (code);
10876 }
10877
10878 /* We now enter a loop during which we will try to simplify the comparison.
10879 For the most part, we only are concerned with comparisons with zero,
10880 but some things may really be comparisons with zero but not start
10881 out looking that way. */
10882
10883 while (CONST_INT_P (op1))
10884 {
10885 enum machine_mode mode = GET_MODE (op0);
10886 unsigned int mode_width = GET_MODE_BITSIZE (mode);
10887 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
10888 int equality_comparison_p;
10889 int sign_bit_comparison_p;
10890 int unsigned_comparison_p;
10891 HOST_WIDE_INT const_op;
10892
10893 /* We only want to handle integral modes. This catches VOIDmode,
10894 CCmode, and the floating-point modes. An exception is that we
10895 can handle VOIDmode if OP0 is a COMPARE or a comparison
10896 operation. */
10897
10898 if (GET_MODE_CLASS (mode) != MODE_INT
10899 && ! (mode == VOIDmode
10900 && (GET_CODE (op0) == COMPARE || COMPARISON_P (op0))))
10901 break;
10902
10903 /* Get the constant we are comparing against and turn off all bits
10904 not on in our mode. */
10905 const_op = INTVAL (op1);
10906 if (mode != VOIDmode)
10907 const_op = trunc_int_for_mode (const_op, mode);
10908 op1 = GEN_INT (const_op);
10909
10910 /* If we are comparing against a constant power of two and the value
10911 being compared can only have that single bit nonzero (e.g., it was
10912 `and'ed with that bit), we can replace this with a comparison
10913 with zero. */
10914 if (const_op
10915 && (code == EQ || code == NE || code == GE || code == GEU
10916 || code == LT || code == LTU)
10917 && mode_width <= HOST_BITS_PER_WIDE_INT
10918 && exact_log2 (const_op) >= 0
10919 && nonzero_bits (op0, mode) == (unsigned HOST_WIDE_INT) const_op)
10920 {
10921 code = (code == EQ || code == GE || code == GEU ? NE : EQ);
10922 op1 = const0_rtx, const_op = 0;
10923 }
10924
10925 /* Similarly, if we are comparing a value known to be either -1 or
10926 0 with -1, change it to the opposite comparison against zero. */
10927
10928 if (const_op == -1
10929 && (code == EQ || code == NE || code == GT || code == LE
10930 || code == GEU || code == LTU)
10931 && num_sign_bit_copies (op0, mode) == mode_width)
10932 {
10933 code = (code == EQ || code == LE || code == GEU ? NE : EQ);
10934 op1 = const0_rtx, const_op = 0;
10935 }
10936
10937 /* Do some canonicalizations based on the comparison code. We prefer
10938 comparisons against zero and then prefer equality comparisons.
10939 If we can reduce the size of a constant, we will do that too. */
10940
10941 switch (code)
10942 {
10943 case LT:
10944 /* < C is equivalent to <= (C - 1) */
10945 if (const_op > 0)
10946 {
10947 const_op -= 1;
10948 op1 = GEN_INT (const_op);
10949 code = LE;
10950 /* ... fall through to LE case below. */
10951 }
10952 else
10953 break;
10954
10955 case LE:
10956 /* <= C is equivalent to < (C + 1); we do this for C < 0 */
10957 if (const_op < 0)
10958 {
10959 const_op += 1;
10960 op1 = GEN_INT (const_op);
10961 code = LT;
10962 }
10963
10964 /* If we are doing a <= 0 comparison on a value known to have
10965 a zero sign bit, we can replace this with == 0. */
10966 else if (const_op == 0
10967 && mode_width <= HOST_BITS_PER_WIDE_INT
10968 && (nonzero_bits (op0, mode)
10969 & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
10970 == 0)
10971 code = EQ;
10972 break;
10973
10974 case GE:
10975 /* >= C is equivalent to > (C - 1). */
10976 if (const_op > 0)
10977 {
10978 const_op -= 1;
10979 op1 = GEN_INT (const_op);
10980 code = GT;
10981 /* ... fall through to GT below. */
10982 }
10983 else
10984 break;
10985
10986 case GT:
10987 /* > C is equivalent to >= (C + 1); we do this for C < 0. */
10988 if (const_op < 0)
10989 {
10990 const_op += 1;
10991 op1 = GEN_INT (const_op);
10992 code = GE;
10993 }
10994
10995 /* If we are doing a > 0 comparison on a value known to have
10996 a zero sign bit, we can replace this with != 0. */
10997 else if (const_op == 0
10998 && mode_width <= HOST_BITS_PER_WIDE_INT
10999 && (nonzero_bits (op0, mode)
11000 & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
11001 == 0)
11002 code = NE;
11003 break;
11004
11005 case LTU:
11006 /* < C is equivalent to <= (C - 1). */
11007 if (const_op > 0)
11008 {
11009 const_op -= 1;
11010 op1 = GEN_INT (const_op);
11011 code = LEU;
11012 /* ... fall through ... */
11013 }
11014
11015 /* (unsigned) < 0x80000000 is equivalent to >= 0. */
11016 else if (mode_width <= HOST_BITS_PER_WIDE_INT
11017 && (unsigned HOST_WIDE_INT) const_op
11018 == (unsigned HOST_WIDE_INT) 1 << (mode_width - 1))
11019 {
11020 const_op = 0, op1 = const0_rtx;
11021 code = GE;
11022 break;
11023 }
11024 else
11025 break;
11026
11027 case LEU:
11028 /* unsigned <= 0 is equivalent to == 0 */
11029 if (const_op == 0)
11030 code = EQ;
11031
11032 /* (unsigned) <= 0x7fffffff is equivalent to >= 0. */
11033 else if (mode_width <= HOST_BITS_PER_WIDE_INT
11034 && (unsigned HOST_WIDE_INT) const_op
11035 == ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)) - 1)
11036 {
11037 const_op = 0, op1 = const0_rtx;
11038 code = GE;
11039 }
11040 break;
11041
11042 case GEU:
11043 /* >= C is equivalent to > (C - 1). */
11044 if (const_op > 1)
11045 {
11046 const_op -= 1;
11047 op1 = GEN_INT (const_op);
11048 code = GTU;
11049 /* ... fall through ... */
11050 }
11051
11052 /* (unsigned) >= 0x80000000 is equivalent to < 0. */
11053 else if (mode_width <= HOST_BITS_PER_WIDE_INT
11054 && (unsigned HOST_WIDE_INT) const_op
11055 == (unsigned HOST_WIDE_INT) 1 << (mode_width - 1))
11056 {
11057 const_op = 0, op1 = const0_rtx;
11058 code = LT;
11059 break;
11060 }
11061 else
11062 break;
11063
11064 case GTU:
11065 /* unsigned > 0 is equivalent to != 0 */
11066 if (const_op == 0)
11067 code = NE;
11068
11069 /* (unsigned) > 0x7fffffff is equivalent to < 0. */
11070 else if (mode_width <= HOST_BITS_PER_WIDE_INT
11071 && (unsigned HOST_WIDE_INT) const_op
11072 == ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)) - 1)
11073 {
11074 const_op = 0, op1 = const0_rtx;
11075 code = LT;
11076 }
11077 break;
11078
11079 default:
11080 break;
11081 }
11082
11083 /* Compute some predicates to simplify code below. */
11084
11085 equality_comparison_p = (code == EQ || code == NE);
11086 sign_bit_comparison_p = ((code == LT || code == GE) && const_op == 0);
11087 unsigned_comparison_p = (code == LTU || code == LEU || code == GTU
11088 || code == GEU);
11089
11090 /* If this is a sign bit comparison and we can do arithmetic in
11091 MODE, say that we will only be needing the sign bit of OP0. */
11092 if (sign_bit_comparison_p
11093 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
11094 op0 = force_to_mode (op0, mode,
11095 (unsigned HOST_WIDE_INT) 1
11096 << (GET_MODE_BITSIZE (mode) - 1),
11097 0);
11098
11099 /* Now try cases based on the opcode of OP0. If none of the cases
11100 does a "continue", we exit this loop immediately after the
11101 switch. */
11102
11103 switch (GET_CODE (op0))
11104 {
11105 case ZERO_EXTRACT:
11106 /* If we are extracting a single bit from a variable position in
11107 a constant that has only a single bit set and are comparing it
11108 with zero, we can convert this into an equality comparison
11109 between the position and the location of the single bit. */
11110 /* Except we can't if SHIFT_COUNT_TRUNCATED is set, since we might
11111 have already reduced the shift count modulo the word size. */
11112 if (!SHIFT_COUNT_TRUNCATED
11113 && CONST_INT_P (XEXP (op0, 0))
11114 && XEXP (op0, 1) == const1_rtx
11115 && equality_comparison_p && const_op == 0
11116 && (i = exact_log2 (UINTVAL (XEXP (op0, 0)))) >= 0)
11117 {
11118 if (BITS_BIG_ENDIAN)
11119 {
11120 enum machine_mode new_mode
11121 = mode_for_extraction (EP_extzv, 1);
11122 if (new_mode == MAX_MACHINE_MODE)
11123 i = BITS_PER_WORD - 1 - i;
11124 else
11125 {
11126 mode = new_mode;
11127 i = (GET_MODE_BITSIZE (mode) - 1 - i);
11128 }
11129 }
11130
11131 op0 = XEXP (op0, 2);
11132 op1 = GEN_INT (i);
11133 const_op = i;
11134
11135 /* Result is nonzero iff shift count is equal to I. */
11136 code = reverse_condition (code);
11137 continue;
11138 }
11139
11140 /* ... fall through ... */
11141
11142 case SIGN_EXTRACT:
11143 tem = expand_compound_operation (op0);
11144 if (tem != op0)
11145 {
11146 op0 = tem;
11147 continue;
11148 }
11149 break;
11150
11151 case NOT:
11152 /* If testing for equality, we can take the NOT of the constant. */
11153 if (equality_comparison_p
11154 && (tem = simplify_unary_operation (NOT, mode, op1, mode)) != 0)
11155 {
11156 op0 = XEXP (op0, 0);
11157 op1 = tem;
11158 continue;
11159 }
11160
11161 /* If just looking at the sign bit, reverse the sense of the
11162 comparison. */
11163 if (sign_bit_comparison_p)
11164 {
11165 op0 = XEXP (op0, 0);
11166 code = (code == GE ? LT : GE);
11167 continue;
11168 }
11169 break;
11170
11171 case NEG:
11172 /* If testing for equality, we can take the NEG of the constant. */
11173 if (equality_comparison_p
11174 && (tem = simplify_unary_operation (NEG, mode, op1, mode)) != 0)
11175 {
11176 op0 = XEXP (op0, 0);
11177 op1 = tem;
11178 continue;
11179 }
11180
11181 /* The remaining cases only apply to comparisons with zero. */
11182 if (const_op != 0)
11183 break;
11184
11185 /* When X is ABS or is known positive,
11186 (neg X) is < 0 if and only if X != 0. */
11187
11188 if (sign_bit_comparison_p
11189 && (GET_CODE (XEXP (op0, 0)) == ABS
11190 || (mode_width <= HOST_BITS_PER_WIDE_INT
11191 && (nonzero_bits (XEXP (op0, 0), mode)
11192 & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
11193 == 0)))
11194 {
11195 op0 = XEXP (op0, 0);
11196 code = (code == LT ? NE : EQ);
11197 continue;
11198 }
11199
11200 /* If we have NEG of something whose two high-order bits are the
11201 same, we know that "(-a) < 0" is equivalent to "a > 0". */
11202 if (num_sign_bit_copies (op0, mode) >= 2)
11203 {
11204 op0 = XEXP (op0, 0);
11205 code = swap_condition (code);
11206 continue;
11207 }
11208 break;
11209
11210 case ROTATE:
11211 /* If we are testing equality and our count is a constant, we
11212 can perform the inverse operation on our RHS. */
11213 if (equality_comparison_p && CONST_INT_P (XEXP (op0, 1))
11214 && (tem = simplify_binary_operation (ROTATERT, mode,
11215 op1, XEXP (op0, 1))) != 0)
11216 {
11217 op0 = XEXP (op0, 0);
11218 op1 = tem;
11219 continue;
11220 }
11221
11222 /* If we are doing a < 0 or >= 0 comparison, it means we are testing
11223 a particular bit. Convert it to an AND of a constant of that
11224 bit. This will be converted into a ZERO_EXTRACT. */
11225 if (const_op == 0 && sign_bit_comparison_p
11226 && CONST_INT_P (XEXP (op0, 1))
11227 && mode_width <= HOST_BITS_PER_WIDE_INT)
11228 {
11229 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
11230 ((unsigned HOST_WIDE_INT) 1
11231 << (mode_width - 1
11232 - INTVAL (XEXP (op0, 1)))));
11233 code = (code == LT ? NE : EQ);
11234 continue;
11235 }
11236
11237 /* Fall through. */
11238
11239 case ABS:
11240 /* ABS is ignorable inside an equality comparison with zero. */
11241 if (const_op == 0 && equality_comparison_p)
11242 {
11243 op0 = XEXP (op0, 0);
11244 continue;
11245 }
11246 break;
11247
11248 case SIGN_EXTEND:
11249 /* Can simplify (compare (zero/sign_extend FOO) CONST) to
11250 (compare FOO CONST) if CONST fits in FOO's mode and we
11251 are either testing inequality or have an unsigned
11252 comparison with ZERO_EXTEND or a signed comparison with
11253 SIGN_EXTEND. But don't do it if we don't have a compare
11254 insn of the given mode, since we'd have to revert it
11255 later on, and then we wouldn't know whether to sign- or
11256 zero-extend. */
11257 mode = GET_MODE (XEXP (op0, 0));
11258 if (mode != VOIDmode && GET_MODE_CLASS (mode) == MODE_INT
11259 && ! unsigned_comparison_p
11260 && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
11261 && ((unsigned HOST_WIDE_INT) const_op
11262 < (((unsigned HOST_WIDE_INT) 1
11263 << (GET_MODE_BITSIZE (mode) - 1))))
11264 && have_insn_for (COMPARE, mode))
11265 {
11266 op0 = XEXP (op0, 0);
11267 continue;
11268 }
11269 break;
11270
11271 case SUBREG:
11272 /* Check for the case where we are comparing A - C1 with C2, that is
11273
11274 (subreg:MODE (plus (A) (-C1))) op (C2)
11275
11276 with C1 a constant, and try to lift the SUBREG, i.e. to do the
11277 comparison in the wider mode. One of the following two conditions
11278 must be true in order for this to be valid:
11279
11280 1. The mode extension results in the same bit pattern being added
11281 on both sides and the comparison is equality or unsigned. As
11282 C2 has been truncated to fit in MODE, the pattern can only be
11283 all 0s or all 1s.
11284
11285 2. The mode extension results in the sign bit being copied on
11286 each side.
11287
11288 The difficulty here is that we have predicates for A but not for
11289 (A - C1) so we need to check that C1 is within proper bounds so
11290 as to perturbate A as little as possible. */
11291
11292 if (mode_width <= HOST_BITS_PER_WIDE_INT
11293 && subreg_lowpart_p (op0)
11294 && GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0))) > mode_width
11295 && GET_CODE (SUBREG_REG (op0)) == PLUS
11296 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1)))
11297 {
11298 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
11299 rtx a = XEXP (SUBREG_REG (op0), 0);
11300 HOST_WIDE_INT c1 = -INTVAL (XEXP (SUBREG_REG (op0), 1));
11301
11302 if ((c1 > 0
11303 && (unsigned HOST_WIDE_INT) c1
11304 < (unsigned HOST_WIDE_INT) 1 << (mode_width - 1)
11305 && (equality_comparison_p || unsigned_comparison_p)
11306 /* (A - C1) zero-extends if it is positive and sign-extends
11307 if it is negative, C2 both zero- and sign-extends. */
11308 && ((0 == (nonzero_bits (a, inner_mode)
11309 & ~GET_MODE_MASK (mode))
11310 && const_op >= 0)
11311 /* (A - C1) sign-extends if it is positive and 1-extends
11312 if it is negative, C2 both sign- and 1-extends. */
11313 || (num_sign_bit_copies (a, inner_mode)
11314 > (unsigned int) (GET_MODE_BITSIZE (inner_mode)
11315 - mode_width)
11316 && const_op < 0)))
11317 || ((unsigned HOST_WIDE_INT) c1
11318 < (unsigned HOST_WIDE_INT) 1 << (mode_width - 2)
11319 /* (A - C1) always sign-extends, like C2. */
11320 && num_sign_bit_copies (a, inner_mode)
11321 > (unsigned int) (GET_MODE_BITSIZE (inner_mode)
11322 - (mode_width - 1))))
11323 {
11324 op0 = SUBREG_REG (op0);
11325 continue;
11326 }
11327 }
11328
11329 /* If the inner mode is narrower and we are extracting the low part,
11330 we can treat the SUBREG as if it were a ZERO_EXTEND. */
11331 if (subreg_lowpart_p (op0)
11332 && GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0))) < mode_width)
11333 /* Fall through */ ;
11334 else
11335 break;
11336
11337 /* ... fall through ... */
11338
11339 case ZERO_EXTEND:
11340 mode = GET_MODE (XEXP (op0, 0));
11341 if (mode != VOIDmode && GET_MODE_CLASS (mode) == MODE_INT
11342 && (unsigned_comparison_p || equality_comparison_p)
11343 && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
11344 && ((unsigned HOST_WIDE_INT) const_op < GET_MODE_MASK (mode))
11345 && have_insn_for (COMPARE, mode))
11346 {
11347 op0 = XEXP (op0, 0);
11348 continue;
11349 }
11350 break;
11351
11352 case PLUS:
11353 /* (eq (plus X A) B) -> (eq X (minus B A)). We can only do
11354 this for equality comparisons due to pathological cases involving
11355 overflows. */
11356 if (equality_comparison_p
11357 && 0 != (tem = simplify_binary_operation (MINUS, mode,
11358 op1, XEXP (op0, 1))))
11359 {
11360 op0 = XEXP (op0, 0);
11361 op1 = tem;
11362 continue;
11363 }
11364
11365 /* (plus (abs X) (const_int -1)) is < 0 if and only if X == 0. */
11366 if (const_op == 0 && XEXP (op0, 1) == constm1_rtx
11367 && GET_CODE (XEXP (op0, 0)) == ABS && sign_bit_comparison_p)
11368 {
11369 op0 = XEXP (XEXP (op0, 0), 0);
11370 code = (code == LT ? EQ : NE);
11371 continue;
11372 }
11373 break;
11374
11375 case MINUS:
11376 /* We used to optimize signed comparisons against zero, but that
11377 was incorrect. Unsigned comparisons against zero (GTU, LEU)
11378 arrive here as equality comparisons, or (GEU, LTU) are
11379 optimized away. No need to special-case them. */
11380
11381 /* (eq (minus A B) C) -> (eq A (plus B C)) or
11382 (eq B (minus A C)), whichever simplifies. We can only do
11383 this for equality comparisons due to pathological cases involving
11384 overflows. */
11385 if (equality_comparison_p
11386 && 0 != (tem = simplify_binary_operation (PLUS, mode,
11387 XEXP (op0, 1), op1)))
11388 {
11389 op0 = XEXP (op0, 0);
11390 op1 = tem;
11391 continue;
11392 }
11393
11394 if (equality_comparison_p
11395 && 0 != (tem = simplify_binary_operation (MINUS, mode,
11396 XEXP (op0, 0), op1)))
11397 {
11398 op0 = XEXP (op0, 1);
11399 op1 = tem;
11400 continue;
11401 }
11402
11403 /* The sign bit of (minus (ashiftrt X C) X), where C is the number
11404 of bits in X minus 1, is one iff X > 0. */
11405 if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == ASHIFTRT
11406 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
11407 && UINTVAL (XEXP (XEXP (op0, 0), 1)) == mode_width - 1
11408 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
11409 {
11410 op0 = XEXP (op0, 1);
11411 code = (code == GE ? LE : GT);
11412 continue;
11413 }
11414 break;
11415
11416 case XOR:
11417 /* (eq (xor A B) C) -> (eq A (xor B C)). This is a simplification
11418 if C is zero or B is a constant. */
11419 if (equality_comparison_p
11420 && 0 != (tem = simplify_binary_operation (XOR, mode,
11421 XEXP (op0, 1), op1)))
11422 {
11423 op0 = XEXP (op0, 0);
11424 op1 = tem;
11425 continue;
11426 }
11427 break;
11428
11429 case EQ: case NE:
11430 case UNEQ: case LTGT:
11431 case LT: case LTU: case UNLT: case LE: case LEU: case UNLE:
11432 case GT: case GTU: case UNGT: case GE: case GEU: case UNGE:
11433 case UNORDERED: case ORDERED:
11434 /* We can't do anything if OP0 is a condition code value, rather
11435 than an actual data value. */
11436 if (const_op != 0
11437 || CC0_P (XEXP (op0, 0))
11438 || GET_MODE_CLASS (GET_MODE (XEXP (op0, 0))) == MODE_CC)
11439 break;
11440
11441 /* Get the two operands being compared. */
11442 if (GET_CODE (XEXP (op0, 0)) == COMPARE)
11443 tem = XEXP (XEXP (op0, 0), 0), tem1 = XEXP (XEXP (op0, 0), 1);
11444 else
11445 tem = XEXP (op0, 0), tem1 = XEXP (op0, 1);
11446
11447 /* Check for the cases where we simply want the result of the
11448 earlier test or the opposite of that result. */
11449 if (code == NE || code == EQ
11450 || (GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT
11451 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT
11452 && (STORE_FLAG_VALUE
11453 & (((unsigned HOST_WIDE_INT) 1
11454 << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
11455 && (code == LT || code == GE)))
11456 {
11457 enum rtx_code new_code;
11458 if (code == LT || code == NE)
11459 new_code = GET_CODE (op0);
11460 else
11461 new_code = reversed_comparison_code (op0, NULL);
11462
11463 if (new_code != UNKNOWN)
11464 {
11465 code = new_code;
11466 op0 = tem;
11467 op1 = tem1;
11468 continue;
11469 }
11470 }
11471 break;
11472
11473 case IOR:
11474 /* The sign bit of (ior (plus X (const_int -1)) X) is nonzero
11475 iff X <= 0. */
11476 if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == PLUS
11477 && XEXP (XEXP (op0, 0), 1) == constm1_rtx
11478 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
11479 {
11480 op0 = XEXP (op0, 1);
11481 code = (code == GE ? GT : LE);
11482 continue;
11483 }
11484 break;
11485
11486 case AND:
11487 /* Convert (and (xshift 1 X) Y) to (and (lshiftrt Y X) 1). This
11488 will be converted to a ZERO_EXTRACT later. */
11489 if (const_op == 0 && equality_comparison_p
11490 && GET_CODE (XEXP (op0, 0)) == ASHIFT
11491 && XEXP (XEXP (op0, 0), 0) == const1_rtx)
11492 {
11493 op0 = gen_rtx_LSHIFTRT (mode, XEXP (op0, 1),
11494 XEXP (XEXP (op0, 0), 1));
11495 op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
11496 continue;
11497 }
11498
11499 /* If we are comparing (and (lshiftrt X C1) C2) for equality with
11500 zero and X is a comparison and C1 and C2 describe only bits set
11501 in STORE_FLAG_VALUE, we can compare with X. */
11502 if (const_op == 0 && equality_comparison_p
11503 && mode_width <= HOST_BITS_PER_WIDE_INT
11504 && CONST_INT_P (XEXP (op0, 1))
11505 && GET_CODE (XEXP (op0, 0)) == LSHIFTRT
11506 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
11507 && INTVAL (XEXP (XEXP (op0, 0), 1)) >= 0
11508 && INTVAL (XEXP (XEXP (op0, 0), 1)) < HOST_BITS_PER_WIDE_INT)
11509 {
11510 mask = ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
11511 << INTVAL (XEXP (XEXP (op0, 0), 1)));
11512 if ((~STORE_FLAG_VALUE & mask) == 0
11513 && (COMPARISON_P (XEXP (XEXP (op0, 0), 0))
11514 || ((tem = get_last_value (XEXP (XEXP (op0, 0), 0))) != 0
11515 && COMPARISON_P (tem))))
11516 {
11517 op0 = XEXP (XEXP (op0, 0), 0);
11518 continue;
11519 }
11520 }
11521
11522 /* If we are doing an equality comparison of an AND of a bit equal
11523 to the sign bit, replace this with a LT or GE comparison of
11524 the underlying value. */
11525 if (equality_comparison_p
11526 && const_op == 0
11527 && CONST_INT_P (XEXP (op0, 1))
11528 && mode_width <= HOST_BITS_PER_WIDE_INT
11529 && ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
11530 == (unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
11531 {
11532 op0 = XEXP (op0, 0);
11533 code = (code == EQ ? GE : LT);
11534 continue;
11535 }
11536
11537 /* If this AND operation is really a ZERO_EXTEND from a narrower
11538 mode, the constant fits within that mode, and this is either an
11539 equality or unsigned comparison, try to do this comparison in
11540 the narrower mode.
11541
11542 Note that in:
11543
11544 (ne:DI (and:DI (reg:DI 4) (const_int 0xffffffff)) (const_int 0))
11545 -> (ne:DI (reg:SI 4) (const_int 0))
11546
11547 unless TRULY_NOOP_TRUNCATION allows it or the register is
11548 known to hold a value of the required mode the
11549 transformation is invalid. */
11550 if ((equality_comparison_p || unsigned_comparison_p)
11551 && CONST_INT_P (XEXP (op0, 1))
11552 && (i = exact_log2 ((UINTVAL (XEXP (op0, 1))
11553 & GET_MODE_MASK (mode))
11554 + 1)) >= 0
11555 && const_op >> i == 0
11556 && (tmode = mode_for_size (i, MODE_INT, 1)) != BLKmode
11557 && (TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (tmode),
11558 GET_MODE_BITSIZE (GET_MODE (op0)))
11559 || (REG_P (XEXP (op0, 0))
11560 && reg_truncated_to_mode (tmode, XEXP (op0, 0)))))
11561 {
11562 op0 = gen_lowpart (tmode, XEXP (op0, 0));
11563 continue;
11564 }
11565
11566 /* If this is (and:M1 (subreg:M2 X 0) (const_int C1)) where C1
11567 fits in both M1 and M2 and the SUBREG is either paradoxical
11568 or represents the low part, permute the SUBREG and the AND
11569 and try again. */
11570 if (GET_CODE (XEXP (op0, 0)) == SUBREG)
11571 {
11572 unsigned HOST_WIDE_INT c1;
11573 tmode = GET_MODE (SUBREG_REG (XEXP (op0, 0)));
11574 /* Require an integral mode, to avoid creating something like
11575 (AND:SF ...). */
11576 if (SCALAR_INT_MODE_P (tmode)
11577 /* It is unsafe to commute the AND into the SUBREG if the
11578 SUBREG is paradoxical and WORD_REGISTER_OPERATIONS is
11579 not defined. As originally written the upper bits
11580 have a defined value due to the AND operation.
11581 However, if we commute the AND inside the SUBREG then
11582 they no longer have defined values and the meaning of
11583 the code has been changed. */
11584 && (0
11585 #ifdef WORD_REGISTER_OPERATIONS
11586 || (mode_width > GET_MODE_BITSIZE (tmode)
11587 && mode_width <= BITS_PER_WORD)
11588 #endif
11589 || (mode_width <= GET_MODE_BITSIZE (tmode)
11590 && subreg_lowpart_p (XEXP (op0, 0))))
11591 && CONST_INT_P (XEXP (op0, 1))
11592 && mode_width <= HOST_BITS_PER_WIDE_INT
11593 && GET_MODE_BITSIZE (tmode) <= HOST_BITS_PER_WIDE_INT
11594 && ((c1 = INTVAL (XEXP (op0, 1))) & ~mask) == 0
11595 && (c1 & ~GET_MODE_MASK (tmode)) == 0
11596 && c1 != mask
11597 && c1 != GET_MODE_MASK (tmode))
11598 {
11599 op0 = simplify_gen_binary (AND, tmode,
11600 SUBREG_REG (XEXP (op0, 0)),
11601 gen_int_mode (c1, tmode));
11602 op0 = gen_lowpart (mode, op0);
11603 continue;
11604 }
11605 }
11606
11607 /* Convert (ne (and (not X) 1) 0) to (eq (and X 1) 0). */
11608 if (const_op == 0 && equality_comparison_p
11609 && XEXP (op0, 1) == const1_rtx
11610 && GET_CODE (XEXP (op0, 0)) == NOT)
11611 {
11612 op0 = simplify_and_const_int (NULL_RTX, mode,
11613 XEXP (XEXP (op0, 0), 0), 1);
11614 code = (code == NE ? EQ : NE);
11615 continue;
11616 }
11617
11618 /* Convert (ne (and (lshiftrt (not X)) 1) 0) to
11619 (eq (and (lshiftrt X) 1) 0).
11620 Also handle the case where (not X) is expressed using xor. */
11621 if (const_op == 0 && equality_comparison_p
11622 && XEXP (op0, 1) == const1_rtx
11623 && GET_CODE (XEXP (op0, 0)) == LSHIFTRT)
11624 {
11625 rtx shift_op = XEXP (XEXP (op0, 0), 0);
11626 rtx shift_count = XEXP (XEXP (op0, 0), 1);
11627
11628 if (GET_CODE (shift_op) == NOT
11629 || (GET_CODE (shift_op) == XOR
11630 && CONST_INT_P (XEXP (shift_op, 1))
11631 && CONST_INT_P (shift_count)
11632 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
11633 && (UINTVAL (XEXP (shift_op, 1))
11634 == (unsigned HOST_WIDE_INT) 1
11635 << INTVAL (shift_count))))
11636 {
11637 op0
11638 = gen_rtx_LSHIFTRT (mode, XEXP (shift_op, 0), shift_count);
11639 op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
11640 code = (code == NE ? EQ : NE);
11641 continue;
11642 }
11643 }
11644 break;
11645
11646 case ASHIFT:
11647 /* If we have (compare (ashift FOO N) (const_int C)) and
11648 the high order N bits of FOO (N+1 if an inequality comparison)
11649 are known to be zero, we can do this by comparing FOO with C
11650 shifted right N bits so long as the low-order N bits of C are
11651 zero. */
11652 if (CONST_INT_P (XEXP (op0, 1))
11653 && INTVAL (XEXP (op0, 1)) >= 0
11654 && ((INTVAL (XEXP (op0, 1)) + ! equality_comparison_p)
11655 < HOST_BITS_PER_WIDE_INT)
11656 && (((unsigned HOST_WIDE_INT) const_op
11657 & (((unsigned HOST_WIDE_INT) 1 << INTVAL (XEXP (op0, 1)))
11658 - 1)) == 0)
11659 && mode_width <= HOST_BITS_PER_WIDE_INT
11660 && (nonzero_bits (XEXP (op0, 0), mode)
11661 & ~(mask >> (INTVAL (XEXP (op0, 1))
11662 + ! equality_comparison_p))) == 0)
11663 {
11664 /* We must perform a logical shift, not an arithmetic one,
11665 as we want the top N bits of C to be zero. */
11666 unsigned HOST_WIDE_INT temp = const_op & GET_MODE_MASK (mode);
11667
11668 temp >>= INTVAL (XEXP (op0, 1));
11669 op1 = gen_int_mode (temp, mode);
11670 op0 = XEXP (op0, 0);
11671 continue;
11672 }
11673
11674 /* If we are doing a sign bit comparison, it means we are testing
11675 a particular bit. Convert it to the appropriate AND. */
11676 if (sign_bit_comparison_p && CONST_INT_P (XEXP (op0, 1))
11677 && mode_width <= HOST_BITS_PER_WIDE_INT)
11678 {
11679 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
11680 ((unsigned HOST_WIDE_INT) 1
11681 << (mode_width - 1
11682 - INTVAL (XEXP (op0, 1)))));
11683 code = (code == LT ? NE : EQ);
11684 continue;
11685 }
11686
11687 /* If this an equality comparison with zero and we are shifting
11688 the low bit to the sign bit, we can convert this to an AND of the
11689 low-order bit. */
11690 if (const_op == 0 && equality_comparison_p
11691 && CONST_INT_P (XEXP (op0, 1))
11692 && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
11693 {
11694 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0), 1);
11695 continue;
11696 }
11697 break;
11698
11699 case ASHIFTRT:
11700 /* If this is an equality comparison with zero, we can do this
11701 as a logical shift, which might be much simpler. */
11702 if (equality_comparison_p && const_op == 0
11703 && CONST_INT_P (XEXP (op0, 1)))
11704 {
11705 op0 = simplify_shift_const (NULL_RTX, LSHIFTRT, mode,
11706 XEXP (op0, 0),
11707 INTVAL (XEXP (op0, 1)));
11708 continue;
11709 }
11710
11711 /* If OP0 is a sign extension and CODE is not an unsigned comparison,
11712 do the comparison in a narrower mode. */
11713 if (! unsigned_comparison_p
11714 && CONST_INT_P (XEXP (op0, 1))
11715 && GET_CODE (XEXP (op0, 0)) == ASHIFT
11716 && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
11717 && (tmode = mode_for_size (mode_width - INTVAL (XEXP (op0, 1)),
11718 MODE_INT, 1)) != BLKmode
11719 && (((unsigned HOST_WIDE_INT) const_op
11720 + (GET_MODE_MASK (tmode) >> 1) + 1)
11721 <= GET_MODE_MASK (tmode)))
11722 {
11723 op0 = gen_lowpart (tmode, XEXP (XEXP (op0, 0), 0));
11724 continue;
11725 }
11726
11727 /* Likewise if OP0 is a PLUS of a sign extension with a
11728 constant, which is usually represented with the PLUS
11729 between the shifts. */
11730 if (! unsigned_comparison_p
11731 && CONST_INT_P (XEXP (op0, 1))
11732 && GET_CODE (XEXP (op0, 0)) == PLUS
11733 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
11734 && GET_CODE (XEXP (XEXP (op0, 0), 0)) == ASHIFT
11735 && XEXP (op0, 1) == XEXP (XEXP (XEXP (op0, 0), 0), 1)
11736 && (tmode = mode_for_size (mode_width - INTVAL (XEXP (op0, 1)),
11737 MODE_INT, 1)) != BLKmode
11738 && (((unsigned HOST_WIDE_INT) const_op
11739 + (GET_MODE_MASK (tmode) >> 1) + 1)
11740 <= GET_MODE_MASK (tmode)))
11741 {
11742 rtx inner = XEXP (XEXP (XEXP (op0, 0), 0), 0);
11743 rtx add_const = XEXP (XEXP (op0, 0), 1);
11744 rtx new_const = simplify_gen_binary (ASHIFTRT, GET_MODE (op0),
11745 add_const, XEXP (op0, 1));
11746
11747 op0 = simplify_gen_binary (PLUS, tmode,
11748 gen_lowpart (tmode, inner),
11749 new_const);
11750 continue;
11751 }
11752
11753 /* ... fall through ... */
11754 case LSHIFTRT:
11755 /* If we have (compare (xshiftrt FOO N) (const_int C)) and
11756 the low order N bits of FOO are known to be zero, we can do this
11757 by comparing FOO with C shifted left N bits so long as no
11758 overflow occurs. Even if the low order N bits of FOO aren't known
11759 to be zero, if the comparison is >= or < we can use the same
11760 optimization and for > or <= by setting all the low
11761 order N bits in the comparison constant. */
11762 if (CONST_INT_P (XEXP (op0, 1))
11763 && INTVAL (XEXP (op0, 1)) > 0
11764 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
11765 && mode_width <= HOST_BITS_PER_WIDE_INT
11766 && (((unsigned HOST_WIDE_INT) const_op
11767 + (GET_CODE (op0) != LSHIFTRT
11768 ? ((GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1)) >> 1)
11769 + 1)
11770 : 0))
11771 <= GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1))))
11772 {
11773 unsigned HOST_WIDE_INT low_bits
11774 = (nonzero_bits (XEXP (op0, 0), mode)
11775 & (((unsigned HOST_WIDE_INT) 1
11776 << INTVAL (XEXP (op0, 1))) - 1));
11777 if (low_bits == 0 || !equality_comparison_p)
11778 {
11779 /* If the shift was logical, then we must make the condition
11780 unsigned. */
11781 if (GET_CODE (op0) == LSHIFTRT)
11782 code = unsigned_condition (code);
11783
11784 const_op <<= INTVAL (XEXP (op0, 1));
11785 if (low_bits != 0
11786 && (code == GT || code == GTU
11787 || code == LE || code == LEU))
11788 const_op
11789 |= (((HOST_WIDE_INT) 1 << INTVAL (XEXP (op0, 1))) - 1);
11790 op1 = GEN_INT (const_op);
11791 op0 = XEXP (op0, 0);
11792 continue;
11793 }
11794 }
11795
11796 /* If we are using this shift to extract just the sign bit, we
11797 can replace this with an LT or GE comparison. */
11798 if (const_op == 0
11799 && (equality_comparison_p || sign_bit_comparison_p)
11800 && CONST_INT_P (XEXP (op0, 1))
11801 && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
11802 {
11803 op0 = XEXP (op0, 0);
11804 code = (code == NE || code == GT ? LT : GE);
11805 continue;
11806 }
11807 break;
11808
11809 default:
11810 break;
11811 }
11812
11813 break;
11814 }
11815
11816 /* Now make any compound operations involved in this comparison. Then,
11817 check for an outmost SUBREG on OP0 that is not doing anything or is
11818 paradoxical. The latter transformation must only be performed when
11819 it is known that the "extra" bits will be the same in op0 and op1 or
11820 that they don't matter. There are three cases to consider:
11821
11822 1. SUBREG_REG (op0) is a register. In this case the bits are don't
11823 care bits and we can assume they have any convenient value. So
11824 making the transformation is safe.
11825
11826 2. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is not defined.
11827 In this case the upper bits of op0 are undefined. We should not make
11828 the simplification in that case as we do not know the contents of
11829 those bits.
11830
11831 3. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is defined and not
11832 UNKNOWN. In that case we know those bits are zeros or ones. We must
11833 also be sure that they are the same as the upper bits of op1.
11834
11835 We can never remove a SUBREG for a non-equality comparison because
11836 the sign bit is in a different place in the underlying object. */
11837
11838 op0 = make_compound_operation (op0, op1 == const0_rtx ? COMPARE : SET);
11839 op1 = make_compound_operation (op1, SET);
11840
11841 if (GET_CODE (op0) == SUBREG && subreg_lowpart_p (op0)
11842 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT
11843 && GET_MODE_CLASS (GET_MODE (SUBREG_REG (op0))) == MODE_INT
11844 && (code == NE || code == EQ))
11845 {
11846 if (GET_MODE_SIZE (GET_MODE (op0))
11847 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0))))
11848 {
11849 /* For paradoxical subregs, allow case 1 as above. Case 3 isn't
11850 implemented. */
11851 if (REG_P (SUBREG_REG (op0)))
11852 {
11853 op0 = SUBREG_REG (op0);
11854 op1 = gen_lowpart (GET_MODE (op0), op1);
11855 }
11856 }
11857 else if ((GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
11858 <= HOST_BITS_PER_WIDE_INT)
11859 && (nonzero_bits (SUBREG_REG (op0),
11860 GET_MODE (SUBREG_REG (op0)))
11861 & ~GET_MODE_MASK (GET_MODE (op0))) == 0)
11862 {
11863 tem = gen_lowpart (GET_MODE (SUBREG_REG (op0)), op1);
11864
11865 if ((nonzero_bits (tem, GET_MODE (SUBREG_REG (op0)))
11866 & ~GET_MODE_MASK (GET_MODE (op0))) == 0)
11867 op0 = SUBREG_REG (op0), op1 = tem;
11868 }
11869 }
11870
11871 /* We now do the opposite procedure: Some machines don't have compare
11872 insns in all modes. If OP0's mode is an integer mode smaller than a
11873 word and we can't do a compare in that mode, see if there is a larger
11874 mode for which we can do the compare. There are a number of cases in
11875 which we can use the wider mode. */
11876
11877 mode = GET_MODE (op0);
11878 if (mode != VOIDmode && GET_MODE_CLASS (mode) == MODE_INT
11879 && GET_MODE_SIZE (mode) < UNITS_PER_WORD
11880 && ! have_insn_for (COMPARE, mode))
11881 for (tmode = GET_MODE_WIDER_MODE (mode);
11882 (tmode != VOIDmode
11883 && GET_MODE_BITSIZE (tmode) <= HOST_BITS_PER_WIDE_INT);
11884 tmode = GET_MODE_WIDER_MODE (tmode))
11885 if (have_insn_for (COMPARE, tmode))
11886 {
11887 int zero_extended;
11888
11889 /* If this is a test for negative, we can make an explicit
11890 test of the sign bit. Test this first so we can use
11891 a paradoxical subreg to extend OP0. */
11892
11893 if (op1 == const0_rtx && (code == LT || code == GE)
11894 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
11895 {
11896 op0 = simplify_gen_binary (AND, tmode,
11897 gen_lowpart (tmode, op0),
11898 GEN_INT ((unsigned HOST_WIDE_INT) 1
11899 << (GET_MODE_BITSIZE (mode)
11900 - 1)));
11901 code = (code == LT) ? NE : EQ;
11902 break;
11903 }
11904
11905 /* If the only nonzero bits in OP0 and OP1 are those in the
11906 narrower mode and this is an equality or unsigned comparison,
11907 we can use the wider mode. Similarly for sign-extended
11908 values, in which case it is true for all comparisons. */
11909 zero_extended = ((code == EQ || code == NE
11910 || code == GEU || code == GTU
11911 || code == LEU || code == LTU)
11912 && (nonzero_bits (op0, tmode)
11913 & ~GET_MODE_MASK (mode)) == 0
11914 && ((CONST_INT_P (op1)
11915 || (nonzero_bits (op1, tmode)
11916 & ~GET_MODE_MASK (mode)) == 0)));
11917
11918 if (zero_extended
11919 || ((num_sign_bit_copies (op0, tmode)
11920 > (unsigned int) (GET_MODE_BITSIZE (tmode)
11921 - GET_MODE_BITSIZE (mode)))
11922 && (num_sign_bit_copies (op1, tmode)
11923 > (unsigned int) (GET_MODE_BITSIZE (tmode)
11924 - GET_MODE_BITSIZE (mode)))))
11925 {
11926 /* If OP0 is an AND and we don't have an AND in MODE either,
11927 make a new AND in the proper mode. */
11928 if (GET_CODE (op0) == AND
11929 && !have_insn_for (AND, mode))
11930 op0 = simplify_gen_binary (AND, tmode,
11931 gen_lowpart (tmode,
11932 XEXP (op0, 0)),
11933 gen_lowpart (tmode,
11934 XEXP (op0, 1)));
11935 else
11936 {
11937 if (zero_extended)
11938 {
11939 op0 = simplify_gen_unary (ZERO_EXTEND, tmode, op0, mode);
11940 op1 = simplify_gen_unary (ZERO_EXTEND, tmode, op1, mode);
11941 }
11942 else
11943 {
11944 op0 = simplify_gen_unary (SIGN_EXTEND, tmode, op0, mode);
11945 op1 = simplify_gen_unary (SIGN_EXTEND, tmode, op1, mode);
11946 }
11947 break;
11948 }
11949 }
11950 }
11951
11952 #ifdef CANONICALIZE_COMPARISON
11953 /* If this machine only supports a subset of valid comparisons, see if we
11954 can convert an unsupported one into a supported one. */
11955 CANONICALIZE_COMPARISON (code, op0, op1);
11956 #endif
11957
11958 *pop0 = op0;
11959 *pop1 = op1;
11960
11961 return code;
11962 }
11963 \f
11964 /* Utility function for record_value_for_reg. Count number of
11965 rtxs in X. */
11966 static int
11967 count_rtxs (rtx x)
11968 {
11969 enum rtx_code code = GET_CODE (x);
11970 const char *fmt;
11971 int i, j, ret = 1;
11972
11973 if (GET_RTX_CLASS (code) == '2'
11974 || GET_RTX_CLASS (code) == 'c')
11975 {
11976 rtx x0 = XEXP (x, 0);
11977 rtx x1 = XEXP (x, 1);
11978
11979 if (x0 == x1)
11980 return 1 + 2 * count_rtxs (x0);
11981
11982 if ((GET_RTX_CLASS (GET_CODE (x1)) == '2'
11983 || GET_RTX_CLASS (GET_CODE (x1)) == 'c')
11984 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
11985 return 2 + 2 * count_rtxs (x0)
11986 + count_rtxs (x == XEXP (x1, 0)
11987 ? XEXP (x1, 1) : XEXP (x1, 0));
11988
11989 if ((GET_RTX_CLASS (GET_CODE (x0)) == '2'
11990 || GET_RTX_CLASS (GET_CODE (x0)) == 'c')
11991 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
11992 return 2 + 2 * count_rtxs (x1)
11993 + count_rtxs (x == XEXP (x0, 0)
11994 ? XEXP (x0, 1) : XEXP (x0, 0));
11995 }
11996
11997 fmt = GET_RTX_FORMAT (code);
11998 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
11999 if (fmt[i] == 'e')
12000 ret += count_rtxs (XEXP (x, i));
12001 else if (fmt[i] == 'E')
12002 for (j = 0; j < XVECLEN (x, i); j++)
12003 ret += count_rtxs (XVECEXP (x, i, j));
12004
12005 return ret;
12006 }
12007 \f
12008 /* Utility function for following routine. Called when X is part of a value
12009 being stored into last_set_value. Sets last_set_table_tick
12010 for each register mentioned. Similar to mention_regs in cse.c */
12011
12012 static void
12013 update_table_tick (rtx x)
12014 {
12015 enum rtx_code code = GET_CODE (x);
12016 const char *fmt = GET_RTX_FORMAT (code);
12017 int i, j;
12018
12019 if (code == REG)
12020 {
12021 unsigned int regno = REGNO (x);
12022 unsigned int endregno = END_REGNO (x);
12023 unsigned int r;
12024
12025 for (r = regno; r < endregno; r++)
12026 {
12027 reg_stat_type *rsp = VEC_index (reg_stat_type, reg_stat, r);
12028 rsp->last_set_table_tick = label_tick;
12029 }
12030
12031 return;
12032 }
12033
12034 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
12035 if (fmt[i] == 'e')
12036 {
12037 /* Check for identical subexpressions. If x contains
12038 identical subexpression we only have to traverse one of
12039 them. */
12040 if (i == 0 && ARITHMETIC_P (x))
12041 {
12042 /* Note that at this point x1 has already been
12043 processed. */
12044 rtx x0 = XEXP (x, 0);
12045 rtx x1 = XEXP (x, 1);
12046
12047 /* If x0 and x1 are identical then there is no need to
12048 process x0. */
12049 if (x0 == x1)
12050 break;
12051
12052 /* If x0 is identical to a subexpression of x1 then while
12053 processing x1, x0 has already been processed. Thus we
12054 are done with x. */
12055 if (ARITHMETIC_P (x1)
12056 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
12057 break;
12058
12059 /* If x1 is identical to a subexpression of x0 then we
12060 still have to process the rest of x0. */
12061 if (ARITHMETIC_P (x0)
12062 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
12063 {
12064 update_table_tick (XEXP (x0, x1 == XEXP (x0, 0) ? 1 : 0));
12065 break;
12066 }
12067 }
12068
12069 update_table_tick (XEXP (x, i));
12070 }
12071 else if (fmt[i] == 'E')
12072 for (j = 0; j < XVECLEN (x, i); j++)
12073 update_table_tick (XVECEXP (x, i, j));
12074 }
12075
12076 /* Record that REG is set to VALUE in insn INSN. If VALUE is zero, we
12077 are saying that the register is clobbered and we no longer know its
12078 value. If INSN is zero, don't update reg_stat[].last_set; this is
12079 only permitted with VALUE also zero and is used to invalidate the
12080 register. */
12081
12082 static void
12083 record_value_for_reg (rtx reg, rtx insn, rtx value)
12084 {
12085 unsigned int regno = REGNO (reg);
12086 unsigned int endregno = END_REGNO (reg);
12087 unsigned int i;
12088 reg_stat_type *rsp;
12089
12090 /* If VALUE contains REG and we have a previous value for REG, substitute
12091 the previous value. */
12092 if (value && insn && reg_overlap_mentioned_p (reg, value))
12093 {
12094 rtx tem;
12095
12096 /* Set things up so get_last_value is allowed to see anything set up to
12097 our insn. */
12098 subst_low_luid = DF_INSN_LUID (insn);
12099 tem = get_last_value (reg);
12100
12101 /* If TEM is simply a binary operation with two CLOBBERs as operands,
12102 it isn't going to be useful and will take a lot of time to process,
12103 so just use the CLOBBER. */
12104
12105 if (tem)
12106 {
12107 if (ARITHMETIC_P (tem)
12108 && GET_CODE (XEXP (tem, 0)) == CLOBBER
12109 && GET_CODE (XEXP (tem, 1)) == CLOBBER)
12110 tem = XEXP (tem, 0);
12111 else if (count_occurrences (value, reg, 1) >= 2)
12112 {
12113 /* If there are two or more occurrences of REG in VALUE,
12114 prevent the value from growing too much. */
12115 if (count_rtxs (tem) > MAX_LAST_VALUE_RTL)
12116 tem = gen_rtx_CLOBBER (GET_MODE (tem), const0_rtx);
12117 }
12118
12119 value = replace_rtx (copy_rtx (value), reg, tem);
12120 }
12121 }
12122
12123 /* For each register modified, show we don't know its value, that
12124 we don't know about its bitwise content, that its value has been
12125 updated, and that we don't know the location of the death of the
12126 register. */
12127 for (i = regno; i < endregno; i++)
12128 {
12129 rsp = VEC_index (reg_stat_type, reg_stat, i);
12130
12131 if (insn)
12132 rsp->last_set = insn;
12133
12134 rsp->last_set_value = 0;
12135 rsp->last_set_mode = VOIDmode;
12136 rsp->last_set_nonzero_bits = 0;
12137 rsp->last_set_sign_bit_copies = 0;
12138 rsp->last_death = 0;
12139 rsp->truncated_to_mode = VOIDmode;
12140 }
12141
12142 /* Mark registers that are being referenced in this value. */
12143 if (value)
12144 update_table_tick (value);
12145
12146 /* Now update the status of each register being set.
12147 If someone is using this register in this block, set this register
12148 to invalid since we will get confused between the two lives in this
12149 basic block. This makes using this register always invalid. In cse, we
12150 scan the table to invalidate all entries using this register, but this
12151 is too much work for us. */
12152
12153 for (i = regno; i < endregno; i++)
12154 {
12155 rsp = VEC_index (reg_stat_type, reg_stat, i);
12156 rsp->last_set_label = label_tick;
12157 if (!insn
12158 || (value && rsp->last_set_table_tick >= label_tick_ebb_start))
12159 rsp->last_set_invalid = 1;
12160 else
12161 rsp->last_set_invalid = 0;
12162 }
12163
12164 /* The value being assigned might refer to X (like in "x++;"). In that
12165 case, we must replace it with (clobber (const_int 0)) to prevent
12166 infinite loops. */
12167 rsp = VEC_index (reg_stat_type, reg_stat, regno);
12168 if (value && !get_last_value_validate (&value, insn, label_tick, 0))
12169 {
12170 value = copy_rtx (value);
12171 if (!get_last_value_validate (&value, insn, label_tick, 1))
12172 value = 0;
12173 }
12174
12175 /* For the main register being modified, update the value, the mode, the
12176 nonzero bits, and the number of sign bit copies. */
12177
12178 rsp->last_set_value = value;
12179
12180 if (value)
12181 {
12182 enum machine_mode mode = GET_MODE (reg);
12183 subst_low_luid = DF_INSN_LUID (insn);
12184 rsp->last_set_mode = mode;
12185 if (GET_MODE_CLASS (mode) == MODE_INT
12186 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
12187 mode = nonzero_bits_mode;
12188 rsp->last_set_nonzero_bits = nonzero_bits (value, mode);
12189 rsp->last_set_sign_bit_copies
12190 = num_sign_bit_copies (value, GET_MODE (reg));
12191 }
12192 }
12193
12194 /* Called via note_stores from record_dead_and_set_regs to handle one
12195 SET or CLOBBER in an insn. DATA is the instruction in which the
12196 set is occurring. */
12197
12198 static void
12199 record_dead_and_set_regs_1 (rtx dest, const_rtx setter, void *data)
12200 {
12201 rtx record_dead_insn = (rtx) data;
12202
12203 if (GET_CODE (dest) == SUBREG)
12204 dest = SUBREG_REG (dest);
12205
12206 if (!record_dead_insn)
12207 {
12208 if (REG_P (dest))
12209 record_value_for_reg (dest, NULL_RTX, NULL_RTX);
12210 return;
12211 }
12212
12213 if (REG_P (dest))
12214 {
12215 /* If we are setting the whole register, we know its value. Otherwise
12216 show that we don't know the value. We can handle SUBREG in
12217 some cases. */
12218 if (GET_CODE (setter) == SET && dest == SET_DEST (setter))
12219 record_value_for_reg (dest, record_dead_insn, SET_SRC (setter));
12220 else if (GET_CODE (setter) == SET
12221 && GET_CODE (SET_DEST (setter)) == SUBREG
12222 && SUBREG_REG (SET_DEST (setter)) == dest
12223 && GET_MODE_BITSIZE (GET_MODE (dest)) <= BITS_PER_WORD
12224 && subreg_lowpart_p (SET_DEST (setter)))
12225 record_value_for_reg (dest, record_dead_insn,
12226 gen_lowpart (GET_MODE (dest),
12227 SET_SRC (setter)));
12228 else
12229 record_value_for_reg (dest, record_dead_insn, NULL_RTX);
12230 }
12231 else if (MEM_P (dest)
12232 /* Ignore pushes, they clobber nothing. */
12233 && ! push_operand (dest, GET_MODE (dest)))
12234 mem_last_set = DF_INSN_LUID (record_dead_insn);
12235 }
12236
12237 /* Update the records of when each REG was most recently set or killed
12238 for the things done by INSN. This is the last thing done in processing
12239 INSN in the combiner loop.
12240
12241 We update reg_stat[], in particular fields last_set, last_set_value,
12242 last_set_mode, last_set_nonzero_bits, last_set_sign_bit_copies,
12243 last_death, and also the similar information mem_last_set (which insn
12244 most recently modified memory) and last_call_luid (which insn was the
12245 most recent subroutine call). */
12246
12247 static void
12248 record_dead_and_set_regs (rtx insn)
12249 {
12250 rtx link;
12251 unsigned int i;
12252
12253 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
12254 {
12255 if (REG_NOTE_KIND (link) == REG_DEAD
12256 && REG_P (XEXP (link, 0)))
12257 {
12258 unsigned int regno = REGNO (XEXP (link, 0));
12259 unsigned int endregno = END_REGNO (XEXP (link, 0));
12260
12261 for (i = regno; i < endregno; i++)
12262 {
12263 reg_stat_type *rsp;
12264
12265 rsp = VEC_index (reg_stat_type, reg_stat, i);
12266 rsp->last_death = insn;
12267 }
12268 }
12269 else if (REG_NOTE_KIND (link) == REG_INC)
12270 record_value_for_reg (XEXP (link, 0), insn, NULL_RTX);
12271 }
12272
12273 if (CALL_P (insn))
12274 {
12275 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
12276 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
12277 {
12278 reg_stat_type *rsp;
12279
12280 rsp = VEC_index (reg_stat_type, reg_stat, i);
12281 rsp->last_set_invalid = 1;
12282 rsp->last_set = insn;
12283 rsp->last_set_value = 0;
12284 rsp->last_set_mode = VOIDmode;
12285 rsp->last_set_nonzero_bits = 0;
12286 rsp->last_set_sign_bit_copies = 0;
12287 rsp->last_death = 0;
12288 rsp->truncated_to_mode = VOIDmode;
12289 }
12290
12291 last_call_luid = mem_last_set = DF_INSN_LUID (insn);
12292
12293 /* We can't combine into a call pattern. Remember, though, that
12294 the return value register is set at this LUID. We could
12295 still replace a register with the return value from the
12296 wrong subroutine call! */
12297 note_stores (PATTERN (insn), record_dead_and_set_regs_1, NULL_RTX);
12298 }
12299 else
12300 note_stores (PATTERN (insn), record_dead_and_set_regs_1, insn);
12301 }
12302
12303 /* If a SUBREG has the promoted bit set, it is in fact a property of the
12304 register present in the SUBREG, so for each such SUBREG go back and
12305 adjust nonzero and sign bit information of the registers that are
12306 known to have some zero/sign bits set.
12307
12308 This is needed because when combine blows the SUBREGs away, the
12309 information on zero/sign bits is lost and further combines can be
12310 missed because of that. */
12311
12312 static void
12313 record_promoted_value (rtx insn, rtx subreg)
12314 {
12315 rtx links, set;
12316 unsigned int regno = REGNO (SUBREG_REG (subreg));
12317 enum machine_mode mode = GET_MODE (subreg);
12318
12319 if (GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT)
12320 return;
12321
12322 for (links = LOG_LINKS (insn); links;)
12323 {
12324 reg_stat_type *rsp;
12325
12326 insn = XEXP (links, 0);
12327 set = single_set (insn);
12328
12329 if (! set || !REG_P (SET_DEST (set))
12330 || REGNO (SET_DEST (set)) != regno
12331 || GET_MODE (SET_DEST (set)) != GET_MODE (SUBREG_REG (subreg)))
12332 {
12333 links = XEXP (links, 1);
12334 continue;
12335 }
12336
12337 rsp = VEC_index (reg_stat_type, reg_stat, regno);
12338 if (rsp->last_set == insn)
12339 {
12340 if (SUBREG_PROMOTED_UNSIGNED_P (subreg) > 0)
12341 rsp->last_set_nonzero_bits &= GET_MODE_MASK (mode);
12342 }
12343
12344 if (REG_P (SET_SRC (set)))
12345 {
12346 regno = REGNO (SET_SRC (set));
12347 links = LOG_LINKS (insn);
12348 }
12349 else
12350 break;
12351 }
12352 }
12353
12354 /* Check if X, a register, is known to contain a value already
12355 truncated to MODE. In this case we can use a subreg to refer to
12356 the truncated value even though in the generic case we would need
12357 an explicit truncation. */
12358
12359 static bool
12360 reg_truncated_to_mode (enum machine_mode mode, const_rtx x)
12361 {
12362 reg_stat_type *rsp = VEC_index (reg_stat_type, reg_stat, REGNO (x));
12363 enum machine_mode truncated = rsp->truncated_to_mode;
12364
12365 if (truncated == 0
12366 || rsp->truncation_label < label_tick_ebb_start)
12367 return false;
12368 if (GET_MODE_SIZE (truncated) <= GET_MODE_SIZE (mode))
12369 return true;
12370 if (TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
12371 GET_MODE_BITSIZE (truncated)))
12372 return true;
12373 return false;
12374 }
12375
12376 /* Callback for for_each_rtx. If *P is a hard reg or a subreg record the mode
12377 that the register is accessed in. For non-TRULY_NOOP_TRUNCATION targets we
12378 might be able to turn a truncate into a subreg using this information.
12379 Return -1 if traversing *P is complete or 0 otherwise. */
12380
12381 static int
12382 record_truncated_value (rtx *p, void *data ATTRIBUTE_UNUSED)
12383 {
12384 rtx x = *p;
12385 enum machine_mode truncated_mode;
12386 reg_stat_type *rsp;
12387
12388 if (GET_CODE (x) == SUBREG && REG_P (SUBREG_REG (x)))
12389 {
12390 enum machine_mode original_mode = GET_MODE (SUBREG_REG (x));
12391 truncated_mode = GET_MODE (x);
12392
12393 if (GET_MODE_SIZE (original_mode) <= GET_MODE_SIZE (truncated_mode))
12394 return -1;
12395
12396 if (TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (truncated_mode),
12397 GET_MODE_BITSIZE (original_mode)))
12398 return -1;
12399
12400 x = SUBREG_REG (x);
12401 }
12402 /* ??? For hard-regs we now record everything. We might be able to
12403 optimize this using last_set_mode. */
12404 else if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
12405 truncated_mode = GET_MODE (x);
12406 else
12407 return 0;
12408
12409 rsp = VEC_index (reg_stat_type, reg_stat, REGNO (x));
12410 if (rsp->truncated_to_mode == 0
12411 || rsp->truncation_label < label_tick_ebb_start
12412 || (GET_MODE_SIZE (truncated_mode)
12413 < GET_MODE_SIZE (rsp->truncated_to_mode)))
12414 {
12415 rsp->truncated_to_mode = truncated_mode;
12416 rsp->truncation_label = label_tick;
12417 }
12418
12419 return -1;
12420 }
12421
12422 /* Callback for note_uses. Find hardregs and subregs of pseudos and
12423 the modes they are used in. This can help truning TRUNCATEs into
12424 SUBREGs. */
12425
12426 static void
12427 record_truncated_values (rtx *x, void *data ATTRIBUTE_UNUSED)
12428 {
12429 for_each_rtx (x, record_truncated_value, NULL);
12430 }
12431
12432 /* Scan X for promoted SUBREGs. For each one found,
12433 note what it implies to the registers used in it. */
12434
12435 static void
12436 check_promoted_subreg (rtx insn, rtx x)
12437 {
12438 if (GET_CODE (x) == SUBREG
12439 && SUBREG_PROMOTED_VAR_P (x)
12440 && REG_P (SUBREG_REG (x)))
12441 record_promoted_value (insn, x);
12442 else
12443 {
12444 const char *format = GET_RTX_FORMAT (GET_CODE (x));
12445 int i, j;
12446
12447 for (i = 0; i < GET_RTX_LENGTH (GET_CODE (x)); i++)
12448 switch (format[i])
12449 {
12450 case 'e':
12451 check_promoted_subreg (insn, XEXP (x, i));
12452 break;
12453 case 'V':
12454 case 'E':
12455 if (XVEC (x, i) != 0)
12456 for (j = 0; j < XVECLEN (x, i); j++)
12457 check_promoted_subreg (insn, XVECEXP (x, i, j));
12458 break;
12459 }
12460 }
12461 }
12462 \f
12463 /* Verify that all the registers and memory references mentioned in *LOC are
12464 still valid. *LOC was part of a value set in INSN when label_tick was
12465 equal to TICK. Return 0 if some are not. If REPLACE is nonzero, replace
12466 the invalid references with (clobber (const_int 0)) and return 1. This
12467 replacement is useful because we often can get useful information about
12468 the form of a value (e.g., if it was produced by a shift that always
12469 produces -1 or 0) even though we don't know exactly what registers it
12470 was produced from. */
12471
12472 static int
12473 get_last_value_validate (rtx *loc, rtx insn, int tick, int replace)
12474 {
12475 rtx x = *loc;
12476 const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
12477 int len = GET_RTX_LENGTH (GET_CODE (x));
12478 int i, j;
12479
12480 if (REG_P (x))
12481 {
12482 unsigned int regno = REGNO (x);
12483 unsigned int endregno = END_REGNO (x);
12484 unsigned int j;
12485
12486 for (j = regno; j < endregno; j++)
12487 {
12488 reg_stat_type *rsp = VEC_index (reg_stat_type, reg_stat, j);
12489 if (rsp->last_set_invalid
12490 /* If this is a pseudo-register that was only set once and not
12491 live at the beginning of the function, it is always valid. */
12492 || (! (regno >= FIRST_PSEUDO_REGISTER
12493 && REG_N_SETS (regno) == 1
12494 && (!REGNO_REG_SET_P
12495 (DF_LR_IN (ENTRY_BLOCK_PTR->next_bb), regno)))
12496 && rsp->last_set_label > tick))
12497 {
12498 if (replace)
12499 *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
12500 return replace;
12501 }
12502 }
12503
12504 return 1;
12505 }
12506 /* If this is a memory reference, make sure that there were no stores after
12507 it that might have clobbered the value. We don't have alias info, so we
12508 assume any store invalidates it. Moreover, we only have local UIDs, so
12509 we also assume that there were stores in the intervening basic blocks. */
12510 else if (MEM_P (x) && !MEM_READONLY_P (x)
12511 && (tick != label_tick || DF_INSN_LUID (insn) <= mem_last_set))
12512 {
12513 if (replace)
12514 *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
12515 return replace;
12516 }
12517
12518 for (i = 0; i < len; i++)
12519 {
12520 if (fmt[i] == 'e')
12521 {
12522 /* Check for identical subexpressions. If x contains
12523 identical subexpression we only have to traverse one of
12524 them. */
12525 if (i == 1 && ARITHMETIC_P (x))
12526 {
12527 /* Note that at this point x0 has already been checked
12528 and found valid. */
12529 rtx x0 = XEXP (x, 0);
12530 rtx x1 = XEXP (x, 1);
12531
12532 /* If x0 and x1 are identical then x is also valid. */
12533 if (x0 == x1)
12534 return 1;
12535
12536 /* If x1 is identical to a subexpression of x0 then
12537 while checking x0, x1 has already been checked. Thus
12538 it is valid and so as x. */
12539 if (ARITHMETIC_P (x0)
12540 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
12541 return 1;
12542
12543 /* If x0 is identical to a subexpression of x1 then x is
12544 valid iff the rest of x1 is valid. */
12545 if (ARITHMETIC_P (x1)
12546 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
12547 return
12548 get_last_value_validate (&XEXP (x1,
12549 x0 == XEXP (x1, 0) ? 1 : 0),
12550 insn, tick, replace);
12551 }
12552
12553 if (get_last_value_validate (&XEXP (x, i), insn, tick,
12554 replace) == 0)
12555 return 0;
12556 }
12557 else if (fmt[i] == 'E')
12558 for (j = 0; j < XVECLEN (x, i); j++)
12559 if (get_last_value_validate (&XVECEXP (x, i, j),
12560 insn, tick, replace) == 0)
12561 return 0;
12562 }
12563
12564 /* If we haven't found a reason for it to be invalid, it is valid. */
12565 return 1;
12566 }
12567
12568 /* Get the last value assigned to X, if known. Some registers
12569 in the value may be replaced with (clobber (const_int 0)) if their value
12570 is known longer known reliably. */
12571
12572 static rtx
12573 get_last_value (const_rtx x)
12574 {
12575 unsigned int regno;
12576 rtx value;
12577 reg_stat_type *rsp;
12578
12579 /* If this is a non-paradoxical SUBREG, get the value of its operand and
12580 then convert it to the desired mode. If this is a paradoxical SUBREG,
12581 we cannot predict what values the "extra" bits might have. */
12582 if (GET_CODE (x) == SUBREG
12583 && subreg_lowpart_p (x)
12584 && (GET_MODE_SIZE (GET_MODE (x))
12585 <= GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
12586 && (value = get_last_value (SUBREG_REG (x))) != 0)
12587 return gen_lowpart (GET_MODE (x), value);
12588
12589 if (!REG_P (x))
12590 return 0;
12591
12592 regno = REGNO (x);
12593 rsp = VEC_index (reg_stat_type, reg_stat, regno);
12594 value = rsp->last_set_value;
12595
12596 /* If we don't have a value, or if it isn't for this basic block and
12597 it's either a hard register, set more than once, or it's a live
12598 at the beginning of the function, return 0.
12599
12600 Because if it's not live at the beginning of the function then the reg
12601 is always set before being used (is never used without being set).
12602 And, if it's set only once, and it's always set before use, then all
12603 uses must have the same last value, even if it's not from this basic
12604 block. */
12605
12606 if (value == 0
12607 || (rsp->last_set_label < label_tick_ebb_start
12608 && (regno < FIRST_PSEUDO_REGISTER
12609 || REG_N_SETS (regno) != 1
12610 || REGNO_REG_SET_P
12611 (DF_LR_IN (ENTRY_BLOCK_PTR->next_bb), regno))))
12612 return 0;
12613
12614 /* If the value was set in a later insn than the ones we are processing,
12615 we can't use it even if the register was only set once. */
12616 if (rsp->last_set_label == label_tick
12617 && DF_INSN_LUID (rsp->last_set) >= subst_low_luid)
12618 return 0;
12619
12620 /* If the value has all its registers valid, return it. */
12621 if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 0))
12622 return value;
12623
12624 /* Otherwise, make a copy and replace any invalid register with
12625 (clobber (const_int 0)). If that fails for some reason, return 0. */
12626
12627 value = copy_rtx (value);
12628 if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 1))
12629 return value;
12630
12631 return 0;
12632 }
12633 \f
12634 /* Return nonzero if expression X refers to a REG or to memory
12635 that is set in an instruction more recent than FROM_LUID. */
12636
12637 static int
12638 use_crosses_set_p (const_rtx x, int from_luid)
12639 {
12640 const char *fmt;
12641 int i;
12642 enum rtx_code code = GET_CODE (x);
12643
12644 if (code == REG)
12645 {
12646 unsigned int regno = REGNO (x);
12647 unsigned endreg = END_REGNO (x);
12648
12649 #ifdef PUSH_ROUNDING
12650 /* Don't allow uses of the stack pointer to be moved,
12651 because we don't know whether the move crosses a push insn. */
12652 if (regno == STACK_POINTER_REGNUM && PUSH_ARGS)
12653 return 1;
12654 #endif
12655 for (; regno < endreg; regno++)
12656 {
12657 reg_stat_type *rsp = VEC_index (reg_stat_type, reg_stat, regno);
12658 if (rsp->last_set
12659 && rsp->last_set_label == label_tick
12660 && DF_INSN_LUID (rsp->last_set) > from_luid)
12661 return 1;
12662 }
12663 return 0;
12664 }
12665
12666 if (code == MEM && mem_last_set > from_luid)
12667 return 1;
12668
12669 fmt = GET_RTX_FORMAT (code);
12670
12671 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
12672 {
12673 if (fmt[i] == 'E')
12674 {
12675 int j;
12676 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
12677 if (use_crosses_set_p (XVECEXP (x, i, j), from_luid))
12678 return 1;
12679 }
12680 else if (fmt[i] == 'e'
12681 && use_crosses_set_p (XEXP (x, i), from_luid))
12682 return 1;
12683 }
12684 return 0;
12685 }
12686 \f
12687 /* Define three variables used for communication between the following
12688 routines. */
12689
12690 static unsigned int reg_dead_regno, reg_dead_endregno;
12691 static int reg_dead_flag;
12692
12693 /* Function called via note_stores from reg_dead_at_p.
12694
12695 If DEST is within [reg_dead_regno, reg_dead_endregno), set
12696 reg_dead_flag to 1 if X is a CLOBBER and to -1 it is a SET. */
12697
12698 static void
12699 reg_dead_at_p_1 (rtx dest, const_rtx x, void *data ATTRIBUTE_UNUSED)
12700 {
12701 unsigned int regno, endregno;
12702
12703 if (!REG_P (dest))
12704 return;
12705
12706 regno = REGNO (dest);
12707 endregno = END_REGNO (dest);
12708 if (reg_dead_endregno > regno && reg_dead_regno < endregno)
12709 reg_dead_flag = (GET_CODE (x) == CLOBBER) ? 1 : -1;
12710 }
12711
12712 /* Return nonzero if REG is known to be dead at INSN.
12713
12714 We scan backwards from INSN. If we hit a REG_DEAD note or a CLOBBER
12715 referencing REG, it is dead. If we hit a SET referencing REG, it is
12716 live. Otherwise, see if it is live or dead at the start of the basic
12717 block we are in. Hard regs marked as being live in NEWPAT_USED_REGS
12718 must be assumed to be always live. */
12719
12720 static int
12721 reg_dead_at_p (rtx reg, rtx insn)
12722 {
12723 basic_block block;
12724 unsigned int i;
12725
12726 /* Set variables for reg_dead_at_p_1. */
12727 reg_dead_regno = REGNO (reg);
12728 reg_dead_endregno = END_REGNO (reg);
12729
12730 reg_dead_flag = 0;
12731
12732 /* Check that reg isn't mentioned in NEWPAT_USED_REGS. For fixed registers
12733 we allow the machine description to decide whether use-and-clobber
12734 patterns are OK. */
12735 if (reg_dead_regno < FIRST_PSEUDO_REGISTER)
12736 {
12737 for (i = reg_dead_regno; i < reg_dead_endregno; i++)
12738 if (!fixed_regs[i] && TEST_HARD_REG_BIT (newpat_used_regs, i))
12739 return 0;
12740 }
12741
12742 /* Scan backwards until we find a REG_DEAD note, SET, CLOBBER, or
12743 beginning of basic block. */
12744 block = BLOCK_FOR_INSN (insn);
12745 for (;;)
12746 {
12747 if (INSN_P (insn))
12748 {
12749 note_stores (PATTERN (insn), reg_dead_at_p_1, NULL);
12750 if (reg_dead_flag)
12751 return reg_dead_flag == 1 ? 1 : 0;
12752
12753 if (find_regno_note (insn, REG_DEAD, reg_dead_regno))
12754 return 1;
12755 }
12756
12757 if (insn == BB_HEAD (block))
12758 break;
12759
12760 insn = PREV_INSN (insn);
12761 }
12762
12763 /* Look at live-in sets for the basic block that we were in. */
12764 for (i = reg_dead_regno; i < reg_dead_endregno; i++)
12765 if (REGNO_REG_SET_P (df_get_live_in (block), i))
12766 return 0;
12767
12768 return 1;
12769 }
12770 \f
12771 /* Note hard registers in X that are used. */
12772
12773 static void
12774 mark_used_regs_combine (rtx x)
12775 {
12776 RTX_CODE code = GET_CODE (x);
12777 unsigned int regno;
12778 int i;
12779
12780 switch (code)
12781 {
12782 case LABEL_REF:
12783 case SYMBOL_REF:
12784 case CONST_INT:
12785 case CONST:
12786 case CONST_DOUBLE:
12787 case CONST_VECTOR:
12788 case PC:
12789 case ADDR_VEC:
12790 case ADDR_DIFF_VEC:
12791 case ASM_INPUT:
12792 #ifdef HAVE_cc0
12793 /* CC0 must die in the insn after it is set, so we don't need to take
12794 special note of it here. */
12795 case CC0:
12796 #endif
12797 return;
12798
12799 case CLOBBER:
12800 /* If we are clobbering a MEM, mark any hard registers inside the
12801 address as used. */
12802 if (MEM_P (XEXP (x, 0)))
12803 mark_used_regs_combine (XEXP (XEXP (x, 0), 0));
12804 return;
12805
12806 case REG:
12807 regno = REGNO (x);
12808 /* A hard reg in a wide mode may really be multiple registers.
12809 If so, mark all of them just like the first. */
12810 if (regno < FIRST_PSEUDO_REGISTER)
12811 {
12812 /* None of this applies to the stack, frame or arg pointers. */
12813 if (regno == STACK_POINTER_REGNUM
12814 #if !HARD_FRAME_POINTER_IS_FRAME_POINTER
12815 || regno == HARD_FRAME_POINTER_REGNUM
12816 #endif
12817 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
12818 || (regno == ARG_POINTER_REGNUM && fixed_regs[regno])
12819 #endif
12820 || regno == FRAME_POINTER_REGNUM)
12821 return;
12822
12823 add_to_hard_reg_set (&newpat_used_regs, GET_MODE (x), regno);
12824 }
12825 return;
12826
12827 case SET:
12828 {
12829 /* If setting a MEM, or a SUBREG of a MEM, then note any hard regs in
12830 the address. */
12831 rtx testreg = SET_DEST (x);
12832
12833 while (GET_CODE (testreg) == SUBREG
12834 || GET_CODE (testreg) == ZERO_EXTRACT
12835 || GET_CODE (testreg) == STRICT_LOW_PART)
12836 testreg = XEXP (testreg, 0);
12837
12838 if (MEM_P (testreg))
12839 mark_used_regs_combine (XEXP (testreg, 0));
12840
12841 mark_used_regs_combine (SET_SRC (x));
12842 }
12843 return;
12844
12845 default:
12846 break;
12847 }
12848
12849 /* Recursively scan the operands of this expression. */
12850
12851 {
12852 const char *fmt = GET_RTX_FORMAT (code);
12853
12854 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
12855 {
12856 if (fmt[i] == 'e')
12857 mark_used_regs_combine (XEXP (x, i));
12858 else if (fmt[i] == 'E')
12859 {
12860 int j;
12861
12862 for (j = 0; j < XVECLEN (x, i); j++)
12863 mark_used_regs_combine (XVECEXP (x, i, j));
12864 }
12865 }
12866 }
12867 }
12868 \f
12869 /* Remove register number REGNO from the dead registers list of INSN.
12870
12871 Return the note used to record the death, if there was one. */
12872
12873 rtx
12874 remove_death (unsigned int regno, rtx insn)
12875 {
12876 rtx note = find_regno_note (insn, REG_DEAD, regno);
12877
12878 if (note)
12879 remove_note (insn, note);
12880
12881 return note;
12882 }
12883
12884 /* For each register (hardware or pseudo) used within expression X, if its
12885 death is in an instruction with luid between FROM_LUID (inclusive) and
12886 TO_INSN (exclusive), put a REG_DEAD note for that register in the
12887 list headed by PNOTES.
12888
12889 That said, don't move registers killed by maybe_kill_insn.
12890
12891 This is done when X is being merged by combination into TO_INSN. These
12892 notes will then be distributed as needed. */
12893
12894 static void
12895 move_deaths (rtx x, rtx maybe_kill_insn, int from_luid, rtx to_insn,
12896 rtx *pnotes)
12897 {
12898 const char *fmt;
12899 int len, i;
12900 enum rtx_code code = GET_CODE (x);
12901
12902 if (code == REG)
12903 {
12904 unsigned int regno = REGNO (x);
12905 rtx where_dead = VEC_index (reg_stat_type, reg_stat, regno)->last_death;
12906
12907 /* Don't move the register if it gets killed in between from and to. */
12908 if (maybe_kill_insn && reg_set_p (x, maybe_kill_insn)
12909 && ! reg_referenced_p (x, maybe_kill_insn))
12910 return;
12911
12912 if (where_dead
12913 && BLOCK_FOR_INSN (where_dead) == BLOCK_FOR_INSN (to_insn)
12914 && DF_INSN_LUID (where_dead) >= from_luid
12915 && DF_INSN_LUID (where_dead) < DF_INSN_LUID (to_insn))
12916 {
12917 rtx note = remove_death (regno, where_dead);
12918
12919 /* It is possible for the call above to return 0. This can occur
12920 when last_death points to I2 or I1 that we combined with.
12921 In that case make a new note.
12922
12923 We must also check for the case where X is a hard register
12924 and NOTE is a death note for a range of hard registers
12925 including X. In that case, we must put REG_DEAD notes for
12926 the remaining registers in place of NOTE. */
12927
12928 if (note != 0 && regno < FIRST_PSEUDO_REGISTER
12929 && (GET_MODE_SIZE (GET_MODE (XEXP (note, 0)))
12930 > GET_MODE_SIZE (GET_MODE (x))))
12931 {
12932 unsigned int deadregno = REGNO (XEXP (note, 0));
12933 unsigned int deadend = END_HARD_REGNO (XEXP (note, 0));
12934 unsigned int ourend = END_HARD_REGNO (x);
12935 unsigned int i;
12936
12937 for (i = deadregno; i < deadend; i++)
12938 if (i < regno || i >= ourend)
12939 add_reg_note (where_dead, REG_DEAD, regno_reg_rtx[i]);
12940 }
12941
12942 /* If we didn't find any note, or if we found a REG_DEAD note that
12943 covers only part of the given reg, and we have a multi-reg hard
12944 register, then to be safe we must check for REG_DEAD notes
12945 for each register other than the first. They could have
12946 their own REG_DEAD notes lying around. */
12947 else if ((note == 0
12948 || (note != 0
12949 && (GET_MODE_SIZE (GET_MODE (XEXP (note, 0)))
12950 < GET_MODE_SIZE (GET_MODE (x)))))
12951 && regno < FIRST_PSEUDO_REGISTER
12952 && hard_regno_nregs[regno][GET_MODE (x)] > 1)
12953 {
12954 unsigned int ourend = END_HARD_REGNO (x);
12955 unsigned int i, offset;
12956 rtx oldnotes = 0;
12957
12958 if (note)
12959 offset = hard_regno_nregs[regno][GET_MODE (XEXP (note, 0))];
12960 else
12961 offset = 1;
12962
12963 for (i = regno + offset; i < ourend; i++)
12964 move_deaths (regno_reg_rtx[i],
12965 maybe_kill_insn, from_luid, to_insn, &oldnotes);
12966 }
12967
12968 if (note != 0 && GET_MODE (XEXP (note, 0)) == GET_MODE (x))
12969 {
12970 XEXP (note, 1) = *pnotes;
12971 *pnotes = note;
12972 }
12973 else
12974 *pnotes = alloc_reg_note (REG_DEAD, x, *pnotes);
12975 }
12976
12977 return;
12978 }
12979
12980 else if (GET_CODE (x) == SET)
12981 {
12982 rtx dest = SET_DEST (x);
12983
12984 move_deaths (SET_SRC (x), maybe_kill_insn, from_luid, to_insn, pnotes);
12985
12986 /* In the case of a ZERO_EXTRACT, a STRICT_LOW_PART, or a SUBREG
12987 that accesses one word of a multi-word item, some
12988 piece of everything register in the expression is used by
12989 this insn, so remove any old death. */
12990 /* ??? So why do we test for equality of the sizes? */
12991
12992 if (GET_CODE (dest) == ZERO_EXTRACT
12993 || GET_CODE (dest) == STRICT_LOW_PART
12994 || (GET_CODE (dest) == SUBREG
12995 && (((GET_MODE_SIZE (GET_MODE (dest))
12996 + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
12997 == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
12998 + UNITS_PER_WORD - 1) / UNITS_PER_WORD))))
12999 {
13000 move_deaths (dest, maybe_kill_insn, from_luid, to_insn, pnotes);
13001 return;
13002 }
13003
13004 /* If this is some other SUBREG, we know it replaces the entire
13005 value, so use that as the destination. */
13006 if (GET_CODE (dest) == SUBREG)
13007 dest = SUBREG_REG (dest);
13008
13009 /* If this is a MEM, adjust deaths of anything used in the address.
13010 For a REG (the only other possibility), the entire value is
13011 being replaced so the old value is not used in this insn. */
13012
13013 if (MEM_P (dest))
13014 move_deaths (XEXP (dest, 0), maybe_kill_insn, from_luid,
13015 to_insn, pnotes);
13016 return;
13017 }
13018
13019 else if (GET_CODE (x) == CLOBBER)
13020 return;
13021
13022 len = GET_RTX_LENGTH (code);
13023 fmt = GET_RTX_FORMAT (code);
13024
13025 for (i = 0; i < len; i++)
13026 {
13027 if (fmt[i] == 'E')
13028 {
13029 int j;
13030 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
13031 move_deaths (XVECEXP (x, i, j), maybe_kill_insn, from_luid,
13032 to_insn, pnotes);
13033 }
13034 else if (fmt[i] == 'e')
13035 move_deaths (XEXP (x, i), maybe_kill_insn, from_luid, to_insn, pnotes);
13036 }
13037 }
13038 \f
13039 /* Return 1 if X is the target of a bit-field assignment in BODY, the
13040 pattern of an insn. X must be a REG. */
13041
13042 static int
13043 reg_bitfield_target_p (rtx x, rtx body)
13044 {
13045 int i;
13046
13047 if (GET_CODE (body) == SET)
13048 {
13049 rtx dest = SET_DEST (body);
13050 rtx target;
13051 unsigned int regno, tregno, endregno, endtregno;
13052
13053 if (GET_CODE (dest) == ZERO_EXTRACT)
13054 target = XEXP (dest, 0);
13055 else if (GET_CODE (dest) == STRICT_LOW_PART)
13056 target = SUBREG_REG (XEXP (dest, 0));
13057 else
13058 return 0;
13059
13060 if (GET_CODE (target) == SUBREG)
13061 target = SUBREG_REG (target);
13062
13063 if (!REG_P (target))
13064 return 0;
13065
13066 tregno = REGNO (target), regno = REGNO (x);
13067 if (tregno >= FIRST_PSEUDO_REGISTER || regno >= FIRST_PSEUDO_REGISTER)
13068 return target == x;
13069
13070 endtregno = end_hard_regno (GET_MODE (target), tregno);
13071 endregno = end_hard_regno (GET_MODE (x), regno);
13072
13073 return endregno > tregno && regno < endtregno;
13074 }
13075
13076 else if (GET_CODE (body) == PARALLEL)
13077 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
13078 if (reg_bitfield_target_p (x, XVECEXP (body, 0, i)))
13079 return 1;
13080
13081 return 0;
13082 }
13083 \f
13084 /* Given a chain of REG_NOTES originally from FROM_INSN, try to place them
13085 as appropriate. I3 and I2 are the insns resulting from the combination
13086 insns including FROM (I2 may be zero).
13087
13088 ELIM_I2 and ELIM_I1 are either zero or registers that we know will
13089 not need REG_DEAD notes because they are being substituted for. This
13090 saves searching in the most common cases.
13091
13092 Each note in the list is either ignored or placed on some insns, depending
13093 on the type of note. */
13094
13095 static void
13096 distribute_notes (rtx notes, rtx from_insn, rtx i3, rtx i2, rtx elim_i2,
13097 rtx elim_i1, rtx elim_i0)
13098 {
13099 rtx note, next_note;
13100 rtx tem;
13101
13102 for (note = notes; note; note = next_note)
13103 {
13104 rtx place = 0, place2 = 0;
13105
13106 next_note = XEXP (note, 1);
13107 switch (REG_NOTE_KIND (note))
13108 {
13109 case REG_BR_PROB:
13110 case REG_BR_PRED:
13111 /* Doesn't matter much where we put this, as long as it's somewhere.
13112 It is preferable to keep these notes on branches, which is most
13113 likely to be i3. */
13114 place = i3;
13115 break;
13116
13117 case REG_VALUE_PROFILE:
13118 /* Just get rid of this note, as it is unused later anyway. */
13119 break;
13120
13121 case REG_NON_LOCAL_GOTO:
13122 if (JUMP_P (i3))
13123 place = i3;
13124 else
13125 {
13126 gcc_assert (i2 && JUMP_P (i2));
13127 place = i2;
13128 }
13129 break;
13130
13131 case REG_EH_REGION:
13132 /* These notes must remain with the call or trapping instruction. */
13133 if (CALL_P (i3))
13134 place = i3;
13135 else if (i2 && CALL_P (i2))
13136 place = i2;
13137 else
13138 {
13139 gcc_assert (cfun->can_throw_non_call_exceptions);
13140 if (may_trap_p (i3))
13141 place = i3;
13142 else if (i2 && may_trap_p (i2))
13143 place = i2;
13144 /* ??? Otherwise assume we've combined things such that we
13145 can now prove that the instructions can't trap. Drop the
13146 note in this case. */
13147 }
13148 break;
13149
13150 case REG_NORETURN:
13151 case REG_SETJMP:
13152 /* These notes must remain with the call. It should not be
13153 possible for both I2 and I3 to be a call. */
13154 if (CALL_P (i3))
13155 place = i3;
13156 else
13157 {
13158 gcc_assert (i2 && CALL_P (i2));
13159 place = i2;
13160 }
13161 break;
13162
13163 case REG_UNUSED:
13164 /* Any clobbers for i3 may still exist, and so we must process
13165 REG_UNUSED notes from that insn.
13166
13167 Any clobbers from i2 or i1 can only exist if they were added by
13168 recog_for_combine. In that case, recog_for_combine created the
13169 necessary REG_UNUSED notes. Trying to keep any original
13170 REG_UNUSED notes from these insns can cause incorrect output
13171 if it is for the same register as the original i3 dest.
13172 In that case, we will notice that the register is set in i3,
13173 and then add a REG_UNUSED note for the destination of i3, which
13174 is wrong. However, it is possible to have REG_UNUSED notes from
13175 i2 or i1 for register which were both used and clobbered, so
13176 we keep notes from i2 or i1 if they will turn into REG_DEAD
13177 notes. */
13178
13179 /* If this register is set or clobbered in I3, put the note there
13180 unless there is one already. */
13181 if (reg_set_p (XEXP (note, 0), PATTERN (i3)))
13182 {
13183 if (from_insn != i3)
13184 break;
13185
13186 if (! (REG_P (XEXP (note, 0))
13187 ? find_regno_note (i3, REG_UNUSED, REGNO (XEXP (note, 0)))
13188 : find_reg_note (i3, REG_UNUSED, XEXP (note, 0))))
13189 place = i3;
13190 }
13191 /* Otherwise, if this register is used by I3, then this register
13192 now dies here, so we must put a REG_DEAD note here unless there
13193 is one already. */
13194 else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3))
13195 && ! (REG_P (XEXP (note, 0))
13196 ? find_regno_note (i3, REG_DEAD,
13197 REGNO (XEXP (note, 0)))
13198 : find_reg_note (i3, REG_DEAD, XEXP (note, 0))))
13199 {
13200 PUT_REG_NOTE_KIND (note, REG_DEAD);
13201 place = i3;
13202 }
13203 break;
13204
13205 case REG_EQUAL:
13206 case REG_EQUIV:
13207 case REG_NOALIAS:
13208 /* These notes say something about results of an insn. We can
13209 only support them if they used to be on I3 in which case they
13210 remain on I3. Otherwise they are ignored.
13211
13212 If the note refers to an expression that is not a constant, we
13213 must also ignore the note since we cannot tell whether the
13214 equivalence is still true. It might be possible to do
13215 slightly better than this (we only have a problem if I2DEST
13216 or I1DEST is present in the expression), but it doesn't
13217 seem worth the trouble. */
13218
13219 if (from_insn == i3
13220 && (XEXP (note, 0) == 0 || CONSTANT_P (XEXP (note, 0))))
13221 place = i3;
13222 break;
13223
13224 case REG_INC:
13225 /* These notes say something about how a register is used. They must
13226 be present on any use of the register in I2 or I3. */
13227 if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3)))
13228 place = i3;
13229
13230 if (i2 && reg_mentioned_p (XEXP (note, 0), PATTERN (i2)))
13231 {
13232 if (place)
13233 place2 = i2;
13234 else
13235 place = i2;
13236 }
13237 break;
13238
13239 case REG_LABEL_TARGET:
13240 case REG_LABEL_OPERAND:
13241 /* This can show up in several ways -- either directly in the
13242 pattern, or hidden off in the constant pool with (or without?)
13243 a REG_EQUAL note. */
13244 /* ??? Ignore the without-reg_equal-note problem for now. */
13245 if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3))
13246 || ((tem = find_reg_note (i3, REG_EQUAL, NULL_RTX))
13247 && GET_CODE (XEXP (tem, 0)) == LABEL_REF
13248 && XEXP (XEXP (tem, 0), 0) == XEXP (note, 0)))
13249 place = i3;
13250
13251 if (i2
13252 && (reg_mentioned_p (XEXP (note, 0), PATTERN (i2))
13253 || ((tem = find_reg_note (i2, REG_EQUAL, NULL_RTX))
13254 && GET_CODE (XEXP (tem, 0)) == LABEL_REF
13255 && XEXP (XEXP (tem, 0), 0) == XEXP (note, 0))))
13256 {
13257 if (place)
13258 place2 = i2;
13259 else
13260 place = i2;
13261 }
13262
13263 /* For REG_LABEL_TARGET on a JUMP_P, we prefer to put the note
13264 as a JUMP_LABEL or decrement LABEL_NUSES if it's already
13265 there. */
13266 if (place && JUMP_P (place)
13267 && REG_NOTE_KIND (note) == REG_LABEL_TARGET
13268 && (JUMP_LABEL (place) == NULL
13269 || JUMP_LABEL (place) == XEXP (note, 0)))
13270 {
13271 rtx label = JUMP_LABEL (place);
13272
13273 if (!label)
13274 JUMP_LABEL (place) = XEXP (note, 0);
13275 else if (LABEL_P (label))
13276 LABEL_NUSES (label)--;
13277 }
13278
13279 if (place2 && JUMP_P (place2)
13280 && REG_NOTE_KIND (note) == REG_LABEL_TARGET
13281 && (JUMP_LABEL (place2) == NULL
13282 || JUMP_LABEL (place2) == XEXP (note, 0)))
13283 {
13284 rtx label = JUMP_LABEL (place2);
13285
13286 if (!label)
13287 JUMP_LABEL (place2) = XEXP (note, 0);
13288 else if (LABEL_P (label))
13289 LABEL_NUSES (label)--;
13290 place2 = 0;
13291 }
13292 break;
13293
13294 case REG_NONNEG:
13295 /* This note says something about the value of a register prior
13296 to the execution of an insn. It is too much trouble to see
13297 if the note is still correct in all situations. It is better
13298 to simply delete it. */
13299 break;
13300
13301 case REG_DEAD:
13302 /* If we replaced the right hand side of FROM_INSN with a
13303 REG_EQUAL note, the original use of the dying register
13304 will not have been combined into I3 and I2. In such cases,
13305 FROM_INSN is guaranteed to be the first of the combined
13306 instructions, so we simply need to search back before
13307 FROM_INSN for the previous use or set of this register,
13308 then alter the notes there appropriately.
13309
13310 If the register is used as an input in I3, it dies there.
13311 Similarly for I2, if it is nonzero and adjacent to I3.
13312
13313 If the register is not used as an input in either I3 or I2
13314 and it is not one of the registers we were supposed to eliminate,
13315 there are two possibilities. We might have a non-adjacent I2
13316 or we might have somehow eliminated an additional register
13317 from a computation. For example, we might have had A & B where
13318 we discover that B will always be zero. In this case we will
13319 eliminate the reference to A.
13320
13321 In both cases, we must search to see if we can find a previous
13322 use of A and put the death note there. */
13323
13324 if (from_insn
13325 && from_insn == i2mod
13326 && !reg_overlap_mentioned_p (XEXP (note, 0), i2mod_new_rhs))
13327 tem = from_insn;
13328 else
13329 {
13330 if (from_insn
13331 && CALL_P (from_insn)
13332 && find_reg_fusage (from_insn, USE, XEXP (note, 0)))
13333 place = from_insn;
13334 else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3)))
13335 place = i3;
13336 else if (i2 != 0 && next_nonnote_nondebug_insn (i2) == i3
13337 && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
13338 place = i2;
13339 else if ((rtx_equal_p (XEXP (note, 0), elim_i2)
13340 && !(i2mod
13341 && reg_overlap_mentioned_p (XEXP (note, 0),
13342 i2mod_old_rhs)))
13343 || rtx_equal_p (XEXP (note, 0), elim_i1)
13344 || rtx_equal_p (XEXP (note, 0), elim_i0))
13345 break;
13346 tem = i3;
13347 }
13348
13349 if (place == 0)
13350 {
13351 basic_block bb = this_basic_block;
13352
13353 for (tem = PREV_INSN (tem); place == 0; tem = PREV_INSN (tem))
13354 {
13355 if (!NONDEBUG_INSN_P (tem))
13356 {
13357 if (tem == BB_HEAD (bb))
13358 break;
13359 continue;
13360 }
13361
13362 /* If the register is being set at TEM, see if that is all
13363 TEM is doing. If so, delete TEM. Otherwise, make this
13364 into a REG_UNUSED note instead. Don't delete sets to
13365 global register vars. */
13366 if ((REGNO (XEXP (note, 0)) >= FIRST_PSEUDO_REGISTER
13367 || !global_regs[REGNO (XEXP (note, 0))])
13368 && reg_set_p (XEXP (note, 0), PATTERN (tem)))
13369 {
13370 rtx set = single_set (tem);
13371 rtx inner_dest = 0;
13372 #ifdef HAVE_cc0
13373 rtx cc0_setter = NULL_RTX;
13374 #endif
13375
13376 if (set != 0)
13377 for (inner_dest = SET_DEST (set);
13378 (GET_CODE (inner_dest) == STRICT_LOW_PART
13379 || GET_CODE (inner_dest) == SUBREG
13380 || GET_CODE (inner_dest) == ZERO_EXTRACT);
13381 inner_dest = XEXP (inner_dest, 0))
13382 ;
13383
13384 /* Verify that it was the set, and not a clobber that
13385 modified the register.
13386
13387 CC0 targets must be careful to maintain setter/user
13388 pairs. If we cannot delete the setter due to side
13389 effects, mark the user with an UNUSED note instead
13390 of deleting it. */
13391
13392 if (set != 0 && ! side_effects_p (SET_SRC (set))
13393 && rtx_equal_p (XEXP (note, 0), inner_dest)
13394 #ifdef HAVE_cc0
13395 && (! reg_mentioned_p (cc0_rtx, SET_SRC (set))
13396 || ((cc0_setter = prev_cc0_setter (tem)) != NULL
13397 && sets_cc0_p (PATTERN (cc0_setter)) > 0))
13398 #endif
13399 )
13400 {
13401 /* Move the notes and links of TEM elsewhere.
13402 This might delete other dead insns recursively.
13403 First set the pattern to something that won't use
13404 any register. */
13405 rtx old_notes = REG_NOTES (tem);
13406
13407 PATTERN (tem) = pc_rtx;
13408 REG_NOTES (tem) = NULL;
13409
13410 distribute_notes (old_notes, tem, tem, NULL_RTX,
13411 NULL_RTX, NULL_RTX, NULL_RTX);
13412 distribute_links (LOG_LINKS (tem));
13413
13414 SET_INSN_DELETED (tem);
13415 if (tem == i2)
13416 i2 = NULL_RTX;
13417
13418 #ifdef HAVE_cc0
13419 /* Delete the setter too. */
13420 if (cc0_setter)
13421 {
13422 PATTERN (cc0_setter) = pc_rtx;
13423 old_notes = REG_NOTES (cc0_setter);
13424 REG_NOTES (cc0_setter) = NULL;
13425
13426 distribute_notes (old_notes, cc0_setter,
13427 cc0_setter, NULL_RTX,
13428 NULL_RTX, NULL_RTX, NULL_RTX);
13429 distribute_links (LOG_LINKS (cc0_setter));
13430
13431 SET_INSN_DELETED (cc0_setter);
13432 if (cc0_setter == i2)
13433 i2 = NULL_RTX;
13434 }
13435 #endif
13436 }
13437 else
13438 {
13439 PUT_REG_NOTE_KIND (note, REG_UNUSED);
13440
13441 /* If there isn't already a REG_UNUSED note, put one
13442 here. Do not place a REG_DEAD note, even if
13443 the register is also used here; that would not
13444 match the algorithm used in lifetime analysis
13445 and can cause the consistency check in the
13446 scheduler to fail. */
13447 if (! find_regno_note (tem, REG_UNUSED,
13448 REGNO (XEXP (note, 0))))
13449 place = tem;
13450 break;
13451 }
13452 }
13453 else if (reg_referenced_p (XEXP (note, 0), PATTERN (tem))
13454 || (CALL_P (tem)
13455 && find_reg_fusage (tem, USE, XEXP (note, 0))))
13456 {
13457 place = tem;
13458
13459 /* If we are doing a 3->2 combination, and we have a
13460 register which formerly died in i3 and was not used
13461 by i2, which now no longer dies in i3 and is used in
13462 i2 but does not die in i2, and place is between i2
13463 and i3, then we may need to move a link from place to
13464 i2. */
13465 if (i2 && DF_INSN_LUID (place) > DF_INSN_LUID (i2)
13466 && from_insn
13467 && DF_INSN_LUID (from_insn) > DF_INSN_LUID (i2)
13468 && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
13469 {
13470 rtx links = LOG_LINKS (place);
13471 LOG_LINKS (place) = 0;
13472 distribute_links (links);
13473 }
13474 break;
13475 }
13476
13477 if (tem == BB_HEAD (bb))
13478 break;
13479 }
13480
13481 }
13482
13483 /* If the register is set or already dead at PLACE, we needn't do
13484 anything with this note if it is still a REG_DEAD note.
13485 We check here if it is set at all, not if is it totally replaced,
13486 which is what `dead_or_set_p' checks, so also check for it being
13487 set partially. */
13488
13489 if (place && REG_NOTE_KIND (note) == REG_DEAD)
13490 {
13491 unsigned int regno = REGNO (XEXP (note, 0));
13492 reg_stat_type *rsp = VEC_index (reg_stat_type, reg_stat, regno);
13493
13494 if (dead_or_set_p (place, XEXP (note, 0))
13495 || reg_bitfield_target_p (XEXP (note, 0), PATTERN (place)))
13496 {
13497 /* Unless the register previously died in PLACE, clear
13498 last_death. [I no longer understand why this is
13499 being done.] */
13500 if (rsp->last_death != place)
13501 rsp->last_death = 0;
13502 place = 0;
13503 }
13504 else
13505 rsp->last_death = place;
13506
13507 /* If this is a death note for a hard reg that is occupying
13508 multiple registers, ensure that we are still using all
13509 parts of the object. If we find a piece of the object
13510 that is unused, we must arrange for an appropriate REG_DEAD
13511 note to be added for it. However, we can't just emit a USE
13512 and tag the note to it, since the register might actually
13513 be dead; so we recourse, and the recursive call then finds
13514 the previous insn that used this register. */
13515
13516 if (place && regno < FIRST_PSEUDO_REGISTER
13517 && hard_regno_nregs[regno][GET_MODE (XEXP (note, 0))] > 1)
13518 {
13519 unsigned int endregno = END_HARD_REGNO (XEXP (note, 0));
13520 int all_used = 1;
13521 unsigned int i;
13522
13523 for (i = regno; i < endregno; i++)
13524 if ((! refers_to_regno_p (i, i + 1, PATTERN (place), 0)
13525 && ! find_regno_fusage (place, USE, i))
13526 || dead_or_set_regno_p (place, i))
13527 all_used = 0;
13528
13529 if (! all_used)
13530 {
13531 /* Put only REG_DEAD notes for pieces that are
13532 not already dead or set. */
13533
13534 for (i = regno; i < endregno;
13535 i += hard_regno_nregs[i][reg_raw_mode[i]])
13536 {
13537 rtx piece = regno_reg_rtx[i];
13538 basic_block bb = this_basic_block;
13539
13540 if (! dead_or_set_p (place, piece)
13541 && ! reg_bitfield_target_p (piece,
13542 PATTERN (place)))
13543 {
13544 rtx new_note = alloc_reg_note (REG_DEAD, piece,
13545 NULL_RTX);
13546
13547 distribute_notes (new_note, place, place,
13548 NULL_RTX, NULL_RTX, NULL_RTX,
13549 NULL_RTX);
13550 }
13551 else if (! refers_to_regno_p (i, i + 1,
13552 PATTERN (place), 0)
13553 && ! find_regno_fusage (place, USE, i))
13554 for (tem = PREV_INSN (place); ;
13555 tem = PREV_INSN (tem))
13556 {
13557 if (!NONDEBUG_INSN_P (tem))
13558 {
13559 if (tem == BB_HEAD (bb))
13560 break;
13561 continue;
13562 }
13563 if (dead_or_set_p (tem, piece)
13564 || reg_bitfield_target_p (piece,
13565 PATTERN (tem)))
13566 {
13567 add_reg_note (tem, REG_UNUSED, piece);
13568 break;
13569 }
13570 }
13571
13572 }
13573
13574 place = 0;
13575 }
13576 }
13577 }
13578 break;
13579
13580 default:
13581 /* Any other notes should not be present at this point in the
13582 compilation. */
13583 gcc_unreachable ();
13584 }
13585
13586 if (place)
13587 {
13588 XEXP (note, 1) = REG_NOTES (place);
13589 REG_NOTES (place) = note;
13590 }
13591
13592 if (place2)
13593 add_reg_note (place2, REG_NOTE_KIND (note), XEXP (note, 0));
13594 }
13595 }
13596 \f
13597 /* Similarly to above, distribute the LOG_LINKS that used to be present on
13598 I3, I2, and I1 to new locations. This is also called to add a link
13599 pointing at I3 when I3's destination is changed. */
13600
13601 static void
13602 distribute_links (rtx links)
13603 {
13604 rtx link, next_link;
13605
13606 for (link = links; link; link = next_link)
13607 {
13608 rtx place = 0;
13609 rtx insn;
13610 rtx set, reg;
13611
13612 next_link = XEXP (link, 1);
13613
13614 /* If the insn that this link points to is a NOTE or isn't a single
13615 set, ignore it. In the latter case, it isn't clear what we
13616 can do other than ignore the link, since we can't tell which
13617 register it was for. Such links wouldn't be used by combine
13618 anyway.
13619
13620 It is not possible for the destination of the target of the link to
13621 have been changed by combine. The only potential of this is if we
13622 replace I3, I2, and I1 by I3 and I2. But in that case the
13623 destination of I2 also remains unchanged. */
13624
13625 if (NOTE_P (XEXP (link, 0))
13626 || (set = single_set (XEXP (link, 0))) == 0)
13627 continue;
13628
13629 reg = SET_DEST (set);
13630 while (GET_CODE (reg) == SUBREG || GET_CODE (reg) == ZERO_EXTRACT
13631 || GET_CODE (reg) == STRICT_LOW_PART)
13632 reg = XEXP (reg, 0);
13633
13634 /* A LOG_LINK is defined as being placed on the first insn that uses
13635 a register and points to the insn that sets the register. Start
13636 searching at the next insn after the target of the link and stop
13637 when we reach a set of the register or the end of the basic block.
13638
13639 Note that this correctly handles the link that used to point from
13640 I3 to I2. Also note that not much searching is typically done here
13641 since most links don't point very far away. */
13642
13643 for (insn = NEXT_INSN (XEXP (link, 0));
13644 (insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR
13645 || BB_HEAD (this_basic_block->next_bb) != insn));
13646 insn = NEXT_INSN (insn))
13647 if (DEBUG_INSN_P (insn))
13648 continue;
13649 else if (INSN_P (insn) && reg_overlap_mentioned_p (reg, PATTERN (insn)))
13650 {
13651 if (reg_referenced_p (reg, PATTERN (insn)))
13652 place = insn;
13653 break;
13654 }
13655 else if (CALL_P (insn)
13656 && find_reg_fusage (insn, USE, reg))
13657 {
13658 place = insn;
13659 break;
13660 }
13661 else if (INSN_P (insn) && reg_set_p (reg, insn))
13662 break;
13663
13664 /* If we found a place to put the link, place it there unless there
13665 is already a link to the same insn as LINK at that point. */
13666
13667 if (place)
13668 {
13669 rtx link2;
13670
13671 for (link2 = LOG_LINKS (place); link2; link2 = XEXP (link2, 1))
13672 if (XEXP (link2, 0) == XEXP (link, 0))
13673 break;
13674
13675 if (link2 == 0)
13676 {
13677 XEXP (link, 1) = LOG_LINKS (place);
13678 LOG_LINKS (place) = link;
13679
13680 /* Set added_links_insn to the earliest insn we added a
13681 link to. */
13682 if (added_links_insn == 0
13683 || DF_INSN_LUID (added_links_insn) > DF_INSN_LUID (place))
13684 added_links_insn = place;
13685 }
13686 }
13687 }
13688 }
13689 \f
13690 /* Subroutine of unmentioned_reg_p and callback from for_each_rtx.
13691 Check whether the expression pointer to by LOC is a register or
13692 memory, and if so return 1 if it isn't mentioned in the rtx EXPR.
13693 Otherwise return zero. */
13694
13695 static int
13696 unmentioned_reg_p_1 (rtx *loc, void *expr)
13697 {
13698 rtx x = *loc;
13699
13700 if (x != NULL_RTX
13701 && (REG_P (x) || MEM_P (x))
13702 && ! reg_mentioned_p (x, (rtx) expr))
13703 return 1;
13704 return 0;
13705 }
13706
13707 /* Check for any register or memory mentioned in EQUIV that is not
13708 mentioned in EXPR. This is used to restrict EQUIV to "specializations"
13709 of EXPR where some registers may have been replaced by constants. */
13710
13711 static bool
13712 unmentioned_reg_p (rtx equiv, rtx expr)
13713 {
13714 return for_each_rtx (&equiv, unmentioned_reg_p_1, expr);
13715 }
13716 \f
13717 void
13718 dump_combine_stats (FILE *file)
13719 {
13720 fprintf
13721 (file,
13722 ";; Combiner statistics: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n\n",
13723 combine_attempts, combine_merges, combine_extras, combine_successes);
13724 }
13725
13726 void
13727 dump_combine_total_stats (FILE *file)
13728 {
13729 fprintf
13730 (file,
13731 "\n;; Combiner totals: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n",
13732 total_attempts, total_merges, total_extras, total_successes);
13733 }
13734 \f
13735 static bool
13736 gate_handle_combine (void)
13737 {
13738 return (optimize > 0);
13739 }
13740
13741 /* Try combining insns through substitution. */
13742 static unsigned int
13743 rest_of_handle_combine (void)
13744 {
13745 int rebuild_jump_labels_after_combine;
13746
13747 df_set_flags (DF_LR_RUN_DCE + DF_DEFER_INSN_RESCAN);
13748 df_note_add_problem ();
13749 df_analyze ();
13750
13751 regstat_init_n_sets_and_refs ();
13752
13753 rebuild_jump_labels_after_combine
13754 = combine_instructions (get_insns (), max_reg_num ());
13755
13756 /* Combining insns may have turned an indirect jump into a
13757 direct jump. Rebuild the JUMP_LABEL fields of jumping
13758 instructions. */
13759 if (rebuild_jump_labels_after_combine)
13760 {
13761 timevar_push (TV_JUMP);
13762 rebuild_jump_labels (get_insns ());
13763 cleanup_cfg (0);
13764 timevar_pop (TV_JUMP);
13765 }
13766
13767 regstat_free_n_sets_and_refs ();
13768 return 0;
13769 }
13770
13771 struct rtl_opt_pass pass_combine =
13772 {
13773 {
13774 RTL_PASS,
13775 "combine", /* name */
13776 gate_handle_combine, /* gate */
13777 rest_of_handle_combine, /* execute */
13778 NULL, /* sub */
13779 NULL, /* next */
13780 0, /* static_pass_number */
13781 TV_COMBINE, /* tv_id */
13782 PROP_cfglayout, /* properties_required */
13783 0, /* properties_provided */
13784 0, /* properties_destroyed */
13785 0, /* todo_flags_start */
13786 TODO_dump_func |
13787 TODO_df_finish | TODO_verify_rtl_sharing |
13788 TODO_ggc_collect, /* todo_flags_finish */
13789 }
13790 };