[gcc.git] / gcc / combine.c
1 /* Optimize by combining instructions for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
4 2011, 2012 Free Software Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 /* This module is essentially the "combiner" phase of the U. of Arizona
23 Portable Optimizer, but redone to work on our list-structured
24 representation for RTL instead of their string representation.
25
26 The LOG_LINKS of each insn identify the most recent assignment
27 to each REG used in the insn. It is a list of previous insns,
28 each of which contains a SET for a REG that is used in this insn
29 and not used or set in between. LOG_LINKs never cross basic blocks.
30 They were set up by the preceding pass (lifetime analysis).
31
32 We try to combine each pair of insns joined by a logical link.
33 We also try to combine triplets of insns A, B and C when C has
34 a link back to B and B has a link back to A. Likewise for a
35 small number of quadruplets of insns A, B, C and D for which
36 there's a high likelihood of success.
37
38 There are no LOG_LINKS entries for uses of CC0. None are needed,
39 because the insn that sets CC0 is always immediately
40 before the insn that tests it. So we always regard a branch
41 insn as having a logical link to the preceding insn. The same is true
42 for an insn explicitly using CC0.
43
44 We check (with use_crosses_set_p) to avoid combining in such a way
45 as to move a computation to a place where its value would be different.
46
47 Combination is done by mathematically substituting the previous
48 insn(s) values for the regs they set into the expressions in
49 the later insns that refer to these regs. If the result is a valid insn
50 for our target machine, according to the machine description,
51 we install it, delete the earlier insns, and update the data flow
52 information (LOG_LINKS and REG_NOTES) for what we did.
53
54 There are a few exceptions where the dataflow information isn't
55 completely updated (however this is only a local issue since it is
56 regenerated before the next pass that uses it):
57
58 - reg_live_length is not updated
59 - reg_n_refs is not adjusted in the rare case when a register is
60 no longer required in a computation
61 - there are extremely rare cases (see distribute_notes) when a
62 REG_DEAD note is lost
63 - a LOG_LINKS entry that refers to an insn with multiple SETs may be
64 removed because there is no way to know which register it was
65 linking
66
67 To simplify substitution, we combine only when the earlier insn(s)
68 consist of only a single assignment. To simplify updating afterward,
69 we never combine when a subroutine call appears in the middle.
70
71 Since we do not represent assignments to CC0 explicitly except when that
72 is all an insn does, there is no LOG_LINKS entry in an insn that uses
73 the condition code for the insn that set the condition code.
74 Fortunately, these two insns must be consecutive.
75 Therefore, every JUMP_INSN is taken to have an implicit logical link
76 to the preceding insn. This is not quite right, since non-jumps can
77 also use the condition code; but in practice such insns would not
78 combine anyway. */
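/* As a purely illustrative sketch (the register numbers and patterns here
   are hypothetical, not taken from any particular target), combining

       (set (reg 100) (plus (reg 101) (const_int 4)))
       (set (reg 102) (mult (reg 100) (reg 103)))

   substitutes the source of the first SET into the second, yielding

       (set (reg 102) (mult (plus (reg 101) (const_int 4)) (reg 103)))

   The combined pattern is kept only if it matches an insn pattern in the
   machine description; otherwise the substitution is undone and the
   original insns remain.  */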
79
80 #include "config.h"
81 #include "system.h"
82 #include "coretypes.h"
83 #include "tm.h"
84 #include "rtl.h"
85 #include "tree.h"
86 #include "tm_p.h"
87 #include "flags.h"
88 #include "regs.h"
89 #include "hard-reg-set.h"
90 #include "basic-block.h"
91 #include "insn-config.h"
92 #include "function.h"
93 /* Include expr.h after insn-config.h so we get HAVE_conditional_move. */
94 #include "expr.h"
95 #include "insn-attr.h"
96 #include "recog.h"
97 #include "diagnostic-core.h"
98 #include "target.h"
99 #include "optabs.h"
100 #include "insn-codes.h"
101 #include "rtlhooks-def.h"
102 #include "params.h"
103 #include "tree-pass.h"
104 #include "df.h"
105 #include "valtrack.h"
106 #include "cgraph.h"
107 #include "obstack.h"
108
109 /* Number of attempts to combine instructions in this function. */
110
111 static int combine_attempts;
112
113 /* Number of attempts that got as far as substitution in this function. */
114
115 static int combine_merges;
116
117 /* Number of instructions combined with added SETs in this function. */
118
119 static int combine_extras;
120
121 /* Number of instructions combined in this function. */
122
123 static int combine_successes;
124
125 /* Totals over entire compilation. */
126
127 static int total_attempts, total_merges, total_extras, total_successes;
128
129 /* combine_instructions may try to replace the right hand side of the
130 second instruction with the value of an associated REG_EQUAL note
131 before throwing it at try_combine. That is problematic when there
132 is a REG_DEAD note for a register used in the old right hand side
133 and can cause distribute_notes to do wrong things. This is the
134 second instruction if it has been so modified, null otherwise. */
135
136 static rtx i2mod;
137
138 /* When I2MOD is nonnull, this is a copy of the old right hand side. */
139
140 static rtx i2mod_old_rhs;
141
142 /* When I2MOD is nonnull, this is a copy of the new right hand side. */
143
144 static rtx i2mod_new_rhs;
145 \f
146 typedef struct reg_stat_struct {
147 /* Record last point of death of (hard or pseudo) register n. */
148 rtx last_death;
149
150 /* Record last point of modification of (hard or pseudo) register n. */
151 rtx last_set;
152
153 /* The next group of fields allows the recording of the last value assigned
154 to (hard or pseudo) register n. We use this information to see if an
155 operation being processed is redundant given a prior operation performed
156 on the register. For example, an `and' with a constant is redundant if
157 all the zero bits are already known to be turned off.
158
159 We use an approach similar to that used by cse, but change it in the
160 following ways:
161
162 (1) We do not want to reinitialize at each label.
163 (2) It is useful, but not critical, to know the actual value assigned
164 to a register. Often just its form is helpful.
165
166 Therefore, we maintain the following fields:
167
168 last_set_value the last value assigned
169 last_set_label records the value of label_tick when the
170 register was assigned
171 last_set_table_tick records the value of label_tick when a
172 value using the register is assigned
173 last_set_invalid set to nonzero when it is not valid
174 to use the value of this register in some
175 register's value
176
177 To understand the usage of these tables, it is important to understand
178 the distinction between the value in last_set_value being valid and
179 the register being validly contained in some other expression in the
180 table.
181
182 (The next two parameters are out of date).
183
184 reg_stat[i].last_set_value is valid if it is nonzero, and either
185 reg_n_sets[i] is 1 or reg_stat[i].last_set_label == label_tick.
186
187 Register I may validly appear in any expression returned for the value
188 of another register if reg_n_sets[i] is 1. It may also appear in the
189 value for register J if reg_stat[j].last_set_invalid is zero, or
190 reg_stat[i].last_set_label < reg_stat[j].last_set_label.
191
192 If an expression is found in the table containing a register which may
193 not validly appear in an expression, the register is replaced by
194 something that won't match, (clobber (const_int 0)). */
195
196 /* Record last value assigned to (hard or pseudo) register n. */
197
198 rtx last_set_value;
199
200 /* Record the value of label_tick when an expression involving register n
201 is placed in last_set_value. */
202
203 int last_set_table_tick;
204
205 /* Record the value of label_tick when the value for register n is placed in
206 last_set_value. */
207
208 int last_set_label;
209
210 /* These fields are maintained in parallel with last_set_value and are
211 used to store the mode in which the register was last set, the bits
212 that were known to be zero when it was last set, and the number of
213 sign bits copies it was known to have when it was last set. */
214
215 unsigned HOST_WIDE_INT last_set_nonzero_bits;
216 char last_set_sign_bit_copies;
217 ENUM_BITFIELD(machine_mode) last_set_mode : 8;
218
219 /* Set nonzero if references to register n in expressions should not be
220 used. last_set_invalid is set nonzero when this register is being
221 assigned to and last_set_table_tick == label_tick. */
222
223 char last_set_invalid;
224
225 /* Some registers that are set more than once and used in more than one
226 basic block are nevertheless always set in similar ways. For example,
227 a QImode register may be loaded from memory in two places on a machine
228 where byte loads zero extend.
229
230 We record in the following fields if a register has some leading bits
231 that are always equal to the sign bit, and what we know about the
232 nonzero bits of a register, specifically which bits are known to be
233 zero.
234
235 If an entry is zero, it means that we don't know anything special. */
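/* For instance (an illustrative assumption, not tied to a particular
   target): if every SET of a pseudo zero-extends a QImode load into a
   32-bit register, nonzero_bits would be 0xff and sign_bit_copies would
   be at least 24, since bits 31..8 are known to equal the (zero) sign
   bit.  */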
236
237 unsigned char sign_bit_copies;
238
239 unsigned HOST_WIDE_INT nonzero_bits;
240
241 /* Record the value of the label_tick when the last truncation
242 happened. The field truncated_to_mode is only valid if
243 truncation_label == label_tick. */
244
245 int truncation_label;
246
247 /* Record the last truncation seen for this register. If truncation
248 is not a nop to this mode we might be able to save an explicit
249 truncation if we know that value already contains a truncated
250 value. */
251
252 ENUM_BITFIELD(machine_mode) truncated_to_mode : 8;
253 } reg_stat_type;
254
255 DEF_VEC_O(reg_stat_type);
256 DEF_VEC_ALLOC_O(reg_stat_type,heap);
257
258 static VEC(reg_stat_type,heap) *reg_stat;
259
260 /* Record the luid of the last insn that invalidated memory
261 (anything that writes memory, and subroutine calls, but not pushes). */
262
263 static int mem_last_set;
264
265 /* Record the luid of the last CALL_INSN
266 so we can tell whether a potential combination crosses any calls. */
267
268 static int last_call_luid;
269
270 /* When `subst' is called, this is the insn that is being modified
271 (by combining in a previous insn). The PATTERN of this insn
272 is still the old pattern partially modified and it should not be
273 looked at, but this may be used to examine the successors of the insn
274 to judge whether a simplification is valid. */
275
276 static rtx subst_insn;
277
278 /* This is the lowest LUID that `subst' is currently dealing with.
279 get_last_value will not return a value if the register was set at or
280 after this LUID. If not for this mechanism, we could get confused if
281 I2 or I1 in try_combine were an insn that used the old value of a register
282 to obtain a new value. In that case, we might erroneously get the
283 new value of the register when we wanted the old one. */
284
285 static int subst_low_luid;
286
287 /* This contains any hard registers that are used in newpat; reg_dead_at_p
288 must consider all these registers to be always live. */
289
290 static HARD_REG_SET newpat_used_regs;
291
292 /* This is an insn to which a LOG_LINKS entry has been added. If this
293 insn is earlier than I2 or I3, combine should rescan starting at
294 that location. */
295
296 static rtx added_links_insn;
297
298 /* Basic block in which we are performing combines. */
299 static basic_block this_basic_block;
300 static bool optimize_this_for_speed_p;
301
302 \f
303 /* Length of the currently allocated uid_insn_cost array. */
304
305 static int max_uid_known;
306
307 /* The following array records the insn_rtx_cost for every insn
308 in the instruction stream. */
309
310 static int *uid_insn_cost;
311
312 /* The following array records the LOG_LINKS for every insn in the
313 instruction stream as struct insn_link pointers. */
314
315 struct insn_link {
316 rtx insn;
317 struct insn_link *next;
318 };
319
320 static struct insn_link **uid_log_links;
321
322 #define INSN_COST(INSN) (uid_insn_cost[INSN_UID (INSN)])
323 #define LOG_LINKS(INSN) (uid_log_links[INSN_UID (INSN)])
324
325 #define FOR_EACH_LOG_LINK(L, INSN) \
326 for ((L) = LOG_LINKS (INSN); (L); (L) = (L)->next)
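/* A minimal usage sketch (PRODUCER is a hypothetical insn used only for
   illustration; the same idiom appears in insn_a_feeds_b below):

     struct insn_link *links;
     FOR_EACH_LOG_LINK (links, insn)
       if (links->insn == producer)
         break;

   After the loop, LINKS is null if INSN has no LOG_LINKS entry for
   PRODUCER.  */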
327
328 /* Links for LOG_LINKS are allocated from this obstack. */
329
330 static struct obstack insn_link_obstack;
331
332 /* Allocate a link. */
333
334 static inline struct insn_link *
335 alloc_insn_link (rtx insn, struct insn_link *next)
336 {
337 struct insn_link *l
338 = (struct insn_link *) obstack_alloc (&insn_link_obstack,
339 sizeof (struct insn_link));
340 l->insn = insn;
341 l->next = next;
342 return l;
343 }
344
345 /* Incremented for each basic block. */
346
347 static int label_tick;
348
349 /* Reset to label_tick for each extended basic block in scanning order. */
350
351 static int label_tick_ebb_start;
352
353 /* Mode used to compute significance in reg_stat[].nonzero_bits. It is the
354 largest integer mode that can fit in HOST_BITS_PER_WIDE_INT. */
355
356 static enum machine_mode nonzero_bits_mode;
357
358 /* Nonzero when reg_stat[].nonzero_bits and reg_stat[].sign_bit_copies can
359 be safely used. It is zero while computing them and after combine has
360 completed. This former test prevents propagating values based on
361 previously set values, which can be incorrect if a variable is modified
362 in a loop. */
363
364 static int nonzero_sign_valid;
365
366 \f
367 /* Record one modification to rtl structure
368 to be undone by storing old_contents into *where. */
369
370 enum undo_kind { UNDO_RTX, UNDO_INT, UNDO_MODE, UNDO_LINKS };
371
372 struct undo
373 {
374 struct undo *next;
375 enum undo_kind kind;
376 union { rtx r; int i; enum machine_mode m; struct insn_link *l; } old_contents;
377 union { rtx *r; int *i; struct insn_link **l; } where;
378 };
379
380 /* Record a bunch of changes to be undone.  The undos field chains the
381 changes currently recorded; the frees field chains entries available for reuse.
382
383 other_insn is nonzero if we have modified some other insn in the process
384 of working on subst_insn. It must be verified too. */
385
386 struct undobuf
387 {
388 struct undo *undos;
389 struct undo *frees;
390 rtx other_insn;
391 };
392
393 static struct undobuf undobuf;
394
395 /* Number of times the pseudo being substituted for
396 was found and replaced. */
397
398 static int n_occurrences;
399
400 static rtx reg_nonzero_bits_for_combine (const_rtx, enum machine_mode, const_rtx,
401 enum machine_mode,
402 unsigned HOST_WIDE_INT,
403 unsigned HOST_WIDE_INT *);
404 static rtx reg_num_sign_bit_copies_for_combine (const_rtx, enum machine_mode, const_rtx,
405 enum machine_mode,
406 unsigned int, unsigned int *);
407 static void do_SUBST (rtx *, rtx);
408 static void do_SUBST_INT (int *, int);
409 static void init_reg_last (void);
410 static void setup_incoming_promotions (rtx);
411 static void set_nonzero_bits_and_sign_copies (rtx, const_rtx, void *);
412 static int cant_combine_insn_p (rtx);
413 static int can_combine_p (rtx, rtx, rtx, rtx, rtx, rtx, rtx *, rtx *);
414 static int combinable_i3pat (rtx, rtx *, rtx, rtx, rtx, int, int, rtx *);
415 static int contains_muldiv (rtx);
416 static rtx try_combine (rtx, rtx, rtx, rtx, int *, rtx);
417 static void undo_all (void);
418 static void undo_commit (void);
419 static rtx *find_split_point (rtx *, rtx, bool);
420 static rtx subst (rtx, rtx, rtx, int, int, int);
421 static rtx combine_simplify_rtx (rtx, enum machine_mode, int, int);
422 static rtx simplify_if_then_else (rtx);
423 static rtx simplify_set (rtx);
424 static rtx simplify_logical (rtx);
425 static rtx expand_compound_operation (rtx);
426 static const_rtx expand_field_assignment (const_rtx);
427 static rtx make_extraction (enum machine_mode, rtx, HOST_WIDE_INT,
428 rtx, unsigned HOST_WIDE_INT, int, int, int);
429 static rtx extract_left_shift (rtx, int);
430 static int get_pos_from_mask (unsigned HOST_WIDE_INT,
431 unsigned HOST_WIDE_INT *);
432 static rtx canon_reg_for_combine (rtx, rtx);
433 static rtx force_to_mode (rtx, enum machine_mode,
434 unsigned HOST_WIDE_INT, int);
435 static rtx if_then_else_cond (rtx, rtx *, rtx *);
436 static rtx known_cond (rtx, enum rtx_code, rtx, rtx);
437 static int rtx_equal_for_field_assignment_p (rtx, rtx);
438 static rtx make_field_assignment (rtx);
439 static rtx apply_distributive_law (rtx);
440 static rtx distribute_and_simplify_rtx (rtx, int);
441 static rtx simplify_and_const_int_1 (enum machine_mode, rtx,
442 unsigned HOST_WIDE_INT);
443 static rtx simplify_and_const_int (rtx, enum machine_mode, rtx,
444 unsigned HOST_WIDE_INT);
445 static int merge_outer_ops (enum rtx_code *, HOST_WIDE_INT *, enum rtx_code,
446 HOST_WIDE_INT, enum machine_mode, int *);
447 static rtx simplify_shift_const_1 (enum rtx_code, enum machine_mode, rtx, int);
448 static rtx simplify_shift_const (rtx, enum rtx_code, enum machine_mode, rtx,
449 int);
450 static int recog_for_combine (rtx *, rtx, rtx *);
451 static rtx gen_lowpart_for_combine (enum machine_mode, rtx);
452 static enum rtx_code simplify_compare_const (enum rtx_code, rtx, rtx *);
453 static enum rtx_code simplify_comparison (enum rtx_code, rtx *, rtx *);
454 static void update_table_tick (rtx);
455 static void record_value_for_reg (rtx, rtx, rtx);
456 static void check_promoted_subreg (rtx, rtx);
457 static void record_dead_and_set_regs_1 (rtx, const_rtx, void *);
458 static void record_dead_and_set_regs (rtx);
459 static int get_last_value_validate (rtx *, rtx, int, int);
460 static rtx get_last_value (const_rtx);
461 static int use_crosses_set_p (const_rtx, int);
462 static void reg_dead_at_p_1 (rtx, const_rtx, void *);
463 static int reg_dead_at_p (rtx, rtx);
464 static void move_deaths (rtx, rtx, int, rtx, rtx *);
465 static int reg_bitfield_target_p (rtx, rtx);
466 static void distribute_notes (rtx, rtx, rtx, rtx, rtx, rtx, rtx);
467 static void distribute_links (struct insn_link *);
468 static void mark_used_regs_combine (rtx);
469 static void record_promoted_value (rtx, rtx);
470 static int unmentioned_reg_p_1 (rtx *, void *);
471 static bool unmentioned_reg_p (rtx, rtx);
472 static int record_truncated_value (rtx *, void *);
473 static void record_truncated_values (rtx *, void *);
474 static bool reg_truncated_to_mode (enum machine_mode, const_rtx);
475 static rtx gen_lowpart_or_truncate (enum machine_mode, rtx);
476 \f
477
478 /* It is not safe to use ordinary gen_lowpart in combine.
479 See comments in gen_lowpart_for_combine. */
480 #undef RTL_HOOKS_GEN_LOWPART
481 #define RTL_HOOKS_GEN_LOWPART gen_lowpart_for_combine
482
483 /* Our implementation of gen_lowpart never emits a new pseudo. */
484 #undef RTL_HOOKS_GEN_LOWPART_NO_EMIT
485 #define RTL_HOOKS_GEN_LOWPART_NO_EMIT gen_lowpart_for_combine
486
487 #undef RTL_HOOKS_REG_NONZERO_REG_BITS
488 #define RTL_HOOKS_REG_NONZERO_REG_BITS reg_nonzero_bits_for_combine
489
490 #undef RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES
491 #define RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES reg_num_sign_bit_copies_for_combine
492
493 #undef RTL_HOOKS_REG_TRUNCATED_TO_MODE
494 #define RTL_HOOKS_REG_TRUNCATED_TO_MODE reg_truncated_to_mode
495
496 static const struct rtl_hooks combine_rtl_hooks = RTL_HOOKS_INITIALIZER;
497
498 \f
499 /* Try to split PATTERN found in INSN. This returns NULL_RTX if
500 PATTERN cannot be split. Otherwise, it returns an insn sequence.
501 This is a wrapper around split_insns which ensures that the
502 reg_stat vector is made larger if the splitter creates a new
503 register. */
504
505 static rtx
506 combine_split_insns (rtx pattern, rtx insn)
507 {
508 rtx ret;
509 unsigned int nregs;
510
511 ret = split_insns (pattern, insn);
512 nregs = max_reg_num ();
513 if (nregs > VEC_length (reg_stat_type, reg_stat))
514 VEC_safe_grow_cleared (reg_stat_type, heap, reg_stat, nregs);
515 return ret;
516 }
517
518 /* This is used by find_single_use to locate an rtx in LOC that
519 contains exactly one use of DEST, which is typically either a REG
520 or CC0. It returns a pointer to the innermost rtx expression
521 containing DEST. Appearances of DEST that are being used to
522 totally replace it are not counted. */
523
524 static rtx *
525 find_single_use_1 (rtx dest, rtx *loc)
526 {
527 rtx x = *loc;
528 enum rtx_code code = GET_CODE (x);
529 rtx *result = NULL;
530 rtx *this_result;
531 int i;
532 const char *fmt;
533
534 switch (code)
535 {
536 case CONST:
537 case LABEL_REF:
538 case SYMBOL_REF:
539 CASE_CONST_ANY:
540 case CLOBBER:
541 return 0;
542
543 case SET:
544 /* If the destination is anything other than CC0, PC, a REG or a SUBREG
545 of a REG that occupies all of the REG, the insn uses DEST if
546 it is mentioned in the destination or the source. Otherwise, we
547 just need to check the source. */
548 if (GET_CODE (SET_DEST (x)) != CC0
549 && GET_CODE (SET_DEST (x)) != PC
550 && !REG_P (SET_DEST (x))
551 && ! (GET_CODE (SET_DEST (x)) == SUBREG
552 && REG_P (SUBREG_REG (SET_DEST (x)))
553 && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (x))))
554 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
555 == ((GET_MODE_SIZE (GET_MODE (SET_DEST (x)))
556 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))))
557 break;
558
559 return find_single_use_1 (dest, &SET_SRC (x));
560
561 case MEM:
562 case SUBREG:
563 return find_single_use_1 (dest, &XEXP (x, 0));
564
565 default:
566 break;
567 }
568
569 /* If it wasn't one of the common cases above, check each expression and
570 vector of this code. Look for a unique usage of DEST. */
571
572 fmt = GET_RTX_FORMAT (code);
573 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
574 {
575 if (fmt[i] == 'e')
576 {
577 if (dest == XEXP (x, i)
578 || (REG_P (dest) && REG_P (XEXP (x, i))
579 && REGNO (dest) == REGNO (XEXP (x, i))))
580 this_result = loc;
581 else
582 this_result = find_single_use_1 (dest, &XEXP (x, i));
583
584 if (result == NULL)
585 result = this_result;
586 else if (this_result)
587 /* Duplicate usage. */
588 return NULL;
589 }
590 else if (fmt[i] == 'E')
591 {
592 int j;
593
594 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
595 {
596 if (XVECEXP (x, i, j) == dest
597 || (REG_P (dest)
598 && REG_P (XVECEXP (x, i, j))
599 && REGNO (XVECEXP (x, i, j)) == REGNO (dest)))
600 this_result = loc;
601 else
602 this_result = find_single_use_1 (dest, &XVECEXP (x, i, j));
603
604 if (result == NULL)
605 result = this_result;
606 else if (this_result)
607 return NULL;
608 }
609 }
610 }
611
612 return result;
613 }
614
615
616 /* See if DEST, produced in INSN, is used only a single time in the
617 sequel. If so, return a pointer to the innermost rtx expression in which
618 it is used.
619
620 If PLOC is nonzero, *PLOC is set to the insn containing the single use.
621
622 If DEST is cc0_rtx, we look only at the next insn. In that case, we don't
623 care about REG_DEAD notes or LOG_LINKS.
624
625 Otherwise, we find the single use by finding an insn that has a
626 LOG_LINKS pointing at INSN and has a REG_DEAD note for DEST. If DEST is
627 only referenced once in that insn, we know that it must be the first
628 and last insn referencing DEST. */
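/* A sketch of a typical call (USE_INSN is a hypothetical local used only
   for illustration):

     rtx use_insn;
     rtx *usep = find_single_use (dest, insn, &use_insn);
     if (usep)
       ... *USEP is the unique reference to DEST, found inside USE_INSN ...

   A null return means no single use of DEST was found.  */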
629
630 static rtx *
631 find_single_use (rtx dest, rtx insn, rtx *ploc)
632 {
633 basic_block bb;
634 rtx next;
635 rtx *result;
636 struct insn_link *link;
637
638 #ifdef HAVE_cc0
639 if (dest == cc0_rtx)
640 {
641 next = NEXT_INSN (insn);
642 if (next == 0
643 || (!NONJUMP_INSN_P (next) && !JUMP_P (next)))
644 return 0;
645
646 result = find_single_use_1 (dest, &PATTERN (next));
647 if (result && ploc)
648 *ploc = next;
649 return result;
650 }
651 #endif
652
653 if (!REG_P (dest))
654 return 0;
655
656 bb = BLOCK_FOR_INSN (insn);
657 for (next = NEXT_INSN (insn);
658 next && BLOCK_FOR_INSN (next) == bb;
659 next = NEXT_INSN (next))
660 if (INSN_P (next) && dead_or_set_p (next, dest))
661 {
662 FOR_EACH_LOG_LINK (link, next)
663 if (link->insn == insn)
664 break;
665
666 if (link)
667 {
668 result = find_single_use_1 (dest, &PATTERN (next));
669 if (ploc)
670 *ploc = next;
671 return result;
672 }
673 }
674
675 return 0;
676 }
677 \f
678 /* Substitute NEWVAL, an rtx expression, into INTO, a place in some
679 insn. The substitution can be undone by undo_all. If INTO is already
680 set to NEWVAL, do not record this change. Because computing NEWVAL might
681 also call SUBST, we have to compute it before we put anything into
682 the undo table. */
683
684 static void
685 do_SUBST (rtx *into, rtx newval)
686 {
687 struct undo *buf;
688 rtx oldval = *into;
689
690 if (oldval == newval)
691 return;
692
693 /* We'd like to catch as many invalid transformations here as
694 possible. Unfortunately, there are way too many mode changes
695 that are perfectly valid, so we'd waste too much effort for
696 little gain doing the checks here. Focus on catching invalid
697 transformations involving integer constants. */
698 if (GET_MODE_CLASS (GET_MODE (oldval)) == MODE_INT
699 && CONST_INT_P (newval))
700 {
701 /* Sanity check that we're replacing oldval with a CONST_INT
702 that is a valid sign-extension for the original mode. */
703 gcc_assert (INTVAL (newval)
704 == trunc_int_for_mode (INTVAL (newval), GET_MODE (oldval)));
705
706 /* Replacing the operand of a SUBREG or a ZERO_EXTEND with a
707 CONST_INT is not valid, because after the replacement, the
708 original mode would be gone. Unfortunately, we can't tell
709 when do_SUBST is called to replace the operand thereof, so we
710 perform this test on oldval instead, checking whether an
711 invalid replacement took place before we got here. */
712 gcc_assert (!(GET_CODE (oldval) == SUBREG
713 && CONST_INT_P (SUBREG_REG (oldval))));
714 gcc_assert (!(GET_CODE (oldval) == ZERO_EXTEND
715 && CONST_INT_P (XEXP (oldval, 0))));
716 }
717
718 if (undobuf.frees)
719 buf = undobuf.frees, undobuf.frees = buf->next;
720 else
721 buf = XNEW (struct undo);
722
723 buf->kind = UNDO_RTX;
724 buf->where.r = into;
725 buf->old_contents.r = oldval;
726 *into = newval;
727
728 buf->next = undobuf.undos, undobuf.undos = buf;
729 }
730
731 #define SUBST(INTO, NEWVAL) do_SUBST(&(INTO), (NEWVAL))
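/* Illustrative use (a sketch of the idiom used throughout this file):
   SUBST (SET_SRC (pat), new_src) records the old SET_SRC of PAT in
   undobuf and installs NEW_SRC in its place, so a later undo_all can
   restore the original pattern if the combination fails to match.  */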
732
733 /* Similar to SUBST, but NEWVAL is an int expression. Note that substitution
734 for the value of a HOST_WIDE_INT value (including CONST_INT) is
735 not safe. */
736
737 static void
738 do_SUBST_INT (int *into, int newval)
739 {
740 struct undo *buf;
741 int oldval = *into;
742
743 if (oldval == newval)
744 return;
745
746 if (undobuf.frees)
747 buf = undobuf.frees, undobuf.frees = buf->next;
748 else
749 buf = XNEW (struct undo);
750
751 buf->kind = UNDO_INT;
752 buf->where.i = into;
753 buf->old_contents.i = oldval;
754 *into = newval;
755
756 buf->next = undobuf.undos, undobuf.undos = buf;
757 }
758
759 #define SUBST_INT(INTO, NEWVAL) do_SUBST_INT(&(INTO), (NEWVAL))
760
761 /* Similar to SUBST, but just substitute the mode. This is used when
762 changing the mode of a pseudo-register, so that any other
763 references to the entry in the regno_reg_rtx array will change as
764 well. */
765
766 static void
767 do_SUBST_MODE (rtx *into, enum machine_mode newval)
768 {
769 struct undo *buf;
770 enum machine_mode oldval = GET_MODE (*into);
771
772 if (oldval == newval)
773 return;
774
775 if (undobuf.frees)
776 buf = undobuf.frees, undobuf.frees = buf->next;
777 else
778 buf = XNEW (struct undo);
779
780 buf->kind = UNDO_MODE;
781 buf->where.r = into;
782 buf->old_contents.m = oldval;
783 adjust_reg_mode (*into, newval);
784
785 buf->next = undobuf.undos, undobuf.undos = buf;
786 }
787
788 #define SUBST_MODE(INTO, NEWVAL) do_SUBST_MODE(&(INTO), (NEWVAL))
789
790 #ifndef HAVE_cc0
791 /* Similar to SUBST, but NEWVAL is a LOG_LINKS expression. */
792
793 static void
794 do_SUBST_LINK (struct insn_link **into, struct insn_link *newval)
795 {
796 struct undo *buf;
797 struct insn_link * oldval = *into;
798
799 if (oldval == newval)
800 return;
801
802 if (undobuf.frees)
803 buf = undobuf.frees, undobuf.frees = buf->next;
804 else
805 buf = XNEW (struct undo);
806
807 buf->kind = UNDO_LINKS;
808 buf->where.l = into;
809 buf->old_contents.l = oldval;
810 *into = newval;
811
812 buf->next = undobuf.undos, undobuf.undos = buf;
813 }
814
815 #define SUBST_LINK(oldval, newval) do_SUBST_LINK (&oldval, newval)
816 #endif
817 \f
818 /* Subroutine of try_combine. Determine whether the replacement patterns
819 NEWPAT, NEWI2PAT and NEWOTHERPAT are cheaper according to insn_rtx_cost
820 than the original sequence I0, I1, I2, I3 and undobuf.other_insn. Note
821 that I0, I1 and/or NEWI2PAT may be NULL_RTX. Similarly, NEWOTHERPAT and
822 undobuf.other_insn may also both be NULL_RTX. Return false if the cost
823 of all the instructions can be estimated and the replacements are more
824 expensive than the original sequence. */
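/* A worked example with purely illustrative numbers: if I2 and I3 cost 4
   each and there is no I0, I1 or other_insn, old_cost is 8.  A single
   replacement pattern (no NEWI2PAT) costing 10 is rejected; one costing
   7 is accepted, as is one whose cost cannot be estimated (reported as
   0), since the comparison is only made when both costs are known.  */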
825
826 static bool
827 combine_validate_cost (rtx i0, rtx i1, rtx i2, rtx i3, rtx newpat,
828 rtx newi2pat, rtx newotherpat)
829 {
830 int i0_cost, i1_cost, i2_cost, i3_cost;
831 int new_i2_cost, new_i3_cost;
832 int old_cost, new_cost;
833
834 /* Lookup the original insn_rtx_costs. */
835 i2_cost = INSN_COST (i2);
836 i3_cost = INSN_COST (i3);
837
838 if (i1)
839 {
840 i1_cost = INSN_COST (i1);
841 if (i0)
842 {
843 i0_cost = INSN_COST (i0);
844 old_cost = (i0_cost > 0 && i1_cost > 0 && i2_cost > 0 && i3_cost > 0
845 ? i0_cost + i1_cost + i2_cost + i3_cost : 0);
846 }
847 else
848 {
849 old_cost = (i1_cost > 0 && i2_cost > 0 && i3_cost > 0
850 ? i1_cost + i2_cost + i3_cost : 0);
851 i0_cost = 0;
852 }
853 }
854 else
855 {
856 old_cost = (i2_cost > 0 && i3_cost > 0) ? i2_cost + i3_cost : 0;
857 i1_cost = i0_cost = 0;
858 }
859
860 /* Calculate the replacement insn_rtx_costs. */
861 new_i3_cost = insn_rtx_cost (newpat, optimize_this_for_speed_p);
862 if (newi2pat)
863 {
864 new_i2_cost = insn_rtx_cost (newi2pat, optimize_this_for_speed_p);
865 new_cost = (new_i2_cost > 0 && new_i3_cost > 0)
866 ? new_i2_cost + new_i3_cost : 0;
867 }
868 else
869 {
870 new_cost = new_i3_cost;
871 new_i2_cost = 0;
872 }
873
874 if (undobuf.other_insn)
875 {
876 int old_other_cost, new_other_cost;
877
878 old_other_cost = INSN_COST (undobuf.other_insn);
879 new_other_cost = insn_rtx_cost (newotherpat, optimize_this_for_speed_p);
880 if (old_other_cost > 0 && new_other_cost > 0)
881 {
882 old_cost += old_other_cost;
883 new_cost += new_other_cost;
884 }
885 else
886 old_cost = 0;
887 }
888
889 /* Disallow this combination if both new_cost and old_cost are greater than
890 zero, and new_cost is greater than old cost. */
891 if (old_cost > 0 && new_cost > old_cost)
892 {
893 if (dump_file)
894 {
895 if (i0)
896 {
897 fprintf (dump_file,
898 "rejecting combination of insns %d, %d, %d and %d\n",
899 INSN_UID (i0), INSN_UID (i1), INSN_UID (i2),
900 INSN_UID (i3));
901 fprintf (dump_file, "original costs %d + %d + %d + %d = %d\n",
902 i0_cost, i1_cost, i2_cost, i3_cost, old_cost);
903 }
904 else if (i1)
905 {
906 fprintf (dump_file,
907 "rejecting combination of insns %d, %d and %d\n",
908 INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
909 fprintf (dump_file, "original costs %d + %d + %d = %d\n",
910 i1_cost, i2_cost, i3_cost, old_cost);
911 }
912 else
913 {
914 fprintf (dump_file,
915 "rejecting combination of insns %d and %d\n",
916 INSN_UID (i2), INSN_UID (i3));
917 fprintf (dump_file, "original costs %d + %d = %d\n",
918 i2_cost, i3_cost, old_cost);
919 }
920
921 if (newi2pat)
922 {
923 fprintf (dump_file, "replacement costs %d + %d = %d\n",
924 new_i2_cost, new_i3_cost, new_cost);
925 }
926 else
927 fprintf (dump_file, "replacement cost %d\n", new_cost);
928 }
929
930 return false;
931 }
932
933 /* Update the uid_insn_cost array with the replacement costs. */
934 INSN_COST (i2) = new_i2_cost;
935 INSN_COST (i3) = new_i3_cost;
936 if (i1)
937 {
938 INSN_COST (i1) = 0;
939 if (i0)
940 INSN_COST (i0) = 0;
941 }
942
943 return true;
944 }
945
946
947 /* Delete any insns that copy a register to itself. */
948
949 static void
950 delete_noop_moves (void)
951 {
952 rtx insn, next;
953 basic_block bb;
954
955 FOR_EACH_BB (bb)
956 {
957 for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = next)
958 {
959 next = NEXT_INSN (insn);
960 if (INSN_P (insn) && noop_move_p (insn))
961 {
962 if (dump_file)
963 fprintf (dump_file, "deleting noop move %d\n", INSN_UID (insn));
964
965 delete_insn_and_edges (insn);
966 }
967 }
968 }
969 }
970
971 \f
972 /* Fill in log links field for all insns. */
973
974 static void
975 create_log_links (void)
976 {
977 basic_block bb;
978 rtx *next_use, insn;
979 df_ref *def_vec, *use_vec;
980
981 next_use = XCNEWVEC (rtx, max_reg_num ());
982
983 /* Pass through each block from the end, recording the uses of each
984 register and establishing log links when def is encountered.
985 Note that we do not clear next_use array in order to save time,
986 so we have to test whether the use is in the same basic block as def.
987
988 There are a few cases below when we do not consider the definition or
989 usage -- these follow what the original flow.c did. Don't ask me why it is
990 done this way; I don't know and if it works, I don't want to know. */
991
992 FOR_EACH_BB (bb)
993 {
994 FOR_BB_INSNS_REVERSE (bb, insn)
995 {
996 if (!NONDEBUG_INSN_P (insn))
997 continue;
998
999 /* Log links are created only once. */
1000 gcc_assert (!LOG_LINKS (insn));
1001
1002 for (def_vec = DF_INSN_DEFS (insn); *def_vec; def_vec++)
1003 {
1004 df_ref def = *def_vec;
1005 int regno = DF_REF_REGNO (def);
1006 rtx use_insn;
1007
1008 if (!next_use[regno])
1009 continue;
1010
1011 /* Do not consider if it is pre/post modification in MEM. */
1012 if (DF_REF_FLAGS (def) & DF_REF_PRE_POST_MODIFY)
1013 continue;
1014
1015 /* Do not make the log link for frame pointer. */
1016 if ((regno == FRAME_POINTER_REGNUM
1017 && (! reload_completed || frame_pointer_needed))
1018 #if !HARD_FRAME_POINTER_IS_FRAME_POINTER
1019 || (regno == HARD_FRAME_POINTER_REGNUM
1020 && (! reload_completed || frame_pointer_needed))
1021 #endif
1022 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1023 || (regno == ARG_POINTER_REGNUM && fixed_regs[regno])
1024 #endif
1025 )
1026 continue;
1027
1028 use_insn = next_use[regno];
1029 if (BLOCK_FOR_INSN (use_insn) == bb)
1030 {
1031 /* flow.c claimed:
1032
1033 We don't build a LOG_LINK for hard registers contained
1034 in ASM_OPERANDs. If these registers get replaced,
1035 we might wind up changing the semantics of the insn,
1036 even if reload can make what appear to be valid
1037 assignments later. */
1038 if (regno >= FIRST_PSEUDO_REGISTER
1039 || asm_noperands (PATTERN (use_insn)) < 0)
1040 {
1041 /* Don't add duplicate links between instructions. */
1042 struct insn_link *links;
1043 FOR_EACH_LOG_LINK (links, use_insn)
1044 if (insn == links->insn)
1045 break;
1046
1047 if (!links)
1048 LOG_LINKS (use_insn)
1049 = alloc_insn_link (insn, LOG_LINKS (use_insn));
1050 }
1051 }
1052 next_use[regno] = NULL_RTX;
1053 }
1054
1055 for (use_vec = DF_INSN_USES (insn); *use_vec; use_vec++)
1056 {
1057 df_ref use = *use_vec;
1058 int regno = DF_REF_REGNO (use);
1059
1060 /* Do not consider the usage of the stack pointer
1061 by function call. */
1062 if (DF_REF_FLAGS (use) & DF_REF_CALL_STACK_USAGE)
1063 continue;
1064
1065 next_use[regno] = insn;
1066 }
1067 }
1068 }
1069
1070 free (next_use);
1071 }
1072
1073 /* Walk the LOG_LINKS of insn B to see if we find a reference to A. Return
1074 true if we found a LOG_LINK that proves that A feeds B. This only works
1075 if there are no instructions between A and B which could have a link
1076 depending on A, since in that case we would not record a link for B.
1077 We also check the implicit dependency created by a cc0 setter/user
1078 pair. */
1079
1080 static bool
1081 insn_a_feeds_b (rtx a, rtx b)
1082 {
1083 struct insn_link *links;
1084 FOR_EACH_LOG_LINK (links, b)
1085 if (links->insn == a)
1086 return true;
1087 #ifdef HAVE_cc0
1088 if (sets_cc0_p (a))
1089 return true;
1090 #endif
1091 return false;
1092 }
1093 \f
1094 /* Main entry point for combiner. F is the first insn of the function.
1095 NREGS is the first unused pseudo-reg number.
1096
1097 Return nonzero if the combiner has turned an indirect jump
1098 instruction into a direct jump. */
1099 static int
1100 combine_instructions (rtx f, unsigned int nregs)
1101 {
1102 rtx insn, next;
1103 #ifdef HAVE_cc0
1104 rtx prev;
1105 #endif
1106 struct insn_link *links, *nextlinks;
1107 rtx first;
1108 basic_block last_bb;
1109
1110 int new_direct_jump_p = 0;
1111
1112 for (first = f; first && !INSN_P (first); )
1113 first = NEXT_INSN (first);
1114 if (!first)
1115 return 0;
1116
1117 combine_attempts = 0;
1118 combine_merges = 0;
1119 combine_extras = 0;
1120 combine_successes = 0;
1121
1122 rtl_hooks = combine_rtl_hooks;
1123
1124 VEC_safe_grow_cleared (reg_stat_type, heap, reg_stat, nregs);
1125
1126 init_recog_no_volatile ();
1127
1128 /* Allocate array for insn info. */
1129 max_uid_known = get_max_uid ();
1130 uid_log_links = XCNEWVEC (struct insn_link *, max_uid_known + 1);
1131 uid_insn_cost = XCNEWVEC (int, max_uid_known + 1);
1132 gcc_obstack_init (&insn_link_obstack);
1133
1134 nonzero_bits_mode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);
1135
1136 /* Don't use reg_stat[].nonzero_bits when computing it. This can cause
1137 problems when, for example, we have j <<= 1 in a loop. */
1138
1139 nonzero_sign_valid = 0;
1140 label_tick = label_tick_ebb_start = 1;
1141
1142 /* Scan all SETs and see if we can deduce anything about what
1143 bits are known to be zero for some registers and how many copies
1144 of the sign bit are known to exist for those registers.
1145
1146 Also set any known values so that we can use it while searching
1147 for what bits are known to be set. */
1148
1149 setup_incoming_promotions (first);
1150 /* Allow the entry block and the first block to fall into the same EBB.
1151 Conceptually the incoming promotions are assigned to the entry block. */
1152 last_bb = ENTRY_BLOCK_PTR;
1153
1154 create_log_links ();
1155 FOR_EACH_BB (this_basic_block)
1156 {
1157 optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
1158 last_call_luid = 0;
1159 mem_last_set = -1;
1160
1161 label_tick++;
1162 if (!single_pred_p (this_basic_block)
1163 || single_pred (this_basic_block) != last_bb)
1164 label_tick_ebb_start = label_tick;
1165 last_bb = this_basic_block;
1166
1167 FOR_BB_INSNS (this_basic_block, insn)
1168 if (INSN_P (insn) && BLOCK_FOR_INSN (insn))
1169 {
1170 #ifdef AUTO_INC_DEC
1171 rtx links;
1172 #endif
1173
1174 subst_low_luid = DF_INSN_LUID (insn);
1175 subst_insn = insn;
1176
1177 note_stores (PATTERN (insn), set_nonzero_bits_and_sign_copies,
1178 insn);
1179 record_dead_and_set_regs (insn);
1180
1181 #ifdef AUTO_INC_DEC
1182 for (links = REG_NOTES (insn); links; links = XEXP (links, 1))
1183 if (REG_NOTE_KIND (links) == REG_INC)
1184 set_nonzero_bits_and_sign_copies (XEXP (links, 0), NULL_RTX,
1185 insn);
1186 #endif
1187
1188 /* Record the current insn_rtx_cost of this instruction. */
1189 if (NONJUMP_INSN_P (insn))
1190 INSN_COST (insn) = insn_rtx_cost (PATTERN (insn),
1191 optimize_this_for_speed_p);
1192 if (dump_file)
1193 fprintf(dump_file, "insn_cost %d: %d\n",
1194 INSN_UID (insn), INSN_COST (insn));
1195 }
1196 }
1197
1198 nonzero_sign_valid = 1;
1199
1200 /* Now scan all the insns in forward order. */
1201 label_tick = label_tick_ebb_start = 1;
1202 init_reg_last ();
1203 setup_incoming_promotions (first);
1204 last_bb = ENTRY_BLOCK_PTR;
1205
1206 FOR_EACH_BB (this_basic_block)
1207 {
1208 rtx last_combined_insn = NULL_RTX;
1209 optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
1210 last_call_luid = 0;
1211 mem_last_set = -1;
1212
1213 label_tick++;
1214 if (!single_pred_p (this_basic_block)
1215 || single_pred (this_basic_block) != last_bb)
1216 label_tick_ebb_start = label_tick;
1217 last_bb = this_basic_block;
1218
1219 rtl_profile_for_bb (this_basic_block);
1220 for (insn = BB_HEAD (this_basic_block);
1221 insn != NEXT_INSN (BB_END (this_basic_block));
1222 insn = next ? next : NEXT_INSN (insn))
1223 {
1224 next = 0;
1225 if (NONDEBUG_INSN_P (insn))
1226 {
1227 while (last_combined_insn
1228 && INSN_DELETED_P (last_combined_insn))
1229 last_combined_insn = PREV_INSN (last_combined_insn);
1230 if (last_combined_insn == NULL_RTX
1231 || BARRIER_P (last_combined_insn)
1232 || BLOCK_FOR_INSN (last_combined_insn) != this_basic_block
1233 || DF_INSN_LUID (last_combined_insn) <= DF_INSN_LUID (insn))
1234 last_combined_insn = insn;
1235
1236 /* See if we know about function return values before this
1237 insn based upon SUBREG flags. */
1238 check_promoted_subreg (insn, PATTERN (insn));
1239
1240 /* See if we can find hardregs and subreg of pseudos in
1241 narrower modes. This could help turning TRUNCATEs
1242 into SUBREGs. */
1243 note_uses (&PATTERN (insn), record_truncated_values, NULL);
1244
1245 /* Try this insn with each insn it links back to. */
1246
1247 FOR_EACH_LOG_LINK (links, insn)
1248 if ((next = try_combine (insn, links->insn, NULL_RTX,
1249 NULL_RTX, &new_direct_jump_p,
1250 last_combined_insn)) != 0)
1251 goto retry;
1252
1253 /* Try each sequence of three linked insns ending with this one. */
1254
1255 FOR_EACH_LOG_LINK (links, insn)
1256 {
1257 rtx link = links->insn;
1258
1259 /* If the linked insn has been replaced by a note, then there
1260 is no point in pursuing this chain any further. */
1261 if (NOTE_P (link))
1262 continue;
1263
1264 FOR_EACH_LOG_LINK (nextlinks, link)
1265 if ((next = try_combine (insn, link, nextlinks->insn,
1266 NULL_RTX, &new_direct_jump_p,
1267 last_combined_insn)) != 0)
1268 goto retry;
1269 }
1270
1271 #ifdef HAVE_cc0
1272 /* Try to combine a jump insn that uses CC0
1273 with a preceding insn that sets CC0, and maybe with its
1274 logical predecessor as well.
1275 This is how we make decrement-and-branch insns.
1276 We need this special code because data flow connections
1277 via CC0 do not get entered in LOG_LINKS. */
1278
1279 if (JUMP_P (insn)
1280 && (prev = prev_nonnote_insn (insn)) != 0
1281 && NONJUMP_INSN_P (prev)
1282 && sets_cc0_p (PATTERN (prev)))
1283 {
1284 if ((next = try_combine (insn, prev, NULL_RTX, NULL_RTX,
1285 &new_direct_jump_p,
1286 last_combined_insn)) != 0)
1287 goto retry;
1288
1289 FOR_EACH_LOG_LINK (nextlinks, prev)
1290 if ((next = try_combine (insn, prev, nextlinks->insn,
1291 NULL_RTX, &new_direct_jump_p,
1292 last_combined_insn)) != 0)
1293 goto retry;
1294 }
1295
1296 /* Do the same for an insn that explicitly references CC0. */
1297 if (NONJUMP_INSN_P (insn)
1298 && (prev = prev_nonnote_insn (insn)) != 0
1299 && NONJUMP_INSN_P (prev)
1300 && sets_cc0_p (PATTERN (prev))
1301 && GET_CODE (PATTERN (insn)) == SET
1302 && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (insn))))
1303 {
1304 if ((next = try_combine (insn, prev, NULL_RTX, NULL_RTX,
1305 &new_direct_jump_p,
1306 last_combined_insn)) != 0)
1307 goto retry;
1308
1309 FOR_EACH_LOG_LINK (nextlinks, prev)
1310 if ((next = try_combine (insn, prev, nextlinks->insn,
1311 NULL_RTX, &new_direct_jump_p,
1312 last_combined_insn)) != 0)
1313 goto retry;
1314 }
1315
1316 /* Finally, see if any of the insns that this insn links to
1317 explicitly references CC0. If so, try this insn, that insn,
1318 and its predecessor if it sets CC0. */
1319 FOR_EACH_LOG_LINK (links, insn)
1320 if (NONJUMP_INSN_P (links->insn)
1321 && GET_CODE (PATTERN (links->insn)) == SET
1322 && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (links->insn)))
1323 && (prev = prev_nonnote_insn (links->insn)) != 0
1324 && NONJUMP_INSN_P (prev)
1325 && sets_cc0_p (PATTERN (prev))
1326 && (next = try_combine (insn, links->insn,
1327 prev, NULL_RTX, &new_direct_jump_p,
1328 last_combined_insn)) != 0)
1329 goto retry;
1330 #endif
1331
1332 /* Try combining an insn with two different insns whose results it
1333 uses. */
1334 FOR_EACH_LOG_LINK (links, insn)
1335 for (nextlinks = links->next; nextlinks;
1336 nextlinks = nextlinks->next)
1337 if ((next = try_combine (insn, links->insn,
1338 nextlinks->insn, NULL_RTX,
1339 &new_direct_jump_p,
1340 last_combined_insn)) != 0)
1341 goto retry;
1342
1343 /* Try four-instruction combinations. */
1344 FOR_EACH_LOG_LINK (links, insn)
1345 {
1346 struct insn_link *next1;
1347 rtx link = links->insn;
1348
1349 /* If the linked insn has been replaced by a note, then there
1350 is no point in pursuing this chain any further. */
1351 if (NOTE_P (link))
1352 continue;
1353
1354 FOR_EACH_LOG_LINK (next1, link)
1355 {
1356 rtx link1 = next1->insn;
1357 if (NOTE_P (link1))
1358 continue;
1359 /* I0 -> I1 -> I2 -> I3. */
1360 FOR_EACH_LOG_LINK (nextlinks, link1)
1361 if ((next = try_combine (insn, link, link1,
1362 nextlinks->insn,
1363 &new_direct_jump_p,
1364 last_combined_insn)) != 0)
1365 goto retry;
1366 /* I0, I1 -> I2, I2 -> I3. */
1367 for (nextlinks = next1->next; nextlinks;
1368 nextlinks = nextlinks->next)
1369 if ((next = try_combine (insn, link, link1,
1370 nextlinks->insn,
1371 &new_direct_jump_p,
1372 last_combined_insn)) != 0)
1373 goto retry;
1374 }
1375
1376 for (next1 = links->next; next1; next1 = next1->next)
1377 {
1378 rtx link1 = next1->insn;
1379 if (NOTE_P (link1))
1380 continue;
1381 /* I0 -> I2; I1, I2 -> I3. */
1382 FOR_EACH_LOG_LINK (nextlinks, link)
1383 if ((next = try_combine (insn, link, link1,
1384 nextlinks->insn,
1385 &new_direct_jump_p,
1386 last_combined_insn)) != 0)
1387 goto retry;
1388 /* I0 -> I1; I1, I2 -> I3. */
1389 FOR_EACH_LOG_LINK (nextlinks, link1)
1390 if ((next = try_combine (insn, link, link1,
1391 nextlinks->insn,
1392 &new_direct_jump_p,
1393 last_combined_insn)) != 0)
1394 goto retry;
1395 }
1396 }
1397
1398 /* Try this insn with each REG_EQUAL note it links back to. */
1399 FOR_EACH_LOG_LINK (links, insn)
1400 {
1401 rtx set, note;
1402 rtx temp = links->insn;
1403 if ((set = single_set (temp)) != 0
1404 && (note = find_reg_equal_equiv_note (temp)) != 0
1405 && (note = XEXP (note, 0), GET_CODE (note)) != EXPR_LIST
1406 /* Avoid using a register that may already have been marked
1407 dead by an earlier instruction. */
1408 && ! unmentioned_reg_p (note, SET_SRC (set))
1409 && (GET_MODE (note) == VOIDmode
1410 ? SCALAR_INT_MODE_P (GET_MODE (SET_DEST (set)))
1411 : GET_MODE (SET_DEST (set)) == GET_MODE (note)))
1412 {
1413 /* Temporarily replace the set's source with the
1414 contents of the REG_EQUAL note. The insn will
1415 be deleted or recognized by try_combine. */
1416 rtx orig = SET_SRC (set);
1417 SET_SRC (set) = note;
1418 i2mod = temp;
1419 i2mod_old_rhs = copy_rtx (orig);
1420 i2mod_new_rhs = copy_rtx (note);
1421 next = try_combine (insn, i2mod, NULL_RTX, NULL_RTX,
1422 &new_direct_jump_p,
1423 last_combined_insn);
1424 i2mod = NULL_RTX;
1425 if (next)
1426 goto retry;
1427 SET_SRC (set) = orig;
1428 }
1429 }
1430
1431 if (!NOTE_P (insn))
1432 record_dead_and_set_regs (insn);
1433
1434 retry:
1435 ;
1436 }
1437 }
1438 }
1439
1440 default_rtl_profile ();
1441 clear_bb_flags ();
1442 new_direct_jump_p |= purge_all_dead_edges ();
1443 delete_noop_moves ();
1444
1445 /* Clean up. */
1446 obstack_free (&insn_link_obstack, NULL);
1447 free (uid_log_links);
1448 free (uid_insn_cost);
1449 VEC_free (reg_stat_type, heap, reg_stat);
1450
1451 {
1452 struct undo *undo, *next;
1453 for (undo = undobuf.frees; undo; undo = next)
1454 {
1455 next = undo->next;
1456 free (undo);
1457 }
1458 undobuf.frees = 0;
1459 }
1460
1461 total_attempts += combine_attempts;
1462 total_merges += combine_merges;
1463 total_extras += combine_extras;
1464 total_successes += combine_successes;
1465
1466 nonzero_sign_valid = 0;
1467 rtl_hooks = general_rtl_hooks;
1468
1469 /* Make recognizer allow volatile MEMs again. */
1470 init_recog ();
1471
1472 return new_direct_jump_p;
1473 }
1474
1475 /* Wipe the last_xxx fields of reg_stat in preparation for another pass. */
1476
1477 static void
1478 init_reg_last (void)
1479 {
1480 unsigned int i;
1481 reg_stat_type *p;
1482
1483 FOR_EACH_VEC_ELT (reg_stat_type, reg_stat, i, p)
1484 memset (p, 0, offsetof (reg_stat_type, sign_bit_copies));
1485 }
1486 \f
1487 /* Set up any promoted values for incoming argument registers. */
1488
1489 static void
1490 setup_incoming_promotions (rtx first)
1491 {
1492 tree arg;
1493 bool strictly_local = false;
1494
1495 for (arg = DECL_ARGUMENTS (current_function_decl); arg;
1496 arg = DECL_CHAIN (arg))
1497 {
1498 rtx x, reg = DECL_INCOMING_RTL (arg);
1499 int uns1, uns3;
1500 enum machine_mode mode1, mode2, mode3, mode4;
1501
1502 /* Only continue if the incoming argument is in a register. */
1503 if (!REG_P (reg))
1504 continue;
1505
1506 /* Determine, if possible, whether all call sites of the current
1507 function lie within the current compilation unit. (This does
1508 take into account the exporting of a function via taking its
1509 address, and so forth.) */
1510 strictly_local = cgraph_local_info (current_function_decl)->local;
1511
1512 /* The mode and signedness of the argument before any promotions happen
1513 (equal to the mode of the pseudo holding it at that stage). */
1514 mode1 = TYPE_MODE (TREE_TYPE (arg));
1515 uns1 = TYPE_UNSIGNED (TREE_TYPE (arg));
1516
1517 /* The mode and signedness of the argument after any source language and
1518 TARGET_PROMOTE_PROTOTYPES-driven promotions. */
1519 mode2 = TYPE_MODE (DECL_ARG_TYPE (arg));
1520 uns3 = TYPE_UNSIGNED (DECL_ARG_TYPE (arg));
1521
1522 /* The mode and signedness of the argument as it is actually passed,
1523 after any TARGET_PROMOTE_FUNCTION_ARGS-driven ABI promotions. */
1524 mode3 = promote_function_mode (DECL_ARG_TYPE (arg), mode2, &uns3,
1525 TREE_TYPE (cfun->decl), 0);
1526
1527 /* The mode of the register in which the argument is being passed. */
1528 mode4 = GET_MODE (reg);
1529
1530 /* Eliminate sign extensions in the callee when:
1531 (a) A mode promotion has occurred; */
1532 if (mode1 == mode3)
1533 continue;
1534 /* (b) The mode of the register is the same as the mode of
1535 the argument as it is passed; */
1536 if (mode3 != mode4)
1537 continue;
1538 /* (c) There's no language level extension; */
1539 if (mode1 == mode2)
1540 ;
1541 /* (c.1) All callers are from the current compilation unit. If that's
1542 the case we don't have to rely on an ABI, we only have to know
1543 what we're generating right now, and we know that we will do the
1544 mode1 to mode2 promotion with the given sign. */
1545 else if (!strictly_local)
1546 continue;
1547 /* (c.2) The combination of the two promotions is useful. This is
1548 true when the signs match, or if the first promotion is unsigned.
1549 In the latter case, (sign_extend (zero_extend x)) is the same as
1550 (zero_extend (zero_extend x)), so make sure to force UNS3 true. */
1551 else if (uns1)
1552 uns3 = true;
1553 else if (uns3)
1554 continue;
1555
1556 /* Record that the value was promoted from mode1 to mode3,
1557 so that any sign extension at the head of the current
1558 function may be eliminated. */
1559 x = gen_rtx_CLOBBER (mode1, const0_rtx);
1560 x = gen_rtx_fmt_e ((uns3 ? ZERO_EXTEND : SIGN_EXTEND), mode3, x);
1561 record_value_for_reg (reg, first, x);
1562 }
1563 }
1564
1565 /* Called via note_stores. If X is a pseudo that is narrower than
1566 HOST_BITS_PER_WIDE_INT and is being set, record what bits are known zero.
1567
1568 If we are setting only a portion of X and we can't figure out what
1569 portion, assume all bits will be used since we don't know what will
1570 be happening.
1571
1572 Similarly, set how many bits of X are known to be copies of the sign bit
1573 at all locations in the function. This is the smallest number implied
1574 by any set of X. */
1575
1576 static void
1577 set_nonzero_bits_and_sign_copies (rtx x, const_rtx set, void *data)
1578 {
1579 rtx insn = (rtx) data;
1580 unsigned int num;
1581
1582 if (REG_P (x)
1583 && REGNO (x) >= FIRST_PSEUDO_REGISTER
1584 /* If this register is undefined at the start of the function, we can't
1585 say what its contents were. */
1586 && ! REGNO_REG_SET_P
1587 (DF_LR_IN (ENTRY_BLOCK_PTR->next_bb), REGNO (x))
1588 && HWI_COMPUTABLE_MODE_P (GET_MODE (x)))
1589 {
1590 reg_stat_type *rsp = &VEC_index (reg_stat_type, reg_stat, REGNO (x));
1591
1592 if (set == 0 || GET_CODE (set) == CLOBBER)
1593 {
1594 rsp->nonzero_bits = GET_MODE_MASK (GET_MODE (x));
1595 rsp->sign_bit_copies = 1;
1596 return;
1597 }
1598
1599 /* If this register is being initialized using itself, and the
1600 register is uninitialized in this basic block, and there are
1601 no LOG_LINKS which set the register, then part of the
1602 register is uninitialized. In that case we can't assume
1603 anything about the number of nonzero bits.
1604
1605 ??? We could do better if we checked this in
1606 reg_{nonzero_bits,num_sign_bit_copies}_for_combine. Then we
1607 could avoid making assumptions about the insn which initially
1608 sets the register, while still using the information in other
1609 insns. We would have to be careful to check every insn
1610 involved in the combination. */
1611
1612 if (insn
1613 && reg_referenced_p (x, PATTERN (insn))
1614 && !REGNO_REG_SET_P (DF_LR_IN (BLOCK_FOR_INSN (insn)),
1615 REGNO (x)))
1616 {
1617 struct insn_link *link;
1618
1619 FOR_EACH_LOG_LINK (link, insn)
1620 if (dead_or_set_p (link->insn, x))
1621 break;
1622 if (!link)
1623 {
1624 rsp->nonzero_bits = GET_MODE_MASK (GET_MODE (x));
1625 rsp->sign_bit_copies = 1;
1626 return;
1627 }
1628 }
1629
1630 /* If this is a complex assignment, see if we can convert it into a
1631 simple assignment. */
1632 set = expand_field_assignment (set);
1633
1634 /* If this is a simple assignment, or we have a paradoxical SUBREG,
1635 set what we know about X. */
1636
1637 if (SET_DEST (set) == x
1638 || (paradoxical_subreg_p (SET_DEST (set))
1639 && SUBREG_REG (SET_DEST (set)) == x))
1640 {
1641 rtx src = SET_SRC (set);
1642
1643 #ifdef SHORT_IMMEDIATES_SIGN_EXTEND
1644 /* If X is narrower than a word and SRC is a non-negative
1645 constant that would appear negative in the mode of X,
1646 sign-extend it for use in reg_stat[].nonzero_bits because some
1647 machines (maybe most) will actually do the sign-extension
1648 and this is the conservative approach.
1649
1650 ??? For 2.5, try to tighten up the MD files in this regard
1651 instead of this kludge. */
1652
1653 if (GET_MODE_PRECISION (GET_MODE (x)) < BITS_PER_WORD
1654 && CONST_INT_P (src)
1655 && INTVAL (src) > 0
1656 && val_signbit_known_set_p (GET_MODE (x), INTVAL (src)))
1657 src = GEN_INT (INTVAL (src) | ~GET_MODE_MASK (GET_MODE (x)));
1658 #endif
1659
1660 /* Don't call nonzero_bits if it cannot change anything. */
1661 if (rsp->nonzero_bits != ~(unsigned HOST_WIDE_INT) 0)
1662 rsp->nonzero_bits |= nonzero_bits (src, nonzero_bits_mode);
1663 num = num_sign_bit_copies (SET_SRC (set), GET_MODE (x));
1664 if (rsp->sign_bit_copies == 0
1665 || rsp->sign_bit_copies > num)
1666 rsp->sign_bit_copies = num;
1667 }
1668 else
1669 {
1670 rsp->nonzero_bits = GET_MODE_MASK (GET_MODE (x));
1671 rsp->sign_bit_copies = 1;
1672 }
1673 }
1674 }
1675 \f
1676 /* See if INSN can be combined into I3. PRED, PRED2, SUCC and SUCC2 are
1677 optionally insns that were previously combined into I3 or that will be
1678 combined into the merger of INSN and I3. The order is PRED, PRED2,
1679 INSN, SUCC, SUCC2, I3.
1680
1681 Return 0 if the combination is not allowed for any reason.
1682
1683 If the combination is allowed, *PDEST will be set to the single
1684 destination of INSN and *PSRC to the single source, and this function
1685 will return 1. */
1686
1687 static int
1688 can_combine_p (rtx insn, rtx i3, rtx pred ATTRIBUTE_UNUSED,
1689 rtx pred2 ATTRIBUTE_UNUSED, rtx succ, rtx succ2,
1690 rtx *pdest, rtx *psrc)
1691 {
1692 int i;
1693 const_rtx set = 0;
1694 rtx src, dest;
1695 rtx p;
1696 #ifdef AUTO_INC_DEC
1697 rtx link;
1698 #endif
1699 bool all_adjacent = true;
1700 int (*is_volatile_p) (const_rtx);
1701
1702 if (succ)
1703 {
1704 if (succ2)
1705 {
1706 if (next_active_insn (succ2) != i3)
1707 all_adjacent = false;
1708 if (next_active_insn (succ) != succ2)
1709 all_adjacent = false;
1710 }
1711 else if (next_active_insn (succ) != i3)
1712 all_adjacent = false;
1713 if (next_active_insn (insn) != succ)
1714 all_adjacent = false;
1715 }
1716 else if (next_active_insn (insn) != i3)
1717 all_adjacent = false;
1718
1719 /* Can combine only if previous insn is a SET of a REG, a SUBREG or CC0.
1720 or a PARALLEL consisting of such a SET and CLOBBERs.
1721
1722 If INSN has CLOBBER parallel parts, ignore them for our processing.
1723 By definition, these happen during the execution of the insn. When it
1724 is merged with another insn, all bets are off. If they are, in fact,
1725 needed and aren't also supplied in I3, they may be added by
1726 recog_for_combine. Otherwise, it won't match.
1727
1728 We can also ignore a SET whose SET_DEST is mentioned in a REG_UNUSED
1729 note.
1730
1731 Get the source and destination of INSN. If more than one, can't
1732 combine. */
1733
1734 if (GET_CODE (PATTERN (insn)) == SET)
1735 set = PATTERN (insn);
1736 else if (GET_CODE (PATTERN (insn)) == PARALLEL
1737 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET)
1738 {
1739 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
1740 {
1741 rtx elt = XVECEXP (PATTERN (insn), 0, i);
1742
1743 switch (GET_CODE (elt))
1744 {
1745 /* This is important to combine floating point insns
1746 for the SH4 port. */
1747 case USE:
1748 /* Combining an isolated USE doesn't make sense.
1749 We depend here on combinable_i3pat to reject them. */
1750 /* The code below this loop only verifies that the inputs of
1751 the SET in INSN do not change. We call reg_set_between_p
1752 to verify that the REG in the USE does not change between
1753 I3 and INSN.
1754 If the USE in INSN was for a pseudo register, the matching
1755 insn pattern will likely match any register; combining this
1756 with any other USE would only be safe if we knew that the
1757 used registers have identical values, or if there was
1758 something to tell them apart, e.g. different modes. For
1759 now, we forgo such complicated tests and simply disallow
1760 combining of USES of pseudo registers with any other USE. */
1761 if (REG_P (XEXP (elt, 0))
1762 && GET_CODE (PATTERN (i3)) == PARALLEL)
1763 {
1764 rtx i3pat = PATTERN (i3);
1765 int i = XVECLEN (i3pat, 0) - 1;
1766 unsigned int regno = REGNO (XEXP (elt, 0));
1767
1768 do
1769 {
1770 rtx i3elt = XVECEXP (i3pat, 0, i);
1771
1772 if (GET_CODE (i3elt) == USE
1773 && REG_P (XEXP (i3elt, 0))
1774 && (REGNO (XEXP (i3elt, 0)) == regno
1775 ? reg_set_between_p (XEXP (elt, 0),
1776 PREV_INSN (insn), i3)
1777 : regno >= FIRST_PSEUDO_REGISTER))
1778 return 0;
1779 }
1780 while (--i >= 0);
1781 }
1782 break;
1783
1784 /* We can ignore CLOBBERs. */
1785 case CLOBBER:
1786 break;
1787
1788 case SET:
1789 /* Ignore SETs whose result isn't used, but not those that
1790 have side-effects. */
1791 if (find_reg_note (insn, REG_UNUSED, SET_DEST (elt))
1792 && insn_nothrow_p (insn)
1793 && !side_effects_p (elt))
1794 break;
1795
1796 /* If we have already found a SET, this is a second one and
1797 so we cannot combine with this insn. */
1798 if (set)
1799 return 0;
1800
1801 set = elt;
1802 break;
1803
1804 default:
1805 /* Anything else means we can't combine. */
1806 return 0;
1807 }
1808 }
1809
1810 if (set == 0
1811 /* If SET_SRC is an ASM_OPERANDS we can't throw away these CLOBBERs,
1812 so don't do anything with it. */
1813 || GET_CODE (SET_SRC (set)) == ASM_OPERANDS)
1814 return 0;
1815 }
1816 else
1817 return 0;
1818
1819 if (set == 0)
1820 return 0;
1821
1822 /* The simplification in expand_field_assignment may call back to
1823 get_last_value, so set safe guard here. */
1824 subst_low_luid = DF_INSN_LUID (insn);
1825
1826 set = expand_field_assignment (set);
1827 src = SET_SRC (set), dest = SET_DEST (set);
1828
1829 /* Don't eliminate a store to the stack pointer. */
1830 if (dest == stack_pointer_rtx
1831 /* Don't combine with an insn that sets a register to itself if it has
1832 a REG_EQUAL note. This may be part of a LIBCALL sequence. */
1833 || (rtx_equal_p (src, dest) && find_reg_note (insn, REG_EQUAL, NULL_RTX))
1834 /* Can't merge an ASM_OPERANDS. */
1835 || GET_CODE (src) == ASM_OPERANDS
1836 /* Can't merge a function call. */
1837 || GET_CODE (src) == CALL
1838 /* Don't eliminate a function call argument. */
1839 || (CALL_P (i3)
1840 && (find_reg_fusage (i3, USE, dest)
1841 || (REG_P (dest)
1842 && REGNO (dest) < FIRST_PSEUDO_REGISTER
1843 && global_regs[REGNO (dest)])))
1844 /* Don't substitute into an incremented register. */
1845 || FIND_REG_INC_NOTE (i3, dest)
1846 || (succ && FIND_REG_INC_NOTE (succ, dest))
1847 || (succ2 && FIND_REG_INC_NOTE (succ2, dest))
1848 /* Don't substitute into a non-local goto, this confuses CFG. */
1849 || (JUMP_P (i3) && find_reg_note (i3, REG_NON_LOCAL_GOTO, NULL_RTX))
1850 /* Make sure that DEST is not used after SUCC but before I3. */
1851 || (!all_adjacent
1852 && ((succ2
1853 && (reg_used_between_p (dest, succ2, i3)
1854 || reg_used_between_p (dest, succ, succ2)))
1855 || (!succ2 && succ && reg_used_between_p (dest, succ, i3))))
1856 /* Make sure that the value that is to be substituted for the register
1857 does not use any registers whose values alter in between. However,
1858 if the insns are adjacent, a use can't cross a set even though we
1859 think it might (this can happen for a sequence of insns each setting
1860 the same destination; last_set of that register might point to
1861 a NOTE). If INSN has a REG_EQUIV note, the register is always
1862 equivalent to the memory so the substitution is valid even if there
1863 are intervening stores. Also, don't move a volatile asm or
1864 UNSPEC_VOLATILE across any other insns. */
1865 || (! all_adjacent
1866 && (((!MEM_P (src)
1867 || ! find_reg_note (insn, REG_EQUIV, src))
1868 && use_crosses_set_p (src, DF_INSN_LUID (insn)))
1869 || (GET_CODE (src) == ASM_OPERANDS && MEM_VOLATILE_P (src))
1870 || GET_CODE (src) == UNSPEC_VOLATILE))
1871 /* Don't combine across a CALL_INSN, because that would possibly
1872 change whether the life span of some REGs crosses calls or not,
1873 and it is a pain to update that information.
1874 Exception: if source is a constant, moving it later can't hurt.
1875 Accept that as a special case. */
1876 || (DF_INSN_LUID (insn) < last_call_luid && ! CONSTANT_P (src)))
1877 return 0;
1878
1879 /* DEST must either be a REG or CC0. */
1880 if (REG_P (dest))
1881 {
1882 /* If register alignment is being enforced for multi-word items in all
1883 cases except for parameters, it is possible to have a register copy
1884 insn referencing a hard register that is not allowed to contain the
1885 mode being copied and which would not be valid as an operand of most
1886 insns. Eliminate this problem by not combining with such an insn.
1887
1888 Also, on some machines we don't want to extend the life of a hard
1889 register. */
1890
1891 if (REG_P (src)
1892 && ((REGNO (dest) < FIRST_PSEUDO_REGISTER
1893 && ! HARD_REGNO_MODE_OK (REGNO (dest), GET_MODE (dest)))
1894 /* Don't extend the life of a hard register unless it is
1895 a user variable (if we have few registers) or it can't
1896 fit into the desired register (meaning something special
1897 is going on).
1898 Also avoid substituting a return register into I3, because
1899 reload can't handle a conflict with constraints of other
1900 inputs. */
1901 || (REGNO (src) < FIRST_PSEUDO_REGISTER
1902 && ! HARD_REGNO_MODE_OK (REGNO (src), GET_MODE (src)))))
1903 return 0;
1904 }
1905 else if (GET_CODE (dest) != CC0)
1906 return 0;
1907
1908
1909 if (GET_CODE (PATTERN (i3)) == PARALLEL)
1910 for (i = XVECLEN (PATTERN (i3), 0) - 1; i >= 0; i--)
1911 if (GET_CODE (XVECEXP (PATTERN (i3), 0, i)) == CLOBBER)
1912 {
1913 /* Don't substitute for a register intended as a clobberable
1914 operand. */
1915 rtx reg = XEXP (XVECEXP (PATTERN (i3), 0, i), 0);
1916 if (rtx_equal_p (reg, dest))
1917 return 0;
1918
1919 /* If the clobber represents an earlyclobber operand, we must not
1920 substitute an expression containing the clobbered register.
1921 As we do not analyze the constraint strings here, we have to
1922 make the conservative assumption. However, if the register is
1923 a fixed hard reg, the clobber cannot represent any operand;
1924 we leave it up to the machine description to either accept or
1925 reject use-and-clobber patterns. */
1926 if (!REG_P (reg)
1927 || REGNO (reg) >= FIRST_PSEUDO_REGISTER
1928 || !fixed_regs[REGNO (reg)])
1929 if (reg_overlap_mentioned_p (reg, src))
1930 return 0;
1931 }
1932
1933 /* If INSN contains anything volatile, or is an `asm' (whether volatile
1934 or not), reject, unless nothing volatile comes between it and I3. */
1935
1936 if (GET_CODE (src) == ASM_OPERANDS || volatile_refs_p (src))
1937 {
1938 /* Make sure neither succ nor succ2 contains a volatile reference. */
1939 if (succ2 != 0 && volatile_refs_p (PATTERN (succ2)))
1940 return 0;
1941 if (succ != 0 && volatile_refs_p (PATTERN (succ)))
1942 return 0;
1943 /* We'll check insns between INSN and I3 below. */
1944 }
1945
1946 /* If INSN is an asm, and DEST is a hard register, reject, since it has
1947 to be an explicit register variable, and was chosen for a reason. */
1948
1949 if (GET_CODE (src) == ASM_OPERANDS
1950 && REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER)
1951 return 0;
1952
1953 /* If INSN contains volatile references (specifically volatile MEMs),
1954 we cannot combine across any other volatile references.
1955 Even if INSN doesn't contain volatile references, any intervening
1956 volatile insn might affect machine state. */
1957
1958 is_volatile_p = volatile_refs_p (PATTERN (insn))
1959 ? volatile_refs_p
1960 : volatile_insn_p;
1961
1962 for (p = NEXT_INSN (insn); p != i3; p = NEXT_INSN (p))
1963 if (INSN_P (p) && p != succ && p != succ2 && is_volatile_p (PATTERN (p)))
1964 return 0;
1965
1966 /* If INSN contains an autoincrement or autodecrement, make sure that
1967 register is not used between there and I3, and not already used in
1968 I3 either. Neither must it be used in PRED or SUCC, if they exist.
1969 Also insist that I3 not be a jump; if it were one
1970 and the incremented register were spilled, we would lose. */
1971
1972 #ifdef AUTO_INC_DEC
1973 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1974 if (REG_NOTE_KIND (link) == REG_INC
1975 && (JUMP_P (i3)
1976 || reg_used_between_p (XEXP (link, 0), insn, i3)
1977 || (pred != NULL_RTX
1978 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred)))
1979 || (pred2 != NULL_RTX
1980 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred2)))
1981 || (succ != NULL_RTX
1982 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ)))
1983 || (succ2 != NULL_RTX
1984 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ2)))
1985 || reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i3))))
1986 return 0;
1987 #endif
1988
1989 #ifdef HAVE_cc0
1990 /* Don't combine an insn that follows a CC0-setting insn.
1991 An insn that uses CC0 must not be separated from the one that sets it.
1992 We do, however, allow I2 to follow a CC0-setting insn if that insn
1993 is passed as I1; in that case it will be deleted also.
1994 We also allow combining in this case if all the insns are adjacent
1995 because that would leave the two CC0 insns adjacent as well.
1996 It would be more logical to test whether CC0 occurs inside I1 or I2,
1997 but that would be much slower, and this ought to be equivalent. */
1998
1999 p = prev_nonnote_insn (insn);
2000 if (p && p != pred && NONJUMP_INSN_P (p) && sets_cc0_p (PATTERN (p))
2001 && ! all_adjacent)
2002 return 0;
2003 #endif
2004
2005 /* If we get here, we have passed all the tests and the combination is
2006 to be allowed. */
2007
2008 *pdest = dest;
2009 *psrc = src;
2010
2011 return 1;
2012 }
2013 \f
2014 /* LOC is the location within I3 that contains its pattern or the component
2015 of a PARALLEL of the pattern. We validate that it is valid for combining.
2016
2017 One problem is if I3 modifies its output, as opposed to replacing it
2018 entirely, we can't allow the output to contain I2DEST, I1DEST or I0DEST as
2019 doing so would produce an insn that is not equivalent to the original insns.
2020
2021 Consider:
2022
2023 (set (reg:DI 101) (reg:DI 100))
2024 (set (subreg:SI (reg:DI 101) 0) <foo>)
2025
2026 This is NOT equivalent to:
2027
2028 (parallel [(set (subreg:SI (reg:DI 100) 0) <foo>)
2029 (set (reg:DI 101) (reg:DI 100))])
2030
2031 Not only does this modify 100 (in which case it might still be valid
2032 if 100 were dead in I2), it sets 101 to the ORIGINAL value of 100.
2033
2034 We can also run into a problem if I2 sets a register that I1
2035 uses and I1 gets directly substituted into I3 (not via I2). In that
2036 case, we would be getting the wrong value of I2DEST into I3, so we
2037 must reject the combination. This case occurs when I2 and I1 both
2038 feed into I3, rather than when I1 feeds into I2, which feeds into I3.
2039 If I1_NOT_IN_SRC is nonzero, it means that finding I1 in the source
2040 of a SET must prevent combination from occurring. The same situation
2041 can occur for I0, in which case I0_NOT_IN_SRC is set.
2042
2043 Before doing the above check, we first try to expand a field assignment
2044 into a set of logical operations.
2045
2046 If PI3_DEST_KILLED is nonzero, it is a pointer to a location in which
2047 we place a register that is both set and used within I3. If more than one
2048 such register is detected, we fail.
2049
2050 Return 1 if the combination is valid, zero otherwise. */
2051
2052 static int
2053 combinable_i3pat (rtx i3, rtx *loc, rtx i2dest, rtx i1dest, rtx i0dest,
2054 int i1_not_in_src, int i0_not_in_src, rtx *pi3dest_killed)
2055 {
2056 rtx x = *loc;
2057
2058 if (GET_CODE (x) == SET)
2059 {
2060 rtx set = x;
2061 rtx dest = SET_DEST (set);
2062 rtx src = SET_SRC (set);
2063 rtx inner_dest = dest;
2064 rtx subdest;
2065
2066 while (GET_CODE (inner_dest) == STRICT_LOW_PART
2067 || GET_CODE (inner_dest) == SUBREG
2068 || GET_CODE (inner_dest) == ZERO_EXTRACT)
2069 inner_dest = XEXP (inner_dest, 0);
2070
2071 /* Check for the case where I3 modifies its output, as discussed
2072 above. We don't want to prevent pseudos from being combined
2073 into the address of a MEM, so only prevent the combination if
2074 i1 or i2 set the same MEM. */
2075 if ((inner_dest != dest &&
2076 (!MEM_P (inner_dest)
2077 || rtx_equal_p (i2dest, inner_dest)
2078 || (i1dest && rtx_equal_p (i1dest, inner_dest))
2079 || (i0dest && rtx_equal_p (i0dest, inner_dest)))
2080 && (reg_overlap_mentioned_p (i2dest, inner_dest)
2081 || (i1dest && reg_overlap_mentioned_p (i1dest, inner_dest))
2082 || (i0dest && reg_overlap_mentioned_p (i0dest, inner_dest))))
2083
2084 /* This is the same test done in can_combine_p except we can't test
2085 all_adjacent; we don't have to, since this instruction will stay
2086 in place, thus we are not considering increasing the lifetime of
2087 INNER_DEST.
2088
2089 Also, if this insn sets a function argument, combining it with
2090 something that might need a spill could clobber a previous
2091 function argument; the all_adjacent test in can_combine_p also
2092 checks this; here, we do a more specific test for this case. */
2093
2094 || (REG_P (inner_dest)
2095 && REGNO (inner_dest) < FIRST_PSEUDO_REGISTER
2096 && (! HARD_REGNO_MODE_OK (REGNO (inner_dest),
2097 GET_MODE (inner_dest))))
2098 || (i1_not_in_src && reg_overlap_mentioned_p (i1dest, src))
2099 || (i0_not_in_src && reg_overlap_mentioned_p (i0dest, src)))
2100 return 0;
2101
2102 /* If DEST is used in I3, it is being killed in this insn, so
2103 record that for later. We have to consider paradoxical
2104 subregs here, since they kill the whole register, but we
2105 ignore partial subregs, STRICT_LOW_PART, etc.
2106 Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
2107 STACK_POINTER_REGNUM, since these are always considered to be
2108 live. Similarly for ARG_POINTER_REGNUM if it is fixed. */
2109 subdest = dest;
2110 if (GET_CODE (subdest) == SUBREG
2111 && (GET_MODE_SIZE (GET_MODE (subdest))
2112 >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (subdest)))))
2113 subdest = SUBREG_REG (subdest);
2114 if (pi3dest_killed
2115 && REG_P (subdest)
2116 && reg_referenced_p (subdest, PATTERN (i3))
2117 && REGNO (subdest) != FRAME_POINTER_REGNUM
2118 #if !HARD_FRAME_POINTER_IS_FRAME_POINTER
2119 && REGNO (subdest) != HARD_FRAME_POINTER_REGNUM
2120 #endif
2121 #if ARG_POINTER_REGNUM != FRAME_POINTER_REGNUM
2122 && (REGNO (subdest) != ARG_POINTER_REGNUM
2123 || ! fixed_regs [REGNO (subdest)])
2124 #endif
2125 && REGNO (subdest) != STACK_POINTER_REGNUM)
2126 {
2127 if (*pi3dest_killed)
2128 return 0;
2129
2130 *pi3dest_killed = subdest;
2131 }
2132 }
2133
2134 else if (GET_CODE (x) == PARALLEL)
2135 {
2136 int i;
2137
2138 for (i = 0; i < XVECLEN (x, 0); i++)
2139 if (! combinable_i3pat (i3, &XVECEXP (x, 0, i), i2dest, i1dest, i0dest,
2140 i1_not_in_src, i0_not_in_src, pi3dest_killed))
2141 return 0;
2142 }
2143
2144 return 1;
2145 }
2146 \f
2147 /* Return 1 if X is an arithmetic expression that contains a multiplication
2148 or division. We don't count multiplications by powers of two here. */
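/* Purely as an illustration: (mult (reg) (const_int 8)) is not counted,
   since 8 is a power of two, whereas (mult (reg) (const_int 6)) and any
   DIV, MOD, UDIV or UMOD are.  */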
2149
2150 static int
2151 contains_muldiv (rtx x)
2152 {
2153 switch (GET_CODE (x))
2154 {
2155 case MOD: case DIV: case UMOD: case UDIV:
2156 return 1;
2157
2158 case MULT:
2159 return ! (CONST_INT_P (XEXP (x, 1))
2160 && exact_log2 (UINTVAL (XEXP (x, 1))) >= 0);
2161 default:
2162 if (BINARY_P (x))
2163 return contains_muldiv (XEXP (x, 0))
2164 || contains_muldiv (XEXP (x, 1));
2165
2166 if (UNARY_P (x))
2167 return contains_muldiv (XEXP (x, 0));
2168
2169 return 0;
2170 }
2171 }
2172 \f
2173 /* Determine whether INSN can be used in a combination. Return nonzero if
2174 not. This is used in try_combine to detect early some cases where we
2175 can't perform combinations. */
2176
2177 static int
2178 cant_combine_insn_p (rtx insn)
2179 {
2180 rtx set;
2181 rtx src, dest;
2182
2183 /* If this isn't really an insn, we can't do anything.
2184 This can occur when flow deletes an insn that it has merged into an
2185 auto-increment address. */
2186 if (! INSN_P (insn))
2187 return 1;
2188
2189 /* Never combine loads and stores involving hard regs that are likely
2190 to be spilled. The register allocator can usually handle such
2191 reg-reg moves by tying. If we allow the combiner to make
2192 substitutions of likely-spilled regs, reload might die.
2193 As an exception, we allow combinations involving fixed regs; these are
2194 not available to the register allocator so there's no risk involved. */
2195
2196 set = single_set (insn);
2197 if (! set)
2198 return 0;
2199 src = SET_SRC (set);
2200 dest = SET_DEST (set);
2201 if (GET_CODE (src) == SUBREG)
2202 src = SUBREG_REG (src);
2203 if (GET_CODE (dest) == SUBREG)
2204 dest = SUBREG_REG (dest);
2205 if (REG_P (src) && REG_P (dest)
2206 && ((HARD_REGISTER_P (src)
2207 && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (src))
2208 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (src))))
2209 || (HARD_REGISTER_P (dest)
2210 && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (dest))
2211 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (dest))))))
2212 return 1;
2213
2214 return 0;
2215 }
2216
2217 struct likely_spilled_retval_info
2218 {
2219 unsigned regno, nregs;
2220 unsigned mask;
2221 };
2222
2223 /* Called via note_stores by likely_spilled_retval_p. Remove from info->mask
2224 hard registers that are known to be written to / clobbered in full. */
2225 static void
2226 likely_spilled_retval_1 (rtx x, const_rtx set, void *data)
2227 {
2228 struct likely_spilled_retval_info *const info =
2229 (struct likely_spilled_retval_info *) data;
2230 unsigned regno, nregs;
2231 unsigned new_mask;
2232
2233 if (!REG_P (XEXP (set, 0)))
2234 return;
2235 regno = REGNO (x);
2236 if (regno >= info->regno + info->nregs)
2237 return;
2238 nregs = hard_regno_nregs[regno][GET_MODE (x)];
2239 if (regno + nregs <= info->regno)
2240 return;
2241 new_mask = (2U << (nregs - 1)) - 1;
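/* A sketch of the arithmetic above, with an invented NREGS of 3:
   (2U << 2) - 1 == 0b111, i.e. one mask bit per hard register.  Note
   that this form also stays well defined when NREGS equals the bit
   width of the mask, unlike (1U << nregs) - 1.  */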
2242 if (regno < info->regno)
2243 new_mask >>= info->regno - regno;
2244 else
2245 new_mask <<= regno - info->regno;
2246 info->mask &= ~new_mask;
2247 }
2248
2249 /* Return nonzero iff part of the return value is live during INSN, and
2250 it is likely spilled. This can happen when more than one insn is needed
2251 to copy the return value, e.g. when we consider combining into the
2252 second copy insn for a complex value. */
2253
2254 static int
2255 likely_spilled_retval_p (rtx insn)
2256 {
2257 rtx use = BB_END (this_basic_block);
2258 rtx reg, p;
2259 unsigned regno, nregs;
2260 /* We assume here that no machine mode needs more than
2261 32 hard registers when the value overlaps with a register
2262 for which TARGET_FUNCTION_VALUE_REGNO_P is true. */
2263 unsigned mask;
2264 struct likely_spilled_retval_info info;
2265
2266 if (!NONJUMP_INSN_P (use) || GET_CODE (PATTERN (use)) != USE || insn == use)
2267 return 0;
2268 reg = XEXP (PATTERN (use), 0);
2269 if (!REG_P (reg) || !targetm.calls.function_value_regno_p (REGNO (reg)))
2270 return 0;
2271 regno = REGNO (reg);
2272 nregs = hard_regno_nregs[regno][GET_MODE (reg)];
2273 if (nregs == 1)
2274 return 0;
2275 mask = (2U << (nregs - 1)) - 1;
2276
2277 /* Disregard parts of the return value that are set later. */
2278 info.regno = regno;
2279 info.nregs = nregs;
2280 info.mask = mask;
2281 for (p = PREV_INSN (use); info.mask && p != insn; p = PREV_INSN (p))
2282 if (INSN_P (p))
2283 note_stores (PATTERN (p), likely_spilled_retval_1, &info);
2284 mask = info.mask;
2285
2286 /* Check if any of the (probably) live return value registers is
2287 likely spilled. */
2288 nregs --;
2289 do
2290 {
2291 if ((mask & 1 << nregs)
2292 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (regno + nregs)))
2293 return 1;
2294 } while (nregs--);
2295 return 0;
2296 }
2297
2298 /* Adjust INSN after we made a change to its destination.
2299
2300 Changing the destination can invalidate notes that say something about
2301 the results of the insn and a LOG_LINK pointing to the insn. */
2302
2303 static void
2304 adjust_for_new_dest (rtx insn)
2305 {
2306 /* For notes, be conservative and simply remove them. */
2307 remove_reg_equal_equiv_notes (insn);
2308
2309 /* The new insn will have a destination that was previously the destination
2310 of an insn just above it. Call distribute_links to make a LOG_LINK from
2311 the next use of that destination. */
2312 distribute_links (alloc_insn_link (insn, NULL));
2313
2314 df_insn_rescan (insn);
2315 }
2316
2317 /* Return TRUE if combine can reuse reg X in mode MODE.
2318 ADDED_SETS is nonzero if the original set is still required. */
2319 static bool
2320 can_change_dest_mode (rtx x, int added_sets, enum machine_mode mode)
2321 {
2322 unsigned int regno;
2323
2324 if (!REG_P(x))
2325 return false;
2326
2327 regno = REGNO (x);
2328 /* Allow hard registers if the new mode is legal, and occupies no more
2329 registers than the old mode. */
2330 if (regno < FIRST_PSEUDO_REGISTER)
2331 return (HARD_REGNO_MODE_OK (regno, mode)
2332 && (hard_regno_nregs[regno][GET_MODE (x)]
2333 >= hard_regno_nregs[regno][mode]));
2334
2335 /* Or a pseudo that is only used once. */
2336 return (REG_N_SETS (regno) == 1 && !added_sets
2337 && !REG_USERVAR_P (x));
2338 }
2339
2340
2341 /* Check whether X, the destination of a set, refers to part of
2342 the register specified by REG. */
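/* A hypothetical example: with REG == (reg:SI 100), an X of
   (strict_low_part (subreg:HI (reg:SI 100) 0)) refers to part of REG,
   while (reg:SI 100) itself or a float-mode SUBREG does not.  */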
2343
2344 static bool
2345 reg_subword_p (rtx x, rtx reg)
2346 {
2347 /* Check that reg is an integer mode register. */
2348 if (!REG_P (reg) || GET_MODE_CLASS (GET_MODE (reg)) != MODE_INT)
2349 return false;
2350
2351 if (GET_CODE (x) == STRICT_LOW_PART
2352 || GET_CODE (x) == ZERO_EXTRACT)
2353 x = XEXP (x, 0);
2354
2355 return GET_CODE (x) == SUBREG
2356 && SUBREG_REG (x) == reg
2357 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT;
2358 }
2359
2360 /* Delete the unconditional jump INSN and adjust the CFG correspondingly.
2361 Note that the INSN should be deleted *after* removing dead edges, so
2362 that the kept edge is the fallthrough edge for a (set (pc) (pc))
2363 but not for a (set (pc) (label_ref FOO)). */
2364
2365 static void
2366 update_cfg_for_uncondjump (rtx insn)
2367 {
2368 basic_block bb = BLOCK_FOR_INSN (insn);
2369 gcc_assert (BB_END (bb) == insn);
2370
2371 purge_dead_edges (bb);
2372
2373 delete_insn (insn);
2374 if (EDGE_COUNT (bb->succs) == 1)
2375 {
2376 rtx insn;
2377
2378 single_succ_edge (bb)->flags |= EDGE_FALLTHRU;
2379
2380 /* Remove barriers from the footer if there are any. */
2381 for (insn = BB_FOOTER (bb); insn; insn = NEXT_INSN (insn))
2382 if (BARRIER_P (insn))
2383 {
2384 if (PREV_INSN (insn))
2385 NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
2386 else
2387 BB_FOOTER (bb) = NEXT_INSN (insn);
2388 if (NEXT_INSN (insn))
2389 PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
2390 }
2391 else if (LABEL_P (insn))
2392 break;
2393 }
2394 }
2395
2396 /* Try to combine the insns I0, I1 and I2 into I3.
2397 Here I0, I1 and I2 appear earlier than I3.
2398 I0 and I1 can be zero; then we combine just I2 into I3, or I1 and I2 into
2399 I3.
2400
2401 If we are combining more than two insns and the resulting insn is not
2402 recognized, try splitting it into two insns. If that happens, I2 and I3
2403 are retained and I1/I0 are pseudo-deleted by turning them into a NOTE.
2404 Otherwise, I0, I1 and I2 are pseudo-deleted.
2405
2406 Return 0 if the combination does not work. Then nothing is changed.
2407 If we did the combination, return the insn at which combine should
2408 resume scanning.
2409
2410 Set NEW_DIRECT_JUMP_P to a nonzero value if try_combine creates a
2411 new direct jump instruction.
2412
2413 LAST_COMBINED_INSN is either I3, or some insn after I3 that was
2414 passed as I3 to an earlier try_combine within the same basic
2415 block. */
2416
2417 static rtx
2418 try_combine (rtx i3, rtx i2, rtx i1, rtx i0, int *new_direct_jump_p,
2419 rtx last_combined_insn)
2420 {
2421 /* New patterns for I3 and I2, respectively. */
2422 rtx newpat, newi2pat = 0;
2423 rtvec newpat_vec_with_clobbers = 0;
2424 int substed_i2 = 0, substed_i1 = 0, substed_i0 = 0;
2425 /* Indicates need to preserve SET in I0, I1 or I2 in I3 if it is not
2426 dead. */
2427 int added_sets_0, added_sets_1, added_sets_2;
2428 /* Total number of SETs to put into I3. */
2429 int total_sets;
2430 /* Nonzero if I2's or I1's body now appears in I3. */
2431 int i2_is_used = 0, i1_is_used = 0;
2432 /* INSN_CODEs for new I3, new I2, and user of condition code. */
2433 int insn_code_number, i2_code_number = 0, other_code_number = 0;
2434 /* Contains I3 if the destination of I3 is used in its source, which means
2435 that the old life of I3 is being killed. If that usage is placed into
2436 I2 and not in I3, a REG_DEAD note must be made. */
2437 rtx i3dest_killed = 0;
2438 /* SET_DEST and SET_SRC of I2, I1 and I0. */
2439 rtx i2dest = 0, i2src = 0, i1dest = 0, i1src = 0, i0dest = 0, i0src = 0;
2440 /* Copy of SET_SRC of I1 and I0, if needed. */
2441 rtx i1src_copy = 0, i0src_copy = 0, i0src_copy2 = 0;
2442 /* Set if I2DEST was reused as a scratch register. */
2443 bool i2scratch = false;
2444 /* The PATTERNs of I0, I1, and I2, or a copy of them in certain cases. */
2445 rtx i0pat = 0, i1pat = 0, i2pat = 0;
2446 /* Indicates if I2DEST or I1DEST is in I2SRC or I1SRC. */
2447 int i2dest_in_i2src = 0, i1dest_in_i1src = 0, i2dest_in_i1src = 0;
2448 int i0dest_in_i0src = 0, i1dest_in_i0src = 0, i2dest_in_i0src = 0;
2449 int i2dest_killed = 0, i1dest_killed = 0, i0dest_killed = 0;
2450 int i1_feeds_i2_n = 0, i0_feeds_i2_n = 0, i0_feeds_i1_n = 0;
2451 /* Notes that must be added to REG_NOTES in I3 and I2. */
2452 rtx new_i3_notes, new_i2_notes;
2453 /* Notes that we substituted I3 into I2 instead of the normal case. */
2454 int i3_subst_into_i2 = 0;
2455 /* Notes that I1, I2 or I3 is a MULT operation. */
2456 int have_mult = 0;
2457 int swap_i2i3 = 0;
2458 int changed_i3_dest = 0;
2459
2460 int maxreg;
2461 rtx temp;
2462 struct insn_link *link;
2463 rtx other_pat = 0;
2464 rtx new_other_notes;
2465 int i;
2466
2467 /* Only try four-insn combinations when there's high likelihood of
2468 success. Look for simple insns, such as loads of constants or
2469 binary operations involving a constant. */
2470 if (i0)
2471 {
2472 int i;
2473 int ngood = 0;
2474 int nshift = 0;
2475
2476 if (!flag_expensive_optimizations)
2477 return 0;
2478
2479 for (i = 0; i < 4; i++)
2480 {
2481 rtx insn = i == 0 ? i0 : i == 1 ? i1 : i == 2 ? i2 : i3;
2482 rtx set = single_set (insn);
2483 rtx src;
2484 if (!set)
2485 continue;
2486 src = SET_SRC (set);
2487 if (CONSTANT_P (src))
2488 {
2489 ngood += 2;
2490 break;
2491 }
2492 else if (BINARY_P (src) && CONSTANT_P (XEXP (src, 1)))
2493 ngood++;
2494 else if (GET_CODE (src) == ASHIFT || GET_CODE (src) == ASHIFTRT
2495 || GET_CODE (src) == LSHIFTRT)
2496 nshift++;
2497 }
2498 if (ngood < 2 && nshift < 2)
2499 return 0;
2500 }
2501
2502 /* Exit early if one of the insns involved can't be used for
2503 combinations. */
2504 if (cant_combine_insn_p (i3)
2505 || cant_combine_insn_p (i2)
2506 || (i1 && cant_combine_insn_p (i1))
2507 || (i0 && cant_combine_insn_p (i0))
2508 || likely_spilled_retval_p (i3))
2509 return 0;
2510
2511 combine_attempts++;
2512 undobuf.other_insn = 0;
2513
2514 /* Reset the hard register usage information. */
2515 CLEAR_HARD_REG_SET (newpat_used_regs);
2516
2517 if (dump_file && (dump_flags & TDF_DETAILS))
2518 {
2519 if (i0)
2520 fprintf (dump_file, "\nTrying %d, %d, %d -> %d:\n",
2521 INSN_UID (i0), INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
2522 else if (i1)
2523 fprintf (dump_file, "\nTrying %d, %d -> %d:\n",
2524 INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
2525 else
2526 fprintf (dump_file, "\nTrying %d -> %d:\n",
2527 INSN_UID (i2), INSN_UID (i3));
2528 }
2529
2530 /* If multiple insns feed into one of I2 or I3, they can be in any
2531 order. To simplify the code below, reorder them in sequence. */
2532 if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i2))
2533 temp = i2, i2 = i0, i0 = temp;
2534 if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i1))
2535 temp = i1, i1 = i0, i0 = temp;
2536 if (i1 && DF_INSN_LUID (i1) > DF_INSN_LUID (i2))
2537 temp = i1, i1 = i2, i2 = temp;
2538
2539 added_links_insn = 0;
2540
2541 /* First check for one important special case that the code below will
2542 not handle. Namely, the case where I1 is zero, I2 is a PARALLEL
2543 and I3 is a SET whose SET_SRC is a SET_DEST in I2. In that case,
2544 we may be able to replace that destination with the destination of I3.
2545 This occurs in the common code where we compute both a quotient and
2546 remainder into a structure, in which case we want to do the computation
2547 directly into the structure to avoid register-register copies.
2548
2549 Note that this case handles both multiple sets in I2 and also cases
2550 where I2 has a number of CLOBBERs inside the PARALLEL.
2551
2552 We make very conservative checks below and only try to handle the
2553 most common cases of this. For example, we only handle the case
2554 where I2 and I3 are adjacent to avoid making difficult register
2555 usage tests. */
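/* A made-up divmod-style illustration of the shape handled here
   (register numbers are invented):

     I2: (parallel [(set (reg 70) (div:SI (reg 68) (reg 69)))
                    (set (reg 71) (mod:SI (reg 68) (reg 69)))])
     I3: (set (mem:SI (reg 65)) (reg 71))

   with (reg 71) dying in I3; the (reg 71) destination inside I2's
   PARALLEL would then be rewritten as (mem:SI (reg 65)).  */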
2556
2557 if (i1 == 0 && NONJUMP_INSN_P (i3) && GET_CODE (PATTERN (i3)) == SET
2558 && REG_P (SET_SRC (PATTERN (i3)))
2559 && REGNO (SET_SRC (PATTERN (i3))) >= FIRST_PSEUDO_REGISTER
2560 && find_reg_note (i3, REG_DEAD, SET_SRC (PATTERN (i3)))
2561 && GET_CODE (PATTERN (i2)) == PARALLEL
2562 && ! side_effects_p (SET_DEST (PATTERN (i3)))
2563 /* If the dest of I3 is a ZERO_EXTRACT or STRICT_LOW_PART, the code
2564 below would need to check what is inside (and reg_overlap_mentioned_p
2565 doesn't support those codes anyway). Don't allow those destinations;
2566 the resulting insn isn't likely to be recognized anyway. */
2567 && GET_CODE (SET_DEST (PATTERN (i3))) != ZERO_EXTRACT
2568 && GET_CODE (SET_DEST (PATTERN (i3))) != STRICT_LOW_PART
2569 && ! reg_overlap_mentioned_p (SET_SRC (PATTERN (i3)),
2570 SET_DEST (PATTERN (i3)))
2571 && next_active_insn (i2) == i3)
2572 {
2573 rtx p2 = PATTERN (i2);
2574
2575 /* Make sure that the destination of I3,
2576 which we are going to substitute into one output of I2,
2577 is not used within another output of I2. We must avoid making this:
2578 (parallel [(set (mem (reg 69)) ...)
2579 (set (reg 69) ...)])
2580 which is not well-defined as to order of actions.
2581 (Besides, reload can't handle output reloads for this.)
2582
2583 The problem can also happen if the dest of I3 is a memory ref,
2584 and another dest in I2 is an indirect memory ref. */
2585 for (i = 0; i < XVECLEN (p2, 0); i++)
2586 if ((GET_CODE (XVECEXP (p2, 0, i)) == SET
2587 || GET_CODE (XVECEXP (p2, 0, i)) == CLOBBER)
2588 && reg_overlap_mentioned_p (SET_DEST (PATTERN (i3)),
2589 SET_DEST (XVECEXP (p2, 0, i))))
2590 break;
2591
2592 if (i == XVECLEN (p2, 0))
2593 for (i = 0; i < XVECLEN (p2, 0); i++)
2594 if (GET_CODE (XVECEXP (p2, 0, i)) == SET
2595 && SET_DEST (XVECEXP (p2, 0, i)) == SET_SRC (PATTERN (i3)))
2596 {
2597 combine_merges++;
2598
2599 subst_insn = i3;
2600 subst_low_luid = DF_INSN_LUID (i2);
2601
2602 added_sets_2 = added_sets_1 = added_sets_0 = 0;
2603 i2src = SET_SRC (XVECEXP (p2, 0, i));
2604 i2dest = SET_DEST (XVECEXP (p2, 0, i));
2605 i2dest_killed = dead_or_set_p (i2, i2dest);
2606
2607 /* Replace the dest in I2 with our dest and make the resulting
2608 insn the new pattern for I3. Then skip to where we validate
2609 the pattern. Everything was set up above. */
2610 SUBST (SET_DEST (XVECEXP (p2, 0, i)), SET_DEST (PATTERN (i3)));
2611 newpat = p2;
2612 i3_subst_into_i2 = 1;
2613 goto validate_replacement;
2614 }
2615 }
2616
2617 /* If I2 is setting a pseudo to a constant and I3 is setting some
2618 sub-part of it to another constant, merge them by making a new
2619 constant. */
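/* A hypothetical little-endian example (pseudo numbers invented):

     I2: (set (reg:SI 100) (const_int 0x12345678))
     I3: (set (subreg:HI (reg:SI 100) 0) (const_int 0x2bcd))

   could be merged into a single
     (set (reg:SI 100) (const_int 0x12342bcd)).  */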
2620 if (i1 == 0
2621 && (temp = single_set (i2)) != 0
2622 && CONST_SCALAR_INT_P (SET_SRC (temp))
2623 && GET_CODE (PATTERN (i3)) == SET
2624 && CONST_SCALAR_INT_P (SET_SRC (PATTERN (i3)))
2625 && reg_subword_p (SET_DEST (PATTERN (i3)), SET_DEST (temp)))
2626 {
2627 rtx dest = SET_DEST (PATTERN (i3));
2628 int offset = -1;
2629 int width = 0;
2630
2631 /* There are no explicit tests to make sure that this is not a
2632 float, but there is code here that would not be correct if it
2633 were. */
2634 gcc_assert (GET_MODE_CLASS (GET_MODE (SET_SRC (temp))) != MODE_FLOAT);
2635
2636 if (GET_CODE (dest) == ZERO_EXTRACT)
2637 {
2638 if (CONST_INT_P (XEXP (dest, 1))
2639 && CONST_INT_P (XEXP (dest, 2)))
2640 {
2641 width = INTVAL (XEXP (dest, 1));
2642 offset = INTVAL (XEXP (dest, 2));
2643 dest = XEXP (dest, 0);
2644 if (BITS_BIG_ENDIAN)
2645 offset = GET_MODE_PRECISION (GET_MODE (dest)) - width - offset;
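/* E.g. (purely illustrative): extracting 8 bits at big-endian bit
   position 0 of a 32-bit DEST gives a lowpart offset of
   32 - 8 - 0 == 24.  */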
2646 }
2647 }
2648 else
2649 {
2650 if (GET_CODE (dest) == STRICT_LOW_PART)
2651 dest = XEXP (dest, 0);
2652 width = GET_MODE_PRECISION (GET_MODE (dest));
2653 offset = 0;
2654 }
2655
2656 if (offset >= 0)
2657 {
2658 /* If this is the low part, we're done. */
2659 if (subreg_lowpart_p (dest))
2660 ;
2661 /* Handle the case where inner is twice the size of outer. */
2662 else if (GET_MODE_PRECISION (GET_MODE (SET_DEST (temp)))
2663 == 2 * GET_MODE_PRECISION (GET_MODE (dest)))
2664 offset += GET_MODE_PRECISION (GET_MODE (dest));
2665 /* Otherwise give up for now. */
2666 else
2667 offset = -1;
2668 }
2669
2670 if (offset >= 0
2671 && (GET_MODE_PRECISION (GET_MODE (SET_DEST (temp)))
2672 <= HOST_BITS_PER_DOUBLE_INT))
2673 {
2674 double_int m, o, i;
2675 rtx inner = SET_SRC (PATTERN (i3));
2676 rtx outer = SET_SRC (temp);
2677
2678 o = rtx_to_double_int (outer);
2679 i = rtx_to_double_int (inner);
2680
2681 m = double_int::mask (width);
2682 i &= m;
2683 m = m.llshift (offset, HOST_BITS_PER_DOUBLE_INT);
2684 i = i.llshift (offset, HOST_BITS_PER_DOUBLE_INT);
2685 o = o.and_not (m) | i;
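/* A worked example with invented values: WIDTH == 16, OFFSET == 16,
   OUTER == 0x12345678 and INNER == 0x2bcd give m == 0xffff0000 and
   i == 0x2bcd0000, so o becomes
   (0x12345678 & ~0xffff0000) | 0x2bcd0000 == 0x2bcd5678.  */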
2686
2687 combine_merges++;
2688 subst_insn = i3;
2689 subst_low_luid = DF_INSN_LUID (i2);
2690 added_sets_2 = added_sets_1 = added_sets_0 = 0;
2691 i2dest = SET_DEST (temp);
2692 i2dest_killed = dead_or_set_p (i2, i2dest);
2693
2694 /* Replace the source in I2 with the new constant and make the
2695 resulting insn the new pattern for I3. Then skip to where we
2696 validate the pattern. Everything was set up above. */
2697 SUBST (SET_SRC (temp),
2698 immed_double_int_const (o, GET_MODE (SET_DEST (temp))));
2699
2700 newpat = PATTERN (i2);
2701
2702 /* The dest of I3 has been replaced with the dest of I2. */
2703 changed_i3_dest = 1;
2704 goto validate_replacement;
2705 }
2706 }
2707
2708 #ifndef HAVE_cc0
2709 /* If we have no I1 and I2 looks like:
2710 (parallel [(set (reg:CC X) (compare:CC OP (const_int 0)))
2711 (set Y OP)])
2712 make up a dummy I1 that is
2713 (set Y OP)
2714 and change I2 to be
2715 (set (reg:CC X) (compare:CC Y (const_int 0)))
2716
2717 (We can ignore any trailing CLOBBERs.)
2718
2719 This undoes a previous combination and allows us to match a branch-and-
2720 decrement insn. */
2721
2722 if (i1 == 0 && GET_CODE (PATTERN (i2)) == PARALLEL
2723 && XVECLEN (PATTERN (i2), 0) >= 2
2724 && GET_CODE (XVECEXP (PATTERN (i2), 0, 0)) == SET
2725 && (GET_MODE_CLASS (GET_MODE (SET_DEST (XVECEXP (PATTERN (i2), 0, 0))))
2726 == MODE_CC)
2727 && GET_CODE (SET_SRC (XVECEXP (PATTERN (i2), 0, 0))) == COMPARE
2728 && XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 1) == const0_rtx
2729 && GET_CODE (XVECEXP (PATTERN (i2), 0, 1)) == SET
2730 && REG_P (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)))
2731 && rtx_equal_p (XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 0),
2732 SET_SRC (XVECEXP (PATTERN (i2), 0, 1))))
2733 {
2734 for (i = XVECLEN (PATTERN (i2), 0) - 1; i >= 2; i--)
2735 if (GET_CODE (XVECEXP (PATTERN (i2), 0, i)) != CLOBBER)
2736 break;
2737
2738 if (i == 1)
2739 {
2740 /* We make I1 with the same INSN_UID as I2. This gives it
2741 the same DF_INSN_LUID for value tracking. Our fake I1 will
2742 never appear in the insn stream so giving it the same INSN_UID
2743 as I2 will not cause a problem. */
2744
2745 i1 = gen_rtx_INSN (VOIDmode, INSN_UID (i2), NULL_RTX, i2,
2746 BLOCK_FOR_INSN (i2), XVECEXP (PATTERN (i2), 0, 1),
2747 INSN_LOCATION (i2), -1, NULL_RTX);
2748
2749 SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 0));
2750 SUBST (XEXP (SET_SRC (PATTERN (i2)), 0),
2751 SET_DEST (PATTERN (i1)));
2752 SUBST_LINK (LOG_LINKS (i2), alloc_insn_link (i1, LOG_LINKS (i2)));
2753 }
2754 }
2755 #endif
2756
2757 /* Verify that I2 and I1 are valid for combining. */
2758 if (! can_combine_p (i2, i3, i0, i1, NULL_RTX, NULL_RTX, &i2dest, &i2src)
2759 || (i1 && ! can_combine_p (i1, i3, i0, NULL_RTX, i2, NULL_RTX,
2760 &i1dest, &i1src))
2761 || (i0 && ! can_combine_p (i0, i3, NULL_RTX, NULL_RTX, i1, i2,
2762 &i0dest, &i0src)))
2763 {
2764 undo_all ();
2765 return 0;
2766 }
2767
2768 /* Record whether I2DEST is used in I2SRC and similarly for the other
2769 cases. Knowing this will help in register status updating below. */
2770 i2dest_in_i2src = reg_overlap_mentioned_p (i2dest, i2src);
2771 i1dest_in_i1src = i1 && reg_overlap_mentioned_p (i1dest, i1src);
2772 i2dest_in_i1src = i1 && reg_overlap_mentioned_p (i2dest, i1src);
2773 i0dest_in_i0src = i0 && reg_overlap_mentioned_p (i0dest, i0src);
2774 i1dest_in_i0src = i0 && reg_overlap_mentioned_p (i1dest, i0src);
2775 i2dest_in_i0src = i0 && reg_overlap_mentioned_p (i2dest, i0src);
2776 i2dest_killed = dead_or_set_p (i2, i2dest);
2777 i1dest_killed = i1 && dead_or_set_p (i1, i1dest);
2778 i0dest_killed = i0 && dead_or_set_p (i0, i0dest);
2779
2780 /* For the earlier insns, determine which of the subsequent ones they
2781 feed. */
2782 i1_feeds_i2_n = i1 && insn_a_feeds_b (i1, i2);
2783 i0_feeds_i1_n = i0 && insn_a_feeds_b (i0, i1);
2784 i0_feeds_i2_n = (i0 && (!i0_feeds_i1_n ? insn_a_feeds_b (i0, i2)
2785 : (!reg_overlap_mentioned_p (i1dest, i0dest)
2786 && reg_overlap_mentioned_p (i0dest, i2src))));
2787
2788 /* Ensure that I3's pattern can be the destination of combines. */
2789 if (! combinable_i3pat (i3, &PATTERN (i3), i2dest, i1dest, i0dest,
2790 i1 && i2dest_in_i1src && !i1_feeds_i2_n,
2791 i0 && ((i2dest_in_i0src && !i0_feeds_i2_n)
2792 || (i1dest_in_i0src && !i0_feeds_i1_n)),
2793 &i3dest_killed))
2794 {
2795 undo_all ();
2796 return 0;
2797 }
2798
2799 /* See if any of the insns is a MULT operation. Unless one is, we will
2800 reject a combination that is, since it must be slower. Be conservative
2801 here. */
2802 if (GET_CODE (i2src) == MULT
2803 || (i1 != 0 && GET_CODE (i1src) == MULT)
2804 || (i0 != 0 && GET_CODE (i0src) == MULT)
2805 || (GET_CODE (PATTERN (i3)) == SET
2806 && GET_CODE (SET_SRC (PATTERN (i3))) == MULT))
2807 have_mult = 1;
2808
2809 /* If I3 has an inc, then give up if I1 or I2 uses the reg that is inc'd.
2810 We used to do this EXCEPT in one case: I3 has a post-inc in an
2811 output operand. However, that exception can give rise to insns like
2812 mov r3,(r3)+
2813 which is a famous insn on the PDP-11 where the value of r3 used as the
2814 source was model-dependent. Avoid this sort of thing. */
2815
2816 #if 0
2817 if (!(GET_CODE (PATTERN (i3)) == SET
2818 && REG_P (SET_SRC (PATTERN (i3)))
2819 && MEM_P (SET_DEST (PATTERN (i3)))
2820 && (GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_INC
2821 || GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_DEC)))
2822 /* It's not the exception. */
2823 #endif
2824 #ifdef AUTO_INC_DEC
2825 {
2826 rtx link;
2827 for (link = REG_NOTES (i3); link; link = XEXP (link, 1))
2828 if (REG_NOTE_KIND (link) == REG_INC
2829 && (reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i2))
2830 || (i1 != 0
2831 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i1)))))
2832 {
2833 undo_all ();
2834 return 0;
2835 }
2836 }
2837 #endif
2838
2839 /* See if the SETs in I1 or I2 need to be kept around in the merged
2840 instruction: whenever the value set there is still needed past I3.
2841 For the SETs in I2, this is easy: we see if I2DEST dies or is set in I3.
2842
2843 For the SET in I1, we have two cases: If I1 and I2 independently
2844 feed into I3, the set in I1 needs to be kept around if I1DEST dies
2845 or is set in I3. Otherwise (if I1 feeds I2 which feeds I3), the set
2846 in I1 needs to be kept around unless I1DEST dies or is set in either
2847 I2 or I3. The same consideration applies to I0. */
2848
2849 added_sets_2 = !dead_or_set_p (i3, i2dest);
2850
2851 if (i1)
2852 added_sets_1 = !(dead_or_set_p (i3, i1dest)
2853 || (i1_feeds_i2_n && dead_or_set_p (i2, i1dest)));
2854 else
2855 added_sets_1 = 0;
2856
2857 if (i0)
2858 added_sets_0 = !(dead_or_set_p (i3, i0dest)
2859 || (i0_feeds_i2_n && dead_or_set_p (i2, i0dest))
2860 || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest)));
2861 else
2862 added_sets_0 = 0;
2863
2864 /* We are about to copy insns for the case where they need to be kept
2865 around. Check that they can be copied in the merged instruction. */
2866
2867 if (targetm.cannot_copy_insn_p
2868 && ((added_sets_2 && targetm.cannot_copy_insn_p (i2))
2869 || (i1 && added_sets_1 && targetm.cannot_copy_insn_p (i1))
2870 || (i0 && added_sets_0 && targetm.cannot_copy_insn_p (i0))))
2871 {
2872 undo_all ();
2873 return 0;
2874 }
2875
2876 /* If the set in I2 needs to be kept around, we must make a copy of
2877 PATTERN (I2), so that when we substitute I1SRC for I1DEST in
2878 PATTERN (I2), we are only substituting for the original I1DEST, not into
2879 an already-substituted copy. This also prevents making self-referential
2880 rtx. If I2 is a PARALLEL, we just need the piece that assigns I2SRC to
2881 I2DEST. */
2882
2883 if (added_sets_2)
2884 {
2885 if (GET_CODE (PATTERN (i2)) == PARALLEL)
2886 i2pat = gen_rtx_SET (VOIDmode, i2dest, copy_rtx (i2src));
2887 else
2888 i2pat = copy_rtx (PATTERN (i2));
2889 }
2890
2891 if (added_sets_1)
2892 {
2893 if (GET_CODE (PATTERN (i1)) == PARALLEL)
2894 i1pat = gen_rtx_SET (VOIDmode, i1dest, copy_rtx (i1src));
2895 else
2896 i1pat = copy_rtx (PATTERN (i1));
2897 }
2898
2899 if (added_sets_0)
2900 {
2901 if (GET_CODE (PATTERN (i0)) == PARALLEL)
2902 i0pat = gen_rtx_SET (VOIDmode, i0dest, copy_rtx (i0src));
2903 else
2904 i0pat = copy_rtx (PATTERN (i0));
2905 }
2906
2907 combine_merges++;
2908
2909 /* Substitute in the latest insn for the regs set by the earlier ones. */
2910
2911 maxreg = max_reg_num ();
2912
2913 subst_insn = i3;
2914
2915 #ifndef HAVE_cc0
2916 /* Many machines that don't use CC0 have insns that can both perform an
2917 arithmetic operation and set the condition code. These operations will
2918 be represented as a PARALLEL with the first element of the vector
2919 being a COMPARE of an arithmetic operation with the constant zero.
2920 The second element of the vector will set some pseudo to the result
2921 of the same arithmetic operation. If we simplify the COMPARE, we won't
2922 match such a pattern and so will generate an extra insn. Here we test
2923 for this case, where both the comparison and the operation result are
2924 needed, and make the PARALLEL by just replacing I2DEST in I3SRC with
2925 I2SRC. Later we will make the PARALLEL that contains I2. */
2926
2927 if (i1 == 0 && added_sets_2 && GET_CODE (PATTERN (i3)) == SET
2928 && GET_CODE (SET_SRC (PATTERN (i3))) == COMPARE
2929 && CONST_INT_P (XEXP (SET_SRC (PATTERN (i3)), 1))
2930 && rtx_equal_p (XEXP (SET_SRC (PATTERN (i3)), 0), i2dest))
2931 {
2932 rtx newpat_dest;
2933 rtx *cc_use_loc = NULL, cc_use_insn = NULL_RTX;
2934 rtx op0 = i2src, op1 = XEXP (SET_SRC (PATTERN (i3)), 1);
2935 enum machine_mode compare_mode, orig_compare_mode;
2936 enum rtx_code compare_code = UNKNOWN, orig_compare_code = UNKNOWN;
2937
2938 newpat = PATTERN (i3);
2939 newpat_dest = SET_DEST (newpat);
2940 compare_mode = orig_compare_mode = GET_MODE (newpat_dest);
2941
2942 if (undobuf.other_insn == 0
2943 && (cc_use_loc = find_single_use (SET_DEST (newpat), i3,
2944 &cc_use_insn)))
2945 {
2946 compare_code = orig_compare_code = GET_CODE (*cc_use_loc);
2947 compare_code = simplify_compare_const (compare_code,
2948 op0, &op1);
2949 #ifdef CANONICALIZE_COMPARISON
2950 CANONICALIZE_COMPARISON (compare_code, op0, op1);
2951 #endif
2952 }
2953
2954 /* Do the rest only if op1 is const0_rtx, which may be the
2955 result of simplification. */
2956 if (op1 == const0_rtx)
2957 {
2958 /* If a single use of the CC is found, prepare to modify it
2959 when SELECT_CC_MODE returns a new CC-class mode, or when
2960 the above simplify_compare_const() returned a new comparison
2961 operator. undobuf.other_insn is assigned the CC use insn
2962 when modifying it. */
2963 if (cc_use_loc)
2964 {
2965 #ifdef SELECT_CC_MODE
2966 enum machine_mode new_mode
2967 = SELECT_CC_MODE (compare_code, op0, op1);
2968 if (new_mode != orig_compare_mode
2969 && can_change_dest_mode (SET_DEST (newpat),
2970 added_sets_2, new_mode))
2971 {
2972 unsigned int regno = REGNO (newpat_dest);
2973 compare_mode = new_mode;
2974 if (regno < FIRST_PSEUDO_REGISTER)
2975 newpat_dest = gen_rtx_REG (compare_mode, regno);
2976 else
2977 {
2978 SUBST_MODE (regno_reg_rtx[regno], compare_mode);
2979 newpat_dest = regno_reg_rtx[regno];
2980 }
2981 }
2982 #endif
2983 /* Cases for modifying the CC-using comparison. */
2984 if (compare_code != orig_compare_code
2985 /* ??? Do we need to verify the zero rtx? */
2986 && XEXP (*cc_use_loc, 1) == const0_rtx)
2987 {
2988 /* Replace cc_use_loc with entire new RTX. */
2989 SUBST (*cc_use_loc,
2990 gen_rtx_fmt_ee (compare_code, compare_mode,
2991 newpat_dest, const0_rtx));
2992 undobuf.other_insn = cc_use_insn;
2993 }
2994 else if (compare_mode != orig_compare_mode)
2995 {
2996 /* Just replace the CC reg with a new mode. */
2997 SUBST (XEXP (*cc_use_loc, 0), newpat_dest);
2998 undobuf.other_insn = cc_use_insn;
2999 }
3000 }
3001
3002 /* Now we modify the current newpat:
3003 First, SET_DEST(newpat) is updated if the CC mode has been
3004 altered. For targets without SELECT_CC_MODE, this should be
3005 optimized away. */
3006 if (compare_mode != orig_compare_mode)
3007 SUBST (SET_DEST (newpat), newpat_dest);
3008 /* This is always done to propagate i2src into newpat. */
3009 SUBST (SET_SRC (newpat),
3010 gen_rtx_COMPARE (compare_mode, op0, op1));
3011 /* Create new version of i2pat if needed; the below PARALLEL
3012 creation needs this to work correctly. */
3013 if (! rtx_equal_p (i2src, op0))
3014 i2pat = gen_rtx_SET (VOIDmode, i2dest, op0);
3015 i2_is_used = 1;
3016 }
3017 }
3018 #endif
3019
3020 if (i2_is_used == 0)
3021 {
3022 /* It is possible that the source of I2 or I1 may be performing
3023 an unneeded operation, such as a ZERO_EXTEND of something
3024 that is known to have the high part zero. Handle that case
3025 by letting subst look at the inner insns.
3026
3027 Another way to do this would be to have a function that tries
3028 to simplify a single insn instead of merging two or more
3029 insns. We don't do this because of the potential of infinite
3030 loops and because of the potential extra memory required.
3031 However, doing it the way we are is a bit of a kludge and
3032 doesn't catch all cases.
3033
3034 But only do this if -fexpensive-optimizations since it slows
3035 things down and doesn't usually win.
3036
3037 This is not done in the COMPARE case above because the
3038 unmodified I2PAT is used in the PARALLEL and so a pattern
3039 with a modified I2SRC would not match. */
3040
3041 if (flag_expensive_optimizations)
3042 {
3043 /* Pass pc_rtx so no substitutions are done, just
3044 simplifications. */
3045 if (i1)
3046 {
3047 subst_low_luid = DF_INSN_LUID (i1);
3048 i1src = subst (i1src, pc_rtx, pc_rtx, 0, 0, 0);
3049 }
3050
3051 subst_low_luid = DF_INSN_LUID (i2);
3052 i2src = subst (i2src, pc_rtx, pc_rtx, 0, 0, 0);
3053 }
3054
3055 n_occurrences = 0; /* `subst' counts here */
3056 subst_low_luid = DF_INSN_LUID (i2);
3057
3058 /* If I1 feeds into I2 and I1DEST is in I1SRC, we need to make a unique
3059 copy of I2SRC each time we substitute it, in order to avoid creating
3060 self-referential RTL when we will be substituting I1SRC for I1DEST
3061 later. Likewise if I0 feeds into I2, either directly or indirectly
3062 through I1, and I0DEST is in I0SRC. */
3063 newpat = subst (PATTERN (i3), i2dest, i2src, 0, 0,
3064 (i1_feeds_i2_n && i1dest_in_i1src)
3065 || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
3066 && i0dest_in_i0src));
3067 substed_i2 = 1;
3068
3069 /* Record whether I2's body now appears within I3's body. */
3070 i2_is_used = n_occurrences;
3071 }
3072
3073 /* If we already got a failure, don't try to do more. Otherwise, try to
3074 substitute I1 if we have it. */
3075
3076 if (i1 && GET_CODE (newpat) != CLOBBER)
3077 {
3078 /* Check that an autoincrement side-effect on I1 has not been lost.
3079 This happens if I1DEST is mentioned in I2 and dies there, and
3080 has disappeared from the new pattern. */
3081 if ((FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
3082 && i1_feeds_i2_n
3083 && dead_or_set_p (i2, i1dest)
3084 && !reg_overlap_mentioned_p (i1dest, newpat))
3085 /* Before we can do this substitution, we must redo the test done
3086 above (see detailed comments there) that ensures I1DEST isn't
3087 mentioned in any SETs in NEWPAT that are field assignments. */
3088 || !combinable_i3pat (NULL_RTX, &newpat, i1dest, NULL_RTX, NULL_RTX,
3089 0, 0, 0))
3090 {
3091 undo_all ();
3092 return 0;
3093 }
3094
3095 n_occurrences = 0;
3096 subst_low_luid = DF_INSN_LUID (i1);
3097
3098 /* If the following substitution will modify I1SRC, make a copy of it
3099 for the case where it is substituted for I1DEST in I2PAT later. */
3100 if (added_sets_2 && i1_feeds_i2_n)
3101 i1src_copy = copy_rtx (i1src);
3102
3103 /* If I0 feeds into I1 and I0DEST is in I0SRC, we need to make a unique
3104 copy of I1SRC each time we substitute it, in order to avoid creating
3105 self-referential RTL when we will be substituting I0SRC for I0DEST
3106 later. */
3107 newpat = subst (newpat, i1dest, i1src, 0, 0,
3108 i0_feeds_i1_n && i0dest_in_i0src);
3109 substed_i1 = 1;
3110
3111 /* Record whether I1's body now appears within I3's body. */
3112 i1_is_used = n_occurrences;
3113 }
3114
3115 /* Likewise for I0 if we have it. */
3116
3117 if (i0 && GET_CODE (newpat) != CLOBBER)
3118 {
3119 if ((FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
3120 && ((i0_feeds_i2_n && dead_or_set_p (i2, i0dest))
3121 || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest)))
3122 && !reg_overlap_mentioned_p (i0dest, newpat))
3123 || !combinable_i3pat (NULL_RTX, &newpat, i0dest, NULL_RTX, NULL_RTX,
3124 0, 0, 0))
3125 {
3126 undo_all ();
3127 return 0;
3128 }
3129
3130 /* If the following substitution will modify I0SRC, make a copy of it
3131 for the case where it is substituted for I0DEST in I1PAT later. */
3132 if (added_sets_1 && i0_feeds_i1_n)
3133 i0src_copy = copy_rtx (i0src);
3134 /* And a copy for I0DEST in I2PAT substitution. */
3135 if (added_sets_2 && ((i0_feeds_i1_n && i1_feeds_i2_n)
3136 || (i0_feeds_i2_n)))
3137 i0src_copy2 = copy_rtx (i0src);
3138
3139 n_occurrences = 0;
3140 subst_low_luid = DF_INSN_LUID (i0);
3141 newpat = subst (newpat, i0dest, i0src, 0, 0, 0);
3142 substed_i0 = 1;
3143 }
3144
3145 /* Fail if an autoincrement side-effect has been duplicated. Be careful
3146 to count all the ways that I2SRC and I1SRC can be used. */
3147 if ((FIND_REG_INC_NOTE (i2, NULL_RTX) != 0
3148 && i2_is_used + added_sets_2 > 1)
3149 || (i1 != 0 && FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
3150 && (i1_is_used + added_sets_1 + (added_sets_2 && i1_feeds_i2_n)
3151 > 1))
3152 || (i0 != 0 && FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
3153 && (n_occurrences + added_sets_0
3154 + (added_sets_1 && i0_feeds_i1_n)
3155 + (added_sets_2 && i0_feeds_i2_n)
3156 > 1))
3157 /* Fail if we tried to make a new register. */
3158 || max_reg_num () != maxreg
3159 /* Fail if we couldn't do something and have a CLOBBER. */
3160 || GET_CODE (newpat) == CLOBBER
3161 /* Fail if this new pattern is a MULT and we didn't have one before
3162 at the outer level. */
3163 || (GET_CODE (newpat) == SET && GET_CODE (SET_SRC (newpat)) == MULT
3164 && ! have_mult))
3165 {
3166 undo_all ();
3167 return 0;
3168 }
3169
3170 /* If the actions of the earlier insns must be kept
3171 in addition to substituting them into the latest one,
3172 we must make a new PARALLEL for the latest insn
3173 to hold the additional SETs. */
3174
3175 if (added_sets_0 || added_sets_1 || added_sets_2)
3176 {
3177 int extra_sets = added_sets_0 + added_sets_1 + added_sets_2;
3178 combine_extras++;
3179
3180 if (GET_CODE (newpat) == PARALLEL)
3181 {
3182 rtvec old = XVEC (newpat, 0);
3183 total_sets = XVECLEN (newpat, 0) + extra_sets;
3184 newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
3185 memcpy (XVEC (newpat, 0)->elem, &old->elem[0],
3186 sizeof (old->elem[0]) * old->num_elem);
3187 }
3188 else
3189 {
3190 rtx old = newpat;
3191 total_sets = 1 + extra_sets;
3192 newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
3193 XVECEXP (newpat, 0, 0) = old;
3194 }
3195
3196 if (added_sets_0)
3197 XVECEXP (newpat, 0, --total_sets) = i0pat;
3198
3199 if (added_sets_1)
3200 {
3201 rtx t = i1pat;
3202 if (i0_feeds_i1_n)
3203 t = subst (t, i0dest, i0src_copy ? i0src_copy : i0src, 0, 0, 0);
3204
3205 XVECEXP (newpat, 0, --total_sets) = t;
3206 }
3207 if (added_sets_2)
3208 {
3209 rtx t = i2pat;
3210 if (i1_feeds_i2_n)
3211 t = subst (t, i1dest, i1src_copy ? i1src_copy : i1src, 0, 0,
3212 i0_feeds_i1_n && i0dest_in_i0src);
3213 if ((i0_feeds_i1_n && i1_feeds_i2_n) || i0_feeds_i2_n)
3214 t = subst (t, i0dest, i0src_copy2 ? i0src_copy2 : i0src, 0, 0, 0);
3215
3216 XVECEXP (newpat, 0, --total_sets) = t;
3217 }
3218 }
3219
3220 validate_replacement:
3221
3222 /* Note which hard regs this insn has as inputs. */
3223 mark_used_regs_combine (newpat);
3224
3225 /* If recog_for_combine fails, it strips existing clobbers. If we'll
3226 consider splitting this pattern, we might need these clobbers. */
3227 if (i1 && GET_CODE (newpat) == PARALLEL
3228 && GET_CODE (XVECEXP (newpat, 0, XVECLEN (newpat, 0) - 1)) == CLOBBER)
3229 {
3230 int len = XVECLEN (newpat, 0);
3231
3232 newpat_vec_with_clobbers = rtvec_alloc (len);
3233 for (i = 0; i < len; i++)
3234 RTVEC_ELT (newpat_vec_with_clobbers, i) = XVECEXP (newpat, 0, i);
3235 }
3236
3237 /* Is the result of combination a valid instruction? */
3238 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3239
3240 /* If the result isn't valid, see if it is a PARALLEL of two SETs where
3241 the second SET's destination is a register that is unused and isn't
3242 marked as an instruction that might trap in an EH region. In that case,
3243 we just need the first SET. This can occur when simplifying a divmod
3244 insn. We *must* test for this case here because the code below that
3245 splits two independent SETs doesn't handle this case correctly when it
3246 updates the register status.
3247
3248 It's pointless doing this if we originally had two sets, one from
3249 i3, and one from i2. Combining then splitting the parallel results
3250 in the original i2 again plus an invalid insn (which we delete).
3251 The net effect is only to move instructions around, which makes
3252 debug info less accurate.
3253
3254 Also check the case where the first SET's destination is unused.
3255 That would not cause incorrect code, but does cause an unneeded
3256 insn to remain. */
3257
3258 if (insn_code_number < 0
3259 && !(added_sets_2 && i1 == 0)
3260 && GET_CODE (newpat) == PARALLEL
3261 && XVECLEN (newpat, 0) == 2
3262 && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
3263 && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
3264 && asm_noperands (newpat) < 0)
3265 {
3266 rtx set0 = XVECEXP (newpat, 0, 0);
3267 rtx set1 = XVECEXP (newpat, 0, 1);
3268
3269 if (((REG_P (SET_DEST (set1))
3270 && find_reg_note (i3, REG_UNUSED, SET_DEST (set1)))
3271 || (GET_CODE (SET_DEST (set1)) == SUBREG
3272 && find_reg_note (i3, REG_UNUSED, SUBREG_REG (SET_DEST (set1)))))
3273 && insn_nothrow_p (i3)
3274 && !side_effects_p (SET_SRC (set1)))
3275 {
3276 newpat = set0;
3277 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3278 }
3279
3280 else if (((REG_P (SET_DEST (set0))
3281 && find_reg_note (i3, REG_UNUSED, SET_DEST (set0)))
3282 || (GET_CODE (SET_DEST (set0)) == SUBREG
3283 && find_reg_note (i3, REG_UNUSED,
3284 SUBREG_REG (SET_DEST (set0)))))
3285 && insn_nothrow_p (i3)
3286 && !side_effects_p (SET_SRC (set0)))
3287 {
3288 newpat = set1;
3289 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3290
3291 if (insn_code_number >= 0)
3292 changed_i3_dest = 1;
3293 }
3294 }
3295
3296 /* If we were combining three insns and the result is a simple SET
3297 with no ASM_OPERANDS that wasn't recognized, try to split it into two
3298 insns. There are two ways to do this. It can be split using a
3299 machine-specific method (like when you have an addition of a large
3300 constant) or by combine in the function find_split_point. */
3301
3302 if (i1 && insn_code_number < 0 && GET_CODE (newpat) == SET
3303 && asm_noperands (newpat) < 0)
3304 {
3305 rtx parallel, m_split, *split;
3306
3307 /* See if the MD file can split NEWPAT. If it can't, see if letting it
3308 use I2DEST as a scratch register will help. In the latter case,
3309 convert I2DEST to the mode of the source of NEWPAT if we can. */
3310
3311 m_split = combine_split_insns (newpat, i3);
3312
3313 /* We can only use I2DEST as a scratch reg if it doesn't overlap any
3314 inputs of NEWPAT. */
3315
3316 /* ??? If I2DEST is not safe, and I1DEST exists, then it would be
3317 possible to try that as a scratch reg. This would require adding
3318 more code to make it work though. */
3319
3320 if (m_split == 0 && ! reg_overlap_mentioned_p (i2dest, newpat))
3321 {
3322 enum machine_mode new_mode = GET_MODE (SET_DEST (newpat));
3323
3324 /* First try to split using the original register as a
3325 scratch register. */
3326 parallel = gen_rtx_PARALLEL (VOIDmode,
3327 gen_rtvec (2, newpat,
3328 gen_rtx_CLOBBER (VOIDmode,
3329 i2dest)));
3330 m_split = combine_split_insns (parallel, i3);
3331
3332 /* If that didn't work, try changing the mode of I2DEST if
3333 we can. */
3334 if (m_split == 0
3335 && new_mode != GET_MODE (i2dest)
3336 && new_mode != VOIDmode
3337 && can_change_dest_mode (i2dest, added_sets_2, new_mode))
3338 {
3339 enum machine_mode old_mode = GET_MODE (i2dest);
3340 rtx ni2dest;
3341
3342 if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
3343 ni2dest = gen_rtx_REG (new_mode, REGNO (i2dest));
3344 else
3345 {
3346 SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], new_mode);
3347 ni2dest = regno_reg_rtx[REGNO (i2dest)];
3348 }
3349
3350 parallel = (gen_rtx_PARALLEL
3351 (VOIDmode,
3352 gen_rtvec (2, newpat,
3353 gen_rtx_CLOBBER (VOIDmode,
3354 ni2dest))));
3355 m_split = combine_split_insns (parallel, i3);
3356
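/* The mode-changed scratch register did not help either.  Restore the
   original mode by hand and drop the now-stale SUBST_MODE record from
   the undo list, moving it straight to the free list.  */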
3357 if (m_split == 0
3358 && REGNO (i2dest) >= FIRST_PSEUDO_REGISTER)
3359 {
3360 struct undo *buf;
3361
3362 adjust_reg_mode (regno_reg_rtx[REGNO (i2dest)], old_mode);
3363 buf = undobuf.undos;
3364 undobuf.undos = buf->next;
3365 buf->next = undobuf.frees;
3366 undobuf.frees = buf;
3367 }
3368 }
3369
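/* Record whether I2DEST was pressed into service as a scratch
   register; debug insns mentioning it are fixed up later if so.  */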
3370 i2scratch = m_split != 0;
3371 }
3372
3373 /* If recog_for_combine has discarded clobbers, try to use them
3374 again for the split. */
3375 if (m_split == 0 && newpat_vec_with_clobbers)
3376 {
3377 parallel = gen_rtx_PARALLEL (VOIDmode, newpat_vec_with_clobbers);
3378 m_split = combine_split_insns (parallel, i3);
3379 }
3380
3381 if (m_split && NEXT_INSN (m_split) == NULL_RTX)
3382 {
3383 m_split = PATTERN (m_split);
3384 insn_code_number = recog_for_combine (&m_split, i3, &new_i3_notes);
3385 if (insn_code_number >= 0)
3386 newpat = m_split;
3387 }
3388 else if (m_split && NEXT_INSN (NEXT_INSN (m_split)) == NULL_RTX
3389 && (next_nonnote_nondebug_insn (i2) == i3
3390 || ! use_crosses_set_p (PATTERN (m_split), DF_INSN_LUID (i2))))
3391 {
3392 rtx i2set, i3set;
3393 rtx newi3pat = PATTERN (NEXT_INSN (m_split));
3394 newi2pat = PATTERN (m_split);
3395
3396 i3set = single_set (NEXT_INSN (m_split));
3397 i2set = single_set (m_split);
3398
3399 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3400
3401 /* If I2 or I3 has multiple SETs, we won't know how to track
3402 register status, so don't use these insns. If I2's destination
3403 is used between I2 and I3, we also can't use these insns. */
3404
3405 if (i2_code_number >= 0 && i2set && i3set
3406 && (next_nonnote_nondebug_insn (i2) == i3
3407 || ! reg_used_between_p (SET_DEST (i2set), i2, i3)))
3408 insn_code_number = recog_for_combine (&newi3pat, i3,
3409 &new_i3_notes);
3410 if (insn_code_number >= 0)
3411 newpat = newi3pat;
3412
3413 /* It is possible that both insns now set the destination of I3.
3414 If so, we must show an extra use of it. */
3415
3416 if (insn_code_number >= 0)
3417 {
3418 rtx new_i3_dest = SET_DEST (i3set);
3419 rtx new_i2_dest = SET_DEST (i2set);
3420
3421 while (GET_CODE (new_i3_dest) == ZERO_EXTRACT
3422 || GET_CODE (new_i3_dest) == STRICT_LOW_PART
3423 || GET_CODE (new_i3_dest) == SUBREG)
3424 new_i3_dest = XEXP (new_i3_dest, 0);
3425
3426 while (GET_CODE (new_i2_dest) == ZERO_EXTRACT
3427 || GET_CODE (new_i2_dest) == STRICT_LOW_PART
3428 || GET_CODE (new_i2_dest) == SUBREG)
3429 new_i2_dest = XEXP (new_i2_dest, 0);
3430
3431 if (REG_P (new_i3_dest)
3432 && REG_P (new_i2_dest)
3433 && REGNO (new_i3_dest) == REGNO (new_i2_dest))
3434 INC_REG_N_SETS (REGNO (new_i2_dest), 1);
3435 }
3436 }
3437
3438 /* If we can split it and use I2DEST, go ahead and see if that
3439 helps things be recognized. Verify that none of the registers
3440 are set between I2 and I3. */
3441 if (insn_code_number < 0
3442 && (split = find_split_point (&newpat, i3, false)) != 0
3443 #ifdef HAVE_cc0
3444 && REG_P (i2dest)
3445 #endif
3446 /* We need I2DEST in the proper mode. If it is a hard register
3447 or the only use of a pseudo, we can change its mode.
3448 Make sure we don't change a hard register to have a mode that
3449 isn't valid for it, or change the number of registers. */
3450 && (GET_MODE (*split) == GET_MODE (i2dest)
3451 || GET_MODE (*split) == VOIDmode
3452 || can_change_dest_mode (i2dest, added_sets_2,
3453 GET_MODE (*split)))
3454 && (next_nonnote_nondebug_insn (i2) == i3
3455 || ! use_crosses_set_p (*split, DF_INSN_LUID (i2)))
3456 /* We can't overwrite I2DEST if its value is still used by
3457 NEWPAT. */
3458 && ! reg_referenced_p (i2dest, newpat))
3459 {
3460 rtx newdest = i2dest;
3461 enum rtx_code split_code = GET_CODE (*split);
3462 enum machine_mode split_mode = GET_MODE (*split);
3463 bool subst_done = false;
3464 newi2pat = NULL_RTX;
3465
3466 i2scratch = true;
3467
3468 /* *SPLIT may be part of I2SRC, so make sure we have the
3469 original expression around for later debug processing.
3470 We should not need I2SRC any more in other cases. */
3471 if (MAY_HAVE_DEBUG_INSNS)
3472 i2src = copy_rtx (i2src);
3473 else
3474 i2src = NULL;
3475
3476 /* Get NEWDEST as a register in the proper mode. We have already
3477 validated that we can do this. */
3478 if (GET_MODE (i2dest) != split_mode && split_mode != VOIDmode)
3479 {
3480 if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
3481 newdest = gen_rtx_REG (split_mode, REGNO (i2dest));
3482 else
3483 {
3484 SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], split_mode);
3485 newdest = regno_reg_rtx[REGNO (i2dest)];
3486 }
3487 }
3488
3489 /* If *SPLIT is a (mult FOO (const_int pow2)), convert it to
3490 an ASHIFT. This can occur if it was inside a PLUS and hence
3491 appeared to be a memory address. This is a kludge. */
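/* For example, (mult FOO (const_int 8)) becomes
   (ashift FOO (const_int 3)).  */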
3492 if (split_code == MULT
3493 && CONST_INT_P (XEXP (*split, 1))
3494 && INTVAL (XEXP (*split, 1)) > 0
3495 && (i = exact_log2 (UINTVAL (XEXP (*split, 1)))) >= 0)
3496 {
3497 SUBST (*split, gen_rtx_ASHIFT (split_mode,
3498 XEXP (*split, 0), GEN_INT (i)));
3499 /* Update split_code because we may not have a multiply
3500 anymore. */
3501 split_code = GET_CODE (*split);
3502 }
3503
3504 #ifdef INSN_SCHEDULING
3505 /* If *SPLIT is a paradoxical SUBREG, when we split it, it should
3506 be written as a ZERO_EXTEND. */
3507 if (split_code == SUBREG && MEM_P (SUBREG_REG (*split)))
3508 {
3509 #ifdef LOAD_EXTEND_OP
3510 /* Or as a SIGN_EXTEND if LOAD_EXTEND_OP says that that's
3511 what it really is. */
3512 if (LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (*split)))
3513 == SIGN_EXTEND)
3514 SUBST (*split, gen_rtx_SIGN_EXTEND (split_mode,
3515 SUBREG_REG (*split)));
3516 else
3517 #endif
3518 SUBST (*split, gen_rtx_ZERO_EXTEND (split_mode,
3519 SUBREG_REG (*split)));
3520 }
3521 #endif
3522
3523 /* Attempt to split binary operators using arithmetic identities. */
3524 if (BINARY_P (SET_SRC (newpat))
3525 && split_mode == GET_MODE (SET_SRC (newpat))
3526 && ! side_effects_p (SET_SRC (newpat)))
3527 {
3528 rtx setsrc = SET_SRC (newpat);
3529 enum machine_mode mode = GET_MODE (setsrc);
3530 enum rtx_code code = GET_CODE (setsrc);
3531 rtx src_op0 = XEXP (setsrc, 0);
3532 rtx src_op1 = XEXP (setsrc, 1);
3533
3534 /* Split "X = Y op Y" as "Z = Y; X = Z op Z". */
3535 if (rtx_equal_p (src_op0, src_op1))
3536 {
3537 newi2pat = gen_rtx_SET (VOIDmode, newdest, src_op0);
3538 SUBST (XEXP (setsrc, 0), newdest);
3539 SUBST (XEXP (setsrc, 1), newdest);
3540 subst_done = true;
3541 }
3542 /* Split "((P op Q) op R) op S" where op is PLUS or MULT. */
3543 else if ((code == PLUS || code == MULT)
3544 && GET_CODE (src_op0) == code
3545 && GET_CODE (XEXP (src_op0, 0)) == code
3546 && (INTEGRAL_MODE_P (mode)
3547 || (FLOAT_MODE_P (mode)
3548 && flag_unsafe_math_optimizations)))
3549 {
3550 rtx p = XEXP (XEXP (src_op0, 0), 0);
3551 rtx q = XEXP (XEXP (src_op0, 0), 1);
3552 rtx r = XEXP (src_op0, 1);
3553 rtx s = src_op1;
3554
3555 /* Split both "((X op Y) op X) op Y" and
3556 "((X op Y) op Y) op X" as "T op T" where T is
3557 "X op Y". */
3558 if ((rtx_equal_p (p,r) && rtx_equal_p (q,s))
3559 || (rtx_equal_p (p,s) && rtx_equal_p (q,r)))
3560 {
3561 newi2pat = gen_rtx_SET (VOIDmode, newdest,
3562 XEXP (src_op0, 0));
3563 SUBST (XEXP (setsrc, 0), newdest);
3564 SUBST (XEXP (setsrc, 1), newdest);
3565 subst_done = true;
3566 }
3567 /* Split "((X op X) op Y) op Y" as "T op T" where
3568 T is "X op Y". */
3569 else if (rtx_equal_p (p,q) && rtx_equal_p (r,s))
3570 {
3571 rtx tmp = simplify_gen_binary (code, mode, p, r);
3572 newi2pat = gen_rtx_SET (VOIDmode, newdest, tmp);
3573 SUBST (XEXP (setsrc, 0), newdest);
3574 SUBST (XEXP (setsrc, 1), newdest);
3575 subst_done = true;
3576 }
3577 }
3578 }
3579
3580 if (!subst_done)
3581 {
3582 newi2pat = gen_rtx_SET (VOIDmode, newdest, *split);
3583 SUBST (*split, newdest);
3584 }
3585
3586 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3587
3588 /* recog_for_combine might have added CLOBBERs to newi2pat.
3589 Make sure NEWPAT does not depend on the clobbered regs. */
3590 if (GET_CODE (newi2pat) == PARALLEL)
3591 for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
3592 if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
3593 {
3594 rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
3595 if (reg_overlap_mentioned_p (reg, newpat))
3596 {
3597 undo_all ();
3598 return 0;
3599 }
3600 }
3601
3602 /* If the split point was a MULT and we didn't have one before,
3603 don't use one now. */
3604 if (i2_code_number >= 0 && ! (split_code == MULT && ! have_mult))
3605 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3606 }
3607 }
3608
3609 /* Check for a case where we loaded from memory in a narrow mode and
3610 then sign extended it, but we need both registers. In that case,
3611 we have a PARALLEL with both loads from the same memory location.
3612 We can split this into a load from memory followed by a register-register
3613 copy. This saves at least one insn, more if register allocation can
3614 eliminate the copy.
3615
3616 We cannot do this if the destination of the first assignment is a
3617 condition code register or cc0. We eliminate this case by making sure
3618 the SET_DEST and SET_SRC have the same mode.
3619
3620 We cannot do this if the destination of the second assignment is
3621 a register that we have already assumed is zero-extended. Similarly
3622 for a SUBREG of such a register. */
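/* As an illustration, a PARALLEL of the form
   [(set (reg:SI A) (sign_extend:SI (mem:QI M)))
    (set (reg:QI B) (mem:QI M))]
   is rewritten as the extending load alone for I2, with I3 becoming
   (set (reg:QI B) (lowpart (reg:SI A))).  */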
3623
3624 else if (i1 && insn_code_number < 0 && asm_noperands (newpat) < 0
3625 && GET_CODE (newpat) == PARALLEL
3626 && XVECLEN (newpat, 0) == 2
3627 && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
3628 && GET_CODE (SET_SRC (XVECEXP (newpat, 0, 0))) == SIGN_EXTEND
3629 && (GET_MODE (SET_DEST (XVECEXP (newpat, 0, 0)))
3630 == GET_MODE (SET_SRC (XVECEXP (newpat, 0, 0))))
3631 && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
3632 && rtx_equal_p (SET_SRC (XVECEXP (newpat, 0, 1)),
3633 XEXP (SET_SRC (XVECEXP (newpat, 0, 0)), 0))
3634 && ! use_crosses_set_p (SET_SRC (XVECEXP (newpat, 0, 1)),
3635 DF_INSN_LUID (i2))
3636 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
3637 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
3638 && ! (temp = SET_DEST (XVECEXP (newpat, 0, 1)),
3639 (REG_P (temp)
3640 && VEC_index (reg_stat_type, reg_stat,
3641 REGNO (temp)).nonzero_bits != 0
3642 && GET_MODE_PRECISION (GET_MODE (temp)) < BITS_PER_WORD
3643 && GET_MODE_PRECISION (GET_MODE (temp)) < HOST_BITS_PER_INT
3644 && (VEC_index (reg_stat_type, reg_stat,
3645 REGNO (temp)).nonzero_bits
3646 != GET_MODE_MASK (word_mode))))
3647 && ! (GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) == SUBREG
3648 && (temp = SUBREG_REG (SET_DEST (XVECEXP (newpat, 0, 1))),
3649 (REG_P (temp)
3650 && VEC_index (reg_stat_type, reg_stat,
3651 REGNO (temp)).nonzero_bits != 0
3652 && GET_MODE_PRECISION (GET_MODE (temp)) < BITS_PER_WORD
3653 && GET_MODE_PRECISION (GET_MODE (temp)) < HOST_BITS_PER_INT
3654 && (VEC_index (reg_stat_type, reg_stat,
3655 REGNO (temp)).nonzero_bits
3656 != GET_MODE_MASK (word_mode)))))
3657 && ! reg_overlap_mentioned_p (SET_DEST (XVECEXP (newpat, 0, 1)),
3658 SET_SRC (XVECEXP (newpat, 0, 1)))
3659 && ! find_reg_note (i3, REG_UNUSED,
3660 SET_DEST (XVECEXP (newpat, 0, 0))))
3661 {
3662 rtx ni2dest;
3663
3664 newi2pat = XVECEXP (newpat, 0, 0);
3665 ni2dest = SET_DEST (XVECEXP (newpat, 0, 0));
3666 newpat = XVECEXP (newpat, 0, 1);
3667 SUBST (SET_SRC (newpat),
3668 gen_lowpart (GET_MODE (SET_SRC (newpat)), ni2dest));
3669 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3670
3671 if (i2_code_number >= 0)
3672 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3673
3674 if (insn_code_number >= 0)
3675 swap_i2i3 = 1;
3676 }
3677
3678 /* Similarly, check for a case where we have a PARALLEL of two independent
3679 SETs but we started with three insns. In this case, we can do the sets
3680 as two separate insns. This case occurs when some SET allows two
3681 other insns to combine, but the destination of that SET is still live. */
3682
3683 else if (i1 && insn_code_number < 0 && asm_noperands (newpat) < 0
3684 && GET_CODE (newpat) == PARALLEL
3685 && XVECLEN (newpat, 0) == 2
3686 && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
3687 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != ZERO_EXTRACT
3688 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != STRICT_LOW_PART
3689 && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
3690 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
3691 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
3692 && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 1)),
3693 XVECEXP (newpat, 0, 0))
3694 && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 0)),
3695 XVECEXP (newpat, 0, 1))
3696 && ! (contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 0)))
3697 && contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 1)))))
3698 {
3699 /* Normally, it doesn't matter which of the two is done first,
3700 but the one that references cc0 can't be the second, and
3701 one which uses any regs/memory set in between i2 and i3 can't
3702 be first. */
3703 if (!use_crosses_set_p (SET_SRC (XVECEXP (newpat, 0, 1)),
3704 DF_INSN_LUID (i2))
3705 #ifdef HAVE_cc0
3706 && !reg_referenced_p (cc0_rtx, XVECEXP (newpat, 0, 0))
3707 #endif
3708 )
3709 {
3710 newi2pat = XVECEXP (newpat, 0, 1);
3711 newpat = XVECEXP (newpat, 0, 0);
3712 }
3713 else if (!use_crosses_set_p (SET_SRC (XVECEXP (newpat, 0, 0)),
3714 DF_INSN_LUID (i2))
3715 #ifdef HAVE_cc0
3716 && !reg_referenced_p (cc0_rtx, XVECEXP (newpat, 0, 1))
3717 #endif
3718 )
3719 {
3720 newi2pat = XVECEXP (newpat, 0, 0);
3721 newpat = XVECEXP (newpat, 0, 1);
3722 }
3723 else
3724 {
3725 undo_all ();
3726 return 0;
3727 }
3728
3729 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3730
3731 if (i2_code_number >= 0)
3732 {
3733 /* recog_for_combine might have added CLOBBERs to newi2pat.
3734 Make sure NEWPAT does not depend on the clobbered regs. */
3735 if (GET_CODE (newi2pat) == PARALLEL)
3736 {
3737 for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
3738 if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
3739 {
3740 rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
3741 if (reg_overlap_mentioned_p (reg, newpat))
3742 {
3743 undo_all ();
3744 return 0;
3745 }
3746 }
3747 }
3748
3749 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3750 }
3751 }
3752
3753 /* If it still isn't recognized, fail and change things back the way they
3754 were. */
3755 if ((insn_code_number < 0
3756 /* Is the result a reasonable ASM_OPERANDS? */
3757 && (! check_asm_operands (newpat) || added_sets_1 || added_sets_2)))
3758 {
3759 undo_all ();
3760 return 0;
3761 }
3762
3763 /* If we had to change another insn, make sure it is valid also. */
3764 if (undobuf.other_insn)
3765 {
3766 CLEAR_HARD_REG_SET (newpat_used_regs);
3767
3768 other_pat = PATTERN (undobuf.other_insn);
3769 other_code_number = recog_for_combine (&other_pat, undobuf.other_insn,
3770 &new_other_notes);
3771
3772 if (other_code_number < 0 && ! check_asm_operands (other_pat))
3773 {
3774 undo_all ();
3775 return 0;
3776 }
3777 }
3778
3779 #ifdef HAVE_cc0
3780 /* If I2 is the CC0 setter and I3 is the CC0 user then check whether
3781 they are adjacent to each other or not. */
3782 {
3783 rtx p = prev_nonnote_insn (i3);
3784 if (p && p != i2 && NONJUMP_INSN_P (p) && newi2pat
3785 && sets_cc0_p (newi2pat))
3786 {
3787 undo_all ();
3788 return 0;
3789 }
3790 }
3791 #endif
3792
3793 /* Only allow this combination if insn_rtx_costs reports that the
3794 replacement instructions are cheaper than the originals. */
3795 if (!combine_validate_cost (i0, i1, i2, i3, newpat, newi2pat, other_pat))
3796 {
3797 undo_all ();
3798 return 0;
3799 }
3800
3801 if (MAY_HAVE_DEBUG_INSNS)
3802 {
3803 struct undo *undo;
3804
3805 for (undo = undobuf.undos; undo; undo = undo->next)
3806 if (undo->kind == UNDO_MODE)
3807 {
3808 rtx reg = *undo->where.r;
3809 enum machine_mode new_mode = GET_MODE (reg);
3810 enum machine_mode old_mode = undo->old_contents.m;
3811
3812 /* Temporarily revert the mode. */
3813 adjust_reg_mode (reg, old_mode);
3814
3815 if (reg == i2dest && i2scratch)
3816 {
3817 /* If we used i2dest as a scratch register with a
3818 different mode, substitute it for the original
3819 i2src while its original mode is temporarily
3820 restored, and then clear i2scratch so that we don't
3821 do it again later. */
3822 propagate_for_debug (i2, last_combined_insn, reg, i2src,
3823 this_basic_block);
3824 i2scratch = false;
3825 /* Put back the new mode. */
3826 adjust_reg_mode (reg, new_mode);
3827 }
3828 else
3829 {
3830 rtx tempreg = gen_raw_REG (old_mode, REGNO (reg));
3831 rtx first, last;
3832
3833 if (reg == i2dest)
3834 {
3835 first = i2;
3836 last = last_combined_insn;
3837 }
3838 else
3839 {
3840 first = i3;
3841 last = undobuf.other_insn;
3842 gcc_assert (last);
3843 if (DF_INSN_LUID (last)
3844 < DF_INSN_LUID (last_combined_insn))
3845 last = last_combined_insn;
3846 }
3847
3848 /* We're dealing with a reg that changed mode but not
3849 meaning, so we want to turn it into a subreg for
3850 the new mode. However, because of REG sharing and
3851 because its mode had already changed, we have to do
3852 it in two steps. First, replace any debug uses of
3853 reg, with its original mode temporarily restored,
3854 with this copy we have created; then, replace the
3855 copy with the SUBREG of the original shared reg,
3856 once again changed to the new mode. */
3857 propagate_for_debug (first, last, reg, tempreg,
3858 this_basic_block);
3859 adjust_reg_mode (reg, new_mode);
3860 propagate_for_debug (first, last, tempreg,
3861 lowpart_subreg (old_mode, reg, new_mode),
3862 this_basic_block);
3863 }
3864 }
3865 }
3866
3867 /* If we will be able to accept this, we have made a
3868 change to the destination of I3. This requires us to
3869 do a few adjustments. */
3870
3871 if (changed_i3_dest)
3872 {
3873 PATTERN (i3) = newpat;
3874 adjust_for_new_dest (i3);
3875 }
3876
3877 /* We now know that we can do this combination. Merge the insns and
3878 update the status of registers and LOG_LINKS. */
3879
3880 if (undobuf.other_insn)
3881 {
3882 rtx note, next;
3883
3884 PATTERN (undobuf.other_insn) = other_pat;
3885
3886 /* If any of the notes in OTHER_INSN were REG_UNUSED, ensure that they
3887 are still valid. Then add any non-duplicate notes added by
3888 recog_for_combine. */
3889 for (note = REG_NOTES (undobuf.other_insn); note; note = next)
3890 {
3891 next = XEXP (note, 1);
3892
3893 if (REG_NOTE_KIND (note) == REG_UNUSED
3894 && ! reg_set_p (XEXP (note, 0), PATTERN (undobuf.other_insn)))
3895 remove_note (undobuf.other_insn, note);
3896 }
3897
3898 distribute_notes (new_other_notes, undobuf.other_insn,
3899 undobuf.other_insn, NULL_RTX, NULL_RTX, NULL_RTX,
3900 NULL_RTX);
3901 }
3902
3903 if (swap_i2i3)
3904 {
3905 rtx insn;
3906 struct insn_link *link;
3907 rtx ni2dest;
3908
3909 /* I3 now uses what used to be its destination and which is now
3910 I2's destination. This requires us to do a few adjustments. */
3911 PATTERN (i3) = newpat;
3912 adjust_for_new_dest (i3);
3913
3914 /* We need a LOG_LINK from I3 to I2. But we used to have one,
3915 so we still will.
3916
3917 However, some later insn might be using I2's dest and have
3918 a LOG_LINK pointing at I3. We must remove this link.
3919 The simplest way to remove the link is to point it at I1,
3920 which we know will be a NOTE. */
3921
3922 /* newi2pat is usually a SET here; however, recog_for_combine might
3923 have added some clobbers. */
3924 if (GET_CODE (newi2pat) == PARALLEL)
3925 ni2dest = SET_DEST (XVECEXP (newi2pat, 0, 0));
3926 else
3927 ni2dest = SET_DEST (newi2pat);
3928
3929 for (insn = NEXT_INSN (i3);
3930 insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR
3931 || insn != BB_HEAD (this_basic_block->next_bb));
3932 insn = NEXT_INSN (insn))
3933 {
3934 if (INSN_P (insn) && reg_referenced_p (ni2dest, PATTERN (insn)))
3935 {
3936 FOR_EACH_LOG_LINK (link, insn)
3937 if (link->insn == i3)
3938 link->insn = i1;
3939
3940 break;
3941 }
3942 }
3943 }
3944
3945 {
3946 rtx i3notes, i2notes, i1notes = 0, i0notes = 0;
3947 struct insn_link *i3links, *i2links, *i1links = 0, *i0links = 0;
3948 rtx midnotes = 0;
3949 int from_luid;
3950 /* Compute which registers we expect to eliminate. newi2pat may be setting
3951 either i3dest or i2dest, so we must check it. Also, i1dest may be the
3952 same as i3dest, in which case newi2pat may be setting i1dest. */
3953 rtx elim_i2 = ((newi2pat && reg_set_p (i2dest, newi2pat))
3954 || i2dest_in_i2src || i2dest_in_i1src || i2dest_in_i0src
3955 || !i2dest_killed
3956 ? 0 : i2dest);
3957 rtx elim_i1 = (i1 == 0 || i1dest_in_i1src || i1dest_in_i0src
3958 || (newi2pat && reg_set_p (i1dest, newi2pat))
3959 || !i1dest_killed
3960 ? 0 : i1dest);
3961 rtx elim_i0 = (i0 == 0 || i0dest_in_i0src
3962 || (newi2pat && reg_set_p (i0dest, newi2pat))
3963 || !i0dest_killed
3964 ? 0 : i0dest);
3965
3966 /* Get the old REG_NOTES and LOG_LINKS from all our insns and
3967 clear them. */
3968 i3notes = REG_NOTES (i3), i3links = LOG_LINKS (i3);
3969 i2notes = REG_NOTES (i2), i2links = LOG_LINKS (i2);
3970 if (i1)
3971 i1notes = REG_NOTES (i1), i1links = LOG_LINKS (i1);
3972 if (i0)
3973 i0notes = REG_NOTES (i0), i0links = LOG_LINKS (i0);
3974
3975 /* Ensure that we do not have something that should not be shared but
3976 occurs multiple times in the new insns. Check this by first
3977 resetting all the `used' flags and then copying anything that is shared. */
3978
3979 reset_used_flags (i3notes);
3980 reset_used_flags (i2notes);
3981 reset_used_flags (i1notes);
3982 reset_used_flags (i0notes);
3983 reset_used_flags (newpat);
3984 reset_used_flags (newi2pat);
3985 if (undobuf.other_insn)
3986 reset_used_flags (PATTERN (undobuf.other_insn));
3987
3988 i3notes = copy_rtx_if_shared (i3notes);
3989 i2notes = copy_rtx_if_shared (i2notes);
3990 i1notes = copy_rtx_if_shared (i1notes);
3991 i0notes = copy_rtx_if_shared (i0notes);
3992 newpat = copy_rtx_if_shared (newpat);
3993 newi2pat = copy_rtx_if_shared (newi2pat);
3994 if (undobuf.other_insn)
3995 reset_used_flags (PATTERN (undobuf.other_insn));
3996
3997 INSN_CODE (i3) = insn_code_number;
3998 PATTERN (i3) = newpat;
3999
4000 if (CALL_P (i3) && CALL_INSN_FUNCTION_USAGE (i3))
4001 {
4002 rtx call_usage = CALL_INSN_FUNCTION_USAGE (i3);
4003
4004 reset_used_flags (call_usage);
4005 call_usage = copy_rtx (call_usage);
4006
4007 if (substed_i2)
4008 {
4009 /* I2SRC must still be meaningful at this point. Some splitting
4010 operations can invalidate I2SRC, but those operations do not
4011 apply to calls. */
4012 gcc_assert (i2src);
4013 replace_rtx (call_usage, i2dest, i2src);
4014 }
4015
4016 if (substed_i1)
4017 replace_rtx (call_usage, i1dest, i1src);
4018 if (substed_i0)
4019 replace_rtx (call_usage, i0dest, i0src);
4020
4021 CALL_INSN_FUNCTION_USAGE (i3) = call_usage;
4022 }
4023
4024 if (undobuf.other_insn)
4025 INSN_CODE (undobuf.other_insn) = other_code_number;
4026
4027 /* We had one special case above where I2 had more than one set and
4028 we replaced a destination of one of those sets with the destination
4029 of I3. In that case, we have to update LOG_LINKS of insns later
4030 in this basic block. Note that this (expensive) case is rare.
4031
4032 Also, in this case, we must pretend that all REG_NOTEs for I2
4033 actually came from I3, so that REG_UNUSED notes from I2 will be
4034 properly handled. */
4035
4036 if (i3_subst_into_i2)
4037 {
4038 for (i = 0; i < XVECLEN (PATTERN (i2), 0); i++)
4039 if ((GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == SET
4040 || GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == CLOBBER)
4041 && REG_P (SET_DEST (XVECEXP (PATTERN (i2), 0, i)))
4042 && SET_DEST (XVECEXP (PATTERN (i2), 0, i)) != i2dest
4043 && ! find_reg_note (i2, REG_UNUSED,
4044 SET_DEST (XVECEXP (PATTERN (i2), 0, i))))
4045 for (temp = NEXT_INSN (i2);
4046 temp && (this_basic_block->next_bb == EXIT_BLOCK_PTR
4047 || BB_HEAD (this_basic_block) != temp);
4048 temp = NEXT_INSN (temp))
4049 if (temp != i3 && INSN_P (temp))
4050 FOR_EACH_LOG_LINK (link, temp)
4051 if (link->insn == i2)
4052 link->insn = i3;
4053
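/* Splice I2's notes onto the end of I3's note list; they are all
   treated as belonging to I3 from here on.  */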
4054 if (i3notes)
4055 {
4056 rtx link = i3notes;
4057 while (XEXP (link, 1))
4058 link = XEXP (link, 1);
4059 XEXP (link, 1) = i2notes;
4060 }
4061 else
4062 i3notes = i2notes;
4063 i2notes = 0;
4064 }
4065
4066 LOG_LINKS (i3) = NULL;
4067 REG_NOTES (i3) = 0;
4068 LOG_LINKS (i2) = NULL;
4069 REG_NOTES (i2) = 0;
4070
4071 if (newi2pat)
4072 {
4073 if (MAY_HAVE_DEBUG_INSNS && i2scratch)
4074 propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
4075 this_basic_block);
4076 INSN_CODE (i2) = i2_code_number;
4077 PATTERN (i2) = newi2pat;
4078 }
4079 else
4080 {
4081 if (MAY_HAVE_DEBUG_INSNS && i2src)
4082 propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
4083 this_basic_block);
4084 SET_INSN_DELETED (i2);
4085 }
4086
4087 if (i1)
4088 {
4089 LOG_LINKS (i1) = NULL;
4090 REG_NOTES (i1) = 0;
4091 if (MAY_HAVE_DEBUG_INSNS)
4092 propagate_for_debug (i1, last_combined_insn, i1dest, i1src,
4093 this_basic_block);
4094 SET_INSN_DELETED (i1);
4095 }
4096
4097 if (i0)
4098 {
4099 LOG_LINKS (i0) = NULL;
4100 REG_NOTES (i0) = 0;
4101 if (MAY_HAVE_DEBUG_INSNS)
4102 propagate_for_debug (i0, last_combined_insn, i0dest, i0src,
4103 this_basic_block);
4104 SET_INSN_DELETED (i0);
4105 }
4106
4107 /* Get death notes for everything that is now used in either I3 or
4108 I2 and used to die in a previous insn. If we built two new
4109 patterns, move from I1 to I2 then I2 to I3 so that we get the
4110 proper movement on registers that I2 modifies. */
4111
4112 if (i0)
4113 from_luid = DF_INSN_LUID (i0);
4114 else if (i1)
4115 from_luid = DF_INSN_LUID (i1);
4116 else
4117 from_luid = DF_INSN_LUID (i2);
4118 if (newi2pat)
4119 move_deaths (newi2pat, NULL_RTX, from_luid, i2, &midnotes);
4120 move_deaths (newpat, newi2pat, from_luid, i3, &midnotes);
4121
4122 /* Distribute all the LOG_LINKS and REG_NOTES from I1, I2, and I3. */
4123 if (i3notes)
4124 distribute_notes (i3notes, i3, i3, newi2pat ? i2 : NULL_RTX,
4125 elim_i2, elim_i1, elim_i0);
4126 if (i2notes)
4127 distribute_notes (i2notes, i2, i3, newi2pat ? i2 : NULL_RTX,
4128 elim_i2, elim_i1, elim_i0);
4129 if (i1notes)
4130 distribute_notes (i1notes, i1, i3, newi2pat ? i2 : NULL_RTX,
4131 elim_i2, elim_i1, elim_i0);
4132 if (i0notes)
4133 distribute_notes (i0notes, i0, i3, newi2pat ? i2 : NULL_RTX,
4134 elim_i2, elim_i1, elim_i0);
4135 if (midnotes)
4136 distribute_notes (midnotes, NULL_RTX, i3, newi2pat ? i2 : NULL_RTX,
4137 elim_i2, elim_i1, elim_i0);
4138
4139 /* Distribute any notes added to I2 or I3 by recog_for_combine. We
4140 know these are REG_UNUSED and want them to go to the desired insn,
4141 so we always pass it as i3. */
4142
4143 if (newi2pat && new_i2_notes)
4144 distribute_notes (new_i2_notes, i2, i2, NULL_RTX, NULL_RTX, NULL_RTX,
4145 NULL_RTX);
4146
4147 if (new_i3_notes)
4148 distribute_notes (new_i3_notes, i3, i3, NULL_RTX, NULL_RTX, NULL_RTX,
4149 NULL_RTX);
4150
4151 /* If I3DEST was used in I3SRC, it really died in I3. We may need to
4152 put a REG_DEAD note for it somewhere. If NEWI2PAT exists and sets
4153 I3DEST, the death must be somewhere before I2, not I3. If we passed I3
4154 in that case, it might delete I2. Similarly for I2 and I1.
4155 Show an additional death due to the REG_DEAD note we make here. If
4156 we discard it in distribute_notes, we will decrement it again. */
4157
4158 if (i3dest_killed)
4159 {
4160 if (newi2pat && reg_set_p (i3dest_killed, newi2pat))
4161 distribute_notes (alloc_reg_note (REG_DEAD, i3dest_killed,
4162 NULL_RTX),
4163 NULL_RTX, i2, NULL_RTX, elim_i2, elim_i1, elim_i0);
4164 else
4165 distribute_notes (alloc_reg_note (REG_DEAD, i3dest_killed,
4166 NULL_RTX),
4167 NULL_RTX, i3, newi2pat ? i2 : NULL_RTX,
4168 elim_i2, elim_i1, elim_i0);
4169 }
4170
4171 if (i2dest_in_i2src)
4172 {
4173 rtx new_note = alloc_reg_note (REG_DEAD, i2dest, NULL_RTX);
4174 if (newi2pat && reg_set_p (i2dest, newi2pat))
4175 distribute_notes (new_note, NULL_RTX, i2, NULL_RTX, NULL_RTX,
4176 NULL_RTX, NULL_RTX);
4177 else
4178 distribute_notes (new_note, NULL_RTX, i3, newi2pat ? i2 : NULL_RTX,
4179 NULL_RTX, NULL_RTX, NULL_RTX);
4180 }
4181
4182 if (i1dest_in_i1src)
4183 {
4184 rtx new_note = alloc_reg_note (REG_DEAD, i1dest, NULL_RTX);
4185 if (newi2pat && reg_set_p (i1dest, newi2pat))
4186 distribute_notes (new_note, NULL_RTX, i2, NULL_RTX, NULL_RTX,
4187 NULL_RTX, NULL_RTX);
4188 else
4189 distribute_notes (new_note, NULL_RTX, i3, newi2pat ? i2 : NULL_RTX,
4190 NULL_RTX, NULL_RTX, NULL_RTX);
4191 }
4192
4193 if (i0dest_in_i0src)
4194 {
4195 rtx new_note = alloc_reg_note (REG_DEAD, i0dest, NULL_RTX);
4196 if (newi2pat && reg_set_p (i0dest, newi2pat))
4197 distribute_notes (new_note, NULL_RTX, i2, NULL_RTX, NULL_RTX,
4198 NULL_RTX, NULL_RTX);
4199 else
4200 distribute_notes (new_note, NULL_RTX, i3, newi2pat ? i2 : NULL_RTX,
4201 NULL_RTX, NULL_RTX, NULL_RTX);
4202 }
4203
4204 distribute_links (i3links);
4205 distribute_links (i2links);
4206 distribute_links (i1links);
4207 distribute_links (i0links);
4208
4209 if (REG_P (i2dest))
4210 {
4211 struct insn_link *link;
4212 rtx i2_insn = 0, i2_val = 0, set;
4213
4214 /* The insn that used to set this register doesn't exist, and
4215 this life of the register may not exist either. See if one of
4216 I3's links points to an insn that sets I2DEST. If it does,
4217 that is now the last known value for I2DEST. If we don't update
4218 this and I2 set the register to a value that depended on its old
4219 contents, we will get confused. If this insn is used, things
4220 will be set correctly in combine_instructions. */
4221 FOR_EACH_LOG_LINK (link, i3)
4222 if ((set = single_set (link->insn)) != 0
4223 && rtx_equal_p (i2dest, SET_DEST (set)))
4224 i2_insn = link->insn, i2_val = SET_SRC (set);
4225
4226 record_value_for_reg (i2dest, i2_insn, i2_val);
4227
4228 /* If the reg formerly set in I2 died only once and that was in I3,
4229 zero its use count so it won't make `reload' do any work. */
4230 if (! added_sets_2
4231 && (newi2pat == 0 || ! reg_mentioned_p (i2dest, newi2pat))
4232 && ! i2dest_in_i2src)
4233 INC_REG_N_SETS (REGNO (i2dest), -1);
4234 }
4235
4236 if (i1 && REG_P (i1dest))
4237 {
4238 struct insn_link *link;
4239 rtx i1_insn = 0, i1_val = 0, set;
4240
4241 FOR_EACH_LOG_LINK (link, i3)
4242 if ((set = single_set (link->insn)) != 0
4243 && rtx_equal_p (i1dest, SET_DEST (set)))
4244 i1_insn = link->insn, i1_val = SET_SRC (set);
4245
4246 record_value_for_reg (i1dest, i1_insn, i1_val);
4247
4248 if (! added_sets_1 && ! i1dest_in_i1src)
4249 INC_REG_N_SETS (REGNO (i1dest), -1);
4250 }
4251
4252 if (i0 && REG_P (i0dest))
4253 {
4254 struct insn_link *link;
4255 rtx i0_insn = 0, i0_val = 0, set;
4256
4257 FOR_EACH_LOG_LINK (link, i3)
4258 if ((set = single_set (link->insn)) != 0
4259 && rtx_equal_p (i0dest, SET_DEST (set)))
4260 i0_insn = link->insn, i0_val = SET_SRC (set);
4261
4262 record_value_for_reg (i0dest, i0_insn, i0_val);
4263
4264 if (! added_sets_0 && ! i0dest_in_i0src)
4265 INC_REG_N_SETS (REGNO (i0dest), -1);
4266 }
4267
4268 /* Update reg_stat[].nonzero_bits et al for any changes that may have
4269 been made to this insn. The order of
4270 set_nonzero_bits_and_sign_copies() is important, because newi2pat
4271 can affect the nonzero_bits of newpat.  */
4272 if (newi2pat)
4273 note_stores (newi2pat, set_nonzero_bits_and_sign_copies, NULL);
4274 note_stores (newpat, set_nonzero_bits_and_sign_copies, NULL);
4275 }
4276
4277 if (undobuf.other_insn != NULL_RTX)
4278 {
4279 if (dump_file)
4280 {
4281 fprintf (dump_file, "modifying other_insn ");
4282 dump_insn_slim (dump_file, undobuf.other_insn);
4283 }
4284 df_insn_rescan (undobuf.other_insn);
4285 }
4286
4287 if (i0 && !(NOTE_P(i0) && (NOTE_KIND (i0) == NOTE_INSN_DELETED)))
4288 {
4289 if (dump_file)
4290 {
4291 fprintf (dump_file, "modifying insn i0 ");
4292 dump_insn_slim (dump_file, i0);
4293 }
4294 df_insn_rescan (i0);
4295 }
4296
4297 if (i1 && !(NOTE_P(i1) && (NOTE_KIND (i1) == NOTE_INSN_DELETED)))
4298 {
4299 if (dump_file)
4300 {
4301 fprintf (dump_file, "modifying insn i1 ");
4302 dump_insn_slim (dump_file, i1);
4303 }
4304 df_insn_rescan (i1);
4305 }
4306
4307 if (i2 && !(NOTE_P(i2) && (NOTE_KIND (i2) == NOTE_INSN_DELETED)))
4308 {
4309 if (dump_file)
4310 {
4311 fprintf (dump_file, "modifying insn i2 ");
4312 dump_insn_slim (dump_file, i2);
4313 }
4314 df_insn_rescan (i2);
4315 }
4316
4317 if (i3 && !(NOTE_P(i3) && (NOTE_KIND (i3) == NOTE_INSN_DELETED)))
4318 {
4319 if (dump_file)
4320 {
4321 fprintf (dump_file, "modifying insn i3 ");
4322 dump_insn_slim (dump_file, i3);
4323 }
4324 df_insn_rescan (i3);
4325 }
4326
4327 /* Set new_direct_jump_p if a new return or simple jump instruction
4328 has been created. Adjust the CFG accordingly. */
4329
4330 if (returnjump_p (i3) || any_uncondjump_p (i3))
4331 {
4332 *new_direct_jump_p = 1;
4333 mark_jump_label (PATTERN (i3), i3, 0);
4334 update_cfg_for_uncondjump (i3);
4335 }
4336
4337 if (undobuf.other_insn != NULL_RTX
4338 && (returnjump_p (undobuf.other_insn)
4339 || any_uncondjump_p (undobuf.other_insn)))
4340 {
4341 *new_direct_jump_p = 1;
4342 update_cfg_for_uncondjump (undobuf.other_insn);
4343 }
4344
4345 /* A noop might also need cleaning up of the CFG if it comes from the
4346 simplification of a jump. */
4347 if (JUMP_P (i3)
4348 && GET_CODE (newpat) == SET
4349 && SET_SRC (newpat) == pc_rtx
4350 && SET_DEST (newpat) == pc_rtx)
4351 {
4352 *new_direct_jump_p = 1;
4353 update_cfg_for_uncondjump (i3);
4354 }
4355
4356 if (undobuf.other_insn != NULL_RTX
4357 && JUMP_P (undobuf.other_insn)
4358 && GET_CODE (PATTERN (undobuf.other_insn)) == SET
4359 && SET_SRC (PATTERN (undobuf.other_insn)) == pc_rtx
4360 && SET_DEST (PATTERN (undobuf.other_insn)) == pc_rtx)
4361 {
4362 *new_direct_jump_p = 1;
4363 update_cfg_for_uncondjump (undobuf.other_insn);
4364 }
4365
4366 combine_successes++;
4367 undo_commit ();
4368
4369 if (added_links_insn
4370 && (newi2pat == 0 || DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (i2))
4371 && DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (i3))
4372 return added_links_insn;
4373 else
4374 return newi2pat ? i2 : i3;
4375 }
4376 \f
4377 /* Undo all the modifications recorded in undobuf. */
4378
4379 static void
4380 undo_all (void)
4381 {
4382 struct undo *undo, *next;
4383
4384 for (undo = undobuf.undos; undo; undo = next)
4385 {
4386 next = undo->next;
4387 switch (undo->kind)
4388 {
4389 case UNDO_RTX:
4390 *undo->where.r = undo->old_contents.r;
4391 break;
4392 case UNDO_INT:
4393 *undo->where.i = undo->old_contents.i;
4394 break;
4395 case UNDO_MODE:
4396 adjust_reg_mode (*undo->where.r, undo->old_contents.m);
4397 break;
4398 case UNDO_LINKS:
4399 *undo->where.l = undo->old_contents.l;
4400 break;
4401 default:
4402 gcc_unreachable ();
4403 }
4404
4405 undo->next = undobuf.frees;
4406 undobuf.frees = undo;
4407 }
4408
4409 undobuf.undos = 0;
4410 }
4411
4412 /* We've committed to accepting the changes we made. Move all
4413 of the undos to the free list. */
4414
4415 static void
4416 undo_commit (void)
4417 {
4418 struct undo *undo, *next;
4419
4420 for (undo = undobuf.undos; undo; undo = next)
4421 {
4422 next = undo->next;
4423 undo->next = undobuf.frees;
4424 undobuf.frees = undo;
4425 }
4426 undobuf.undos = 0;
4427 }
4428 \f
4429 /* Find the innermost point within the rtx at LOC, possibly LOC itself,
4430 where we have an arithmetic expression and return that point. LOC will
4431 be inside INSN.
4432
4433 try_combine will call this function to see if an insn can be split into
4434 two insns. */
4435
4436 static rtx *
4437 find_split_point (rtx *loc, rtx insn, bool set_src)
4438 {
4439 rtx x = *loc;
4440 enum rtx_code code = GET_CODE (x);
4441 rtx *split;
4442 unsigned HOST_WIDE_INT len = 0;
4443 HOST_WIDE_INT pos = 0;
4444 int unsignedp = 0;
4445 rtx inner = NULL_RTX;
4446
4447 /* First special-case some codes. */
4448 switch (code)
4449 {
4450 case SUBREG:
4451 #ifdef INSN_SCHEDULING
4452 /* If we are making a paradoxical SUBREG invalid, it becomes a split
4453 point. */
4454 if (MEM_P (SUBREG_REG (x)))
4455 return loc;
4456 #endif
4457 return find_split_point (&SUBREG_REG (x), insn, false);
4458
4459 case MEM:
4460 #ifdef HAVE_lo_sum
4461 /* If we have (mem (const ..)) or (mem (symbol_ref ...)), split it
4462 using LO_SUM and HIGH. */
4463 if (GET_CODE (XEXP (x, 0)) == CONST
4464 || GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
4465 {
4466 enum machine_mode address_mode = get_address_mode (x);
4467
4468 SUBST (XEXP (x, 0),
4469 gen_rtx_LO_SUM (address_mode,
4470 gen_rtx_HIGH (address_mode, XEXP (x, 0)),
4471 XEXP (x, 0)));
4472 return &XEXP (XEXP (x, 0), 0);
4473 }
4474 #endif
4475
4476 /* If we have a PLUS whose second operand is a constant and the
4477 address is not valid, perhaps we can split it up using
4478 the machine-specific way to split large constants. We use
4479 the first pseudo-reg (one of the virtual regs) as a placeholder;
4480 it will not remain in the result. */
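/* For instance, on a target that only accepts REG + small CONST
   addresses, the machine splitter might load the high part of the
   constant into the placeholder and then add the low part; the code
   below then stitches the two sources back together and picks a split
   point between them.  */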
4481 if (GET_CODE (XEXP (x, 0)) == PLUS
4482 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
4483 && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
4484 MEM_ADDR_SPACE (x)))
4485 {
4486 rtx reg = regno_reg_rtx[FIRST_PSEUDO_REGISTER];
4487 rtx seq = combine_split_insns (gen_rtx_SET (VOIDmode, reg,
4488 XEXP (x, 0)),
4489 subst_insn);
4490
4491 /* This should have produced two insns, each of which sets our
4492 placeholder. If the source of the second is a valid address,
4493 we can put both sources together and make a split point
4494 in the middle. */
4495
4496 if (seq
4497 && NEXT_INSN (seq) != NULL_RTX
4498 && NEXT_INSN (NEXT_INSN (seq)) == NULL_RTX
4499 && NONJUMP_INSN_P (seq)
4500 && GET_CODE (PATTERN (seq)) == SET
4501 && SET_DEST (PATTERN (seq)) == reg
4502 && ! reg_mentioned_p (reg,
4503 SET_SRC (PATTERN (seq)))
4504 && NONJUMP_INSN_P (NEXT_INSN (seq))
4505 && GET_CODE (PATTERN (NEXT_INSN (seq))) == SET
4506 && SET_DEST (PATTERN (NEXT_INSN (seq))) == reg
4507 && memory_address_addr_space_p
4508 (GET_MODE (x), SET_SRC (PATTERN (NEXT_INSN (seq))),
4509 MEM_ADDR_SPACE (x)))
4510 {
4511 rtx src1 = SET_SRC (PATTERN (seq));
4512 rtx src2 = SET_SRC (PATTERN (NEXT_INSN (seq)));
4513
4514 /* Replace the placeholder in SRC2 with SRC1. If we can
4515 find where in SRC2 it was placed, that can become our
4516 split point and we can replace this address with SRC2.
4517 Just try two obvious places. */
4518
4519 src2 = replace_rtx (src2, reg, src1);
4520 split = 0;
4521 if (XEXP (src2, 0) == src1)
4522 split = &XEXP (src2, 0);
4523 else if (GET_RTX_FORMAT (GET_CODE (XEXP (src2, 0)))[0] == 'e'
4524 && XEXP (XEXP (src2, 0), 0) == src1)
4525 split = &XEXP (XEXP (src2, 0), 0);
4526
4527 if (split)
4528 {
4529 SUBST (XEXP (x, 0), src2);
4530 return split;
4531 }
4532 }
4533
4534 /* If that didn't work, perhaps the first operand is complex and
4535 needs to be computed separately, so make a split point there.
4536 This will occur on machines that just support REG + CONST
4537 and have a constant moved through some previous computation. */
4538
4539 else if (!OBJECT_P (XEXP (XEXP (x, 0), 0))
4540 && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
4541 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
4542 return &XEXP (XEXP (x, 0), 0);
4543 }
4544
4545 /* If we have a PLUS whose first operand is complex, try computing it
4546 separately by making a split there. */
4547 if (GET_CODE (XEXP (x, 0)) == PLUS
4548 && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
4549 MEM_ADDR_SPACE (x))
4550 && ! OBJECT_P (XEXP (XEXP (x, 0), 0))
4551 && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
4552 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
4553 return &XEXP (XEXP (x, 0), 0);
4554 break;
4555
4556 case SET:
4557 #ifdef HAVE_cc0
4558 /* If SET_DEST is CC0 and SET_SRC is not an operand, a COMPARE, or a
4559 ZERO_EXTRACT, the most likely reason why this doesn't match is that
4560 we need to put the operand into a register. So split at that
4561 point. */
4562
4563 if (SET_DEST (x) == cc0_rtx
4564 && GET_CODE (SET_SRC (x)) != COMPARE
4565 && GET_CODE (SET_SRC (x)) != ZERO_EXTRACT
4566 && !OBJECT_P (SET_SRC (x))
4567 && ! (GET_CODE (SET_SRC (x)) == SUBREG
4568 && OBJECT_P (SUBREG_REG (SET_SRC (x)))))
4569 return &SET_SRC (x);
4570 #endif
4571
4572 /* See if we can split SET_SRC as it stands. */
4573 split = find_split_point (&SET_SRC (x), insn, true);
4574 if (split && split != &SET_SRC (x))
4575 return split;
4576
4577 /* See if we can split SET_DEST as it stands. */
4578 split = find_split_point (&SET_DEST (x), insn, false);
4579 if (split && split != &SET_DEST (x))
4580 return split;
4581
4582 /* See if this is a bitfield assignment with everything constant. If
4583 so, this is an IOR of an AND, so split it into that. */
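/* E.g. storing constant C into a LEN-bit field at position POS of D
   becomes D = (D & ~(MASK << POS)) | (C << POS); when C fills the
   whole field the AND is dropped.  */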
4584 if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
4585 && HWI_COMPUTABLE_MODE_P (GET_MODE (XEXP (SET_DEST (x), 0)))
4586 && CONST_INT_P (XEXP (SET_DEST (x), 1))
4587 && CONST_INT_P (XEXP (SET_DEST (x), 2))
4588 && CONST_INT_P (SET_SRC (x))
4589 && ((INTVAL (XEXP (SET_DEST (x), 1))
4590 + INTVAL (XEXP (SET_DEST (x), 2)))
4591 <= GET_MODE_PRECISION (GET_MODE (XEXP (SET_DEST (x), 0))))
4592 && ! side_effects_p (XEXP (SET_DEST (x), 0)))
4593 {
4594 HOST_WIDE_INT pos = INTVAL (XEXP (SET_DEST (x), 2));
4595 unsigned HOST_WIDE_INT len = INTVAL (XEXP (SET_DEST (x), 1));
4596 unsigned HOST_WIDE_INT src = INTVAL (SET_SRC (x));
4597 rtx dest = XEXP (SET_DEST (x), 0);
4598 enum machine_mode mode = GET_MODE (dest);
4599 unsigned HOST_WIDE_INT mask
4600 = ((unsigned HOST_WIDE_INT) 1 << len) - 1;
4601 rtx or_mask;
4602
4603 if (BITS_BIG_ENDIAN)
4604 pos = GET_MODE_PRECISION (mode) - len - pos;
4605
4606 or_mask = gen_int_mode (src << pos, mode);
4607 if (src == mask)
4608 SUBST (SET_SRC (x),
4609 simplify_gen_binary (IOR, mode, dest, or_mask));
4610 else
4611 {
4612 rtx negmask = gen_int_mode (~(mask << pos), mode);
4613 SUBST (SET_SRC (x),
4614 simplify_gen_binary (IOR, mode,
4615 simplify_gen_binary (AND, mode,
4616 dest, negmask),
4617 or_mask));
4618 }
4619
4620 SUBST (SET_DEST (x), dest);
4621
4622 split = find_split_point (&SET_SRC (x), insn, true);
4623 if (split && split != &SET_SRC (x))
4624 return split;
4625 }
4626
4627 /* Otherwise, see if this is an operation that we can split into two.
4628 If so, try to split that. */
4629 code = GET_CODE (SET_SRC (x));
4630
4631 switch (code)
4632 {
4633 case AND:
4634 /* If we are AND'ing with a large constant that is only a single
4635 bit and the result is only being used in a context where we
4636 need to know if it is zero or nonzero, replace it with a bit
4637 extraction. This will avoid the large constant, which might
4638 have taken more than one insn to make. If the constant were
4639 not a valid argument to the AND but took only one insn to make,
4640 this is no worse, but if it took more than one insn, it will
4641 be better. */
4642
4643 if (CONST_INT_P (XEXP (SET_SRC (x), 1))
4644 && REG_P (XEXP (SET_SRC (x), 0))
4645 && (pos = exact_log2 (UINTVAL (XEXP (SET_SRC (x), 1)))) >= 7
4646 && REG_P (SET_DEST (x))
4647 && (split = find_single_use (SET_DEST (x), insn, (rtx*) 0)) != 0
4648 && (GET_CODE (*split) == EQ || GET_CODE (*split) == NE)
4649 && XEXP (*split, 0) == SET_DEST (x)
4650 && XEXP (*split, 1) == const0_rtx)
4651 {
4652 rtx extraction = make_extraction (GET_MODE (SET_DEST (x)),
4653 XEXP (SET_SRC (x), 0),
4654 pos, NULL_RTX, 1, 1, 0, 0);
4655 if (extraction != 0)
4656 {
4657 SUBST (SET_SRC (x), extraction);
4658 return find_split_point (loc, insn, false);
4659 }
4660 }
4661 break;
4662
4663 case NE:
4664 /* If STORE_FLAG_VALUE is -1, this is (NE X 0) and only one bit of X
4665 is known to be on; this can be converted into a NEG of a shift. */
4666 if (STORE_FLAG_VALUE == -1 && XEXP (SET_SRC (x), 1) == const0_rtx
4667 && GET_MODE (SET_SRC (x)) == GET_MODE (XEXP (SET_SRC (x), 0))
4668 && 1 <= (pos = exact_log2
4669 (nonzero_bits (XEXP (SET_SRC (x), 0),
4670 GET_MODE (XEXP (SET_SRC (x), 0))))))
4671 {
4672 enum machine_mode mode = GET_MODE (XEXP (SET_SRC (x), 0));
4673
4674 SUBST (SET_SRC (x),
4675 gen_rtx_NEG (mode,
4676 gen_rtx_LSHIFTRT (mode,
4677 XEXP (SET_SRC (x), 0),
4678 GEN_INT (pos))));
4679
4680 split = find_split_point (&SET_SRC (x), insn, true);
4681 if (split && split != &SET_SRC (x))
4682 return split;
4683 }
4684 break;
4685
4686 case SIGN_EXTEND:
4687 inner = XEXP (SET_SRC (x), 0);
4688
4689 /* We can't optimize if either mode is a partial integer
4690 mode as we don't know how many bits are significant
4691 in those modes. */
4692 if (GET_MODE_CLASS (GET_MODE (inner)) == MODE_PARTIAL_INT
4693 || GET_MODE_CLASS (GET_MODE (SET_SRC (x))) == MODE_PARTIAL_INT)
4694 break;
4695
4696 pos = 0;
4697 len = GET_MODE_PRECISION (GET_MODE (inner));
4698 unsignedp = 0;
4699 break;
4700
4701 case SIGN_EXTRACT:
4702 case ZERO_EXTRACT:
4703 if (CONST_INT_P (XEXP (SET_SRC (x), 1))
4704 && CONST_INT_P (XEXP (SET_SRC (x), 2)))
4705 {
4706 inner = XEXP (SET_SRC (x), 0);
4707 len = INTVAL (XEXP (SET_SRC (x), 1));
4708 pos = INTVAL (XEXP (SET_SRC (x), 2));
4709
4710 if (BITS_BIG_ENDIAN)
4711 pos = GET_MODE_PRECISION (GET_MODE (inner)) - len - pos;
4712 unsignedp = (code == ZERO_EXTRACT);
4713 }
4714 break;
4715
4716 default:
4717 break;
4718 }
4719
4720 if (len && pos >= 0
4721 && pos + len <= GET_MODE_PRECISION (GET_MODE (inner)))
4722 {
4723 enum machine_mode mode = GET_MODE (SET_SRC (x));
4724
4725 /* For unsigned, we have a choice of a shift followed by an
4726 AND or two shifts. Use two shifts for field sizes where the
4727 constant might be too large. We assume here that we can
4728 always at least get 8-bit constants in an AND insn, which is
4729 true for every current RISC. */
4730
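/* A small unsigned field becomes
   (and (lshiftrt (lowpart INNER) POS) (2**LEN - 1));
   otherwise the field is extracted with two shifts:
   (ashiftrt (ashift (lowpart INNER) (PREC - LEN - POS)) (PREC - LEN)),
   using LSHIFTRT instead of ASHIFTRT when the field is unsigned.  */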
4731 if (unsignedp && len <= 8)
4732 {
4733 SUBST (SET_SRC (x),
4734 gen_rtx_AND (mode,
4735 gen_rtx_LSHIFTRT
4736 (mode, gen_lowpart (mode, inner),
4737 GEN_INT (pos)),
4738 GEN_INT (((unsigned HOST_WIDE_INT) 1 << len)
4739 - 1)));
4740
4741 split = find_split_point (&SET_SRC (x), insn, true);
4742 if (split && split != &SET_SRC (x))
4743 return split;
4744 }
4745 else
4746 {
4747 SUBST (SET_SRC (x),
4748 gen_rtx_fmt_ee
4749 (unsignedp ? LSHIFTRT : ASHIFTRT, mode,
4750 gen_rtx_ASHIFT (mode,
4751 gen_lowpart (mode, inner),
4752 GEN_INT (GET_MODE_PRECISION (mode)
4753 - len - pos)),
4754 GEN_INT (GET_MODE_PRECISION (mode) - len)));
4755
4756 split = find_split_point (&SET_SRC (x), insn, true);
4757 if (split && split != &SET_SRC (x))
4758 return split;
4759 }
4760 }
4761
4762 /* See if this is a simple operation with a constant as the second
4763 operand. It might be that this constant is out of range and hence
4764 could be used as a split point. */
4765 if (BINARY_P (SET_SRC (x))
4766 && CONSTANT_P (XEXP (SET_SRC (x), 1))
4767 && (OBJECT_P (XEXP (SET_SRC (x), 0))
4768 || (GET_CODE (XEXP (SET_SRC (x), 0)) == SUBREG
4769 && OBJECT_P (SUBREG_REG (XEXP (SET_SRC (x), 0))))))
4770 return &XEXP (SET_SRC (x), 1);
4771
4772 /* Finally, see if this is a simple operation with its first operand
4773 not in a register. The operation might require this operand in a
4774 register, so return it as a split point. We can always do this
4775 because if the first operand were another operation, we would have
4776 already found it as a split point. */
4777 if ((BINARY_P (SET_SRC (x)) || UNARY_P (SET_SRC (x)))
4778 && ! register_operand (XEXP (SET_SRC (x), 0), VOIDmode))
4779 return &XEXP (SET_SRC (x), 0);
4780
4781 return 0;
4782
4783 case AND:
4784 case IOR:
4785 /* We write NOR as (and (not A) (not B)), but if we don't have a NOR,
4786 it is better to write this as (not (ior A B)) so we can split it.
4787 Similarly for IOR. */
4788 if (GET_CODE (XEXP (x, 0)) == NOT && GET_CODE (XEXP (x, 1)) == NOT)
4789 {
4790 SUBST (*loc,
4791 gen_rtx_NOT (GET_MODE (x),
4792 gen_rtx_fmt_ee (code == IOR ? AND : IOR,
4793 GET_MODE (x),
4794 XEXP (XEXP (x, 0), 0),
4795 XEXP (XEXP (x, 1), 0))));
4796 return find_split_point (loc, insn, set_src);
4797 }
4798
4799 /* Many RISC machines have a large set of logical insns. If the
4800 second operand is a NOT, put it first so we will try to split the
4801 other operand first. */
4802 if (GET_CODE (XEXP (x, 1)) == NOT)
4803 {
4804 rtx tem = XEXP (x, 0);
4805 SUBST (XEXP (x, 0), XEXP (x, 1));
4806 SUBST (XEXP (x, 1), tem);
4807 }
4808 break;
4809
4810 case PLUS:
4811 case MINUS:
4812 /* Canonicalization can produce (minus A (mult B C)), where C is a
4813 constant. It may be better to try splitting (plus (mult B -C) A)
4814 instead if this isn't a multiply by a power of two. */
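/* E.g. (minus A (mult B (const_int 3))) is rewritten as
   (plus (mult B (const_int -3)) A) before looking for a split point.  */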
4815 if (set_src && code == MINUS && GET_CODE (XEXP (x, 1)) == MULT
4816 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
4817 && exact_log2 (INTVAL (XEXP (XEXP (x, 1), 1))) < 0)
4818 {
4819 enum machine_mode mode = GET_MODE (x);
4820 unsigned HOST_WIDE_INT this_int = INTVAL (XEXP (XEXP (x, 1), 1));
4821 HOST_WIDE_INT other_int = trunc_int_for_mode (-this_int, mode);
4822 SUBST (*loc, gen_rtx_PLUS (mode, gen_rtx_MULT (mode,
4823 XEXP (XEXP (x, 1), 0),
4824 GEN_INT (other_int)),
4825 XEXP (x, 0)));
4826 return find_split_point (loc, insn, set_src);
4827 }
4828
4829 /* Split at a multiply-accumulate instruction. However if this is
4830 the SET_SRC, we likely do not have such an instruction and it's
4831 worthless to try this split. */
4832 if (!set_src && GET_CODE (XEXP (x, 0)) == MULT)
4833 return loc;
4834
4835 default:
4836 break;
4837 }
4838
4839 /* Otherwise, select our actions depending on our rtx class. */
4840 switch (GET_RTX_CLASS (code))
4841 {
4842 case RTX_BITFIELD_OPS: /* This is ZERO_EXTRACT and SIGN_EXTRACT. */
4843 case RTX_TERNARY:
4844 split = find_split_point (&XEXP (x, 2), insn, false);
4845 if (split)
4846 return split;
4847 /* ... fall through ... */
4848 case RTX_BIN_ARITH:
4849 case RTX_COMM_ARITH:
4850 case RTX_COMPARE:
4851 case RTX_COMM_COMPARE:
4852 split = find_split_point (&XEXP (x, 1), insn, false);
4853 if (split)
4854 return split;
4855 /* ... fall through ... */
4856 case RTX_UNARY:
4857 /* Some machines have (and (shift ...) ...) insns. If X is not
4858 an AND, but XEXP (X, 0) is, use it as our split point. */
4859 if (GET_CODE (x) != AND && GET_CODE (XEXP (x, 0)) == AND)
4860 return &XEXP (x, 0);
4861
4862 split = find_split_point (&XEXP (x, 0), insn, false);
4863 if (split)
4864 return split;
4865 return loc;
4866
4867 default:
4868 /* Otherwise, we don't have a split point. */
4869 return 0;
4870 }
4871 }
4872 \f
4873 /* Throughout X, replace FROM with TO, and return the result.
4874 The result is TO if X is FROM;
4875 otherwise the result is X, but its contents may have been modified.
4876 If they were modified, a record was made in undobuf so that
4877 undo_all will (among other things) return X to its original state.
4878
4879 If the number of changes necessary is too large to record for undoing,
4880 the excess changes are not made, so the result is invalid.
4881 The changes already made can still be undone.
4882 undobuf.num_undo is incremented for such changes, so by testing that,
4883 the caller can tell whether the result is valid.
4884
4885 `n_occurrences' is incremented each time FROM is replaced.
4886
4887 IN_DEST is nonzero if we are processing the SET_DEST of a SET.
4888
4889 IN_COND is nonzero if we are at the top level of a condition.
4890
4891 UNIQUE_COPY is nonzero if each substitution must be unique. We do this
4892 by copying if `n_occurrences' is nonzero. */
4893
4894 static rtx
4895 subst (rtx x, rtx from, rtx to, int in_dest, int in_cond, int unique_copy)
4896 {
4897 enum rtx_code code = GET_CODE (x);
4898 enum machine_mode op0_mode = VOIDmode;
4899 const char *fmt;
4900 int len, i;
4901 rtx new_rtx;
4902
4903 /* Two expressions are equal if they are identical copies of a shared
4904 RTX or if they are both registers with the same register number
4905 and mode. */
4906
4907 #define COMBINE_RTX_EQUAL_P(X,Y) \
4908 ((X) == (Y) \
4909 || (REG_P (X) && REG_P (Y) \
4910 && REGNO (X) == REGNO (Y) && GET_MODE (X) == GET_MODE (Y)))
4911
4912 if (! in_dest && COMBINE_RTX_EQUAL_P (x, from))
4913 {
4914 n_occurrences++;
4915 return (unique_copy && n_occurrences > 1 ? copy_rtx (to) : to);
4916 }
4917
4918 /* If X and FROM are the same register but different modes, they
4919 will not have been seen as equal above. However, the log links code
4920 will make a LOG_LINKS entry for that case. If we do nothing, we
4921 will try to rerecognize our original insn and, when it succeeds,
4922 we will delete the feeding insn, which is incorrect.
4923
4924 So force this insn not to match in this (rare) case. */
4925 if (! in_dest && code == REG && REG_P (from)
4926 && reg_overlap_mentioned_p (x, from))
4927 return gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
4928
4929 /* If this is an object, we are done unless it is a MEM or LO_SUM, both
4930 of which may contain things that can be combined. */
4931 if (code != MEM && code != LO_SUM && OBJECT_P (x))
4932 return x;
4933
4934 /* It is possible to have a subexpression appear twice in the insn.
4935 Suppose that FROM is a register that appears within TO.
4936 Then, after that subexpression has been scanned once by `subst',
4937 the second time it is scanned, TO may be found. If we were
4938 to scan TO here, we would find FROM within it and create a
4939 self-referential rtl structure, which is completely wrong.
4940 if (COMBINE_RTX_EQUAL_P (x, to))
4941 return to;
4942
4943 /* Parallel asm_operands need special attention because all of the
4944 inputs are shared across the arms. Furthermore, unsharing the
4945 rtl results in recognition failures. Failure to handle this case
4946 specially can result in circular rtl.
4947
4948 Solve this by doing a normal pass across the first entry of the
4949 parallel, and only processing the SET_DESTs of the subsequent
4950 entries. Ug. */
4951
4952 if (code == PARALLEL
4953 && GET_CODE (XVECEXP (x, 0, 0)) == SET
4954 && GET_CODE (SET_SRC (XVECEXP (x, 0, 0))) == ASM_OPERANDS)
4955 {
4956 new_rtx = subst (XVECEXP (x, 0, 0), from, to, 0, 0, unique_copy);
4957
4958 /* If this substitution failed, this whole thing fails. */
4959 if (GET_CODE (new_rtx) == CLOBBER
4960 && XEXP (new_rtx, 0) == const0_rtx)
4961 return new_rtx;
4962
4963 SUBST (XVECEXP (x, 0, 0), new_rtx);
4964
4965 for (i = XVECLEN (x, 0) - 1; i >= 1; i--)
4966 {
4967 rtx dest = SET_DEST (XVECEXP (x, 0, i));
4968
4969 if (!REG_P (dest)
4970 && GET_CODE (dest) != CC0
4971 && GET_CODE (dest) != PC)
4972 {
4973 new_rtx = subst (dest, from, to, 0, 0, unique_copy);
4974
4975 /* If this substitution failed, this whole thing fails. */
4976 if (GET_CODE (new_rtx) == CLOBBER
4977 && XEXP (new_rtx, 0) == const0_rtx)
4978 return new_rtx;
4979
4980 SUBST (SET_DEST (XVECEXP (x, 0, i)), new_rtx);
4981 }
4982 }
4983 }
4984 else
4985 {
4986 len = GET_RTX_LENGTH (code);
4987 fmt = GET_RTX_FORMAT (code);
4988
4989 /* We don't need to process a SET_DEST that is a register, CC0,
4990 or PC, so set up to skip this common case. All other cases
4991 where we want to suppress replacing something inside a
4992 SET_SRC are handled via the IN_DEST operand. */
4993 if (code == SET
4994 && (REG_P (SET_DEST (x))
4995 || GET_CODE (SET_DEST (x)) == CC0
4996 || GET_CODE (SET_DEST (x)) == PC))
4997 fmt = "ie";
4998
4999 /* Get the mode of operand 0 in case X is now a SIGN_EXTEND of a
5000 constant. */
5001 if (fmt[0] == 'e')
5002 op0_mode = GET_MODE (XEXP (x, 0));
5003
5004 for (i = 0; i < len; i++)
5005 {
5006 if (fmt[i] == 'E')
5007 {
5008 int j;
5009 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5010 {
5011 if (COMBINE_RTX_EQUAL_P (XVECEXP (x, i, j), from))
5012 {
5013 new_rtx = (unique_copy && n_occurrences
5014 ? copy_rtx (to) : to);
5015 n_occurrences++;
5016 }
5017 else
5018 {
5019 new_rtx = subst (XVECEXP (x, i, j), from, to, 0, 0,
5020 unique_copy);
5021
5022 /* If this substitution failed, this whole thing
5023 fails. */
5024 if (GET_CODE (new_rtx) == CLOBBER
5025 && XEXP (new_rtx, 0) == const0_rtx)
5026 return new_rtx;
5027 }
5028
5029 SUBST (XVECEXP (x, i, j), new_rtx);
5030 }
5031 }
5032 else if (fmt[i] == 'e')
5033 {
5034 /* If this is a register being set, ignore it. */
5035 new_rtx = XEXP (x, i);
5036 if (in_dest
5037 && i == 0
5038 && (((code == SUBREG || code == ZERO_EXTRACT)
5039 && REG_P (new_rtx))
5040 || code == STRICT_LOW_PART))
5041 ;
5042
5043 else if (COMBINE_RTX_EQUAL_P (XEXP (x, i), from))
5044 {
5045 /* In general, don't install a subreg involving two
5046 modes that are not tieable. It can worsen register
5047 allocation, and can even make invalid reload
5048 insns, since the reg inside may need to be copied
5049 from in the outside mode, and that may be invalid
5050 if it is an fp reg copied in integer mode.
5051
5052 We allow two exceptions to this: It is valid if
5053 it is inside another SUBREG and the mode of that
5054 SUBREG and the mode of the inside of TO is
5055 tieable and it is valid if X is a SET that copies
5056 FROM to CC0. */
5057
5058 if (GET_CODE (to) == SUBREG
5059 && ! MODES_TIEABLE_P (GET_MODE (to),
5060 GET_MODE (SUBREG_REG (to)))
5061 && ! (code == SUBREG
5062 && MODES_TIEABLE_P (GET_MODE (x),
5063 GET_MODE (SUBREG_REG (to))))
5064 #ifdef HAVE_cc0
5065 && ! (code == SET && i == 1 && XEXP (x, 0) == cc0_rtx)
5066 #endif
5067 )
5068 return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5069
5070 #ifdef CANNOT_CHANGE_MODE_CLASS
5071 if (code == SUBREG
5072 && REG_P (to)
5073 && REGNO (to) < FIRST_PSEUDO_REGISTER
5074 && REG_CANNOT_CHANGE_MODE_P (REGNO (to),
5075 GET_MODE (to),
5076 GET_MODE (x)))
5077 return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5078 #endif
5079
5080 new_rtx = (unique_copy && n_occurrences ? copy_rtx (to) : to);
5081 n_occurrences++;
5082 }
5083 else
5084 /* If we are in a SET_DEST, suppress most cases unless we
5085 have gone inside a MEM, in which case we want to
5086 simplify the address. We assume here that things that
5087 are actually part of the destination have their inner
5088 parts in the first expression. This is true for SUBREG,
5089 STRICT_LOW_PART, and ZERO_EXTRACT, which are the only
5090 things aside from REG and MEM that should appear in a
5091 SET_DEST. */
5092 new_rtx = subst (XEXP (x, i), from, to,
5093 (((in_dest
5094 && (code == SUBREG || code == STRICT_LOW_PART
5095 || code == ZERO_EXTRACT))
5096 || code == SET)
5097 && i == 0),
5098 code == IF_THEN_ELSE && i == 0,
5099 unique_copy);
5100
5101 /* If we found that we will have to reject this combination,
5102 indicate that by returning the CLOBBER ourselves, rather than
5103 an expression containing it. This will speed things up as
5104 well as prevent accidents where two CLOBBERs are considered
5105 to be equal, thus producing an incorrect simplification. */
5106
5107 if (GET_CODE (new_rtx) == CLOBBER && XEXP (new_rtx, 0) == const0_rtx)
5108 return new_rtx;
5109
5110 if (GET_CODE (x) == SUBREG && CONST_SCALAR_INT_P (new_rtx))
5111 {
5112 enum machine_mode mode = GET_MODE (x);
5113
5114 x = simplify_subreg (GET_MODE (x), new_rtx,
5115 GET_MODE (SUBREG_REG (x)),
5116 SUBREG_BYTE (x));
5117 if (! x)
5118 x = gen_rtx_CLOBBER (mode, const0_rtx);
5119 }
5120 else if (CONST_INT_P (new_rtx)
5121 && GET_CODE (x) == ZERO_EXTEND)
5122 {
5123 x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
5124 new_rtx, GET_MODE (XEXP (x, 0)));
5125 gcc_assert (x);
5126 }
5127 else
5128 SUBST (XEXP (x, i), new_rtx);
5129 }
5130 }
5131 }
5132
5133 /* Check if we are loading something from the constant pool via float
5134 extension; in this case we would undo compress_float_constant
5135 optimization and degrade the constant load to an immediate value. */
5136 if (GET_CODE (x) == FLOAT_EXTEND
5137 && MEM_P (XEXP (x, 0))
5138 && MEM_READONLY_P (XEXP (x, 0)))
5139 {
5140 rtx tmp = avoid_constant_pool_reference (x);
5141 if (x != tmp)
5142 return x;
5143 }
5144
5145 /* Try to simplify X. If the simplification changed the code, it is likely
5146 that further simplification will help, so loop, but limit the number
5147 of repetitions that will be performed. */
5148
5149 for (i = 0; i < 4; i++)
5150 {
5151 /* If X is sufficiently simple, don't bother trying to do anything
5152 with it. */
5153 if (code != CONST_INT && code != REG && code != CLOBBER)
5154 x = combine_simplify_rtx (x, op0_mode, in_dest, in_cond);
5155
5156 if (GET_CODE (x) == code)
5157 break;
5158
5159 code = GET_CODE (x);
5160
5161 /* We no longer know the original mode of operand 0 since we
5162 have changed the form of X. */
5163 op0_mode = VOIDmode;
5164 }
5165
5166 return x;
5167 }
5168 \f
5169 /* Simplify X, a piece of RTL. We just operate on the expression at the
5170 outer level; call `subst' to simplify recursively. Return the new
5171 expression.
5172
5173 OP0_MODE is the original mode of XEXP (x, 0). IN_DEST is nonzero
5174 if we are inside a SET_DEST. IN_COND is nonzero if we are at the top level
5175 of a condition. */
5176
5177 static rtx
5178 combine_simplify_rtx (rtx x, enum machine_mode op0_mode, int in_dest,
5179 int in_cond)
5180 {
5181 enum rtx_code code = GET_CODE (x);
5182 enum machine_mode mode = GET_MODE (x);
5183 rtx temp;
5184 int i;
5185
5186 /* If this is a commutative operation, put a constant last and a complex
5187 expression first. We don't need to do this for comparisons here. */
5188 if (COMMUTATIVE_ARITH_P (x)
5189 && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5190 {
5191 temp = XEXP (x, 0);
5192 SUBST (XEXP (x, 0), XEXP (x, 1));
5193 SUBST (XEXP (x, 1), temp);
5194 }
5195
5196 /* If this is a simple operation applied to an IF_THEN_ELSE, try
5197 applying it to the arms of the IF_THEN_ELSE. This often simplifies
5198 things. Check for cases where both arms are testing the same
5199 condition.
5200
5201 Don't do anything if all operands are very simple. */
5202
5203 if ((BINARY_P (x)
5204 && ((!OBJECT_P (XEXP (x, 0))
5205 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5206 && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))
5207 || (!OBJECT_P (XEXP (x, 1))
5208 && ! (GET_CODE (XEXP (x, 1)) == SUBREG
5209 && OBJECT_P (SUBREG_REG (XEXP (x, 1)))))))
5210 || (UNARY_P (x)
5211 && (!OBJECT_P (XEXP (x, 0))
5212 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5213 && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))))
5214 {
5215 rtx cond, true_rtx, false_rtx;
5216
5217 cond = if_then_else_cond (x, &true_rtx, &false_rtx);
5218 if (cond != 0
5219 /* If everything is a comparison, what we have is highly unlikely
5220 to be simpler, so don't use it. */
5221 && ! (COMPARISON_P (x)
5222 && (COMPARISON_P (true_rtx) || COMPARISON_P (false_rtx))))
5223 {
5224 rtx cop1 = const0_rtx;
5225 enum rtx_code cond_code = simplify_comparison (NE, &cond, &cop1);
5226
5227 if (cond_code == NE && COMPARISON_P (cond))
5228 return x;
5229
5230 /* Simplify the alternative arms; this may collapse the true and
5231 false arms to store-flag values. Be careful to use copy_rtx
5232 here since true_rtx or false_rtx might share RTL with x as a
5233 result of the if_then_else_cond call above. */
5234 true_rtx = subst (copy_rtx (true_rtx), pc_rtx, pc_rtx, 0, 0, 0);
5235 false_rtx = subst (copy_rtx (false_rtx), pc_rtx, pc_rtx, 0, 0, 0);
5236
5237 /* If true_rtx and false_rtx are not general_operands, an if_then_else
5238 is unlikely to be simpler. */
5239 if (general_operand (true_rtx, VOIDmode)
5240 && general_operand (false_rtx, VOIDmode))
5241 {
5242 enum rtx_code reversed;
5243
5244 /* Restarting if we generate a store-flag expression will cause
5245 us to loop. Just drop through in this case. */
5246
5247 /* If the result values are STORE_FLAG_VALUE and zero, we can
5248 just make the comparison operation. */
5249 if (true_rtx == const_true_rtx && false_rtx == const0_rtx)
5250 x = simplify_gen_relational (cond_code, mode, VOIDmode,
5251 cond, cop1);
5252 else if (true_rtx == const0_rtx && false_rtx == const_true_rtx
5253 && ((reversed = reversed_comparison_code_parts
5254 (cond_code, cond, cop1, NULL))
5255 != UNKNOWN))
5256 x = simplify_gen_relational (reversed, mode, VOIDmode,
5257 cond, cop1);
5258
5259 /* Likewise, we can make the negate of a comparison operation
5260 if the result values are - STORE_FLAG_VALUE and zero. */
5261 else if (CONST_INT_P (true_rtx)
5262 && INTVAL (true_rtx) == - STORE_FLAG_VALUE
5263 && false_rtx == const0_rtx)
5264 x = simplify_gen_unary (NEG, mode,
5265 simplify_gen_relational (cond_code,
5266 mode, VOIDmode,
5267 cond, cop1),
5268 mode);
5269 else if (CONST_INT_P (false_rtx)
5270 && INTVAL (false_rtx) == - STORE_FLAG_VALUE
5271 && true_rtx == const0_rtx
5272 && ((reversed = reversed_comparison_code_parts
5273 (cond_code, cond, cop1, NULL))
5274 != UNKNOWN))
5275 x = simplify_gen_unary (NEG, mode,
5276 simplify_gen_relational (reversed,
5277 mode, VOIDmode,
5278 cond, cop1),
5279 mode);
5280 else
5281 return gen_rtx_IF_THEN_ELSE (mode,
5282 simplify_gen_relational (cond_code,
5283 mode,
5284 VOIDmode,
5285 cond,
5286 cop1),
5287 true_rtx, false_rtx);
5288
5289 code = GET_CODE (x);
5290 op0_mode = VOIDmode;
5291 }
5292 }
5293 }
5294
5295 /* Try to fold this expression in case we have constants that weren't
5296 present before. */
5297 temp = 0;
5298 switch (GET_RTX_CLASS (code))
5299 {
5300 case RTX_UNARY:
5301 if (op0_mode == VOIDmode)
5302 op0_mode = GET_MODE (XEXP (x, 0));
5303 temp = simplify_unary_operation (code, mode, XEXP (x, 0), op0_mode);
5304 break;
5305 case RTX_COMPARE:
5306 case RTX_COMM_COMPARE:
5307 {
5308 enum machine_mode cmp_mode = GET_MODE (XEXP (x, 0));
5309 if (cmp_mode == VOIDmode)
5310 {
5311 cmp_mode = GET_MODE (XEXP (x, 1));
5312 if (cmp_mode == VOIDmode)
5313 cmp_mode = op0_mode;
5314 }
5315 temp = simplify_relational_operation (code, mode, cmp_mode,
5316 XEXP (x, 0), XEXP (x, 1));
5317 }
5318 break;
5319 case RTX_COMM_ARITH:
5320 case RTX_BIN_ARITH:
5321 temp = simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5322 break;
5323 case RTX_BITFIELD_OPS:
5324 case RTX_TERNARY:
5325 temp = simplify_ternary_operation (code, mode, op0_mode, XEXP (x, 0),
5326 XEXP (x, 1), XEXP (x, 2));
5327 break;
5328 default:
5329 break;
5330 }
5331
5332 if (temp)
5333 {
5334 x = temp;
5335 code = GET_CODE (temp);
5336 op0_mode = VOIDmode;
5337 mode = GET_MODE (temp);
5338 }
5339
5340 /* First see if we can apply the inverse distributive law. */
5341 if (code == PLUS || code == MINUS
5342 || code == AND || code == IOR || code == XOR)
5343 {
5344 x = apply_distributive_law (x);
5345 code = GET_CODE (x);
5346 op0_mode = VOIDmode;
5347 }
5348
5349 /* If CODE is an associative operation not otherwise handled, see if we
5350 can associate some operands. This can win if they are constants or
5351 if they are logically related (i.e. (a & b) & a). */
5352 if ((code == PLUS || code == MINUS || code == MULT || code == DIV
5353 || code == AND || code == IOR || code == XOR
5354 || code == SMAX || code == SMIN || code == UMAX || code == UMIN)
5355 && ((INTEGRAL_MODE_P (mode) && code != DIV)
5356 || (flag_associative_math && FLOAT_MODE_P (mode))))
5357 {
5358 if (GET_CODE (XEXP (x, 0)) == code)
5359 {
5360 rtx other = XEXP (XEXP (x, 0), 0);
5361 rtx inner_op0 = XEXP (XEXP (x, 0), 1);
5362 rtx inner_op1 = XEXP (x, 1);
5363 rtx inner;
5364
5365 /* Make sure we pass the constant operand if any as the second
5366 one if this is a commutative operation. */
5367 if (CONSTANT_P (inner_op0) && COMMUTATIVE_ARITH_P (x))
5368 {
5369 rtx tem = inner_op0;
5370 inner_op0 = inner_op1;
5371 inner_op1 = tem;
5372 }
5373 inner = simplify_binary_operation (code == MINUS ? PLUS
5374 : code == DIV ? MULT
5375 : code,
5376 mode, inner_op0, inner_op1);
5377
5378 /* For commutative operations, try the other pair if that one
5379 didn't simplify. */
5380 if (inner == 0 && COMMUTATIVE_ARITH_P (x))
5381 {
5382 other = XEXP (XEXP (x, 0), 1);
5383 inner = simplify_binary_operation (code, mode,
5384 XEXP (XEXP (x, 0), 0),
5385 XEXP (x, 1));
5386 }
5387
5388 if (inner)
5389 return simplify_gen_binary (code, mode, other, inner);
5390 }
5391 }
5392
5393 /* A little bit of algebraic simplification here. */
5394 switch (code)
5395 {
5396 case MEM:
5397 /* Ensure that our address has any ASHIFTs converted to MULT in case
5398 address-recognizing predicates are called later. */
5399 temp = make_compound_operation (XEXP (x, 0), MEM);
5400 SUBST (XEXP (x, 0), temp);
5401 break;
5402
5403 case SUBREG:
5404 if (op0_mode == VOIDmode)
5405 op0_mode = GET_MODE (SUBREG_REG (x));
5406
5407 /* See if this can be moved to simplify_subreg. */
5408 if (CONSTANT_P (SUBREG_REG (x))
5409 && subreg_lowpart_offset (mode, op0_mode) == SUBREG_BYTE (x)
5410 /* Don't call gen_lowpart if the inner mode
5411 is VOIDmode and we cannot simplify it, as SUBREG without
5412 inner mode is invalid. */
5413 && (GET_MODE (SUBREG_REG (x)) != VOIDmode
5414 || gen_lowpart_common (mode, SUBREG_REG (x))))
5415 return gen_lowpart (mode, SUBREG_REG (x));
5416
5417 if (GET_MODE_CLASS (GET_MODE (SUBREG_REG (x))) == MODE_CC)
5418 break;
5419 {
5420 rtx temp;
5421 temp = simplify_subreg (mode, SUBREG_REG (x), op0_mode,
5422 SUBREG_BYTE (x));
5423 if (temp)
5424 return temp;
5425 }
5426
5427 /* Don't change the mode of the MEM if that would change the meaning
5428 of the address. */
5429 if (MEM_P (SUBREG_REG (x))
5430 && (MEM_VOLATILE_P (SUBREG_REG (x))
5431 || mode_dependent_address_p (XEXP (SUBREG_REG (x), 0),
5432 MEM_ADDR_SPACE (SUBREG_REG (x)))))
5433 return gen_rtx_CLOBBER (mode, const0_rtx);
5434
5435 /* Note that we cannot do any narrowing for non-constants since
5436 we might have been counting on using the fact that some bits were
5437 zero. We now do this in the SET. */
5438
5439 break;
5440
5441 case NEG:
5442 temp = expand_compound_operation (XEXP (x, 0));
5443
5444 /* For C equal to the width of MODE minus 1, (neg (ashiftrt X C)) can be
5445 replaced by (lshiftrt X C). This will convert
5446 (neg (sign_extract X 1 Y)) to (zero_extract X 1 Y). */
5447
5448 if (GET_CODE (temp) == ASHIFTRT
5449 && CONST_INT_P (XEXP (temp, 1))
5450 && INTVAL (XEXP (temp, 1)) == GET_MODE_PRECISION (mode) - 1)
5451 return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (temp, 0),
5452 INTVAL (XEXP (temp, 1)));
5453
5454 /* If X has only a single bit that might be nonzero, say, bit I, convert
5455 (neg X) to (ashiftrt (ashift X C-I) C-I) where C is the bitsize of
5456 MODE minus 1. This will convert (neg (zero_extract X 1 Y)) to
5457 (sign_extract X 1 Y). But only do this if TEMP isn't a register
5458 or a SUBREG of one since we'd be making the expression more
5459 complex if it was just a register. */
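      /* A sketch of the single-bit case (SImode assumed): if only bit 0
         of TEMP can be nonzero, (neg:SI TEMP) becomes
             (ashiftrt:SI (ashift:SI TEMP (const_int 31)) (const_int 31)),
         which is 0 when TEMP is 0 and -1 when TEMP is 1, i.e. the
         arithmetic negation of a 0/1 value.  */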
5460
5461 if (!REG_P (temp)
5462 && ! (GET_CODE (temp) == SUBREG
5463 && REG_P (SUBREG_REG (temp)))
5464 && (i = exact_log2 (nonzero_bits (temp, mode))) >= 0)
5465 {
5466 rtx temp1 = simplify_shift_const
5467 (NULL_RTX, ASHIFTRT, mode,
5468 simplify_shift_const (NULL_RTX, ASHIFT, mode, temp,
5469 GET_MODE_PRECISION (mode) - 1 - i),
5470 GET_MODE_PRECISION (mode) - 1 - i);
5471
5472 /* If all we did was surround TEMP with the two shifts, we
5473 haven't improved anything, so don't use it. Otherwise,
5474 we are better off with TEMP1. */
5475 if (GET_CODE (temp1) != ASHIFTRT
5476 || GET_CODE (XEXP (temp1, 0)) != ASHIFT
5477 || XEXP (XEXP (temp1, 0), 0) != temp)
5478 return temp1;
5479 }
5480 break;
5481
5482 case TRUNCATE:
5483 /* We can't handle truncation to a partial integer mode here
5484 because we don't know the real bitsize of the partial
5485 integer mode. */
5486 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
5487 break;
5488
5489 if (HWI_COMPUTABLE_MODE_P (mode))
5490 SUBST (XEXP (x, 0),
5491 force_to_mode (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
5492 GET_MODE_MASK (mode), 0));
5493
5494 /* We can truncate a constant value and return it. */
5495 if (CONST_INT_P (XEXP (x, 0)))
5496 return gen_int_mode (INTVAL (XEXP (x, 0)), mode);
5497
5498 /* Similarly to what we do in simplify-rtx.c, a truncate of a register
5499 whose value is a comparison can be replaced with a subreg if
5500 STORE_FLAG_VALUE permits. */
5501 if (HWI_COMPUTABLE_MODE_P (mode)
5502 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0
5503 && (temp = get_last_value (XEXP (x, 0)))
5504 && COMPARISON_P (temp))
5505 return gen_lowpart (mode, XEXP (x, 0));
5506 break;
5507
5508 case CONST:
5509 /* (const (const X)) can become (const X). Do it this way rather than
5510 returning the inner CONST since CONST can be shared with a
5511 REG_EQUAL note. */
5512 if (GET_CODE (XEXP (x, 0)) == CONST)
5513 SUBST (XEXP (x, 0), XEXP (XEXP (x, 0), 0));
5514 break;
5515
5516 #ifdef HAVE_lo_sum
5517 case LO_SUM:
5518 /* Convert (lo_sum (high FOO) FOO) to FOO. This is necessary so we
5519 can add in an offset. find_split_point will split this address up
5520 again if it doesn't match. */
5521 if (GET_CODE (XEXP (x, 0)) == HIGH
5522 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5523 return XEXP (x, 1);
5524 break;
5525 #endif
5526
5527 case PLUS:
5528 /* (plus (xor (and <foo> (const_int pow2 - 1)) <c>) <-c>)
5529 when c is (const_int (pow2 + 1) / 2) is a sign extension of a
5530 bit-field and can be replaced by either a sign_extend or a
5531 sign_extract. The `and' may be a zero_extend and the two
5532 <c>, -<c> constants may be reversed. */
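      /* A concrete sketch (SImode, 8-bit field, so I below is 7):
             (plus:SI (xor:SI (and:SI X (const_int 255)) (const_int 128))
                      (const_int -128))
         sign-extends the low byte of X and is rewritten as
             (ashiftrt:SI (ashift:SI X (const_int 24)) (const_int 24)).  */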
5533 if (GET_CODE (XEXP (x, 0)) == XOR
5534 && CONST_INT_P (XEXP (x, 1))
5535 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
5536 && INTVAL (XEXP (x, 1)) == -INTVAL (XEXP (XEXP (x, 0), 1))
5537 && ((i = exact_log2 (UINTVAL (XEXP (XEXP (x, 0), 1)))) >= 0
5538 || (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0)
5539 && HWI_COMPUTABLE_MODE_P (mode)
5540 && ((GET_CODE (XEXP (XEXP (x, 0), 0)) == AND
5541 && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
5542 && (UINTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1))
5543 == ((unsigned HOST_WIDE_INT) 1 << (i + 1)) - 1))
5544 || (GET_CODE (XEXP (XEXP (x, 0), 0)) == ZERO_EXTEND
5545 && (GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0)))
5546 == (unsigned int) i + 1))))
5547 return simplify_shift_const
5548 (NULL_RTX, ASHIFTRT, mode,
5549 simplify_shift_const (NULL_RTX, ASHIFT, mode,
5550 XEXP (XEXP (XEXP (x, 0), 0), 0),
5551 GET_MODE_PRECISION (mode) - (i + 1)),
5552 GET_MODE_PRECISION (mode) - (i + 1));
5553
5554 /* If only the low-order bit of X is possibly nonzero, (plus x -1)
5555 can become (ashiftrt (ashift (xor x 1) C) C) where C is
5556 the bitsize of the mode - 1. This allows simplification of
5557 "a = (b & 8) == 0;" */
5558 if (XEXP (x, 1) == constm1_rtx
5559 && !REG_P (XEXP (x, 0))
5560 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5561 && REG_P (SUBREG_REG (XEXP (x, 0))))
5562 && nonzero_bits (XEXP (x, 0), mode) == 1)
5563 return simplify_shift_const (NULL_RTX, ASHIFTRT, mode,
5564 simplify_shift_const (NULL_RTX, ASHIFT, mode,
5565 gen_rtx_XOR (mode, XEXP (x, 0), const1_rtx),
5566 GET_MODE_PRECISION (mode) - 1),
5567 GET_MODE_PRECISION (mode) - 1);
5568
5569 /* If we are adding two things that have no bits in common, convert
5570 the addition into an IOR. This will often be further simplified,
5571 for example in cases like ((a & 1) + (a & 2)), which can
5572 become a & 3. */
5573
5574 if (HWI_COMPUTABLE_MODE_P (mode)
5575 && (nonzero_bits (XEXP (x, 0), mode)
5576 & nonzero_bits (XEXP (x, 1), mode)) == 0)
5577 {
5578 /* Try to simplify the expression further. */
5579 rtx tor = simplify_gen_binary (IOR, mode, XEXP (x, 0), XEXP (x, 1));
5580 temp = combine_simplify_rtx (tor, VOIDmode, in_dest, 0);
5581
5582 /* If we could, great. If not, do not go ahead with the IOR
5583 replacement, since PLUS appears in many special purpose
5584 address arithmetic instructions. */
5585 if (GET_CODE (temp) != CLOBBER
5586 && (GET_CODE (temp) != IOR
5587 || ((XEXP (temp, 0) != XEXP (x, 0)
5588 || XEXP (temp, 1) != XEXP (x, 1))
5589 && (XEXP (temp, 0) != XEXP (x, 1)
5590 || XEXP (temp, 1) != XEXP (x, 0)))))
5591 return temp;
5592 }
5593 break;
5594
5595 case MINUS:
5596 /* (minus <foo> (and <foo> (const_int -pow2))) becomes
5597 (and <foo> (const_int pow2-1)) */
5598 if (GET_CODE (XEXP (x, 1)) == AND
5599 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
5600 && exact_log2 (-UINTVAL (XEXP (XEXP (x, 1), 1))) >= 0
5601 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
5602 return simplify_and_const_int (NULL_RTX, mode, XEXP (x, 0),
5603 -INTVAL (XEXP (XEXP (x, 1), 1)) - 1);
5604 break;
5605
5606 case MULT:
5607 /* If we have (mult (plus A B) C), apply the distributive law and then
5608 the inverse distributive law to see if things simplify. This
5609 occurs mostly in addresses, often when unrolling loops. */
5610
5611 if (GET_CODE (XEXP (x, 0)) == PLUS)
5612 {
5613 rtx result = distribute_and_simplify_rtx (x, 0);
5614 if (result)
5615 return result;
5616 }
5617
5618 /* Try to simplify a*(b/c) as (a*b)/c. */
5619 if (FLOAT_MODE_P (mode) && flag_associative_math
5620 && GET_CODE (XEXP (x, 0)) == DIV)
5621 {
5622 rtx tem = simplify_binary_operation (MULT, mode,
5623 XEXP (XEXP (x, 0), 0),
5624 XEXP (x, 1));
5625 if (tem)
5626 return simplify_gen_binary (DIV, mode, tem, XEXP (XEXP (x, 0), 1));
5627 }
5628 break;
5629
5630 case UDIV:
5631 /* If this is a divide by a power of two, treat it as a shift if
5632 its first operand is a shift. */
5633 if (CONST_INT_P (XEXP (x, 1))
5634 && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
5635 && (GET_CODE (XEXP (x, 0)) == ASHIFT
5636 || GET_CODE (XEXP (x, 0)) == LSHIFTRT
5637 || GET_CODE (XEXP (x, 0)) == ASHIFTRT
5638 || GET_CODE (XEXP (x, 0)) == ROTATE
5639 || GET_CODE (XEXP (x, 0)) == ROTATERT))
5640 return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (x, 0), i);
5641 break;
5642
5643 case EQ: case NE:
5644 case GT: case GTU: case GE: case GEU:
5645 case LT: case LTU: case LE: case LEU:
5646 case UNEQ: case LTGT:
5647 case UNGT: case UNGE:
5648 case UNLT: case UNLE:
5649 case UNORDERED: case ORDERED:
5650 /* If the first operand is a condition code, we can't do anything
5651 with it. */
5652 if (GET_CODE (XEXP (x, 0)) == COMPARE
5653 || (GET_MODE_CLASS (GET_MODE (XEXP (x, 0))) != MODE_CC
5654 && ! CC0_P (XEXP (x, 0))))
5655 {
5656 rtx op0 = XEXP (x, 0);
5657 rtx op1 = XEXP (x, 1);
5658 enum rtx_code new_code;
5659
5660 if (GET_CODE (op0) == COMPARE)
5661 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
5662
5663 /* Simplify our comparison, if possible. */
5664 new_code = simplify_comparison (code, &op0, &op1);
5665
5666 /* If STORE_FLAG_VALUE is 1, we can convert (ne x 0) to simply X
5667 if only the low-order bit is possibly nonzero in X (such as when
5668 X is a ZERO_EXTRACT of one bit). Similarly, we can convert EQ to
5669 (xor X 1) or (minus 1 X); we use the former. Finally, if X is
5670 known to be either 0 or -1, NE becomes a NEG and EQ becomes
5671 (plus X 1).
5672
5673 Remove any ZERO_EXTRACT we made when thinking this was a
5674 comparison. It may now be simpler to use, e.g., an AND. If a
5675 ZERO_EXTRACT is indeed appropriate, it will be placed back by
5676 the call to make_compound_operation in the SET case.
5677
5678 Don't apply these optimizations if the caller would
5679 prefer a comparison rather than a value.
5680 E.g., for the condition in an IF_THEN_ELSE most targets need
5681 an explicit comparison. */
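          /* A sketch (assuming STORE_FLAG_VALUE == 1 and SImode): if OP0 is
             (zero_extract:SI (reg:SI 100) (const_int 1) (const_int 0)), so
             that only its low bit can be nonzero, then (ne OP0 (const_int 0))
             is just OP0; expand_compound_operation then rewrites the
             extraction using shifts and/or an AND, leaving a value that is
             0 or 1.  */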
5682
5683 if (in_cond)
5684 ;
5685
5686 else if (STORE_FLAG_VALUE == 1
5687 && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
5688 && op1 == const0_rtx
5689 && mode == GET_MODE (op0)
5690 && nonzero_bits (op0, mode) == 1)
5691 return gen_lowpart (mode,
5692 expand_compound_operation (op0));
5693
5694 else if (STORE_FLAG_VALUE == 1
5695 && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
5696 && op1 == const0_rtx
5697 && mode == GET_MODE (op0)
5698 && (num_sign_bit_copies (op0, mode)
5699 == GET_MODE_PRECISION (mode)))
5700 {
5701 op0 = expand_compound_operation (op0);
5702 return simplify_gen_unary (NEG, mode,
5703 gen_lowpart (mode, op0),
5704 mode);
5705 }
5706
5707 else if (STORE_FLAG_VALUE == 1
5708 && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
5709 && op1 == const0_rtx
5710 && mode == GET_MODE (op0)
5711 && nonzero_bits (op0, mode) == 1)
5712 {
5713 op0 = expand_compound_operation (op0);
5714 return simplify_gen_binary (XOR, mode,
5715 gen_lowpart (mode, op0),
5716 const1_rtx);
5717 }
5718
5719 else if (STORE_FLAG_VALUE == 1
5720 && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
5721 && op1 == const0_rtx
5722 && mode == GET_MODE (op0)
5723 && (num_sign_bit_copies (op0, mode)
5724 == GET_MODE_PRECISION (mode)))
5725 {
5726 op0 = expand_compound_operation (op0);
5727 return plus_constant (mode, gen_lowpart (mode, op0), 1);
5728 }
5729
5730 /* If STORE_FLAG_VALUE is -1, we have cases similar to
5731 those above. */
5732 if (in_cond)
5733 ;
5734
5735 else if (STORE_FLAG_VALUE == -1
5736 && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
5737 && op1 == const0_rtx
5738 && (num_sign_bit_copies (op0, mode)
5739 == GET_MODE_PRECISION (mode)))
5740 return gen_lowpart (mode,
5741 expand_compound_operation (op0));
5742
5743 else if (STORE_FLAG_VALUE == -1
5744 && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
5745 && op1 == const0_rtx
5746 && mode == GET_MODE (op0)
5747 && nonzero_bits (op0, mode) == 1)
5748 {
5749 op0 = expand_compound_operation (op0);
5750 return simplify_gen_unary (NEG, mode,
5751 gen_lowpart (mode, op0),
5752 mode);
5753 }
5754
5755 else if (STORE_FLAG_VALUE == -1
5756 && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
5757 && op1 == const0_rtx
5758 && mode == GET_MODE (op0)
5759 && (num_sign_bit_copies (op0, mode)
5760 == GET_MODE_PRECISION (mode)))
5761 {
5762 op0 = expand_compound_operation (op0);
5763 return simplify_gen_unary (NOT, mode,
5764 gen_lowpart (mode, op0),
5765 mode);
5766 }
5767
5768 /* If X is 0/1, (eq X 0) is X-1. */
5769 else if (STORE_FLAG_VALUE == -1
5770 && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
5771 && op1 == const0_rtx
5772 && mode == GET_MODE (op0)
5773 && nonzero_bits (op0, mode) == 1)
5774 {
5775 op0 = expand_compound_operation (op0);
5776 return plus_constant (mode, gen_lowpart (mode, op0), -1);
5777 }
5778
5779 /* If STORE_FLAG_VALUE says to just test the sign bit and X has just
5780 one bit that might be nonzero, we can convert (ne x 0) to
5781 (ashift x c) where C puts the bit in the sign bit. Remove any
5782 AND with STORE_FLAG_VALUE when we are done, since we are only
5783 going to test the sign bit. */
5784 if (new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
5785 && HWI_COMPUTABLE_MODE_P (mode)
5786 && val_signbit_p (mode, STORE_FLAG_VALUE)
5787 && op1 == const0_rtx
5788 && mode == GET_MODE (op0)
5789 && (i = exact_log2 (nonzero_bits (op0, mode))) >= 0)
5790 {
5791 x = simplify_shift_const (NULL_RTX, ASHIFT, mode,
5792 expand_compound_operation (op0),
5793 GET_MODE_PRECISION (mode) - 1 - i);
5794 if (GET_CODE (x) == AND && XEXP (x, 1) == const_true_rtx)
5795 return XEXP (x, 0);
5796 else
5797 return x;
5798 }
5799
5800 /* If the code changed, return a whole new comparison. */
5801 if (new_code != code)
5802 return gen_rtx_fmt_ee (new_code, mode, op0, op1);
5803
5804 /* Otherwise, keep this operation, but maybe change its operands.
5805 This also converts (ne (compare FOO BAR) 0) to (ne FOO BAR). */
5806 SUBST (XEXP (x, 0), op0);
5807 SUBST (XEXP (x, 1), op1);
5808 }
5809 break;
5810
5811 case IF_THEN_ELSE:
5812 return simplify_if_then_else (x);
5813
5814 case ZERO_EXTRACT:
5815 case SIGN_EXTRACT:
5816 case ZERO_EXTEND:
5817 case SIGN_EXTEND:
5818 /* If we are processing SET_DEST, we are done. */
5819 if (in_dest)
5820 return x;
5821
5822 return expand_compound_operation (x);
5823
5824 case SET:
5825 return simplify_set (x);
5826
5827 case AND:
5828 case IOR:
5829 return simplify_logical (x);
5830
5831 case ASHIFT:
5832 case LSHIFTRT:
5833 case ASHIFTRT:
5834 case ROTATE:
5835 case ROTATERT:
5836 /* If this is a shift by a constant amount, simplify it. */
5837 if (CONST_INT_P (XEXP (x, 1)))
5838 return simplify_shift_const (x, code, mode, XEXP (x, 0),
5839 INTVAL (XEXP (x, 1)));
5840
5841 else if (SHIFT_COUNT_TRUNCATED && !REG_P (XEXP (x, 1)))
5842 SUBST (XEXP (x, 1),
5843 force_to_mode (XEXP (x, 1), GET_MODE (XEXP (x, 1)),
5844 ((unsigned HOST_WIDE_INT) 1
5845 << exact_log2 (GET_MODE_BITSIZE (GET_MODE (x))))
5846 - 1,
5847 0));
5848 break;
5849
5850 default:
5851 break;
5852 }
5853
5854 return x;
5855 }
5856 \f
5857 /* Simplify X, an IF_THEN_ELSE expression. Return the new expression. */
5858
5859 static rtx
5860 simplify_if_then_else (rtx x)
5861 {
5862 enum machine_mode mode = GET_MODE (x);
5863 rtx cond = XEXP (x, 0);
5864 rtx true_rtx = XEXP (x, 1);
5865 rtx false_rtx = XEXP (x, 2);
5866 enum rtx_code true_code = GET_CODE (cond);
5867 int comparison_p = COMPARISON_P (cond);
5868 rtx temp;
5869 int i;
5870 enum rtx_code false_code;
5871 rtx reversed;
5872
5873 /* Simplify storing of the truth value. */
5874 if (comparison_p && true_rtx == const_true_rtx && false_rtx == const0_rtx)
5875 return simplify_gen_relational (true_code, mode, VOIDmode,
5876 XEXP (cond, 0), XEXP (cond, 1));
5877
5878 /* Also when the truth value has to be reversed. */
5879 if (comparison_p
5880 && true_rtx == const0_rtx && false_rtx == const_true_rtx
5881 && (reversed = reversed_comparison (cond, mode)))
5882 return reversed;
5883
5884 /* Sometimes we can simplify the arm of an IF_THEN_ELSE if a register used
5885 in it is being compared against certain values. Get the true and false
5886 comparisons and see if that says anything about the value of each arm. */
5887
5888 if (comparison_p
5889 && ((false_code = reversed_comparison_code (cond, NULL))
5890 != UNKNOWN)
5891 && REG_P (XEXP (cond, 0)))
5892 {
5893 HOST_WIDE_INT nzb;
5894 rtx from = XEXP (cond, 0);
5895 rtx true_val = XEXP (cond, 1);
5896 rtx false_val = true_val;
5897 int swapped = 0;
5898
5899 /* If FALSE_CODE is EQ, swap the codes and arms. */
5900
5901 if (false_code == EQ)
5902 {
5903 swapped = 1, true_code = EQ, false_code = NE;
5904 temp = true_rtx, true_rtx = false_rtx, false_rtx = temp;
5905 }
5906
5907 /* If we are comparing against zero and the expression being tested has
5908 only a single bit that might be nonzero, that is its value when it is
5909 not equal to zero. Similarly if it is known to be -1 or 0. */
5910
5911 if (true_code == EQ && true_val == const0_rtx
5912 && exact_log2 (nzb = nonzero_bits (from, GET_MODE (from))) >= 0)
5913 {
5914 false_code = EQ;
5915 false_val = gen_int_mode (nzb, GET_MODE (from));
5916 }
5917 else if (true_code == EQ && true_val == const0_rtx
5918 && (num_sign_bit_copies (from, GET_MODE (from))
5919 == GET_MODE_PRECISION (GET_MODE (from))))
5920 {
5921 false_code = EQ;
5922 false_val = constm1_rtx;
5923 }
5924
5925 /* Now simplify an arm if we know the value of the register in the
5926 branch and it is used in the arm. Be careful due to the potential
5927 of locally-shared RTL. */
5928
5929 if (reg_mentioned_p (from, true_rtx))
5930 true_rtx = subst (known_cond (copy_rtx (true_rtx), true_code,
5931 from, true_val),
5932 pc_rtx, pc_rtx, 0, 0, 0);
5933 if (reg_mentioned_p (from, false_rtx))
5934 false_rtx = subst (known_cond (copy_rtx (false_rtx), false_code,
5935 from, false_val),
5936 pc_rtx, pc_rtx, 0, 0, 0);
5937
5938 SUBST (XEXP (x, 1), swapped ? false_rtx : true_rtx);
5939 SUBST (XEXP (x, 2), swapped ? true_rtx : false_rtx);
5940
5941 true_rtx = XEXP (x, 1);
5942 false_rtx = XEXP (x, 2);
5943 true_code = GET_CODE (cond);
5944 }
5945
5946 /* If we have (if_then_else FOO (pc) (label_ref BAR)) and FOO can be
5947 reversed, do so to avoid needing two sets of patterns for
5948 subtract-and-branch insns. Similarly if we have a constant in the true
5949 arm, the false arm is the same as the first operand of the comparison, or
5950 the false arm is more complicated than the true arm. */
5951
5952 if (comparison_p
5953 && reversed_comparison_code (cond, NULL) != UNKNOWN
5954 && (true_rtx == pc_rtx
5955 || (CONSTANT_P (true_rtx)
5956 && !CONST_INT_P (false_rtx) && false_rtx != pc_rtx)
5957 || true_rtx == const0_rtx
5958 || (OBJECT_P (true_rtx) && !OBJECT_P (false_rtx))
5959 || (GET_CODE (true_rtx) == SUBREG && OBJECT_P (SUBREG_REG (true_rtx))
5960 && !OBJECT_P (false_rtx))
5961 || reg_mentioned_p (true_rtx, false_rtx)
5962 || rtx_equal_p (false_rtx, XEXP (cond, 0))))
5963 {
5964 true_code = reversed_comparison_code (cond, NULL);
5965 SUBST (XEXP (x, 0), reversed_comparison (cond, GET_MODE (cond)));
5966 SUBST (XEXP (x, 1), false_rtx);
5967 SUBST (XEXP (x, 2), true_rtx);
5968
5969 temp = true_rtx, true_rtx = false_rtx, false_rtx = temp;
5970 cond = XEXP (x, 0);
5971
5972 /* It is possible that the conditional has been simplified out. */
5973 true_code = GET_CODE (cond);
5974 comparison_p = COMPARISON_P (cond);
5975 }
5976
5977 /* If the two arms are identical, we don't need the comparison. */
5978
5979 if (rtx_equal_p (true_rtx, false_rtx) && ! side_effects_p (cond))
5980 return true_rtx;
5981
5982 /* Convert a == b ? b : a to "a". */
5983 if (true_code == EQ && ! side_effects_p (cond)
5984 && !HONOR_NANS (mode)
5985 && rtx_equal_p (XEXP (cond, 0), false_rtx)
5986 && rtx_equal_p (XEXP (cond, 1), true_rtx))
5987 return false_rtx;
5988 else if (true_code == NE && ! side_effects_p (cond)
5989 && !HONOR_NANS (mode)
5990 && rtx_equal_p (XEXP (cond, 0), true_rtx)
5991 && rtx_equal_p (XEXP (cond, 1), false_rtx))
5992 return true_rtx;
5993
5994 /* Look for cases where we have (abs x) or (neg (abs X)). */
5995
5996 if (GET_MODE_CLASS (mode) == MODE_INT
5997 && comparison_p
5998 && XEXP (cond, 1) == const0_rtx
5999 && GET_CODE (false_rtx) == NEG
6000 && rtx_equal_p (true_rtx, XEXP (false_rtx, 0))
6001 && rtx_equal_p (true_rtx, XEXP (cond, 0))
6002 && ! side_effects_p (true_rtx))
6003 switch (true_code)
6004 {
6005 case GT:
6006 case GE:
6007 return simplify_gen_unary (ABS, mode, true_rtx, mode);
6008 case LT:
6009 case LE:
6010 return
6011 simplify_gen_unary (NEG, mode,
6012 simplify_gen_unary (ABS, mode, true_rtx, mode),
6013 mode);
6014 default:
6015 break;
6016 }
6017
6018 /* Look for MIN or MAX. */
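  /* For example, in an integral mode (if_then_else (gt A B) A B) becomes
     (smax A B) and (if_then_else (ltu A B) A B) becomes (umin A B),
     provided the condition has no side effects.  */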
6019
6020 if ((! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
6021 && comparison_p
6022 && rtx_equal_p (XEXP (cond, 0), true_rtx)
6023 && rtx_equal_p (XEXP (cond, 1), false_rtx)
6024 && ! side_effects_p (cond))
6025 switch (true_code)
6026 {
6027 case GE:
6028 case GT:
6029 return simplify_gen_binary (SMAX, mode, true_rtx, false_rtx);
6030 case LE:
6031 case LT:
6032 return simplify_gen_binary (SMIN, mode, true_rtx, false_rtx);
6033 case GEU:
6034 case GTU:
6035 return simplify_gen_binary (UMAX, mode, true_rtx, false_rtx);
6036 case LEU:
6037 case LTU:
6038 return simplify_gen_binary (UMIN, mode, true_rtx, false_rtx);
6039 default:
6040 break;
6041 }
6042
6043 /* If we have (if_then_else COND (OP Z C1) Z) and OP is an identity when its
6044 second operand is zero, this can be done as (OP Z (mult COND C2)) where
6045 C2 = C1 * STORE_FLAG_VALUE. Similarly if OP has an outer ZERO_EXTEND or
6046 SIGN_EXTEND as long as Z is already extended (so we don't destroy it).
6047 We can do this kind of thing in some cases when STORE_FLAG_VALUE is
6048 neither 1 nor -1, but it isn't worth checking for. */
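  /* A hypothetical sketch (assuming STORE_FLAG_VALUE == 1):
         (if_then_else (ne A (const_int 0)) (plus Z (const_int 4)) Z)
     becomes
         (plus Z (mult (ne A (const_int 0)) (const_int 4))),
     since the multiplier is 4 when the condition holds and 0 otherwise.  */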
6049
6050 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
6051 && comparison_p
6052 && GET_MODE_CLASS (mode) == MODE_INT
6053 && ! side_effects_p (x))
6054 {
6055 rtx t = make_compound_operation (true_rtx, SET);
6056 rtx f = make_compound_operation (false_rtx, SET);
6057 rtx cond_op0 = XEXP (cond, 0);
6058 rtx cond_op1 = XEXP (cond, 1);
6059 enum rtx_code op = UNKNOWN, extend_op = UNKNOWN;
6060 enum machine_mode m = mode;
6061 rtx z = 0, c1 = NULL_RTX;
6062
6063 if ((GET_CODE (t) == PLUS || GET_CODE (t) == MINUS
6064 || GET_CODE (t) == IOR || GET_CODE (t) == XOR
6065 || GET_CODE (t) == ASHIFT
6066 || GET_CODE (t) == LSHIFTRT || GET_CODE (t) == ASHIFTRT)
6067 && rtx_equal_p (XEXP (t, 0), f))
6068 c1 = XEXP (t, 1), op = GET_CODE (t), z = f;
6069
6070 /* If an identity-zero op is commutative, check whether there
6071 would be a match if we swapped the operands. */
6072 else if ((GET_CODE (t) == PLUS || GET_CODE (t) == IOR
6073 || GET_CODE (t) == XOR)
6074 && rtx_equal_p (XEXP (t, 1), f))
6075 c1 = XEXP (t, 0), op = GET_CODE (t), z = f;
6076 else if (GET_CODE (t) == SIGN_EXTEND
6077 && (GET_CODE (XEXP (t, 0)) == PLUS
6078 || GET_CODE (XEXP (t, 0)) == MINUS
6079 || GET_CODE (XEXP (t, 0)) == IOR
6080 || GET_CODE (XEXP (t, 0)) == XOR
6081 || GET_CODE (XEXP (t, 0)) == ASHIFT
6082 || GET_CODE (XEXP (t, 0)) == LSHIFTRT
6083 || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
6084 && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
6085 && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
6086 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
6087 && (num_sign_bit_copies (f, GET_MODE (f))
6088 > (unsigned int)
6089 (GET_MODE_PRECISION (mode)
6090 - GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (t, 0), 0))))))
6091 {
6092 c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
6093 extend_op = SIGN_EXTEND;
6094 m = GET_MODE (XEXP (t, 0));
6095 }
6096 else if (GET_CODE (t) == SIGN_EXTEND
6097 && (GET_CODE (XEXP (t, 0)) == PLUS
6098 || GET_CODE (XEXP (t, 0)) == IOR
6099 || GET_CODE (XEXP (t, 0)) == XOR)
6100 && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
6101 && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
6102 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
6103 && (num_sign_bit_copies (f, GET_MODE (f))
6104 > (unsigned int)
6105 (GET_MODE_PRECISION (mode)
6106 - GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (t, 0), 1))))))
6107 {
6108 c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
6109 extend_op = SIGN_EXTEND;
6110 m = GET_MODE (XEXP (t, 0));
6111 }
6112 else if (GET_CODE (t) == ZERO_EXTEND
6113 && (GET_CODE (XEXP (t, 0)) == PLUS
6114 || GET_CODE (XEXP (t, 0)) == MINUS
6115 || GET_CODE (XEXP (t, 0)) == IOR
6116 || GET_CODE (XEXP (t, 0)) == XOR
6117 || GET_CODE (XEXP (t, 0)) == ASHIFT
6118 || GET_CODE (XEXP (t, 0)) == LSHIFTRT
6119 || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
6120 && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
6121 && HWI_COMPUTABLE_MODE_P (mode)
6122 && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
6123 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
6124 && ((nonzero_bits (f, GET_MODE (f))
6125 & ~GET_MODE_MASK (GET_MODE (XEXP (XEXP (t, 0), 0))))
6126 == 0))
6127 {
6128 c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
6129 extend_op = ZERO_EXTEND;
6130 m = GET_MODE (XEXP (t, 0));
6131 }
6132 else if (GET_CODE (t) == ZERO_EXTEND
6133 && (GET_CODE (XEXP (t, 0)) == PLUS
6134 || GET_CODE (XEXP (t, 0)) == IOR
6135 || GET_CODE (XEXP (t, 0)) == XOR)
6136 && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
6137 && HWI_COMPUTABLE_MODE_P (mode)
6138 && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
6139 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
6140 && ((nonzero_bits (f, GET_MODE (f))
6141 & ~GET_MODE_MASK (GET_MODE (XEXP (XEXP (t, 0), 1))))
6142 == 0))
6143 {
6144 c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
6145 extend_op = ZERO_EXTEND;
6146 m = GET_MODE (XEXP (t, 0));
6147 }
6148
6149 if (z)
6150 {
6151 temp = subst (simplify_gen_relational (true_code, m, VOIDmode,
6152 cond_op0, cond_op1),
6153 pc_rtx, pc_rtx, 0, 0, 0);
6154 temp = simplify_gen_binary (MULT, m, temp,
6155 simplify_gen_binary (MULT, m, c1,
6156 const_true_rtx));
6157 temp = subst (temp, pc_rtx, pc_rtx, 0, 0, 0);
6158 temp = simplify_gen_binary (op, m, gen_lowpart (m, z), temp);
6159
6160 if (extend_op != UNKNOWN)
6161 temp = simplify_gen_unary (extend_op, mode, temp, m);
6162
6163 return temp;
6164 }
6165 }
6166
6167 /* If we have (if_then_else (ne A 0) C1 0) and either A is known to be 0 or
6168 1 and C1 is a single bit or A is known to be 0 or -1 and C1 is the
6169 negation of a single bit, we can convert this operation to a shift. We
6170 can actually do this more generally, but it doesn't seem worth it. */
6171
6172 if (true_code == NE && XEXP (cond, 1) == const0_rtx
6173 && false_rtx == const0_rtx && CONST_INT_P (true_rtx)
6174 && ((1 == nonzero_bits (XEXP (cond, 0), mode)
6175 && (i = exact_log2 (UINTVAL (true_rtx))) >= 0)
6176 || ((num_sign_bit_copies (XEXP (cond, 0), mode)
6177 == GET_MODE_PRECISION (mode))
6178 && (i = exact_log2 (-UINTVAL (true_rtx))) >= 0)))
6179 return
6180 simplify_shift_const (NULL_RTX, ASHIFT, mode,
6181 gen_lowpart (mode, XEXP (cond, 0)), i);
6182
6183 /* (IF_THEN_ELSE (NE REG 0) (0) (8)) is REG for nonzero_bits (REG) == 8. */
6184 if (true_code == NE && XEXP (cond, 1) == const0_rtx
6185 && false_rtx == const0_rtx && CONST_INT_P (true_rtx)
6186 && GET_MODE (XEXP (cond, 0)) == mode
6187 && (UINTVAL (true_rtx) & GET_MODE_MASK (mode))
6188 == nonzero_bits (XEXP (cond, 0), mode)
6189 && (i = exact_log2 (UINTVAL (true_rtx) & GET_MODE_MASK (mode))) >= 0)
6190 return XEXP (cond, 0);
6191
6192 return x;
6193 }
6194 \f
6195 /* Simplify X, a SET expression. Return the new expression. */
6196
6197 static rtx
6198 simplify_set (rtx x)
6199 {
6200 rtx src = SET_SRC (x);
6201 rtx dest = SET_DEST (x);
6202 enum machine_mode mode
6203 = GET_MODE (src) != VOIDmode ? GET_MODE (src) : GET_MODE (dest);
6204 rtx other_insn;
6205 rtx *cc_use;
6206
6207 /* (set (pc) (return)) gets written as (return). */
6208 if (GET_CODE (dest) == PC && ANY_RETURN_P (src))
6209 return src;
6210
6211 /* Now that we know for sure which bits of SRC we are using, see if we can
6212 simplify the expression for the object knowing that we only need the
6213 low-order bits. */
6214
6215 if (GET_MODE_CLASS (mode) == MODE_INT && HWI_COMPUTABLE_MODE_P (mode))
6216 {
6217 src = force_to_mode (src, mode, ~(unsigned HOST_WIDE_INT) 0, 0);
6218 SUBST (SET_SRC (x), src);
6219 }
6220
6221 /* If we are setting CC0 or if the source is a COMPARE, look for the use of
6222 the comparison result and try to simplify it unless we already have used
6223 undobuf.other_insn. */
6224 if ((GET_MODE_CLASS (mode) == MODE_CC
6225 || GET_CODE (src) == COMPARE
6226 || CC0_P (dest))
6227 && (cc_use = find_single_use (dest, subst_insn, &other_insn)) != 0
6228 && (undobuf.other_insn == 0 || other_insn == undobuf.other_insn)
6229 && COMPARISON_P (*cc_use)
6230 && rtx_equal_p (XEXP (*cc_use, 0), dest))
6231 {
6232 enum rtx_code old_code = GET_CODE (*cc_use);
6233 enum rtx_code new_code;
6234 rtx op0, op1, tmp;
6235 int other_changed = 0;
6236 rtx inner_compare = NULL_RTX;
6237 enum machine_mode compare_mode = GET_MODE (dest);
6238
6239 if (GET_CODE (src) == COMPARE)
6240 {
6241 op0 = XEXP (src, 0), op1 = XEXP (src, 1);
6242 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
6243 {
6244 inner_compare = op0;
6245 op0 = XEXP (inner_compare, 0), op1 = XEXP (inner_compare, 1);
6246 }
6247 }
6248 else
6249 op0 = src, op1 = CONST0_RTX (GET_MODE (src));
6250
6251 tmp = simplify_relational_operation (old_code, compare_mode, VOIDmode,
6252 op0, op1);
6253 if (!tmp)
6254 new_code = old_code;
6255 else if (!CONSTANT_P (tmp))
6256 {
6257 new_code = GET_CODE (tmp);
6258 op0 = XEXP (tmp, 0);
6259 op1 = XEXP (tmp, 1);
6260 }
6261 else
6262 {
6263 rtx pat = PATTERN (other_insn);
6264 undobuf.other_insn = other_insn;
6265 SUBST (*cc_use, tmp);
6266
6267 /* Attempt to simplify CC user. */
6268 if (GET_CODE (pat) == SET)
6269 {
6270 rtx new_rtx = simplify_rtx (SET_SRC (pat));
6271 if (new_rtx != NULL_RTX)
6272 SUBST (SET_SRC (pat), new_rtx);
6273 }
6274
6275 /* Convert X into a no-op move. */
6276 SUBST (SET_DEST (x), pc_rtx);
6277 SUBST (SET_SRC (x), pc_rtx);
6278 return x;
6279 }
6280
6281 /* Simplify our comparison, if possible. */
6282 new_code = simplify_comparison (new_code, &op0, &op1);
6283
6284 #ifdef SELECT_CC_MODE
6285 /* If this machine has CC modes other than CCmode, check to see if we
6286 need to use a different CC mode here. */
6287 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
6288 compare_mode = GET_MODE (op0);
6289 else if (inner_compare
6290 && GET_MODE_CLASS (GET_MODE (inner_compare)) == MODE_CC
6291 && new_code == old_code
6292 && op0 == XEXP (inner_compare, 0)
6293 && op1 == XEXP (inner_compare, 1))
6294 compare_mode = GET_MODE (inner_compare);
6295 else
6296 compare_mode = SELECT_CC_MODE (new_code, op0, op1);
6297
6298 #ifndef HAVE_cc0
6299 /* If the mode changed, we have to change SET_DEST, the mode in the
6300 compare, and the mode in the place SET_DEST is used. If SET_DEST is
6301 a hard register, just build new versions with the proper mode. If it
6302 is a pseudo, we lose unless it is the only time we set the pseudo, in
6303 which case we can safely change its mode. */
6304 if (compare_mode != GET_MODE (dest))
6305 {
6306 if (can_change_dest_mode (dest, 0, compare_mode))
6307 {
6308 unsigned int regno = REGNO (dest);
6309 rtx new_dest;
6310
6311 if (regno < FIRST_PSEUDO_REGISTER)
6312 new_dest = gen_rtx_REG (compare_mode, regno);
6313 else
6314 {
6315 SUBST_MODE (regno_reg_rtx[regno], compare_mode);
6316 new_dest = regno_reg_rtx[regno];
6317 }
6318
6319 SUBST (SET_DEST (x), new_dest);
6320 SUBST (XEXP (*cc_use, 0), new_dest);
6321 other_changed = 1;
6322
6323 dest = new_dest;
6324 }
6325 }
6326 #endif /* cc0 */
6327 #endif /* SELECT_CC_MODE */
6328
6329 /* If the code changed, we have to build a new comparison in
6330 undobuf.other_insn. */
6331 if (new_code != old_code)
6332 {
6333 int other_changed_previously = other_changed;
6334 unsigned HOST_WIDE_INT mask;
6335 rtx old_cc_use = *cc_use;
6336
6337 SUBST (*cc_use, gen_rtx_fmt_ee (new_code, GET_MODE (*cc_use),
6338 dest, const0_rtx));
6339 other_changed = 1;
6340
6341 /* If the only change we made was to change an EQ into an NE or
6342 vice versa, OP0 has only one bit that might be nonzero, and OP1
6343 is zero, check if changing the user of the condition code will
6344 produce a valid insn. If it won't, we can keep the original code
6345 in that insn by surrounding our operation with an XOR. */
6346
6347 if (((old_code == NE && new_code == EQ)
6348 || (old_code == EQ && new_code == NE))
6349 && ! other_changed_previously && op1 == const0_rtx
6350 && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
6351 && exact_log2 (mask = nonzero_bits (op0, GET_MODE (op0))) >= 0)
6352 {
6353 rtx pat = PATTERN (other_insn), note = 0;
6354
6355 if ((recog_for_combine (&pat, other_insn, &note) < 0
6356 && ! check_asm_operands (pat)))
6357 {
6358 *cc_use = old_cc_use;
6359 other_changed = 0;
6360
6361 op0 = simplify_gen_binary (XOR, GET_MODE (op0),
6362 op0, GEN_INT (mask));
6363 }
6364 }
6365 }
6366
6367 if (other_changed)
6368 undobuf.other_insn = other_insn;
6369
6370 /* Otherwise, if we didn't previously have a COMPARE in the
6371 correct mode, we need one. */
6372 if (GET_CODE (src) != COMPARE || GET_MODE (src) != compare_mode)
6373 {
6374 SUBST (SET_SRC (x), gen_rtx_COMPARE (compare_mode, op0, op1));
6375 src = SET_SRC (x);
6376 }
6377 else if (GET_MODE (op0) == compare_mode && op1 == const0_rtx)
6378 {
6379 SUBST (SET_SRC (x), op0);
6380 src = SET_SRC (x);
6381 }
6382 /* Otherwise, update the COMPARE if needed. */
6383 else if (XEXP (src, 0) != op0 || XEXP (src, 1) != op1)
6384 {
6385 SUBST (SET_SRC (x), gen_rtx_COMPARE (compare_mode, op0, op1));
6386 src = SET_SRC (x);
6387 }
6388 }
6389 else
6390 {
6391 /* Get SET_SRC in a form where we have placed back any
6392 compound expressions. Then do the checks below. */
6393 src = make_compound_operation (src, SET);
6394 SUBST (SET_SRC (x), src);
6395 }
6396
6397 /* If we have (set x (subreg:m1 (op:m2 ...) 0)) with OP being some operation,
6398 and X being a REG or (subreg (reg)), we may be able to convert this to
6399 (set (subreg:m2 x) (op)).
6400
6401 We can always do this if M1 is narrower than M2 because that means that
6402 we only care about the low bits of the result.
6403
6404 However, on machines without WORD_REGISTER_OPERATIONS defined, we cannot
6405 perform a narrower operation than requested since the high-order bits will
6406 be undefined. On machines where it is defined, this transformation is safe
6407 as long as M1 and M2 have the same number of words. */
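  /* Illustrative sketch (arbitrary pseudo numbers; QImode is narrower than
     SImode):
         (set (reg:QI 100) (subreg:QI (plus:SI (reg:SI 101) (const_int 1)) 0))
     can become
         (set (subreg:SI (reg:QI 100) 0) (plus:SI (reg:SI 101) (const_int 1))),
     since only the low byte of the result is needed.  */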
6408
6409 if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
6410 && !OBJECT_P (SUBREG_REG (src))
6411 && (((GET_MODE_SIZE (GET_MODE (src)) + (UNITS_PER_WORD - 1))
6412 / UNITS_PER_WORD)
6413 == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))
6414 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))
6415 #ifndef WORD_REGISTER_OPERATIONS
6416 && (GET_MODE_SIZE (GET_MODE (src))
6417 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (src))))
6418 #endif
6419 #ifdef CANNOT_CHANGE_MODE_CLASS
6420 && ! (REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER
6421 && REG_CANNOT_CHANGE_MODE_P (REGNO (dest),
6422 GET_MODE (SUBREG_REG (src)),
6423 GET_MODE (src)))
6424 #endif
6425 && (REG_P (dest)
6426 || (GET_CODE (dest) == SUBREG
6427 && REG_P (SUBREG_REG (dest)))))
6428 {
6429 SUBST (SET_DEST (x),
6430 gen_lowpart (GET_MODE (SUBREG_REG (src)),
6431 dest));
6432 SUBST (SET_SRC (x), SUBREG_REG (src));
6433
6434 src = SET_SRC (x), dest = SET_DEST (x);
6435 }
6436
6437 #ifdef HAVE_cc0
6438 /* If we have (set (cc0) (subreg ...)), we try to remove the subreg
6439 in SRC. */
6440 if (dest == cc0_rtx
6441 && GET_CODE (src) == SUBREG
6442 && subreg_lowpart_p (src)
6443 && (GET_MODE_PRECISION (GET_MODE (src))
6444 < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (src)))))
6445 {
6446 rtx inner = SUBREG_REG (src);
6447 enum machine_mode inner_mode = GET_MODE (inner);
6448
6449 /* Here we make sure that we don't have a sign bit on. */
6450 if (val_signbit_known_clear_p (GET_MODE (src),
6451 nonzero_bits (inner, inner_mode)))
6452 {
6453 SUBST (SET_SRC (x), inner);
6454 src = SET_SRC (x);
6455 }
6456 }
6457 #endif
6458
6459 #ifdef LOAD_EXTEND_OP
6460 /* If we have (set FOO (subreg:M (mem:N BAR) 0)) with M wider than N, this
6461 would require a paradoxical subreg. Replace the subreg with a
6462 zero_extend to avoid the reload that would otherwise be required. */
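  /* For instance, on a target where LOAD_EXTEND_OP (HImode) is ZERO_EXTEND,
         (set (reg:SI 100) (subreg:SI (mem:HI ADDR) 0))
     is rewritten as
         (set (reg:SI 100) (zero_extend:SI (mem:HI ADDR)))
     (the register number and ADDR are illustrative only).  */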
6463
6464 if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
6465 && INTEGRAL_MODE_P (GET_MODE (SUBREG_REG (src)))
6466 && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (src))) != UNKNOWN
6467 && SUBREG_BYTE (src) == 0
6468 && paradoxical_subreg_p (src)
6469 && MEM_P (SUBREG_REG (src)))
6470 {
6471 SUBST (SET_SRC (x),
6472 gen_rtx_fmt_e (LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (src))),
6473 GET_MODE (src), SUBREG_REG (src)));
6474
6475 src = SET_SRC (x);
6476 }
6477 #endif
6478
6479 /* If we don't have a conditional move, SET_SRC is an IF_THEN_ELSE, and we
6480 are comparing an item known to be 0 or -1 against 0, use a logical
6481 operation instead. Check for one of the arms being an IOR of the other
6482 arm with some value. We compute three terms to be IOR'ed together. In
6483 practice, at most two will be nonzero. Then we do the IOR's. */
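  /* A rough sketch: if A is known to be 0 or -1, then
         (if_then_else (ne A (const_int 0)) T F)
     can be computed as
         (ior (and A T) (and (not A) F)),
     because A acts as an all-ones/all-zeros mask; TERM1 below handles the
     IOR-of-the-other-arm case and is zero otherwise.  */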
6484
6485 if (GET_CODE (dest) != PC
6486 && GET_CODE (src) == IF_THEN_ELSE
6487 && GET_MODE_CLASS (GET_MODE (src)) == MODE_INT
6488 && (GET_CODE (XEXP (src, 0)) == EQ || GET_CODE (XEXP (src, 0)) == NE)
6489 && XEXP (XEXP (src, 0), 1) == const0_rtx
6490 && GET_MODE (src) == GET_MODE (XEXP (XEXP (src, 0), 0))
6491 #ifdef HAVE_conditional_move
6492 && ! can_conditionally_move_p (GET_MODE (src))
6493 #endif
6494 && (num_sign_bit_copies (XEXP (XEXP (src, 0), 0),
6495 GET_MODE (XEXP (XEXP (src, 0), 0)))
6496 == GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (src, 0), 0))))
6497 && ! side_effects_p (src))
6498 {
6499 rtx true_rtx = (GET_CODE (XEXP (src, 0)) == NE
6500 ? XEXP (src, 1) : XEXP (src, 2));
6501 rtx false_rtx = (GET_CODE (XEXP (src, 0)) == NE
6502 ? XEXP (src, 2) : XEXP (src, 1));
6503 rtx term1 = const0_rtx, term2, term3;
6504
6505 if (GET_CODE (true_rtx) == IOR
6506 && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
6507 term1 = false_rtx, true_rtx = XEXP (true_rtx, 1), false_rtx = const0_rtx;
6508 else if (GET_CODE (true_rtx) == IOR
6509 && rtx_equal_p (XEXP (true_rtx, 1), false_rtx))
6510 term1 = false_rtx, true_rtx = XEXP (true_rtx, 0), false_rtx = const0_rtx;
6511 else if (GET_CODE (false_rtx) == IOR
6512 && rtx_equal_p (XEXP (false_rtx, 0), true_rtx))
6513 term1 = true_rtx, false_rtx = XEXP (false_rtx, 1), true_rtx = const0_rtx;
6514 else if (GET_CODE (false_rtx) == IOR
6515 && rtx_equal_p (XEXP (false_rtx, 1), true_rtx))
6516 term1 = true_rtx, false_rtx = XEXP (false_rtx, 0), true_rtx = const0_rtx;
6517
6518 term2 = simplify_gen_binary (AND, GET_MODE (src),
6519 XEXP (XEXP (src, 0), 0), true_rtx);
6520 term3 = simplify_gen_binary (AND, GET_MODE (src),
6521 simplify_gen_unary (NOT, GET_MODE (src),
6522 XEXP (XEXP (src, 0), 0),
6523 GET_MODE (src)),
6524 false_rtx);
6525
6526 SUBST (SET_SRC (x),
6527 simplify_gen_binary (IOR, GET_MODE (src),
6528 simplify_gen_binary (IOR, GET_MODE (src),
6529 term1, term2),
6530 term3));
6531
6532 src = SET_SRC (x);
6533 }
6534
6535 /* If either SRC or DEST is a CLOBBER of (const_int 0), make this
6536 whole thing fail. */
6537 if (GET_CODE (src) == CLOBBER && XEXP (src, 0) == const0_rtx)
6538 return src;
6539 else if (GET_CODE (dest) == CLOBBER && XEXP (dest, 0) == const0_rtx)
6540 return dest;
6541 else
6542 /* Convert this into a field assignment operation, if possible. */
6543 return make_field_assignment (x);
6544 }
6545 \f
6546 /* Simplify X, an AND, IOR, or XOR operation, and return the simplified
6547 result. */
6548
6549 static rtx
6550 simplify_logical (rtx x)
6551 {
6552 enum machine_mode mode = GET_MODE (x);
6553 rtx op0 = XEXP (x, 0);
6554 rtx op1 = XEXP (x, 1);
6555
6556 switch (GET_CODE (x))
6557 {
6558 case AND:
6559 /* We can call simplify_and_const_int only if we don't lose
6560 any (sign) bits when converting INTVAL (op1) to
6561 "unsigned HOST_WIDE_INT". */
6562 if (CONST_INT_P (op1)
6563 && (HWI_COMPUTABLE_MODE_P (mode)
6564 || INTVAL (op1) > 0))
6565 {
6566 x = simplify_and_const_int (x, mode, op0, INTVAL (op1));
6567 if (GET_CODE (x) != AND)
6568 return x;
6569
6570 op0 = XEXP (x, 0);
6571 op1 = XEXP (x, 1);
6572 }
6573
6574 /* If we have any of (and (ior A B) C) or (and (xor A B) C),
6575 apply the distributive law and then the inverse distributive
6576 law to see if things simplify. */
6577 if (GET_CODE (op0) == IOR || GET_CODE (op0) == XOR)
6578 {
6579 rtx result = distribute_and_simplify_rtx (x, 0);
6580 if (result)
6581 return result;
6582 }
6583 if (GET_CODE (op1) == IOR || GET_CODE (op1) == XOR)
6584 {
6585 rtx result = distribute_and_simplify_rtx (x, 1);
6586 if (result)
6587 return result;
6588 }
6589 break;
6590
6591 case IOR:
6592 /* If we have (ior (and A B) C), apply the distributive law and then
6593 the inverse distributive law to see if things simplify. */
6594
6595 if (GET_CODE (op0) == AND)
6596 {
6597 rtx result = distribute_and_simplify_rtx (x, 0);
6598 if (result)
6599 return result;
6600 }
6601
6602 if (GET_CODE (op1) == AND)
6603 {
6604 rtx result = distribute_and_simplify_rtx (x, 1);
6605 if (result)
6606 return result;
6607 }
6608 break;
6609
6610 default:
6611 gcc_unreachable ();
6612 }
6613
6614 return x;
6615 }
6616 \f
6617 /* We consider ZERO_EXTRACT, SIGN_EXTRACT, and SIGN_EXTEND as "compound
6618 operations" because they can be replaced with two more basic operations.
6619 ZERO_EXTEND is also considered "compound" because it can be replaced with
6620 an AND operation, which is simpler, though only one operation.
6621
6622 The function expand_compound_operation is called with an rtx expression
6623 and will convert it to the appropriate shifts and AND operations,
6624 simplifying at each stage.
6625
6626 The function make_compound_operation is called to convert an expression
6627 consisting of shifts and ANDs into the equivalent compound expression.
6628 It is the inverse of this function, loosely speaking. */
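/* For illustration, assuming 32-bit SImode and BITS_BIG_ENDIAN == 0:
   (sign_extract:SI X (const_int 8) (const_int 0)) expands to
   (ashiftrt:SI (ashift:SI X (const_int 24)) (const_int 24)), while
   (zero_extend:SI (subreg:QI Y 0)) typically ends up as
   (and:SI (subreg:SI Y 0) (const_int 255)).  */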
6629
6630 static rtx
6631 expand_compound_operation (rtx x)
6632 {
6633 unsigned HOST_WIDE_INT pos = 0, len;
6634 int unsignedp = 0;
6635 unsigned int modewidth;
6636 rtx tem;
6637
6638 switch (GET_CODE (x))
6639 {
6640 case ZERO_EXTEND:
6641 unsignedp = 1;
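/* ... fall through ... */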
6642 case SIGN_EXTEND:
6643 /* We can't necessarily use a const_int for a multiword mode;
6644 it depends on implicitly extending the value.
6645 Since we don't know the right way to extend it,
6646 we can't tell whether the implicit way is right.
6647
6648 Even for a mode that is no wider than a const_int,
6649 we can't win, because we need to sign extend one of its bits through
6650 the rest of it, and we don't know which bit. */
6651 if (CONST_INT_P (XEXP (x, 0)))
6652 return x;
6653
6654 /* Return if (subreg:MODE FROM 0) is not a safe replacement for
6655 (zero_extend:MODE FROM) or (sign_extend:MODE FROM). It is for any MEM
6656 because (SUBREG (MEM...)) is guaranteed to cause the MEM to be
6657 reloaded. If not for that, MEM's would very rarely be safe.
6658
6659 Reject MODEs bigger than a word, because we might not be able
6660 to reference a two-register group starting with an arbitrary register
6661 (and currently gen_lowpart might crash for a SUBREG). */
6662
6663 if (GET_MODE_SIZE (GET_MODE (XEXP (x, 0))) > UNITS_PER_WORD)
6664 return x;
6665
6666 /* Reject MODEs that aren't scalar integers because turning vector
6667 or complex modes into shifts causes problems. */
6668
6669 if (! SCALAR_INT_MODE_P (GET_MODE (XEXP (x, 0))))
6670 return x;
6671
6672 len = GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)));
6673 /* If the inner object has VOIDmode (the only way this can happen
6674 is if it is an ASM_OPERANDS), we can't do anything since we don't
6675 know how much masking to do. */
6676 if (len == 0)
6677 return x;
6678
6679 break;
6680
6681 case ZERO_EXTRACT:
6682 unsignedp = 1;
6683
6684 /* ... fall through ... */
6685
6686 case SIGN_EXTRACT:
6687 /* If the operand is a CLOBBER, just return it. */
6688 if (GET_CODE (XEXP (x, 0)) == CLOBBER)
6689 return XEXP (x, 0);
6690
6691 if (!CONST_INT_P (XEXP (x, 1))
6692 || !CONST_INT_P (XEXP (x, 2))
6693 || GET_MODE (XEXP (x, 0)) == VOIDmode)
6694 return x;
6695
6696 /* Reject MODEs that aren't scalar integers because turning vector
6697 or complex modes into shifts causes problems. */
6698
6699 if (! SCALAR_INT_MODE_P (GET_MODE (XEXP (x, 0))))
6700 return x;
6701
6702 len = INTVAL (XEXP (x, 1));
6703 pos = INTVAL (XEXP (x, 2));
6704
6705 /* This should stay within the object being extracted; fail otherwise. */
6706 if (len + pos > GET_MODE_PRECISION (GET_MODE (XEXP (x, 0))))
6707 return x;
6708
6709 if (BITS_BIG_ENDIAN)
6710 pos = GET_MODE_PRECISION (GET_MODE (XEXP (x, 0))) - len - pos;
6711
6712 break;
6713
6714 default:
6715 return x;
6716 }
6717 /* Convert sign extension to zero extension, if we know that the high
6718 bit is not set, as this is easier to optimize. It will be converted
6719 back to cheaper alternative in make_extraction. */
6720 if (GET_CODE (x) == SIGN_EXTEND
6721 && (HWI_COMPUTABLE_MODE_P (GET_MODE (x))
6722 && ((nonzero_bits (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
6723 & ~(((unsigned HOST_WIDE_INT)
6724 GET_MODE_MASK (GET_MODE (XEXP (x, 0))))
6725 >> 1))
6726 == 0)))
6727 {
6728 rtx temp = gen_rtx_ZERO_EXTEND (GET_MODE (x), XEXP (x, 0));
6729 rtx temp2 = expand_compound_operation (temp);
6730
6731 /* Make sure this is a profitable operation. */
6732 if (set_src_cost (x, optimize_this_for_speed_p)
6733 > set_src_cost (temp2, optimize_this_for_speed_p))
6734 return temp2;
6735 else if (set_src_cost (x, optimize_this_for_speed_p)
6736 > set_src_cost (temp, optimize_this_for_speed_p))
6737 return temp;
6738 else
6739 return x;
6740 }
6741
6742 /* We can optimize some special cases of ZERO_EXTEND. */
6743 if (GET_CODE (x) == ZERO_EXTEND)
6744 {
6745 /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI if we
6746 know that the last value didn't have any inappropriate bits
6747 set. */
6748 if (GET_CODE (XEXP (x, 0)) == TRUNCATE
6749 && GET_MODE (XEXP (XEXP (x, 0), 0)) == GET_MODE (x)
6750 && HWI_COMPUTABLE_MODE_P (GET_MODE (x))
6751 && (nonzero_bits (XEXP (XEXP (x, 0), 0), GET_MODE (x))
6752 & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
6753 return XEXP (XEXP (x, 0), 0);
6754
6755 /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)). */
6756 if (GET_CODE (XEXP (x, 0)) == SUBREG
6757 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == GET_MODE (x)
6758 && subreg_lowpart_p (XEXP (x, 0))
6759 && HWI_COMPUTABLE_MODE_P (GET_MODE (x))
6760 && (nonzero_bits (SUBREG_REG (XEXP (x, 0)), GET_MODE (x))
6761 & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
6762 return SUBREG_REG (XEXP (x, 0));
6763
6764 /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI when foo
6765 is a comparison and STORE_FLAG_VALUE permits. This is like
6766 the first case, but it works even when GET_MODE (x) is larger
6767 than HOST_WIDE_INT. */
6768 if (GET_CODE (XEXP (x, 0)) == TRUNCATE
6769 && GET_MODE (XEXP (XEXP (x, 0), 0)) == GET_MODE (x)
6770 && COMPARISON_P (XEXP (XEXP (x, 0), 0))
6771 && (GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
6772 <= HOST_BITS_PER_WIDE_INT)
6773 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
6774 return XEXP (XEXP (x, 0), 0);
6775
6776 /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)). */
6777 if (GET_CODE (XEXP (x, 0)) == SUBREG
6778 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == GET_MODE (x)
6779 && subreg_lowpart_p (XEXP (x, 0))
6780 && COMPARISON_P (SUBREG_REG (XEXP (x, 0)))
6781 && (GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
6782 <= HOST_BITS_PER_WIDE_INT)
6783 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
6784 return SUBREG_REG (XEXP (x, 0));
6785
6786 }
6787
6788 /* If we reach here, we want to return a pair of shifts. The inner
6789 shift is a left shift of BITSIZE - POS - LEN bits. The outer
6790 shift is a right shift of BITSIZE - LEN bits. It is arithmetic or
6791 logical depending on the value of UNSIGNEDP.
6792
6793 If this was a ZERO_EXTEND or ZERO_EXTRACT, this pair of shifts will be
6794 converted into an AND of a shift.
6795
6796 We must check for the case where the left shift would have a negative
6797 count. This can happen in a case like (x >> 31) & 255 on machines
6798 that can't shift by a constant. On those machines, we would first
6799 combine the shift with the AND to produce a variable-position
6800 extraction. Then the constant of 31 would be substituted in
6801 to produce such a position. */
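/* For example, with MODEWIDTH == 32, POS == 8 and LEN == 8 this produces
   (ashift X (const_int 16)) followed by a right shift by (const_int 24).  */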
6802
6803 modewidth = GET_MODE_PRECISION (GET_MODE (x));
6804 if (modewidth >= pos + len)
6805 {
6806 enum machine_mode mode = GET_MODE (x);
6807 tem = gen_lowpart (mode, XEXP (x, 0));
6808 if (!tem || GET_CODE (tem) == CLOBBER)
6809 return x;
6810 tem = simplify_shift_const (NULL_RTX, ASHIFT, mode,
6811 tem, modewidth - pos - len);
6812 tem = simplify_shift_const (NULL_RTX, unsignedp ? LSHIFTRT : ASHIFTRT,
6813 mode, tem, modewidth - len);
6814 }
6815 else if (unsignedp && len < HOST_BITS_PER_WIDE_INT)
6816 tem = simplify_and_const_int (NULL_RTX, GET_MODE (x),
6817 simplify_shift_const (NULL_RTX, LSHIFTRT,
6818 GET_MODE (x),
6819 XEXP (x, 0), pos),
6820 ((unsigned HOST_WIDE_INT) 1 << len) - 1);
6821 else
6822 /* Any other cases we can't handle. */
6823 return x;
6824
6825 /* If we couldn't do this for some reason, return the original
6826 expression. */
6827 if (GET_CODE (tem) == CLOBBER)
6828 return x;
6829
6830 return tem;
6831 }
6832 \f
6833 /* X is a SET which contains an assignment of one object into
6834 a part of another (such as a bit-field assignment, STRICT_LOW_PART,
6835 or certain SUBREGS). If possible, convert it into a series of
6836 logical operations.
6837
6838 We half-heartedly support variable positions, but do not at all
6839 support variable lengths. */
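/* For illustration (little-endian bit numbering): a store such as
   (set (zero_extract:SI R (const_int 8) (const_int 4)) SRC)
   is rewritten along the lines of
   (set R (ior:SI (and:SI R (not:SI (ashift:SI (const_int 255) (const_int 4))))
                  (ashift:SI (and:SI SRC (const_int 255)) (const_int 4)))).  */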
6840
6841 static const_rtx
6842 expand_field_assignment (const_rtx x)
6843 {
6844 rtx inner;
6845 rtx pos; /* Always counts from low bit. */
6846 int len;
6847 rtx mask, cleared, masked;
6848 enum machine_mode compute_mode;
6849
6850 /* Loop until we find something we can't simplify. */
6851 while (1)
6852 {
6853 if (GET_CODE (SET_DEST (x)) == STRICT_LOW_PART
6854 && GET_CODE (XEXP (SET_DEST (x), 0)) == SUBREG)
6855 {
6856 inner = SUBREG_REG (XEXP (SET_DEST (x), 0));
6857 len = GET_MODE_PRECISION (GET_MODE (XEXP (SET_DEST (x), 0)));
6858 pos = GEN_INT (subreg_lsb (XEXP (SET_DEST (x), 0)));
6859 }
6860 else if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
6861 && CONST_INT_P (XEXP (SET_DEST (x), 1)))
6862 {
6863 inner = XEXP (SET_DEST (x), 0);
6864 len = INTVAL (XEXP (SET_DEST (x), 1));
6865 pos = XEXP (SET_DEST (x), 2);
6866
6867 /* A constant position should stay within the width of INNER. */
6868 if (CONST_INT_P (pos)
6869 && INTVAL (pos) + len > GET_MODE_PRECISION (GET_MODE (inner)))
6870 break;
6871
6872 if (BITS_BIG_ENDIAN)
6873 {
6874 if (CONST_INT_P (pos))
6875 pos = GEN_INT (GET_MODE_PRECISION (GET_MODE (inner)) - len
6876 - INTVAL (pos));
6877 else if (GET_CODE (pos) == MINUS
6878 && CONST_INT_P (XEXP (pos, 1))
6879 && (INTVAL (XEXP (pos, 1))
6880 == GET_MODE_PRECISION (GET_MODE (inner)) - len))
6881 /* If position is ADJUST - X, new position is X. */
6882 pos = XEXP (pos, 0);
6883 else
6884 pos = simplify_gen_binary (MINUS, GET_MODE (pos),
6885 GEN_INT (GET_MODE_PRECISION (
6886 GET_MODE (inner))
6887 - len),
6888 pos);
6889 }
6890 }
6891
6892 /* A SUBREG between two modes that occupy the same numbers of words
6893 can be done by moving the SUBREG to the source. */
6894 else if (GET_CODE (SET_DEST (x)) == SUBREG
6895 /* We need SUBREGs to compute nonzero_bits properly. */
6896 && nonzero_sign_valid
6897 && (((GET_MODE_SIZE (GET_MODE (SET_DEST (x)))
6898 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
6899 == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (x))))
6900 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
6901 {
6902 x = gen_rtx_SET (VOIDmode, SUBREG_REG (SET_DEST (x)),
6903 gen_lowpart
6904 (GET_MODE (SUBREG_REG (SET_DEST (x))),
6905 SET_SRC (x)));
6906 continue;
6907 }
6908 else
6909 break;
6910
6911 while (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner))
6912 inner = SUBREG_REG (inner);
6913
6914 compute_mode = GET_MODE (inner);
6915
6916 /* Don't attempt bitwise arithmetic on non-scalar integer modes. */
6917 if (! SCALAR_INT_MODE_P (compute_mode))
6918 {
6919 enum machine_mode imode;
6920
6921 /* Don't do anything for vector or complex integral types. */
6922 if (! FLOAT_MODE_P (compute_mode))
6923 break;
6924
6925 /* Try to find an integral mode to pun with. */
6926 imode = mode_for_size (GET_MODE_BITSIZE (compute_mode), MODE_INT, 0);
6927 if (imode == BLKmode)
6928 break;
6929
6930 compute_mode = imode;
6931 inner = gen_lowpart (imode, inner);
6932 }
6933
6934 /* Compute a mask of LEN bits, if we can do this on the host machine. */
6935 if (len >= HOST_BITS_PER_WIDE_INT)
6936 break;
6937
6938 /* Now compute the equivalent expression. Make a copy of INNER
6939 for the SET_DEST in case it is a MEM into which we will substitute;
6940 we don't want shared RTL in that case. */
6941 mask = GEN_INT (((unsigned HOST_WIDE_INT) 1 << len) - 1);
6942 cleared = simplify_gen_binary (AND, compute_mode,
6943 simplify_gen_unary (NOT, compute_mode,
6944 simplify_gen_binary (ASHIFT,
6945 compute_mode,
6946 mask, pos),
6947 compute_mode),
6948 inner);
6949 masked = simplify_gen_binary (ASHIFT, compute_mode,
6950 simplify_gen_binary (
6951 AND, compute_mode,
6952 gen_lowpart (compute_mode, SET_SRC (x)),
6953 mask),
6954 pos);
6955
6956 x = gen_rtx_SET (VOIDmode, copy_rtx (inner),
6957 simplify_gen_binary (IOR, compute_mode,
6958 cleared, masked));
6959 }
6960
6961 return x;
6962 }
6963 \f
6964 /* Return an RTX for a reference to LEN bits of INNER. If POS_RTX is nonzero,
6965 it is an RTX that represents the (variable) starting position; otherwise,
6966 POS is the (constant) starting bit position. Both are counted from the LSB.
6967
6968 UNSIGNEDP is nonzero for an unsigned reference and zero for a signed one.
6969
6970 IN_DEST is nonzero if this is a reference in the destination of a SET.
6971 This is used when a ZERO_ or SIGN_EXTRACT isn't needed. If nonzero,
6972 a STRICT_LOW_PART will be used, if zero, ZERO_EXTEND or SIGN_EXTEND will
6973 be used.
6974
6975 IN_COMPARE is nonzero if we are in a COMPARE. This means that a
6976 ZERO_EXTRACT should be built even for bits starting at bit 0.
6977
6978 MODE is the desired mode of the result (if IN_DEST == 0).
6979
6980 The result is an RTX for the extraction or NULL_RTX if the target
6981 can't handle it. */
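/* For illustration: a request for 8 unsigned bits at bit 0 of a REG, outside
   a COMPARE and outside a SET destination, returns 0, since the caller's
   plain AND is already the simplest form.  A request for 8 unsigned bits at
   bit 8 typically yields a (zero_extract ...) or a zero extension of a
   low-order SUBREG, depending on what the target provides.  */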
6982
6983 static rtx
6984 make_extraction (enum machine_mode mode, rtx inner, HOST_WIDE_INT pos,
6985 rtx pos_rtx, unsigned HOST_WIDE_INT len, int unsignedp,
6986 int in_dest, int in_compare)
6987 {
6988 /* This mode describes the size of the storage area
6989 to fetch the overall value from. Within that, we
6990 ignore the POS lowest bits, etc. */
6991 enum machine_mode is_mode = GET_MODE (inner);
6992 enum machine_mode inner_mode;
6993 enum machine_mode wanted_inner_mode;
6994 enum machine_mode wanted_inner_reg_mode = word_mode;
6995 enum machine_mode pos_mode = word_mode;
6996 enum machine_mode extraction_mode = word_mode;
6997 enum machine_mode tmode = mode_for_size (len, MODE_INT, 1);
6998 rtx new_rtx = 0;
6999 rtx orig_pos_rtx = pos_rtx;
7000 HOST_WIDE_INT orig_pos;
7001
7002 if (pos_rtx && CONST_INT_P (pos_rtx))
7003 pos = INTVAL (pos_rtx), pos_rtx = 0;
7004
7005 if (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner))
7006 {
7007 /* If going from (subreg:SI (mem:QI ...)) to (mem:QI ...),
7008 consider just the QI as the memory to extract from.
7009 The subreg adds or removes high bits; its mode is
7010 irrelevant to the meaning of this extraction,
7011 since POS and LEN count from the lsb. */
7012 if (MEM_P (SUBREG_REG (inner)))
7013 is_mode = GET_MODE (SUBREG_REG (inner));
7014 inner = SUBREG_REG (inner);
7015 }
7016 else if (GET_CODE (inner) == ASHIFT
7017 && CONST_INT_P (XEXP (inner, 1))
7018 && pos_rtx == 0 && pos == 0
7019 && len > UINTVAL (XEXP (inner, 1)))
7020 {
7021 /* We're extracting the least significant bits of an rtx
7022 (ashift X (const_int C)), where LEN > C. Extract the
7023 least significant (LEN - C) bits of X, giving an rtx
7024 whose mode is MODE, then shift it left C times. */
7025 new_rtx = make_extraction (mode, XEXP (inner, 0),
7026 0, 0, len - INTVAL (XEXP (inner, 1)),
7027 unsignedp, in_dest, in_compare);
7028 if (new_rtx != 0)
7029 return gen_rtx_ASHIFT (mode, new_rtx, XEXP (inner, 1));
7030 }
7031
7032 inner_mode = GET_MODE (inner);
7033
7034 /* See if this can be done without an extraction. We never can if the
7035 width of the field is not the same as that of some integer mode. For
7036 registers, we can only avoid the extraction if the position is at the
7037 low-order bit and this is either not in the destination or we have the
7038 appropriate STRICT_LOW_PART operation available.
7039
7040 For MEM, we can avoid an extract if the field starts on an appropriate
7041 boundary and we can change the mode of the memory reference. */
7042
7043 if (tmode != BLKmode
7044 && ((pos_rtx == 0 && (pos % BITS_PER_WORD) == 0
7045 && !MEM_P (inner)
7046 && (inner_mode == tmode
7047 || !REG_P (inner)
7048 || TRULY_NOOP_TRUNCATION_MODES_P (tmode, inner_mode)
7049 || reg_truncated_to_mode (tmode, inner))
7050 && (! in_dest
7051 || (REG_P (inner)
7052 && have_insn_for (STRICT_LOW_PART, tmode))))
7053 || (MEM_P (inner) && pos_rtx == 0
7054 && (pos
7055 % (STRICT_ALIGNMENT ? GET_MODE_ALIGNMENT (tmode)
7056 : BITS_PER_UNIT)) == 0
7057 /* We can't do this if we are widening INNER_MODE (it
7058 may not be aligned, for one thing). */
7059 && GET_MODE_PRECISION (inner_mode) >= GET_MODE_PRECISION (tmode)
7060 && (inner_mode == tmode
7061 || (! mode_dependent_address_p (XEXP (inner, 0),
7062 MEM_ADDR_SPACE (inner))
7063 && ! MEM_VOLATILE_P (inner))))))
7064 {
7065 /* If INNER is a MEM, make a new MEM that encompasses just the desired
7066 field. If the original and current mode are the same, we need not
7067 adjust the offset. Otherwise, we do if bytes big endian.
7068
7069 If INNER is not a MEM, get a piece consisting of just the field
7070 of interest (in this case POS % BITS_PER_WORD must be 0). */
7071
7072 if (MEM_P (inner))
7073 {
7074 HOST_WIDE_INT offset;
7075
7076 /* POS counts from lsb, but make OFFSET count in memory order. */
7077 if (BYTES_BIG_ENDIAN)
7078 offset = (GET_MODE_PRECISION (is_mode) - len - pos) / BITS_PER_UNIT;
7079 else
7080 offset = pos / BITS_PER_UNIT;
7081
7082 new_rtx = adjust_address_nv (inner, tmode, offset);
7083 }
7084 else if (REG_P (inner))
7085 {
7086 if (tmode != inner_mode)
7087 {
7088 /* We can't call gen_lowpart in a DEST since we
7089 always want a SUBREG (see below) and it would sometimes
7090 return a new hard register. */
7091 if (pos || in_dest)
7092 {
7093 HOST_WIDE_INT final_word = pos / BITS_PER_WORD;
7094
7095 if (WORDS_BIG_ENDIAN
7096 && GET_MODE_SIZE (inner_mode) > UNITS_PER_WORD)
7097 final_word = ((GET_MODE_SIZE (inner_mode)
7098 - GET_MODE_SIZE (tmode))
7099 / UNITS_PER_WORD) - final_word;
7100
7101 final_word *= UNITS_PER_WORD;
7102 if (BYTES_BIG_ENDIAN
7103 && GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (tmode))
7104 final_word += (GET_MODE_SIZE (inner_mode)
7105 - GET_MODE_SIZE (tmode)) % UNITS_PER_WORD;
7106
7107 /* Avoid creating invalid subregs, for example when
7108 simplifying (x>>32)&255. */
7109 if (!validate_subreg (tmode, inner_mode, inner, final_word))
7110 return NULL_RTX;
7111
7112 new_rtx = gen_rtx_SUBREG (tmode, inner, final_word);
7113 }
7114 else
7115 new_rtx = gen_lowpart (tmode, inner);
7116 }
7117 else
7118 new_rtx = inner;
7119 }
7120 else
7121 new_rtx = force_to_mode (inner, tmode,
7122 len >= HOST_BITS_PER_WIDE_INT
7123 ? ~(unsigned HOST_WIDE_INT) 0
7124 : ((unsigned HOST_WIDE_INT) 1 << len) - 1,
7125 0);
7126
7127 /* If this extraction is going into the destination of a SET,
7128 make a STRICT_LOW_PART unless we made a MEM. */
7129
7130 if (in_dest)
7131 return (MEM_P (new_rtx) ? new_rtx
7132 : (GET_CODE (new_rtx) != SUBREG
7133 ? gen_rtx_CLOBBER (tmode, const0_rtx)
7134 : gen_rtx_STRICT_LOW_PART (VOIDmode, new_rtx)));
7135
7136 if (mode == tmode)
7137 return new_rtx;
7138
7139 if (CONST_SCALAR_INT_P (new_rtx))
7140 return simplify_unary_operation (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
7141 mode, new_rtx, tmode);
7142
7143 /* If we know that no extraneous bits are set, and that the high
7144 bit is not set, convert the extraction to the cheaper of
7145 sign and zero extension, which are equivalent in these cases.
7146 if (flag_expensive_optimizations
7147 && (HWI_COMPUTABLE_MODE_P (tmode)
7148 && ((nonzero_bits (new_rtx, tmode)
7149 & ~(((unsigned HOST_WIDE_INT)GET_MODE_MASK (tmode)) >> 1))
7150 == 0)))
7151 {
7152 rtx temp = gen_rtx_ZERO_EXTEND (mode, new_rtx);
7153 rtx temp1 = gen_rtx_SIGN_EXTEND (mode, new_rtx);
7154
7155 /* Prefer ZERO_EXTENSION, since it gives more information to
7156 backends. */
7157 if (set_src_cost (temp, optimize_this_for_speed_p)
7158 <= set_src_cost (temp1, optimize_this_for_speed_p))
7159 return temp;
7160 return temp1;
7161 }
7162
7163 /* Otherwise, sign- or zero-extend unless we already are in the
7164 proper mode. */
7165
7166 return (gen_rtx_fmt_e (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
7167 mode, new_rtx));
7168 }
7169
7170 /* Unless this is a COMPARE or we have a funny memory reference,
7171 don't do anything with zero-extending field extracts starting at
7172 the low-order bit since they are simple AND operations. */
7173 if (pos_rtx == 0 && pos == 0 && ! in_dest
7174 && ! in_compare && unsignedp)
7175 return 0;
7176
7177 /* If INNER is a MEM, reject this if we would be spanning bytes or
7178 if the position is not a constant and the length is not 1. In all
7179 other cases, we would only be going outside our object in cases when
7180 an original shift would have been undefined. */
7181 if (MEM_P (inner)
7182 && ((pos_rtx == 0 && pos + len > GET_MODE_PRECISION (is_mode))
7183 || (pos_rtx != 0 && len != 1)))
7184 return 0;
7185
7186 /* Get the mode to use should INNER not be a MEM, the mode for the position,
7187 and the mode for the result. */
7188 if (in_dest && mode_for_extraction (EP_insv, -1) != MAX_MACHINE_MODE)
7189 {
7190 wanted_inner_reg_mode = mode_for_extraction (EP_insv, 0);
7191 pos_mode = mode_for_extraction (EP_insv, 2);
7192 extraction_mode = mode_for_extraction (EP_insv, 3);
7193 }
7194
7195 if (! in_dest && unsignedp
7196 && mode_for_extraction (EP_extzv, -1) != MAX_MACHINE_MODE)
7197 {
7198 wanted_inner_reg_mode = mode_for_extraction (EP_extzv, 1);
7199 pos_mode = mode_for_extraction (EP_extzv, 3);
7200 extraction_mode = mode_for_extraction (EP_extzv, 0);
7201 }
7202
7203 if (! in_dest && ! unsignedp
7204 && mode_for_extraction (EP_extv, -1) != MAX_MACHINE_MODE)
7205 {
7206 wanted_inner_reg_mode = mode_for_extraction (EP_extv, 1);
7207 pos_mode = mode_for_extraction (EP_extv, 3);
7208 extraction_mode = mode_for_extraction (EP_extv, 0);
7209 }
7210
7211 /* Never narrow an object, since that might not be safe. */
7212
7213 if (mode != VOIDmode
7214 && GET_MODE_SIZE (extraction_mode) < GET_MODE_SIZE (mode))
7215 extraction_mode = mode;
7216
7217 /* If this is not from memory, the desired mode is the preferred mode
7218 for an extraction pattern's first input operand, or word_mode if there
7219 is none. */
7220 if (!MEM_P (inner))
7221 wanted_inner_mode = wanted_inner_reg_mode;
7222 else
7223 {
7224 /* Be careful not to go beyond the extracted object and maintain the
7225 natural alignment of the memory. */
7226 wanted_inner_mode = smallest_mode_for_size (len, MODE_INT);
7227 while (pos % GET_MODE_BITSIZE (wanted_inner_mode) + len
7228 > GET_MODE_BITSIZE (wanted_inner_mode))
7229 {
7230 wanted_inner_mode = GET_MODE_WIDER_MODE (wanted_inner_mode);
7231 gcc_assert (wanted_inner_mode != VOIDmode);
7232 }
7233 }
7234
7235 orig_pos = pos;
7236
7237 if (BITS_BIG_ENDIAN)
7238 {
7239 /* POS is passed as if BITS_BIG_ENDIAN == 0, so we need to convert it to
7240 BITS_BIG_ENDIAN style. If position is constant, compute new
7241 position. Otherwise, build subtraction.
7242 Note that POS is relative to the mode of the original argument.
7243 If it's a MEM we need to recompute POS relative to that.
7244 However, if we're extracting from (or inserting into) a register,
7245 we want to recompute POS relative to wanted_inner_mode. */
7246 int width = (MEM_P (inner)
7247 ? GET_MODE_BITSIZE (is_mode)
7248 : GET_MODE_BITSIZE (wanted_inner_mode));
7249
7250 if (pos_rtx == 0)
7251 pos = width - len - pos;
7252 else
7253 pos_rtx
7254 = gen_rtx_MINUS (GET_MODE (pos_rtx), GEN_INT (width - len), pos_rtx);
7255 /* POS may be less than 0 now, but we check for that below.
7256 Note that it can only be less than 0 if !MEM_P (inner). */
7257 }
7258
7259 /* If INNER has a wider mode, and this is a constant extraction, try to
7260 make it smaller and adjust the byte to point to the byte containing
7261 the value. */
7262 if (wanted_inner_mode != VOIDmode
7263 && inner_mode != wanted_inner_mode
7264 && ! pos_rtx
7265 && GET_MODE_SIZE (wanted_inner_mode) < GET_MODE_SIZE (is_mode)
7266 && MEM_P (inner)
7267 && ! mode_dependent_address_p (XEXP (inner, 0), MEM_ADDR_SPACE (inner))
7268 && ! MEM_VOLATILE_P (inner))
7269 {
7270 int offset = 0;
7271
7272 /* The computations below will be correct if the machine is big
7273 endian in both bits and bytes or little endian in bits and bytes.
7274 If it is mixed, we must adjust. */
7275
7276 /* If bytes are big endian and we had a paradoxical SUBREG, we must
7277 adjust OFFSET to compensate. */
7278 if (BYTES_BIG_ENDIAN
7279 && GET_MODE_SIZE (inner_mode) < GET_MODE_SIZE (is_mode))
7280 offset -= GET_MODE_SIZE (is_mode) - GET_MODE_SIZE (inner_mode);
7281
7282 /* We can now move to the desired byte. */
7283 offset += (pos / GET_MODE_BITSIZE (wanted_inner_mode))
7284 * GET_MODE_SIZE (wanted_inner_mode);
7285 pos %= GET_MODE_BITSIZE (wanted_inner_mode);
7286
7287 if (BYTES_BIG_ENDIAN != BITS_BIG_ENDIAN
7288 && is_mode != wanted_inner_mode)
7289 offset = (GET_MODE_SIZE (is_mode)
7290 - GET_MODE_SIZE (wanted_inner_mode) - offset);
7291
7292 inner = adjust_address_nv (inner, wanted_inner_mode, offset);
7293 }
7294
7295 /* If INNER is not memory, get it into the proper mode. If we are changing
7296 its mode, POS must be a constant and smaller than the size of the new
7297 mode. */
7298 else if (!MEM_P (inner))
7299 {
7300 /* On the LHS, don't create paradoxical subregs implicitly truncating
7301 the register unless TRULY_NOOP_TRUNCATION. */
7302 if (in_dest
7303 && !TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (inner),
7304 wanted_inner_mode))
7305 return NULL_RTX;
7306
7307 if (GET_MODE (inner) != wanted_inner_mode
7308 && (pos_rtx != 0
7309 || orig_pos + len > GET_MODE_BITSIZE (wanted_inner_mode)))
7310 return NULL_RTX;
7311
7312 if (orig_pos < 0)
7313 return NULL_RTX;
7314
7315 inner = force_to_mode (inner, wanted_inner_mode,
7316 pos_rtx
7317 || len + orig_pos >= HOST_BITS_PER_WIDE_INT
7318 ? ~(unsigned HOST_WIDE_INT) 0
7319 : ((((unsigned HOST_WIDE_INT) 1 << len) - 1)
7320 << orig_pos),
7321 0);
7322 }
7323
7324 /* Adjust mode of POS_RTX, if needed. If we want a wider mode, we
7325 have to zero extend. Otherwise, we can just use a SUBREG. */
7326 if (pos_rtx != 0
7327 && GET_MODE_SIZE (pos_mode) > GET_MODE_SIZE (GET_MODE (pos_rtx)))
7328 {
7329 rtx temp = gen_rtx_ZERO_EXTEND (pos_mode, pos_rtx);
7330
7331 /* If we know that no extraneous bits are set, and that the high
7332 bit is not set, convert the extraction to the cheaper one - either
7333 SIGN_EXTENSION or ZERO_EXTENSION, which are equivalent in these
7334 cases. */
7335 if (flag_expensive_optimizations
7336 && (HWI_COMPUTABLE_MODE_P (GET_MODE (pos_rtx))
7337 && ((nonzero_bits (pos_rtx, GET_MODE (pos_rtx))
7338 & ~(((unsigned HOST_WIDE_INT)
7339 GET_MODE_MASK (GET_MODE (pos_rtx)))
7340 >> 1))
7341 == 0)))
7342 {
7343 rtx temp1 = gen_rtx_SIGN_EXTEND (pos_mode, pos_rtx);
7344
7345 /* Prefer ZERO_EXTENSION, since it gives more information to
7346 backends. */
7347 if (set_src_cost (temp1, optimize_this_for_speed_p)
7348 < set_src_cost (temp, optimize_this_for_speed_p))
7349 temp = temp1;
7350 }
7351 pos_rtx = temp;
7352 }
7353
7354 /* Make POS_RTX unless we already have it and it is correct. If we don't
7355 have a POS_RTX but we do have an ORIG_POS_RTX, the latter must
7356 be a CONST_INT. */
7357 if (pos_rtx == 0 && orig_pos_rtx != 0 && INTVAL (orig_pos_rtx) == pos)
7358 pos_rtx = orig_pos_rtx;
7359
7360 else if (pos_rtx == 0)
7361 pos_rtx = GEN_INT (pos);
7362
7363 /* Make the required operation. See if we can use existing rtx. */
7364 new_rtx = gen_rtx_fmt_eee (unsignedp ? ZERO_EXTRACT : SIGN_EXTRACT,
7365 extraction_mode, inner, GEN_INT (len), pos_rtx);
7366 if (! in_dest)
7367 new_rtx = gen_lowpart (mode, new_rtx);
7368
7369 return new_rtx;
7370 }
7371 \f
7372 /* See if X contains an ASHIFT of COUNT or more bits that can be commuted
7373 with any other operations in X. Return X without that shift if so. */
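/* For example, extract_left_shift ((plus (ashift X (const_int 3))
   (const_int 8)), 3) returns (plus X (const_int 1)): both the shift and
   the constant can absorb a left shift of 3 bits.  */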
7374
7375 static rtx
7376 extract_left_shift (rtx x, int count)
7377 {
7378 enum rtx_code code = GET_CODE (x);
7379 enum machine_mode mode = GET_MODE (x);
7380 rtx tem;
7381
7382 switch (code)
7383 {
7384 case ASHIFT:
7385 /* This is the shift itself. If it is wide enough, we will return
7386 either the value being shifted if the shift count is equal to
7387 COUNT or a shift for the difference. */
7388 if (CONST_INT_P (XEXP (x, 1))
7389 && INTVAL (XEXP (x, 1)) >= count)
7390 return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (x, 0),
7391 INTVAL (XEXP (x, 1)) - count);
7392 break;
7393
7394 case NEG: case NOT:
7395 if ((tem = extract_left_shift (XEXP (x, 0), count)) != 0)
7396 return simplify_gen_unary (code, mode, tem, mode);
7397
7398 break;
7399
7400 case PLUS: case IOR: case XOR: case AND:
7401 /* If we can safely shift this constant and we find the inner shift,
7402 make a new operation. */
7403 if (CONST_INT_P (XEXP (x, 1))
7404 && (UINTVAL (XEXP (x, 1))
7405 & ((((unsigned HOST_WIDE_INT) 1 << count)) - 1)) == 0
7406 && (tem = extract_left_shift (XEXP (x, 0), count)) != 0)
7407 return simplify_gen_binary (code, mode, tem,
7408 GEN_INT (INTVAL (XEXP (x, 1)) >> count));
7409
7410 break;
7411
7412 default:
7413 break;
7414 }
7415
7416 return 0;
7417 }
7418 \f
7419 /* Look at the expression rooted at X. Look for expressions
7420 equivalent to ZERO_EXTRACT, SIGN_EXTRACT, ZERO_EXTEND, SIGN_EXTEND.
7421 Form these expressions.
7422
7423 Return the new rtx, usually just X.
7424
7425 Also, for machines like the VAX that don't have logical shift insns,
7426 try to convert logical to arithmetic shift operations in cases where
7427 they are equivalent. This undoes the canonicalizations to logical
7428 shifts done elsewhere.
7429
7430 We try, as much as possible, to re-use rtl expressions to save memory.
7431
7432 IN_CODE says what kind of expression we are processing. Normally, it is
7433 SET. In a memory address (inside a MEM, PLUS or MINUS, the latter two
7434 being kludges), it is MEM. When processing the arguments of a comparison
7435 or a COMPARE against zero, it is COMPARE. */
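/* Two typical transformations, for illustration:
   (and:SI (lshiftrt:SI X (const_int 8)) (const_int 255)) is recognized as
   an 8-bit field and usually becomes
   (zero_extract:SI X (const_int 8) (const_int 8)); inside a memory address,
   (ashift:SI X (const_int 2)) becomes (mult:SI X (const_int 4)).  */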
7436
7437 rtx
7438 make_compound_operation (rtx x, enum rtx_code in_code)
7439 {
7440 enum rtx_code code = GET_CODE (x);
7441 enum machine_mode mode = GET_MODE (x);
7442 int mode_width = GET_MODE_PRECISION (mode);
7443 rtx rhs, lhs;
7444 enum rtx_code next_code;
7445 int i, j;
7446 rtx new_rtx = 0;
7447 rtx tem;
7448 const char *fmt;
7449
7450 /* Select the code to be used in recursive calls. Once we are inside an
7451 address, we stay there. If we have a comparison, set to COMPARE,
7452 but once inside, go back to our default of SET. */
7453
7454 next_code = (code == MEM ? MEM
7455 : ((code == PLUS || code == MINUS)
7456 && SCALAR_INT_MODE_P (mode)) ? MEM
7457 : ((code == COMPARE || COMPARISON_P (x))
7458 && XEXP (x, 1) == const0_rtx) ? COMPARE
7459 : in_code == COMPARE ? SET : in_code);
7460
7461 /* Process depending on the code of this operation. If NEW_RTX is set
7462 nonzero, it will be returned. */
7463
7464 switch (code)
7465 {
7466 case ASHIFT:
7467 /* Convert shifts by constants into multiplications if inside
7468 an address. */
7469 if (in_code == MEM && CONST_INT_P (XEXP (x, 1))
7470 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
7471 && INTVAL (XEXP (x, 1)) >= 0
7472 && SCALAR_INT_MODE_P (mode))
7473 {
7474 HOST_WIDE_INT count = INTVAL (XEXP (x, 1));
7475 HOST_WIDE_INT multval = (HOST_WIDE_INT) 1 << count;
7476
7477 new_rtx = make_compound_operation (XEXP (x, 0), next_code);
7478 if (GET_CODE (new_rtx) == NEG)
7479 {
7480 new_rtx = XEXP (new_rtx, 0);
7481 multval = -multval;
7482 }
7483 multval = trunc_int_for_mode (multval, mode);
7484 new_rtx = gen_rtx_MULT (mode, new_rtx, GEN_INT (multval));
7485 }
7486 break;
7487
7488 case PLUS:
7489 lhs = XEXP (x, 0);
7490 rhs = XEXP (x, 1);
7491 lhs = make_compound_operation (lhs, next_code);
7492 rhs = make_compound_operation (rhs, next_code);
7493 if (GET_CODE (lhs) == MULT && GET_CODE (XEXP (lhs, 0)) == NEG
7494 && SCALAR_INT_MODE_P (mode))
7495 {
7496 tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (lhs, 0), 0),
7497 XEXP (lhs, 1));
7498 new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
7499 }
7500 else if (GET_CODE (lhs) == MULT
7501 && (CONST_INT_P (XEXP (lhs, 1)) && INTVAL (XEXP (lhs, 1)) < 0))
7502 {
7503 tem = simplify_gen_binary (MULT, mode, XEXP (lhs, 0),
7504 simplify_gen_unary (NEG, mode,
7505 XEXP (lhs, 1),
7506 mode));
7507 new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
7508 }
7509 else
7510 {
7511 SUBST (XEXP (x, 0), lhs);
7512 SUBST (XEXP (x, 1), rhs);
7513 goto maybe_swap;
7514 }
7515 x = gen_lowpart (mode, new_rtx);
7516 goto maybe_swap;
7517
7518 case MINUS:
7519 lhs = XEXP (x, 0);
7520 rhs = XEXP (x, 1);
7521 lhs = make_compound_operation (lhs, next_code);
7522 rhs = make_compound_operation (rhs, next_code);
7523 if (GET_CODE (rhs) == MULT && GET_CODE (XEXP (rhs, 0)) == NEG
7524 && SCALAR_INT_MODE_P (mode))
7525 {
7526 tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (rhs, 0), 0),
7527 XEXP (rhs, 1));
7528 new_rtx = simplify_gen_binary (PLUS, mode, tem, lhs);
7529 }
7530 else if (GET_CODE (rhs) == MULT
7531 && (CONST_INT_P (XEXP (rhs, 1)) && INTVAL (XEXP (rhs, 1)) < 0))
7532 {
7533 tem = simplify_gen_binary (MULT, mode, XEXP (rhs, 0),
7534 simplify_gen_unary (NEG, mode,
7535 XEXP (rhs, 1),
7536 mode));
7537 new_rtx = simplify_gen_binary (PLUS, mode, tem, lhs);
7538 }
7539 else
7540 {
7541 SUBST (XEXP (x, 0), lhs);
7542 SUBST (XEXP (x, 1), rhs);
7543 return x;
7544 }
7545 return gen_lowpart (mode, new_rtx);
7546
7547 case AND:
7548 /* If the second operand is not a constant, we can't do anything
7549 with it. */
7550 if (!CONST_INT_P (XEXP (x, 1)))
7551 break;
7552
7553 /* If the constant is a power of two minus one and the first operand
7554 is a logical right shift, make an extraction. */
7555 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
7556 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
7557 {
7558 new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
7559 new_rtx = make_extraction (mode, new_rtx, 0, XEXP (XEXP (x, 0), 1), i, 1,
7560 0, in_code == COMPARE);
7561 }
7562
7563 /* Same as previous, but for (subreg (lshiftrt ...)) in first op. */
7564 else if (GET_CODE (XEXP (x, 0)) == SUBREG
7565 && subreg_lowpart_p (XEXP (x, 0))
7566 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == LSHIFTRT
7567 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
7568 {
7569 new_rtx = make_compound_operation (XEXP (SUBREG_REG (XEXP (x, 0)), 0),
7570 next_code);
7571 new_rtx = make_extraction (GET_MODE (SUBREG_REG (XEXP (x, 0))), new_rtx, 0,
7572 XEXP (SUBREG_REG (XEXP (x, 0)), 1), i, 1,
7573 0, in_code == COMPARE);
7574 }
7575 /* Same as previous, but for (xor/ior (lshiftrt...) (lshiftrt...)). */
7576 else if ((GET_CODE (XEXP (x, 0)) == XOR
7577 || GET_CODE (XEXP (x, 0)) == IOR)
7578 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LSHIFTRT
7579 && GET_CODE (XEXP (XEXP (x, 0), 1)) == LSHIFTRT
7580 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
7581 {
7582 /* Apply the distributive law, and then try to make extractions. */
7583 new_rtx = gen_rtx_fmt_ee (GET_CODE (XEXP (x, 0)), mode,
7584 gen_rtx_AND (mode, XEXP (XEXP (x, 0), 0),
7585 XEXP (x, 1)),
7586 gen_rtx_AND (mode, XEXP (XEXP (x, 0), 1),
7587 XEXP (x, 1)));
7588 new_rtx = make_compound_operation (new_rtx, in_code);
7589 }
7590
7591 /* If we have (and (rotate X C) M) and C is larger than the number
7592 of bits in M, this is an extraction. */
7593
7594 else if (GET_CODE (XEXP (x, 0)) == ROTATE
7595 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
7596 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0
7597 && i <= INTVAL (XEXP (XEXP (x, 0), 1)))
7598 {
7599 new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
7600 new_rtx = make_extraction (mode, new_rtx,
7601 (GET_MODE_PRECISION (mode)
7602 - INTVAL (XEXP (XEXP (x, 0), 1))),
7603 NULL_RTX, i, 1, 0, in_code == COMPARE);
7604 }
7605
7606 /* On machines without logical shifts, if the operand of the AND is
7607 a logical shift and our mask turns off all the propagated sign
7608 bits, we can replace the logical shift with an arithmetic shift. */
7609 else if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
7610 && !have_insn_for (LSHIFTRT, mode)
7611 && have_insn_for (ASHIFTRT, mode)
7612 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
7613 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
7614 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
7615 && mode_width <= HOST_BITS_PER_WIDE_INT)
7616 {
7617 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
7618
7619 mask >>= INTVAL (XEXP (XEXP (x, 0), 1));
7620 if ((INTVAL (XEXP (x, 1)) & ~mask) == 0)
7621 SUBST (XEXP (x, 0),
7622 gen_rtx_ASHIFTRT (mode,
7623 make_compound_operation
7624 (XEXP (XEXP (x, 0), 0), next_code),
7625 XEXP (XEXP (x, 0), 1)));
7626 }
7627
7628 /* If the constant is one less than a power of two, this might be
7629 representable by an extraction even if no shift is present.
7630 If it doesn't end up being a ZERO_EXTEND, we will ignore it unless
7631 we are in a COMPARE. */
7632 else if ((i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
7633 new_rtx = make_extraction (mode,
7634 make_compound_operation (XEXP (x, 0),
7635 next_code),
7636 0, NULL_RTX, i, 1, 0, in_code == COMPARE);
7637
7638 /* If we are in a comparison and this is an AND with a power of two,
7639 convert this into the appropriate bit extract. */
7640 else if (in_code == COMPARE
7641 && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0)
7642 new_rtx = make_extraction (mode,
7643 make_compound_operation (XEXP (x, 0),
7644 next_code),
7645 i, NULL_RTX, 1, 1, 0, 1);
7646
7647 break;
7648
7649 case LSHIFTRT:
7650 /* If the sign bit is known to be zero, replace this with an
7651 arithmetic shift. */
7652 if (have_insn_for (ASHIFTRT, mode)
7653 && ! have_insn_for (LSHIFTRT, mode)
7654 && mode_width <= HOST_BITS_PER_WIDE_INT
7655 && (nonzero_bits (XEXP (x, 0), mode) & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1))) == 0)
7656 {
7657 new_rtx = gen_rtx_ASHIFTRT (mode,
7658 make_compound_operation (XEXP (x, 0),
7659 next_code),
7660 XEXP (x, 1));
7661 break;
7662 }
7663
7664 /* ... fall through ... */
7665
7666 case ASHIFTRT:
7667 lhs = XEXP (x, 0);
7668 rhs = XEXP (x, 1);
7669
7670 /* If we have (ashiftrt (ashift foo C1) C2) with C2 >= C1,
7671 this is a SIGN_EXTRACT. */
7672 if (CONST_INT_P (rhs)
7673 && GET_CODE (lhs) == ASHIFT
7674 && CONST_INT_P (XEXP (lhs, 1))
7675 && INTVAL (rhs) >= INTVAL (XEXP (lhs, 1))
7676 && INTVAL (XEXP (lhs, 1)) >= 0
7677 && INTVAL (rhs) < mode_width)
7678 {
7679 new_rtx = make_compound_operation (XEXP (lhs, 0), next_code);
7680 new_rtx = make_extraction (mode, new_rtx,
7681 INTVAL (rhs) - INTVAL (XEXP (lhs, 1)),
7682 NULL_RTX, mode_width - INTVAL (rhs),
7683 code == LSHIFTRT, 0, in_code == COMPARE);
7684 break;
7685 }
7686
7687 /* See if we have operations between an ASHIFTRT and an ASHIFT.
7688 If so, try to merge the shifts into a SIGN_EXTEND. We could
7689 also do this for some cases of SIGN_EXTRACT, but it doesn't
7690 seem worth the effort; the case checked for occurs on Alpha. */
7691
7692 if (!OBJECT_P (lhs)
7693 && ! (GET_CODE (lhs) == SUBREG
7694 && (OBJECT_P (SUBREG_REG (lhs))))
7695 && CONST_INT_P (rhs)
7696 && INTVAL (rhs) < HOST_BITS_PER_WIDE_INT
7697 && INTVAL (rhs) < mode_width
7698 && (new_rtx = extract_left_shift (lhs, INTVAL (rhs))) != 0)
7699 new_rtx = make_extraction (mode, make_compound_operation (new_rtx, next_code),
7700 0, NULL_RTX, mode_width - INTVAL (rhs),
7701 code == LSHIFTRT, 0, in_code == COMPARE);
7702
7703 break;
7704
7705 case SUBREG:
7706 /* Call ourselves recursively on the inner expression. If we are
7707 narrowing the object and it has a different RTL code from
7708 what it originally did, do this SUBREG as a force_to_mode. */
7709 {
7710 rtx inner = SUBREG_REG (x), simplified;
7711
7712 tem = make_compound_operation (inner, in_code);
7713
7714 simplified
7715 = simplify_subreg (mode, tem, GET_MODE (inner), SUBREG_BYTE (x));
7716 if (simplified)
7717 tem = simplified;
7718
7719 if (GET_CODE (tem) != GET_CODE (inner)
7720 && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (inner))
7721 && subreg_lowpart_p (x))
7722 {
7723 rtx newer
7724 = force_to_mode (tem, mode, ~(unsigned HOST_WIDE_INT) 0, 0);
7725
7726 /* If we have something other than a SUBREG, we might have
7727 done an expansion, so rerun ourselves. */
7728 if (GET_CODE (newer) != SUBREG)
7729 newer = make_compound_operation (newer, in_code);
7730
7731 /* force_to_mode can expand compounds. If it just re-expanded the
7732 compound, use gen_lowpart to convert to the desired mode. */
7733 if (rtx_equal_p (newer, x)
7734 /* Likewise if it re-expanded the compound only partially.
7735 This happens for SUBREG of ZERO_EXTRACT if they extract
7736 the same number of bits. */
7737 || (GET_CODE (newer) == SUBREG
7738 && (GET_CODE (SUBREG_REG (newer)) == LSHIFTRT
7739 || GET_CODE (SUBREG_REG (newer)) == ASHIFTRT)
7740 && GET_CODE (inner) == AND
7741 && rtx_equal_p (SUBREG_REG (newer), XEXP (inner, 0))))
7742 return gen_lowpart (GET_MODE (x), tem);
7743
7744 return newer;
7745 }
7746
7747 if (simplified)
7748 return tem;
7749 }
7750 break;
7751
7752 default:
7753 break;
7754 }
7755
7756 if (new_rtx)
7757 {
7758 x = gen_lowpart (mode, new_rtx);
7759 code = GET_CODE (x);
7760 }
7761
7762 /* Now recursively process each operand of this operation. We need to
7763 handle ZERO_EXTEND specially so that we don't lose track of the
7764 inner mode. */
7765 if (GET_CODE (x) == ZERO_EXTEND)
7766 {
7767 new_rtx = make_compound_operation (XEXP (x, 0), next_code);
7768 tem = simplify_const_unary_operation (ZERO_EXTEND, GET_MODE (x),
7769 new_rtx, GET_MODE (XEXP (x, 0)));
7770 if (tem)
7771 return tem;
7772 SUBST (XEXP (x, 0), new_rtx);
7773 return x;
7774 }
7775
7776 fmt = GET_RTX_FORMAT (code);
7777 for (i = 0; i < GET_RTX_LENGTH (code); i++)
7778 if (fmt[i] == 'e')
7779 {
7780 new_rtx = make_compound_operation (XEXP (x, i), next_code);
7781 SUBST (XEXP (x, i), new_rtx);
7782 }
7783 else if (fmt[i] == 'E')
7784 for (j = 0; j < XVECLEN (x, i); j++)
7785 {
7786 new_rtx = make_compound_operation (XVECEXP (x, i, j), next_code);
7787 SUBST (XVECEXP (x, i, j), new_rtx);
7788 }
7789
7790 maybe_swap:
7791 /* If this is a commutative operation, the changes to the operands
7792 may have made it noncanonical. */
7793 if (COMMUTATIVE_ARITH_P (x)
7794 && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
7795 {
7796 tem = XEXP (x, 0);
7797 SUBST (XEXP (x, 0), XEXP (x, 1));
7798 SUBST (XEXP (x, 1), tem);
7799 }
7800
7801 return x;
7802 }
7803 \f
7804 /* Given M, see if it is a value that would select a field of bits
7805 within an item, but not the entire word. Return -1 if not.
7806 Otherwise, return the starting position of the field, where 0 is the
7807 low-order bit.
7808
7809 *PLEN is set to the length of the field. */
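/* For example, M == 0x0ff0 selects an 8-bit field starting at bit 4, so the
   return value is 4 and *PLEN is set to 8; M == 0x0ff1 has non-contiguous
   ones and yields -1.  */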
7810
7811 static int
7812 get_pos_from_mask (unsigned HOST_WIDE_INT m, unsigned HOST_WIDE_INT *plen)
7813 {
7814 /* Get the bit number of the first 1 bit from the right, -1 if none. */
7815 int pos = m ? ctz_hwi (m) : -1;
7816 int len = 0;
7817
7818 if (pos >= 0)
7819 /* Now shift off the low-order zero bits and see if we have a
7820 power of two minus 1. */
7821 len = exact_log2 ((m >> pos) + 1);
7822
7823 if (len <= 0)
7824 pos = -1;
7825
7826 *plen = len;
7827 return pos;
7828 }
7829 \f
7830 /* If X refers to a register that equals REG in value, replace these
7831 references with REG. */
7832 static rtx
7833 canon_reg_for_combine (rtx x, rtx reg)
7834 {
7835 rtx op0, op1, op2;
7836 const char *fmt;
7837 int i;
7838 bool copied;
7839
7840 enum rtx_code code = GET_CODE (x);
7841 switch (GET_RTX_CLASS (code))
7842 {
7843 case RTX_UNARY:
7844 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
7845 if (op0 != XEXP (x, 0))
7846 return simplify_gen_unary (GET_CODE (x), GET_MODE (x), op0,
7847 GET_MODE (reg));
7848 break;
7849
7850 case RTX_BIN_ARITH:
7851 case RTX_COMM_ARITH:
7852 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
7853 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
7854 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
7855 return simplify_gen_binary (GET_CODE (x), GET_MODE (x), op0, op1);
7856 break;
7857
7858 case RTX_COMPARE:
7859 case RTX_COMM_COMPARE:
7860 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
7861 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
7862 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
7863 return simplify_gen_relational (GET_CODE (x), GET_MODE (x),
7864 GET_MODE (op0), op0, op1);
7865 break;
7866
7867 case RTX_TERNARY:
7868 case RTX_BITFIELD_OPS:
7869 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
7870 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
7871 op2 = canon_reg_for_combine (XEXP (x, 2), reg);
7872 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1) || op2 != XEXP (x, 2))
7873 return simplify_gen_ternary (GET_CODE (x), GET_MODE (x),
7874 GET_MODE (op0), op0, op1, op2);
7875
7876 case RTX_OBJ:
7877 if (REG_P (x))
7878 {
7879 if (rtx_equal_p (get_last_value (reg), x)
7880 || rtx_equal_p (reg, get_last_value (x)))
7881 return reg;
7882 else
7883 break;
7884 }
7885
7886 /* fall through */
7887
7888 default:
7889 fmt = GET_RTX_FORMAT (code);
7890 copied = false;
7891 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
7892 if (fmt[i] == 'e')
7893 {
7894 rtx op = canon_reg_for_combine (XEXP (x, i), reg);
7895 if (op != XEXP (x, i))
7896 {
7897 if (!copied)
7898 {
7899 copied = true;
7900 x = copy_rtx (x);
7901 }
7902 XEXP (x, i) = op;
7903 }
7904 }
7905 else if (fmt[i] == 'E')
7906 {
7907 int j;
7908 for (j = 0; j < XVECLEN (x, i); j++)
7909 {
7910 rtx op = canon_reg_for_combine (XVECEXP (x, i, j), reg);
7911 if (op != XVECEXP (x, i, j))
7912 {
7913 if (!copied)
7914 {
7915 copied = true;
7916 x = copy_rtx (x);
7917 }
7918 XVECEXP (x, i, j) = op;
7919 }
7920 }
7921 }
7922
7923 break;
7924 }
7925
7926 return x;
7927 }
7928
7929 /* Return X converted to MODE. If the value is already truncated to
7930 MODE we can just return a subreg even though in the general case we
7931 would need an explicit truncation. */
7932
7933 static rtx
7934 gen_lowpart_or_truncate (enum machine_mode mode, rtx x)
7935 {
7936 if (!CONST_INT_P (x)
7937 && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (x))
7938 && !TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (x))
7939 && !(REG_P (x) && reg_truncated_to_mode (mode, x)))
7940 {
7941 /* Bit-cast X into an integer mode. */
7942 if (!SCALAR_INT_MODE_P (GET_MODE (x)))
7943 x = gen_lowpart (int_mode_for_mode (GET_MODE (x)), x);
7944 x = simplify_gen_unary (TRUNCATE, int_mode_for_mode (mode),
7945 x, GET_MODE (x));
7946 }
7947
7948 return gen_lowpart (mode, x);
7949 }
7950
7951 /* See if X can be simplified knowing that we will only refer to it in
7952 MODE and will only refer to those bits that are nonzero in MASK.
7953 If other bits are being computed or if masking operations are done
7954 that select a superset of the bits in MASK, they can sometimes be
7955 ignored.
7956
7957 Return a possibly simplified expression, but always convert X to
7958 MODE. If X is a CONST_INT, AND the CONST_INT with MASK.
7959
7960 If JUST_SELECT is nonzero, don't optimize by noticing that bits in MASK
7961 are all off in X. This is used when X will be complemented, by either
7962 NOT, NEG, or XOR. */
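/* For illustration: forcing (and X (const_int 255)) to SImode with MASK 255
   drops the now-redundant AND and returns X itself (converted to MODE if
   needed), while a CONST_INT operand is simply replaced by its value ANDed
   with MASK.  */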
7963
7964 static rtx
7965 force_to_mode (rtx x, enum machine_mode mode, unsigned HOST_WIDE_INT mask,
7966 int just_select)
7967 {
7968 enum rtx_code code = GET_CODE (x);
7969 int next_select = just_select || code == XOR || code == NOT || code == NEG;
7970 enum machine_mode op_mode;
7971 unsigned HOST_WIDE_INT fuller_mask, nonzero;
7972 rtx op0, op1, temp;
7973
7974 /* If this is a CALL or ASM_OPERANDS, don't do anything. Some of the
7975 code below will do the wrong thing since the mode of such an
7976 expression is VOIDmode.
7977
7978 Also do nothing if X is a CLOBBER; this can happen if X was
7979 the return value from a call to gen_lowpart. */
7980 if (code == CALL || code == ASM_OPERANDS || code == CLOBBER)
7981 return x;
7982
7983 /* We want to perform the operation in its present mode unless we know
7984 that the operation is valid in MODE, in which case we do the operation
7985 in MODE. */
7986 op_mode = ((GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (x))
7987 && have_insn_for (code, mode))
7988 ? mode : GET_MODE (x));
7989
7990 /* It is not valid to do a right-shift in a narrower mode
7991 than the one it came in with. */
7992 if ((code == LSHIFTRT || code == ASHIFTRT)
7993 && GET_MODE_PRECISION (mode) < GET_MODE_PRECISION (GET_MODE (x)))
7994 op_mode = GET_MODE (x);
7995
7996 /* Truncate MASK to fit OP_MODE. */
7997 if (op_mode)
7998 mask &= GET_MODE_MASK (op_mode);
7999
8000 /* When we have an arithmetic operation, or a shift whose count we
8001 do not know, we need to assume that all bits up to the highest-order
8002 bit in MASK will be needed. This is how we form such a mask. */
8003 if (mask & ((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)))
8004 fuller_mask = ~(unsigned HOST_WIDE_INT) 0;
8005 else
8006 fuller_mask = (((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mask) + 1))
8007 - 1);
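/* For example, MASK == 0x9 (highest set bit 3) gives FULLER_MASK == 0xf.  */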
8008
8009 /* Determine what bits of X are guaranteed to be (non)zero. */
8010 nonzero = nonzero_bits (x, mode);
8011
8012 /* If none of the bits in X are needed, return a zero. */
8013 if (!just_select && (nonzero & mask) == 0 && !side_effects_p (x))
8014 x = const0_rtx;
8015
8016 /* If X is a CONST_INT, return a new one. Do this here since the
8017 test below will fail. */
8018 if (CONST_INT_P (x))
8019 {
8020 if (SCALAR_INT_MODE_P (mode))
8021 return gen_int_mode (INTVAL (x) & mask, mode);
8022 else
8023 {
8024 x = GEN_INT (INTVAL (x) & mask);
8025 return gen_lowpart_common (mode, x);
8026 }
8027 }
8028
8029 /* If X is narrower than MODE and we want all the bits in X's mode, just
8030 get X in the proper mode. */
8031 if (GET_MODE_SIZE (GET_MODE (x)) < GET_MODE_SIZE (mode)
8032 && (GET_MODE_MASK (GET_MODE (x)) & ~mask) == 0)
8033 return gen_lowpart (mode, x);
8034
8035 /* We can ignore the effect of a SUBREG if it narrows the mode or
8036 if the constant masks to zero all the bits the mode doesn't have. */
8037 if (GET_CODE (x) == SUBREG
8038 && subreg_lowpart_p (x)
8039 && ((GET_MODE_SIZE (GET_MODE (x))
8040 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
8041 || (0 == (mask
8042 & GET_MODE_MASK (GET_MODE (x))
8043 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x)))))))
8044 return force_to_mode (SUBREG_REG (x), mode, mask, next_select);
8045
8046 /* The arithmetic simplifications here only work for scalar integer modes. */
8047 if (!SCALAR_INT_MODE_P (mode) || !SCALAR_INT_MODE_P (GET_MODE (x)))
8048 return gen_lowpart_or_truncate (mode, x);
8049
8050 switch (code)
8051 {
8052 case CLOBBER:
8053 /* If X is a (clobber (const_int)), return it since we know we are
8054 generating something that won't match. */
8055 return x;
8056
8057 case SIGN_EXTEND:
8058 case ZERO_EXTEND:
8059 case ZERO_EXTRACT:
8060 case SIGN_EXTRACT:
8061 x = expand_compound_operation (x);
8062 if (GET_CODE (x) != code)
8063 return force_to_mode (x, mode, mask, next_select);
8064 break;
8065
8066 case TRUNCATE:
8067 /* Similarly for a truncate. */
8068 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
8069
8070 case AND:
8071 /* If this is an AND with a constant, convert it into an AND
8072 whose constant is the AND of that constant with MASK. If it
8073 remains an AND of MASK, delete it since it is redundant. */
8074
8075 if (CONST_INT_P (XEXP (x, 1)))
8076 {
8077 x = simplify_and_const_int (x, op_mode, XEXP (x, 0),
8078 mask & INTVAL (XEXP (x, 1)));
8079
8080 /* If X is still an AND, see if it is an AND with a mask that
8081 is just some low-order bits. If so, and it is MASK, we don't
8082 need it. */
8083
8084 if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
8085 && ((INTVAL (XEXP (x, 1)) & GET_MODE_MASK (GET_MODE (x)))
8086 == mask))
8087 x = XEXP (x, 0);
8088
8089 /* If it remains an AND, try making another AND with the bits
8090 in the mode mask that aren't in MASK turned on. If the
8091 constant in the AND is wide enough, this might make a
8092 cheaper constant. */
8093
8094 if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
8095 && GET_MODE_MASK (GET_MODE (x)) != mask
8096 && HWI_COMPUTABLE_MODE_P (GET_MODE (x)))
8097 {
8098 unsigned HOST_WIDE_INT cval
8099 = UINTVAL (XEXP (x, 1))
8100 | (GET_MODE_MASK (GET_MODE (x)) & ~mask);
8101 int width = GET_MODE_PRECISION (GET_MODE (x));
8102 rtx y;
8103
8104 /* If MODE is narrower than HOST_WIDE_INT and CVAL is a negative
8105 number, sign extend it. */
8106 if (width > 0 && width < HOST_BITS_PER_WIDE_INT
8107 && (cval & ((unsigned HOST_WIDE_INT) 1 << (width - 1))) != 0)
8108 cval |= (unsigned HOST_WIDE_INT) -1 << width;
8109
8110 y = simplify_gen_binary (AND, GET_MODE (x),
8111 XEXP (x, 0), GEN_INT (cval));
8112 if (set_src_cost (y, optimize_this_for_speed_p)
8113 < set_src_cost (x, optimize_this_for_speed_p))
8114 x = y;
8115 }
8116
8117 break;
8118 }
8119
8120 goto binop;
8121
8122 case PLUS:
8123 /* In (and (plus FOO C1) M), if M is a mask that just turns off
8124 low-order bits (as in an alignment operation) and FOO is already
8125 aligned to that boundary, mask C1 to that boundary as well.
8126 This may eliminate that PLUS and, later, the AND. */
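/* For example, if FOO is known to be 4-byte aligned, then while simplifying
   (and (plus FOO (const_int 7)) (const_int -4)) the 7 can be masked down to
   4 here, turning the inner sum into (plus FOO (const_int 4)).  */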
8127
8128 {
8129 unsigned int width = GET_MODE_PRECISION (mode);
8130 unsigned HOST_WIDE_INT smask = mask;
8131
8132 /* If MODE is narrower than HOST_WIDE_INT and mask is a negative
8133 number, sign extend it. */
8134
8135 if (width < HOST_BITS_PER_WIDE_INT
8136 && (smask & ((unsigned HOST_WIDE_INT) 1 << (width - 1))) != 0)
8137 smask |= (unsigned HOST_WIDE_INT) (-1) << width;
8138
8139 if (CONST_INT_P (XEXP (x, 1))
8140 && exact_log2 (- smask) >= 0
8141 && (nonzero_bits (XEXP (x, 0), mode) & ~smask) == 0
8142 && (INTVAL (XEXP (x, 1)) & ~smask) != 0)
8143 return force_to_mode (plus_constant (GET_MODE (x), XEXP (x, 0),
8144 (INTVAL (XEXP (x, 1)) & smask)),
8145 mode, smask, next_select);
8146 }
8147
8148 /* ... fall through ... */
8149
8150 case MULT:
8151 /* For PLUS, MINUS and MULT, we need any bits less significant than the
8152 most significant bit in MASK since carries from those bits will
8153 affect the bits we are interested in. */
8154 mask = fuller_mask;
8155 goto binop;
8156
8157 case MINUS:
8158 /* If X is (minus C Y) where C's least set bit is larger than any bit
8159 in the mask, then we may replace with (neg Y). */
8160 if (CONST_INT_P (XEXP (x, 0))
8161 && (((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 0))
8162 & -INTVAL (XEXP (x, 0))))
8163 > mask))
8164 {
8165 x = simplify_gen_unary (NEG, GET_MODE (x), XEXP (x, 1),
8166 GET_MODE (x));
8167 return force_to_mode (x, mode, mask, next_select);
8168 }
8169
8170 /* Similarly, if C contains every bit in the fuller_mask, then we may
8171 replace with (not Y). */
8172 if (CONST_INT_P (XEXP (x, 0))
8173 && ((UINTVAL (XEXP (x, 0)) | fuller_mask) == UINTVAL (XEXP (x, 0))))
8174 {
8175 x = simplify_gen_unary (NOT, GET_MODE (x),
8176 XEXP (x, 1), GET_MODE (x));
8177 return force_to_mode (x, mode, mask, next_select);
8178 }
8179
8180 mask = fuller_mask;
8181 goto binop;
8182
8183 case IOR:
8184 case XOR:
8185 /* If X is (ior (lshiftrt FOO C1) C2), try to commute the IOR and
8186 LSHIFTRT so we end up with an (and (lshiftrt (ior ...) ...) ...)
8187 operation which may be a bitfield extraction. Ensure that the
8188 constant we form is not wider than the mode of X. */
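/* For example, with MASK covering the whole mode,
   (ior:SI (lshiftrt:SI FOO (const_int 8)) (const_int 3)) is rewritten as
   (lshiftrt:SI (ior:SI FOO (const_int 768)) (const_int 8)), exposing a
   possible 8-bit field extraction.  */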
8189
8190 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8191 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8192 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
8193 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
8194 && CONST_INT_P (XEXP (x, 1))
8195 && ((INTVAL (XEXP (XEXP (x, 0), 1))
8196 + floor_log2 (INTVAL (XEXP (x, 1))))
8197 < GET_MODE_PRECISION (GET_MODE (x)))
8198 && (UINTVAL (XEXP (x, 1))
8199 & ~nonzero_bits (XEXP (x, 0), GET_MODE (x))) == 0)
8200 {
8201 temp = GEN_INT ((INTVAL (XEXP (x, 1)) & mask)
8202 << INTVAL (XEXP (XEXP (x, 0), 1)));
8203 temp = simplify_gen_binary (GET_CODE (x), GET_MODE (x),
8204 XEXP (XEXP (x, 0), 0), temp);
8205 x = simplify_gen_binary (LSHIFTRT, GET_MODE (x), temp,
8206 XEXP (XEXP (x, 0), 1));
8207 return force_to_mode (x, mode, mask, next_select);
8208 }
8209
8210 binop:
8211 /* For most binary operations, just propagate into the operation and
8212 change the mode if we have an operation of that mode. */
8213
8214 op0 = force_to_mode (XEXP (x, 0), mode, mask, next_select);
8215 op1 = force_to_mode (XEXP (x, 1), mode, mask, next_select);
8216
8217 /* If we ended up truncating both operands, truncate the result of the
8218 operation instead. */
8219 if (GET_CODE (op0) == TRUNCATE
8220 && GET_CODE (op1) == TRUNCATE)
8221 {
8222 op0 = XEXP (op0, 0);
8223 op1 = XEXP (op1, 0);
8224 }
8225
8226 op0 = gen_lowpart_or_truncate (op_mode, op0);
8227 op1 = gen_lowpart_or_truncate (op_mode, op1);
8228
8229 if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8230 x = simplify_gen_binary (code, op_mode, op0, op1);
8231 break;
8232
8233 case ASHIFT:
8234 /* For left shifts, do the same, but just for the first operand.
8235 However, we cannot do anything with shifts where we cannot
8236 guarantee that the counts are smaller than the size of the mode
8237 because such a count will have a different meaning in a
8238 wider mode. */
8239
8240 if (! (CONST_INT_P (XEXP (x, 1))
8241 && INTVAL (XEXP (x, 1)) >= 0
8242 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (mode))
8243 && ! (GET_MODE (XEXP (x, 1)) != VOIDmode
8244 && (nonzero_bits (XEXP (x, 1), GET_MODE (XEXP (x, 1)))
8245 < (unsigned HOST_WIDE_INT) GET_MODE_PRECISION (mode))))
8246 break;
8247
8248 /* If the shift count is a constant and we can do arithmetic in
8249 the mode of the shift, refine which bits we need. Otherwise, use the
8250 conservative form of the mask. */
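/* For example, (ashift X 3) under MASK == 0xf0 only needs bits
   0xf0 >> 3 == 0x1e of X; every other bit of X is shifted outside the
   positions selected by MASK.  */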
8251 if (CONST_INT_P (XEXP (x, 1))
8252 && INTVAL (XEXP (x, 1)) >= 0
8253 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (op_mode)
8254 && HWI_COMPUTABLE_MODE_P (op_mode))
8255 mask >>= INTVAL (XEXP (x, 1));
8256 else
8257 mask = fuller_mask;
8258
8259 op0 = gen_lowpart_or_truncate (op_mode,
8260 force_to_mode (XEXP (x, 0), op_mode,
8261 mask, next_select));
8262
8263 if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0))
8264 x = simplify_gen_binary (code, op_mode, op0, XEXP (x, 1));
8265 break;
8266
8267 case LSHIFTRT:
8268 /* Here we can only do something if the shift count is a constant,
8269 this shift constant is valid for the host, and we can do arithmetic
8270 in OP_MODE. */
8271
8272 if (CONST_INT_P (XEXP (x, 1))
8273 && INTVAL (XEXP (x, 1)) >= 0
8274 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
8275 && HWI_COMPUTABLE_MODE_P (op_mode))
8276 {
8277 rtx inner = XEXP (x, 0);
8278 unsigned HOST_WIDE_INT inner_mask;
8279
8280 /* Select the mask of the bits we need for the shift operand. */
8281 inner_mask = mask << INTVAL (XEXP (x, 1));
8282
8283 /* We can only change the mode of the shift if we can do arithmetic
8284 in the mode of the shift and INNER_MASK is no wider than the
8285 width of X's mode. */
8286 if ((inner_mask & ~GET_MODE_MASK (GET_MODE (x))) != 0)
8287 op_mode = GET_MODE (x);
8288
8289 inner = force_to_mode (inner, op_mode, inner_mask, next_select);
8290
8291 if (GET_MODE (x) != op_mode || inner != XEXP (x, 0))
8292 x = simplify_gen_binary (LSHIFTRT, op_mode, inner, XEXP (x, 1));
8293 }
8294
8295 /* If we have (and (lshiftrt FOO C1) C2) where the combination of the
8296 shift and AND produces only copies of the sign bit (C2 is one less
8297 than a power of two), we can do this with just a shift. */
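/* For example, if FOO has at least 29 sign-bit copies in SImode, then
   (and (lshiftrt FOO 28) 7) is equivalent to (lshiftrt FOO 29): both
   select three copies of the sign bit.  */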
8298
8299 if (GET_CODE (x) == LSHIFTRT
8300 && CONST_INT_P (XEXP (x, 1))
8301 /* The shift puts one of the sign bit copies in the least significant
8302 bit. */
8303 && ((INTVAL (XEXP (x, 1))
8304 + num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
8305 >= GET_MODE_PRECISION (GET_MODE (x)))
8306 && exact_log2 (mask + 1) >= 0
8307 /* Number of bits left after the shift must be more than the mask
8308 needs. */
8309 && ((INTVAL (XEXP (x, 1)) + exact_log2 (mask + 1))
8310 <= GET_MODE_PRECISION (GET_MODE (x)))
8311 /* Must be more sign bit copies than the mask needs. */
8312 && ((int) num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
8313 >= exact_log2 (mask + 1)))
8314 x = simplify_gen_binary (LSHIFTRT, GET_MODE (x), XEXP (x, 0),
8315 GEN_INT (GET_MODE_PRECISION (GET_MODE (x))
8316 - exact_log2 (mask + 1)));
8317
8318 goto shiftrt;
8319
8320 case ASHIFTRT:
8321 /* If we are just looking for the sign bit, we don't need this shift at
8322 all, even if it has a variable count. */
8323 if (val_signbit_p (GET_MODE (x), mask))
8324 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
8325
8326 /* If this is a shift by a constant, get a mask that contains those bits
8327 that are not copies of the sign bit. We then have two cases: If
8328 MASK only includes those bits, this can be a logical shift, which may
8329 allow simplifications. If MASK is a single-bit field not within
8330 those bits, we are requesting a copy of the sign bit and hence can
8331 shift the sign bit to the appropriate location. */
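/* For example, for (ashiftrt X 8) in SImode the bits that are not
   sign-bit copies are 0xffffff.  If MASK fits within them, an
   (lshiftrt X 8) gives the same masked result; if MASK is the single
   bit 1 << 24, a logical shift by 31 - 24 == 7 moves the sign bit
   there.  */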
8332
8333 if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) >= 0
8334 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
8335 {
8336 int i;
8337
8338 /* If the considered data is wider than HOST_WIDE_INT, we can't
8339 represent a mask for all its bits in a single scalar.
8340 But we only care about the lower bits, so calculate these. */
8341
8342 if (GET_MODE_PRECISION (GET_MODE (x)) > HOST_BITS_PER_WIDE_INT)
8343 {
8344 nonzero = ~(unsigned HOST_WIDE_INT) 0;
8345
8346 /* GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1))
8347 is the number of bits a full-width mask would have set.
8348 We need only shift if these are fewer than nonzero can
8349 hold. If not, we must keep all bits set in nonzero. */
8350
8351 if (GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1))
8352 < HOST_BITS_PER_WIDE_INT)
8353 nonzero >>= INTVAL (XEXP (x, 1))
8354 + HOST_BITS_PER_WIDE_INT
8355 - GET_MODE_PRECISION (GET_MODE (x));
8356 }
8357 else
8358 {
8359 nonzero = GET_MODE_MASK (GET_MODE (x));
8360 nonzero >>= INTVAL (XEXP (x, 1));
8361 }
8362
8363 if ((mask & ~nonzero) == 0)
8364 {
8365 x = simplify_shift_const (NULL_RTX, LSHIFTRT, GET_MODE (x),
8366 XEXP (x, 0), INTVAL (XEXP (x, 1)));
8367 if (GET_CODE (x) != ASHIFTRT)
8368 return force_to_mode (x, mode, mask, next_select);
8369 }
8370
8371 else if ((i = exact_log2 (mask)) >= 0)
8372 {
8373 x = simplify_shift_const
8374 (NULL_RTX, LSHIFTRT, GET_MODE (x), XEXP (x, 0),
8375 GET_MODE_PRECISION (GET_MODE (x)) - 1 - i);
8376
8377 if (GET_CODE (x) != ASHIFTRT)
8378 return force_to_mode (x, mode, mask, next_select);
8379 }
8380 }
8381
8382 /* If MASK is 1, convert this to an LSHIFTRT. This can be done
8383 even if the shift count isn't a constant. */
8384 if (mask == 1)
8385 x = simplify_gen_binary (LSHIFTRT, GET_MODE (x),
8386 XEXP (x, 0), XEXP (x, 1));
8387
8388 shiftrt:
8389
8390 /* If this is a zero- or sign-extension operation that just affects bits
8391 we don't care about, remove it. Be sure the call above returned
8392 something that is still a shift. */
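/* For example, (lshiftrt (ashift FOO 24) 24) in SImode under
   MASK == 0xff reduces to FOO itself, since only FOO's low eight bits
   are wanted.  */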
8393
8394 if ((GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ASHIFTRT)
8395 && CONST_INT_P (XEXP (x, 1))
8396 && INTVAL (XEXP (x, 1)) >= 0
8397 && (INTVAL (XEXP (x, 1))
8398 <= GET_MODE_PRECISION (GET_MODE (x)) - (floor_log2 (mask) + 1))
8399 && GET_CODE (XEXP (x, 0)) == ASHIFT
8400 && XEXP (XEXP (x, 0), 1) == XEXP (x, 1))
8401 return force_to_mode (XEXP (XEXP (x, 0), 0), mode, mask,
8402 next_select);
8403
8404 break;
8405
8406 case ROTATE:
8407 case ROTATERT:
8408 /* If the shift count is constant and we can do computations
8409 in the mode of X, compute where the bits we care about are.
8410 Otherwise, we can't do anything. Don't change the mode of
8411 the shift or propagate MODE into the shift, though. */
8412 if (CONST_INT_P (XEXP (x, 1))
8413 && INTVAL (XEXP (x, 1)) >= 0)
8414 {
8415 temp = simplify_binary_operation (code == ROTATE ? ROTATERT : ROTATE,
8416 GET_MODE (x), GEN_INT (mask),
8417 XEXP (x, 1));
8418 if (temp && CONST_INT_P (temp))
8419 SUBST (XEXP (x, 0),
8420 force_to_mode (XEXP (x, 0), GET_MODE (x),
8421 INTVAL (temp), next_select));
8422 }
8423 break;
8424
8425 case NEG:
8426 /* If we just want the low-order bit, the NEG isn't needed since it
8427 won't change the low-order bit. */
8428 if (mask == 1)
8429 return force_to_mode (XEXP (x, 0), mode, mask, just_select);
8430
8431 /* We need any bits less significant than the most significant bit in
8432 MASK since carries from those bits will affect the bits we are
8433 interested in. */
8434 mask = fuller_mask;
8435 goto unop;
8436
8437 case NOT:
8438 /* (not FOO) is (xor FOO CONST), so if FOO is an LSHIFTRT, we can do the
8439 same as the XOR case above. Ensure that the constant we form is not
8440 wider than the mode of X. */
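/* For example, (not (lshiftrt FOO 4)) under MASK == 3 is treated as
   (xor (lshiftrt FOO 4) 3) and becomes
   (lshiftrt (xor FOO (const_int 48)) 4).  */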
8441
8442 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8443 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8444 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
8445 && (INTVAL (XEXP (XEXP (x, 0), 1)) + floor_log2 (mask)
8446 < GET_MODE_PRECISION (GET_MODE (x)))
8447 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT)
8448 {
8449 temp = gen_int_mode (mask << INTVAL (XEXP (XEXP (x, 0), 1)),
8450 GET_MODE (x));
8451 temp = simplify_gen_binary (XOR, GET_MODE (x),
8452 XEXP (XEXP (x, 0), 0), temp);
8453 x = simplify_gen_binary (LSHIFTRT, GET_MODE (x),
8454 temp, XEXP (XEXP (x, 0), 1));
8455
8456 return force_to_mode (x, mode, mask, next_select);
8457 }
8458
8459 /* (and (not FOO) CONST) is (not (or FOO (not CONST))), so we must
8460 use the full mask inside the NOT. */
8461 mask = fuller_mask;
8462
8463 unop:
8464 op0 = gen_lowpart_or_truncate (op_mode,
8465 force_to_mode (XEXP (x, 0), mode, mask,
8466 next_select));
8467 if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0))
8468 x = simplify_gen_unary (code, op_mode, op0, op_mode);
8469 break;
8470
8471 case NE:
8472 /* (and (ne FOO 0) CONST) can be (and FOO CONST) if CONST is included
8473 in STORE_FLAG_VALUE and FOO has a single bit that might be nonzero,
8474 which is equal to STORE_FLAG_VALUE. */
8475 if ((mask & ~STORE_FLAG_VALUE) == 0
8476 && XEXP (x, 1) == const0_rtx
8477 && GET_MODE (XEXP (x, 0)) == mode
8478 && exact_log2 (nonzero_bits (XEXP (x, 0), mode)) >= 0
8479 && (nonzero_bits (XEXP (x, 0), mode)
8480 == (unsigned HOST_WIDE_INT) STORE_FLAG_VALUE))
8481 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
8482
8483 break;
8484
8485 case IF_THEN_ELSE:
8486 /* We have no way of knowing if the IF_THEN_ELSE can itself be
8487 written in a narrower mode. We play it safe and do not do so. */
8488
8489 SUBST (XEXP (x, 1),
8490 gen_lowpart_or_truncate (GET_MODE (x),
8491 force_to_mode (XEXP (x, 1), mode,
8492 mask, next_select)));
8493 SUBST (XEXP (x, 2),
8494 gen_lowpart_or_truncate (GET_MODE (x),
8495 force_to_mode (XEXP (x, 2), mode,
8496 mask, next_select)));
8497 break;
8498
8499 default:
8500 break;
8501 }
8502
8503 /* Ensure we return a value of the proper mode. */
8504 return gen_lowpart_or_truncate (mode, x);
8505 }
8506 \f
8507 /* Return nonzero if X is an expression that has one of two values depending on
8508 whether some other value is zero or nonzero. In that case, we return the
8509 value that is being tested, *PTRUE is set to the value if the rtx being
8510 returned has a nonzero value, and *PFALSE is set to the other alternative.
8511
8512 If we return zero, we set *PTRUE and *PFALSE to X. */
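/* For example, if X is (ne A (const_int 0)), the value being tested is A;
   *PTRUE is set to const_true_rtx and *PFALSE to const0_rtx.  */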
8513
8514 static rtx
8515 if_then_else_cond (rtx x, rtx *ptrue, rtx *pfalse)
8516 {
8517 enum machine_mode mode = GET_MODE (x);
8518 enum rtx_code code = GET_CODE (x);
8519 rtx cond0, cond1, true0, true1, false0, false1;
8520 unsigned HOST_WIDE_INT nz;
8521
8522 /* If we are comparing a value against zero, we are done. */
8523 if ((code == NE || code == EQ)
8524 && XEXP (x, 1) == const0_rtx)
8525 {
8526 *ptrue = (code == NE) ? const_true_rtx : const0_rtx;
8527 *pfalse = (code == NE) ? const0_rtx : const_true_rtx;
8528 return XEXP (x, 0);
8529 }
8530
8531 /* If this is a unary operation whose operand has one of two values, apply
8532 our opcode to compute those values. */
8533 else if (UNARY_P (x)
8534 && (cond0 = if_then_else_cond (XEXP (x, 0), &true0, &false0)) != 0)
8535 {
8536 *ptrue = simplify_gen_unary (code, mode, true0, GET_MODE (XEXP (x, 0)));
8537 *pfalse = simplify_gen_unary (code, mode, false0,
8538 GET_MODE (XEXP (x, 0)));
8539 return cond0;
8540 }
8541
8542 /* If this is a COMPARE, do nothing, since the IF_THEN_ELSE we would
8543 make can't possibly match and would suppress other optimizations. */
8544 else if (code == COMPARE)
8545 ;
8546
8547 /* If this is a binary operation, see if either side has only one of two
8548 values. If either one does or if both do and they are conditional on
8549 the same value, compute the new true and false values. */
8550 else if (BINARY_P (x))
8551 {
8552 cond0 = if_then_else_cond (XEXP (x, 0), &true0, &false0);
8553 cond1 = if_then_else_cond (XEXP (x, 1), &true1, &false1);
8554
8555 if ((cond0 != 0 || cond1 != 0)
8556 && ! (cond0 != 0 && cond1 != 0 && ! rtx_equal_p (cond0, cond1)))
8557 {
8558 /* If if_then_else_cond returned zero, then true/false are the
8559 same rtl. We must copy one of them to prevent invalid rtl
8560 sharing. */
8561 if (cond0 == 0)
8562 true0 = copy_rtx (true0);
8563 else if (cond1 == 0)
8564 true1 = copy_rtx (true1);
8565
8566 if (COMPARISON_P (x))
8567 {
8568 *ptrue = simplify_gen_relational (code, mode, VOIDmode,
8569 true0, true1);
8570 *pfalse = simplify_gen_relational (code, mode, VOIDmode,
8571 false0, false1);
8572 }
8573 else
8574 {
8575 *ptrue = simplify_gen_binary (code, mode, true0, true1);
8576 *pfalse = simplify_gen_binary (code, mode, false0, false1);
8577 }
8578
8579 return cond0 ? cond0 : cond1;
8580 }
8581
8582 /* See if we have PLUS, IOR, XOR, MINUS or UMAX, where one of the
8583 operands is zero when the other is nonzero, and vice-versa,
8584 and STORE_FLAG_VALUE is 1 or -1. */
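/* For example, with STORE_FLAG_VALUE == 1,
   (plus (mult (eq A B) C) (mult (ne A B) D)) evaluates to C when A == B
   and to D otherwise, so we return (eq A B) with *PTRUE == C and
   *PFALSE == D.  */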
8585
8586 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
8587 && (code == PLUS || code == IOR || code == XOR || code == MINUS
8588 || code == UMAX)
8589 && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
8590 {
8591 rtx op0 = XEXP (XEXP (x, 0), 1);
8592 rtx op1 = XEXP (XEXP (x, 1), 1);
8593
8594 cond0 = XEXP (XEXP (x, 0), 0);
8595 cond1 = XEXP (XEXP (x, 1), 0);
8596
8597 if (COMPARISON_P (cond0)
8598 && COMPARISON_P (cond1)
8599 && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
8600 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
8601 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
8602 || ((swap_condition (GET_CODE (cond0))
8603 == reversed_comparison_code (cond1, NULL))
8604 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
8605 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
8606 && ! side_effects_p (x))
8607 {
8608 *ptrue = simplify_gen_binary (MULT, mode, op0, const_true_rtx);
8609 *pfalse = simplify_gen_binary (MULT, mode,
8610 (code == MINUS
8611 ? simplify_gen_unary (NEG, mode,
8612 op1, mode)
8613 : op1),
8614 const_true_rtx);
8615 return cond0;
8616 }
8617 }
8618
8619 /* Similarly for MULT, AND and UMIN, except that for these the result
8620 is always zero. */
8621 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
8622 && (code == MULT || code == AND || code == UMIN)
8623 && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
8624 {
8625 cond0 = XEXP (XEXP (x, 0), 0);
8626 cond1 = XEXP (XEXP (x, 1), 0);
8627
8628 if (COMPARISON_P (cond0)
8629 && COMPARISON_P (cond1)
8630 && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
8631 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
8632 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
8633 || ((swap_condition (GET_CODE (cond0))
8634 == reversed_comparison_code (cond1, NULL))
8635 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
8636 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
8637 && ! side_effects_p (x))
8638 {
8639 *ptrue = *pfalse = const0_rtx;
8640 return cond0;
8641 }
8642 }
8643 }
8644
8645 else if (code == IF_THEN_ELSE)
8646 {
8647 /* If we have IF_THEN_ELSE already, extract the condition and
8648 canonicalize it if it is NE or EQ. */
8649 cond0 = XEXP (x, 0);
8650 *ptrue = XEXP (x, 1), *pfalse = XEXP (x, 2);
8651 if (GET_CODE (cond0) == NE && XEXP (cond0, 1) == const0_rtx)
8652 return XEXP (cond0, 0);
8653 else if (GET_CODE (cond0) == EQ && XEXP (cond0, 1) == const0_rtx)
8654 {
8655 *ptrue = XEXP (x, 2), *pfalse = XEXP (x, 1);
8656 return XEXP (cond0, 0);
8657 }
8658 else
8659 return cond0;
8660 }
8661
8662 /* If X is a SUBREG, we can narrow both the true and false values
8663 of the inner expression, if there is a condition. */
8664 else if (code == SUBREG
8665 && 0 != (cond0 = if_then_else_cond (SUBREG_REG (x),
8666 &true0, &false0)))
8667 {
8668 true0 = simplify_gen_subreg (mode, true0,
8669 GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
8670 false0 = simplify_gen_subreg (mode, false0,
8671 GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
8672 if (true0 && false0)
8673 {
8674 *ptrue = true0;
8675 *pfalse = false0;
8676 return cond0;
8677 }
8678 }
8679
8680 /* If X is a constant, this isn't special and will cause confusion
8681 if we treat it as such. Likewise if it is equivalent to a constant. */
8682 else if (CONSTANT_P (x)
8683 || ((cond0 = get_last_value (x)) != 0 && CONSTANT_P (cond0)))
8684 ;
8685
8686 /* If we're in BImode, canonicalize on 0 and STORE_FLAG_VALUE, as that
8687 will be least confusing to the rest of the compiler. */
8688 else if (mode == BImode)
8689 {
8690 *ptrue = GEN_INT (STORE_FLAG_VALUE), *pfalse = const0_rtx;
8691 return x;
8692 }
8693
8694 /* If X is known to be either 0 or -1, those are the true and
8695 false values when testing X. */
8696 else if (x == constm1_rtx || x == const0_rtx
8697 || (mode != VOIDmode
8698 && num_sign_bit_copies (x, mode) == GET_MODE_PRECISION (mode)))
8699 {
8700 *ptrue = constm1_rtx, *pfalse = const0_rtx;
8701 return x;
8702 }
8703
8704 /* Likewise for 0 or a single bit. */
8705 else if (HWI_COMPUTABLE_MODE_P (mode)
8706 && exact_log2 (nz = nonzero_bits (x, mode)) >= 0)
8707 {
8708 *ptrue = gen_int_mode (nz, mode), *pfalse = const0_rtx;
8709 return x;
8710 }
8711
8712 /* Otherwise fail; show no condition with true and false values the same. */
8713 *ptrue = *pfalse = x;
8714 return 0;
8715 }
8716 \f
8717 /* Return the value of expression X given the fact that condition COND
8718 is known to be true when applied to REG as its first operand and VAL
8719 as its second. X is known to not be shared and so can be modified in
8720 place.
8721
8722 We only handle the simplest cases, and specifically those cases that
8723 arise with IF_THEN_ELSE expressions. */
8724
8725 static rtx
8726 known_cond (rtx x, enum rtx_code cond, rtx reg, rtx val)
8727 {
8728 enum rtx_code code = GET_CODE (x);
8729 rtx temp;
8730 const char *fmt;
8731 int i, j;
8732
8733 if (side_effects_p (x))
8734 return x;
8735
8736 /* If either operand of the condition is a floating point value,
8737 then we have to avoid collapsing an EQ comparison. */
8738 if (cond == EQ
8739 && rtx_equal_p (x, reg)
8740 && ! FLOAT_MODE_P (GET_MODE (x))
8741 && ! FLOAT_MODE_P (GET_MODE (val)))
8742 return val;
8743
8744 if (cond == UNEQ && rtx_equal_p (x, reg))
8745 return val;
8746
8747 /* If X is (abs REG) and we know something about REG's relationship
8748 with zero, we may be able to simplify this. */
8749
8750 if (code == ABS && rtx_equal_p (XEXP (x, 0), reg) && val == const0_rtx)
8751 switch (cond)
8752 {
8753 case GE: case GT: case EQ:
8754 return XEXP (x, 0);
8755 case LT: case LE:
8756 return simplify_gen_unary (NEG, GET_MODE (XEXP (x, 0)),
8757 XEXP (x, 0),
8758 GET_MODE (XEXP (x, 0)));
8759 default:
8760 break;
8761 }
8762
8763 /* The only other cases we handle are MIN, MAX, and comparisons if the
8764 operands are the same as REG and VAL. */
8765
8766 else if (COMPARISON_P (x) || COMMUTATIVE_ARITH_P (x))
8767 {
8768 if (rtx_equal_p (XEXP (x, 0), val))
8769 cond = swap_condition (cond), temp = val, val = reg, reg = temp;
8770
8771 if (rtx_equal_p (XEXP (x, 0), reg) && rtx_equal_p (XEXP (x, 1), val))
8772 {
8773 if (COMPARISON_P (x))
8774 {
8775 if (comparison_dominates_p (cond, code))
8776 return const_true_rtx;
8777
8778 code = reversed_comparison_code (x, NULL);
8779 if (code != UNKNOWN
8780 && comparison_dominates_p (cond, code))
8781 return const0_rtx;
8782 else
8783 return x;
8784 }
8785 else if (code == SMAX || code == SMIN
8786 || code == UMIN || code == UMAX)
8787 {
8788 int unsignedp = (code == UMIN || code == UMAX);
8789
8790 /* Do not reverse the condition when it is NE or EQ.
8791 This is because we cannot conclude anything about
8792 the value of 'SMAX (x, y)' when x is not equal to y,
8793 but we can when x equals y. */
8794 if ((code == SMAX || code == UMAX)
8795 && ! (cond == EQ || cond == NE))
8796 cond = reverse_condition (cond);
8797
8798 switch (cond)
8799 {
8800 case GE: case GT:
8801 return unsignedp ? x : XEXP (x, 1);
8802 case LE: case LT:
8803 return unsignedp ? x : XEXP (x, 0);
8804 case GEU: case GTU:
8805 return unsignedp ? XEXP (x, 1) : x;
8806 case LEU: case LTU:
8807 return unsignedp ? XEXP (x, 0) : x;
8808 default:
8809 break;
8810 }
8811 }
8812 }
8813 }
8814 else if (code == SUBREG)
8815 {
8816 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (x));
8817 rtx new_rtx, r = known_cond (SUBREG_REG (x), cond, reg, val);
8818
8819 if (SUBREG_REG (x) != r)
8820 {
8821 /* We must simplify subreg here, before we lose track of the
8822 original inner_mode. */
8823 new_rtx = simplify_subreg (GET_MODE (x), r,
8824 inner_mode, SUBREG_BYTE (x));
8825 if (new_rtx)
8826 return new_rtx;
8827 else
8828 SUBST (SUBREG_REG (x), r);
8829 }
8830
8831 return x;
8832 }
8833 /* We don't have to handle SIGN_EXTEND here, because even in the
8834 case of replacing something with a modeless CONST_INT, a
8835 CONST_INT is already (supposed to be) a valid sign extension for
8836 its narrower mode, which implies it's already properly
8837 sign-extended for the wider mode. Now, for ZERO_EXTEND, the
8838 story is different. */
8839 else if (code == ZERO_EXTEND)
8840 {
8841 enum machine_mode inner_mode = GET_MODE (XEXP (x, 0));
8842 rtx new_rtx, r = known_cond (XEXP (x, 0), cond, reg, val);
8843
8844 if (XEXP (x, 0) != r)
8845 {
8846 /* We must simplify the zero_extend here, before we lose
8847 track of the original inner_mode. */
8848 new_rtx = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
8849 r, inner_mode);
8850 if (new_rtx)
8851 return new_rtx;
8852 else
8853 SUBST (XEXP (x, 0), r);
8854 }
8855
8856 return x;
8857 }
8858
8859 fmt = GET_RTX_FORMAT (code);
8860 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
8861 {
8862 if (fmt[i] == 'e')
8863 SUBST (XEXP (x, i), known_cond (XEXP (x, i), cond, reg, val));
8864 else if (fmt[i] == 'E')
8865 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8866 SUBST (XVECEXP (x, i, j), known_cond (XVECEXP (x, i, j),
8867 cond, reg, val));
8868 }
8869
8870 return x;
8871 }
8872 \f
8873 /* See if X and Y are equal for the purposes of seeing if we can rewrite an
8874 assignment as a field assignment. */
8875
8876 static int
8877 rtx_equal_for_field_assignment_p (rtx x, rtx y)
8878 {
8879 if (x == y || rtx_equal_p (x, y))
8880 return 1;
8881
8882 if (x == 0 || y == 0 || GET_MODE (x) != GET_MODE (y))
8883 return 0;
8884
8885 /* Check for a paradoxical SUBREG of a MEM compared with the MEM.
8886 Note that all SUBREGs of MEM are paradoxical; otherwise they
8887 would have been rewritten. */
8888 if (MEM_P (x) && GET_CODE (y) == SUBREG
8889 && MEM_P (SUBREG_REG (y))
8890 && rtx_equal_p (SUBREG_REG (y),
8891 gen_lowpart (GET_MODE (SUBREG_REG (y)), x)))
8892 return 1;
8893
8894 if (MEM_P (y) && GET_CODE (x) == SUBREG
8895 && MEM_P (SUBREG_REG (x))
8896 && rtx_equal_p (SUBREG_REG (x),
8897 gen_lowpart (GET_MODE (SUBREG_REG (x)), y)))
8898 return 1;
8899
8900 /* We used to see if get_last_value of X and Y were the same but that's
8901 not correct. In one direction, we'll cause the assignment to have
8902 the wrong destination and in the other case, we'll import a register into
8903 this insn that might already have been dead. So fail if none of the
8904 above cases are true. */
8905 return 0;
8906 }
8907 \f
8908 /* See if X, a SET operation, can be rewritten as a bit-field assignment.
8909 Return that assignment if so.
8910
8911 We only handle the most common cases. */
8912
8913 static rtx
8914 make_field_assignment (rtx x)
8915 {
8916 rtx dest = SET_DEST (x);
8917 rtx src = SET_SRC (x);
8918 rtx assign;
8919 rtx rhs, lhs;
8920 HOST_WIDE_INT c1;
8921 HOST_WIDE_INT pos;
8922 unsigned HOST_WIDE_INT len;
8923 rtx other;
8924 enum machine_mode mode;
8925
8926 /* If SRC was (and (not (ashift (const_int 1) POS)) DEST), this is
8927 a clear of a one-bit field. We will have changed it to
8928 (and (rotate (const_int -2) POS) DEST), so check for that. Also check
8929 for a SUBREG. */
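/* For example, (set R (and (rotate (const_int -2) POS) R)) typically
   becomes (set (zero_extract R (const_int 1) POS) (const_int 0)),
   i.e. a clear of bit POS of R.  */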
8930
8931 if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == ROTATE
8932 && CONST_INT_P (XEXP (XEXP (src, 0), 0))
8933 && INTVAL (XEXP (XEXP (src, 0), 0)) == -2
8934 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
8935 {
8936 assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
8937 1, 1, 1, 0);
8938 if (assign != 0)
8939 return gen_rtx_SET (VOIDmode, assign, const0_rtx);
8940 return x;
8941 }
8942
8943 if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == SUBREG
8944 && subreg_lowpart_p (XEXP (src, 0))
8945 && (GET_MODE_SIZE (GET_MODE (XEXP (src, 0)))
8946 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (XEXP (src, 0)))))
8947 && GET_CODE (SUBREG_REG (XEXP (src, 0))) == ROTATE
8948 && CONST_INT_P (XEXP (SUBREG_REG (XEXP (src, 0)), 0))
8949 && INTVAL (XEXP (SUBREG_REG (XEXP (src, 0)), 0)) == -2
8950 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
8951 {
8952 assign = make_extraction (VOIDmode, dest, 0,
8953 XEXP (SUBREG_REG (XEXP (src, 0)), 1),
8954 1, 1, 1, 0);
8955 if (assign != 0)
8956 return gen_rtx_SET (VOIDmode, assign, const0_rtx);
8957 return x;
8958 }
8959
8960 /* If SRC is (ior (ashift (const_int 1) POS) DEST), this is a set of a
8961 one-bit field. */
8962 if (GET_CODE (src) == IOR && GET_CODE (XEXP (src, 0)) == ASHIFT
8963 && XEXP (XEXP (src, 0), 0) == const1_rtx
8964 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
8965 {
8966 assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
8967 1, 1, 1, 0);
8968 if (assign != 0)
8969 return gen_rtx_SET (VOIDmode, assign, const1_rtx);
8970 return x;
8971 }
8972
8973 /* If DEST is already a field assignment, i.e. ZERO_EXTRACT, and the
8974 SRC is an AND with all bits of that field set, then we can discard
8975 the AND. */
8976 if (GET_CODE (dest) == ZERO_EXTRACT
8977 && CONST_INT_P (XEXP (dest, 1))
8978 && GET_CODE (src) == AND
8979 && CONST_INT_P (XEXP (src, 1)))
8980 {
8981 HOST_WIDE_INT width = INTVAL (XEXP (dest, 1));
8982 unsigned HOST_WIDE_INT and_mask = INTVAL (XEXP (src, 1));
8983 unsigned HOST_WIDE_INT ze_mask;
8984
8985 if (width >= HOST_BITS_PER_WIDE_INT)
8986 ze_mask = -1;
8987 else
8988 ze_mask = ((unsigned HOST_WIDE_INT)1 << width) - 1;
8989
8990 /* Complete overlap. We can remove the source AND. */
8991 if ((and_mask & ze_mask) == ze_mask)
8992 return gen_rtx_SET (VOIDmode, dest, XEXP (src, 0));
8993
8994 /* Partial overlap. We can reduce the source AND. */
8995 if ((and_mask & ze_mask) != and_mask)
8996 {
8997 mode = GET_MODE (src);
8998 src = gen_rtx_AND (mode, XEXP (src, 0),
8999 gen_int_mode (and_mask & ze_mask, mode));
9000 return gen_rtx_SET (VOIDmode, dest, src);
9001 }
9002 }
9003
9004 /* The other case we handle is assignments into a constant-position
9005 field. They look like (ior/xor (and DEST C1) OTHER). If C1 represents
9006 a mask that has all one bits except for a group of zero bits and
9007 OTHER is known to have zeros where C1 has ones, this is such an
9008 assignment. Compute the position and length from C1. Shift OTHER
9009 to the appropriate position, force it to the required mode, and
9010 make the extraction. Check for the AND in both operands. */
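/* For example, with DEST in SImode and SRC of the form
   (ior (and DEST (const_int 0xffff00ff)) OTHER), where OTHER is known
   to be zero outside bits 8..15, we get POS == 8 and LEN == 8 and
   roughly produce (set (zero_extract DEST 8 8) (lshiftrt OTHER 8)).  */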
9011
9012 if (GET_CODE (src) != IOR && GET_CODE (src) != XOR)
9013 return x;
9014
9015 rhs = expand_compound_operation (XEXP (src, 0));
9016 lhs = expand_compound_operation (XEXP (src, 1));
9017
9018 if (GET_CODE (rhs) == AND
9019 && CONST_INT_P (XEXP (rhs, 1))
9020 && rtx_equal_for_field_assignment_p (XEXP (rhs, 0), dest))
9021 c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
9022 else if (GET_CODE (lhs) == AND
9023 && CONST_INT_P (XEXP (lhs, 1))
9024 && rtx_equal_for_field_assignment_p (XEXP (lhs, 0), dest))
9025 c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
9026 else
9027 return x;
9028
9029 pos = get_pos_from_mask ((~c1) & GET_MODE_MASK (GET_MODE (dest)), &len);
9030 if (pos < 0 || pos + len > GET_MODE_PRECISION (GET_MODE (dest))
9031 || GET_MODE_PRECISION (GET_MODE (dest)) > HOST_BITS_PER_WIDE_INT
9032 || (c1 & nonzero_bits (other, GET_MODE (dest))) != 0)
9033 return x;
9034
9035 assign = make_extraction (VOIDmode, dest, pos, NULL_RTX, len, 1, 1, 0);
9036 if (assign == 0)
9037 return x;
9038
9039 /* The mode to use for the source is the mode of the assignment, or of
9040 what is inside a possible STRICT_LOW_PART. */
9041 mode = (GET_CODE (assign) == STRICT_LOW_PART
9042 ? GET_MODE (XEXP (assign, 0)) : GET_MODE (assign));
9043
9044 /* Shift OTHER right POS places and make it the source, restricting it
9045 to the proper length and mode. */
9046
9047 src = canon_reg_for_combine (simplify_shift_const (NULL_RTX, LSHIFTRT,
9048 GET_MODE (src),
9049 other, pos),
9050 dest);
9051 src = force_to_mode (src, mode,
9052 GET_MODE_PRECISION (mode) >= HOST_BITS_PER_WIDE_INT
9053 ? ~(unsigned HOST_WIDE_INT) 0
9054 : ((unsigned HOST_WIDE_INT) 1 << len) - 1,
9055 0);
9056
9057 /* If SRC is masked by an AND that does not make a difference in
9058 the value being stored, strip it. */
9059 if (GET_CODE (assign) == ZERO_EXTRACT
9060 && CONST_INT_P (XEXP (assign, 1))
9061 && INTVAL (XEXP (assign, 1)) < HOST_BITS_PER_WIDE_INT
9062 && GET_CODE (src) == AND
9063 && CONST_INT_P (XEXP (src, 1))
9064 && UINTVAL (XEXP (src, 1))
9065 == ((unsigned HOST_WIDE_INT) 1 << INTVAL (XEXP (assign, 1))) - 1)
9066 src = XEXP (src, 0);
9067
9068 return gen_rtx_SET (VOIDmode, assign, src);
9069 }
9070 \f
9071 /* See if X is of the form (+ (* a c) (* b c)) and convert to (* (+ a b) c)
9072 if so. */
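/* For example, (ior (and A C) (and B C)) becomes (and (ior A B) C).  */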
9073
9074 static rtx
9075 apply_distributive_law (rtx x)
9076 {
9077 enum rtx_code code = GET_CODE (x);
9078 enum rtx_code inner_code;
9079 rtx lhs, rhs, other;
9080 rtx tem;
9081
9082 /* Distributivity is not true for floating point as it can change the
9083 value. So we don't do it unless -funsafe-math-optimizations. */
9084 if (FLOAT_MODE_P (GET_MODE (x))
9085 && ! flag_unsafe_math_optimizations)
9086 return x;
9087
9088 /* The outer operation can only be one of the following: */
9089 if (code != IOR && code != AND && code != XOR
9090 && code != PLUS && code != MINUS)
9091 return x;
9092
9093 lhs = XEXP (x, 0);
9094 rhs = XEXP (x, 1);
9095
9096 /* If either operand is a primitive we can't do anything, so get out
9097 fast. */
9098 if (OBJECT_P (lhs) || OBJECT_P (rhs))
9099 return x;
9100
9101 lhs = expand_compound_operation (lhs);
9102 rhs = expand_compound_operation (rhs);
9103 inner_code = GET_CODE (lhs);
9104 if (inner_code != GET_CODE (rhs))
9105 return x;
9106
9107 /* See if the inner and outer operations distribute. */
9108 switch (inner_code)
9109 {
9110 case LSHIFTRT:
9111 case ASHIFTRT:
9112 case AND:
9113 case IOR:
9114 /* These all distribute except over PLUS. */
9115 if (code == PLUS || code == MINUS)
9116 return x;
9117 break;
9118
9119 case MULT:
9120 if (code != PLUS && code != MINUS)
9121 return x;
9122 break;
9123
9124 case ASHIFT:
9125 /* This is also a multiply, so it distributes over everything. */
9126 break;
9127
9128 /* This used to handle SUBREG, but this turned out to be counter-
9129 productive, since (subreg (op ...)) usually is not handled by
9130 insn patterns, and this "optimization" therefore transformed
9131 recognizable patterns into unrecognizable ones. Therefore the
9132 SUBREG case was removed from here.
9133
9134 It is possible that distributing SUBREG over arithmetic operations
9135 leads to an intermediate result that can then be optimized further,
9136 e.g. by moving the outer SUBREG to the other side of a SET as done
9137 in simplify_set. This seems to have been the original intent of
9138 handling SUBREGs here.
9139
9140 However, with current GCC this does not appear to actually happen,
9141 at least on major platforms. If some case is found where removing
9142 the SUBREG case here prevents follow-on optimizations, distributing
9143 SUBREGs ought to be re-added at that place, e.g. in simplify_set. */
9144
9145 default:
9146 return x;
9147 }
9148
9149 /* Set LHS and RHS to the inner operands (A and B in the example
9150 above) and set OTHER to the common operand (C in the example).
9151 There is only one way to do this unless the inner operation is
9152 commutative. */
9153 if (COMMUTATIVE_ARITH_P (lhs)
9154 && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 0)))
9155 other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 1);
9156 else if (COMMUTATIVE_ARITH_P (lhs)
9157 && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 1)))
9158 other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 0);
9159 else if (COMMUTATIVE_ARITH_P (lhs)
9160 && rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 0)))
9161 other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 1);
9162 else if (rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 1)))
9163 other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 0);
9164 else
9165 return x;
9166
9167 /* Form the new inner operation, seeing if it simplifies first. */
9168 tem = simplify_gen_binary (code, GET_MODE (x), lhs, rhs);
9169
9170 /* There is one exception to the general way of distributing:
9171 (a | c) ^ (b | c) -> (a ^ b) & ~c */
9172 if (code == XOR && inner_code == IOR)
9173 {
9174 inner_code = AND;
9175 other = simplify_gen_unary (NOT, GET_MODE (x), other, GET_MODE (x));
9176 }
9177
9178 /* We may be able to continue distributing the result, so call
9179 ourselves recursively on the inner operation before forming the
9180 outer operation, which we return. */
9181 return simplify_gen_binary (inner_code, GET_MODE (x),
9182 apply_distributive_law (tem), other);
9183 }
9184
9185 /* See if X is of the form (* (+ A B) C), and if so convert to
9186 (+ (* A C) (* B C)) and try to simplify.
9187
9188 Most of the time, this results in no change. However, if some of
9189 the operands are the same or inverses of each other, simplifications
9190 will result.
9191
9192 For example, (and (ior A B) (not B)) can occur as the result of
9193 expanding a bit field assignment. When we apply the distributive
9194 law to this, we get (ior (and A (not B)) (and B (not B))),
9195 which then simplifies to (and A (not B)).
9196
9197 Note that no checks happen on the validity of applying the inverse
9198 distributive law. This is pointless since we can do it in the
9199 few places where this routine is called.
9200
9201 N is the index of the term that is decomposed (the arithmetic operation,
9202 i.e. (+ A B) in the first example above). !N is the index of the term that
9203 is distributed, i.e. of C in the first example above. */
9204 static rtx
9205 distribute_and_simplify_rtx (rtx x, int n)
9206 {
9207 enum machine_mode mode;
9208 enum rtx_code outer_code, inner_code;
9209 rtx decomposed, distributed, inner_op0, inner_op1, new_op0, new_op1, tmp;
9210
9211 /* Distributivity is not true for floating point as it can change the
9212 value. So we don't do it unless -funsafe-math-optimizations. */
9213 if (FLOAT_MODE_P (GET_MODE (x))
9214 && ! flag_unsafe_math_optimizations)
9215 return NULL_RTX;
9216
9217 decomposed = XEXP (x, n);
9218 if (!ARITHMETIC_P (decomposed))
9219 return NULL_RTX;
9220
9221 mode = GET_MODE (x);
9222 outer_code = GET_CODE (x);
9223 distributed = XEXP (x, !n);
9224
9225 inner_code = GET_CODE (decomposed);
9226 inner_op0 = XEXP (decomposed, 0);
9227 inner_op1 = XEXP (decomposed, 1);
9228
9229 /* Special case (and (xor B C) (not A)), which is equivalent to
9230 (xor (ior A B) (ior A C)) */
9231 if (outer_code == AND && inner_code == XOR && GET_CODE (distributed) == NOT)
9232 {
9233 distributed = XEXP (distributed, 0);
9234 outer_code = IOR;
9235 }
9236
9237 if (n == 0)
9238 {
9239 /* Distribute the second term. */
9240 new_op0 = simplify_gen_binary (outer_code, mode, inner_op0, distributed);
9241 new_op1 = simplify_gen_binary (outer_code, mode, inner_op1, distributed);
9242 }
9243 else
9244 {
9245 /* Distribute the first term. */
9246 new_op0 = simplify_gen_binary (outer_code, mode, distributed, inner_op0);
9247 new_op1 = simplify_gen_binary (outer_code, mode, distributed, inner_op1);
9248 }
9249
9250 tmp = apply_distributive_law (simplify_gen_binary (inner_code, mode,
9251 new_op0, new_op1));
9252 if (GET_CODE (tmp) != outer_code
9253 && (set_src_cost (tmp, optimize_this_for_speed_p)
9254 < set_src_cost (x, optimize_this_for_speed_p)))
9255 return tmp;
9256
9257 return NULL_RTX;
9258 }
9259 \f
9260 /* Simplify a logical `and' of VAROP with the constant CONSTOP, to be done
9261 in MODE. Return an equivalent form, if different from (and VAROP
9262 (const_int CONSTOP)). Otherwise, return NULL_RTX. */
9263
9264 static rtx
9265 simplify_and_const_int_1 (enum machine_mode mode, rtx varop,
9266 unsigned HOST_WIDE_INT constop)
9267 {
9268 unsigned HOST_WIDE_INT nonzero;
9269 unsigned HOST_WIDE_INT orig_constop;
9270 rtx orig_varop;
9271 int i;
9272
9273 orig_varop = varop;
9274 orig_constop = constop;
9275 if (GET_CODE (varop) == CLOBBER)
9276 return NULL_RTX;
9277
9278 /* Simplify VAROP knowing that we will be only looking at some of the
9279 bits in it.
9280
9281 Note by passing in CONSTOP, we guarantee that the bits not set in
9282 CONSTOP are not significant and will never be examined. We must
9283 ensure that is the case by explicitly masking out those bits
9284 before returning. */
9285 varop = force_to_mode (varop, mode, constop, 0);
9286
9287 /* If VAROP is a CLOBBER, we will fail so return it. */
9288 if (GET_CODE (varop) == CLOBBER)
9289 return varop;
9290
9291 /* If VAROP is a CONST_INT, then we need to apply the mask in CONSTOP
9292 to VAROP and return the new constant. */
9293 if (CONST_INT_P (varop))
9294 return gen_int_mode (INTVAL (varop) & constop, mode);
9295
9296 /* See what bits may be nonzero in VAROP. Unlike the general case of
9297 a call to nonzero_bits, here we don't care about bits outside
9298 MODE. */
9299
9300 nonzero = nonzero_bits (varop, mode) & GET_MODE_MASK (mode);
9301
9302 /* Turn off all bits in the constant that are known to already be zero.
9303 Thus, if the AND isn't needed at all, we will have CONSTOP == NONZERO_BITS
9304 which is tested below. */
9305
9306 constop &= nonzero;
9307
9308 /* If we don't have any bits left, return zero. */
9309 if (constop == 0)
9310 return const0_rtx;
9311
9312 /* If VAROP is a NEG of something known to be zero or 1 and CONSTOP is
9313 a power of two, we can replace this with an ASHIFT. */
9314 if (GET_CODE (varop) == NEG && nonzero_bits (XEXP (varop, 0), mode) == 1
9315 && (i = exact_log2 (constop)) >= 0)
9316 return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (varop, 0), i);
9317
9318 /* If VAROP is an IOR or XOR, apply the AND to both branches of the IOR
9319 or XOR, then try to apply the distributive law. This may eliminate
9320 operations if either branch can be simplified because of the AND.
9321 It may also make some cases more complex, but those cases probably
9322 won't match a pattern either with or without this. */
9323
9324 if (GET_CODE (varop) == IOR || GET_CODE (varop) == XOR)
9325 return
9326 gen_lowpart
9327 (mode,
9328 apply_distributive_law
9329 (simplify_gen_binary (GET_CODE (varop), GET_MODE (varop),
9330 simplify_and_const_int (NULL_RTX,
9331 GET_MODE (varop),
9332 XEXP (varop, 0),
9333 constop),
9334 simplify_and_const_int (NULL_RTX,
9335 GET_MODE (varop),
9336 XEXP (varop, 1),
9337 constop))));
9338
9339 /* If VAROP is PLUS, and the constant is a mask of low bits, distribute
9340 the AND and see if one of the operands simplifies to zero. If so, we
9341 may eliminate it. */
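/* For example, in (and (plus X Y) 7), if X is known to be a multiple
   of 8 then (and X 7) simplifies to zero and the result is just
   (and Y 7); carries out of the low three bits cannot affect them.  */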
9342
9343 if (GET_CODE (varop) == PLUS
9344 && exact_log2 (constop + 1) >= 0)
9345 {
9346 rtx o0, o1;
9347
9348 o0 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 0), constop);
9349 o1 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 1), constop);
9350 if (o0 == const0_rtx)
9351 return o1;
9352 if (o1 == const0_rtx)
9353 return o0;
9354 }
9355
9356 /* Make a SUBREG if necessary. If we can't make it, fail. */
9357 varop = gen_lowpart (mode, varop);
9358 if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
9359 return NULL_RTX;
9360
9361 /* If we are only masking insignificant bits, return VAROP. */
9362 if (constop == nonzero)
9363 return varop;
9364
9365 if (varop == orig_varop && constop == orig_constop)
9366 return NULL_RTX;
9367
9368 /* Otherwise, return an AND. */
9369 return simplify_gen_binary (AND, mode, varop, gen_int_mode (constop, mode));
9370 }
9371
9372
9373 /* We have X, a logical `and' of VAROP with the constant CONSTOP, to be done
9374 in MODE.
9375
9376 Return an equivalent form, if different from X. Otherwise, return X. If
9377 X is zero, we are to always construct the equivalent form. */
9378
9379 static rtx
9380 simplify_and_const_int (rtx x, enum machine_mode mode, rtx varop,
9381 unsigned HOST_WIDE_INT constop)
9382 {
9383 rtx tem = simplify_and_const_int_1 (mode, varop, constop);
9384 if (tem)
9385 return tem;
9386
9387 if (!x)
9388 x = simplify_gen_binary (AND, GET_MODE (varop), varop,
9389 gen_int_mode (constop, mode));
9390 if (GET_MODE (x) != mode)
9391 x = gen_lowpart (mode, x);
9392 return x;
9393 }
9394 \f
9395 /* Given a REG, X, compute which bits in X can be nonzero.
9396 We don't care about bits outside of those defined in MODE.
9397
9398 For most X this is simply GET_MODE_MASK (GET_MODE (X)), but if X is
9399 a shift, AND, or zero_extract, we can do better. */
9400
9401 static rtx
9402 reg_nonzero_bits_for_combine (const_rtx x, enum machine_mode mode,
9403 const_rtx known_x ATTRIBUTE_UNUSED,
9404 enum machine_mode known_mode ATTRIBUTE_UNUSED,
9405 unsigned HOST_WIDE_INT known_ret ATTRIBUTE_UNUSED,
9406 unsigned HOST_WIDE_INT *nonzero)
9407 {
9408 rtx tem;
9409 reg_stat_type *rsp;
9410
9411 /* If X is a register whose nonzero bits value is current, use it.
9412 Otherwise, if X is a register whose value we can find, use that
9413 value. Otherwise, use the previously-computed global nonzero bits
9414 for this register. */
9415
9416 rsp = &VEC_index (reg_stat_type, reg_stat, REGNO (x));
9417 if (rsp->last_set_value != 0
9418 && (rsp->last_set_mode == mode
9419 || (GET_MODE_CLASS (rsp->last_set_mode) == MODE_INT
9420 && GET_MODE_CLASS (mode) == MODE_INT))
9421 && ((rsp->last_set_label >= label_tick_ebb_start
9422 && rsp->last_set_label < label_tick)
9423 || (rsp->last_set_label == label_tick
9424 && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
9425 || (REGNO (x) >= FIRST_PSEUDO_REGISTER
9426 && REG_N_SETS (REGNO (x)) == 1
9427 && !REGNO_REG_SET_P
9428 (DF_LR_IN (ENTRY_BLOCK_PTR->next_bb), REGNO (x)))))
9429 {
9430 *nonzero &= rsp->last_set_nonzero_bits;
9431 return NULL;
9432 }
9433
9434 tem = get_last_value (x);
9435
9436 if (tem)
9437 {
9438 #ifdef SHORT_IMMEDIATES_SIGN_EXTEND
9439 /* If X is narrower than MODE and TEM is a non-negative
9440 constant that would appear negative in the mode of X,
9441 sign-extend it for use in reg_nonzero_bits because some
9442 machines (maybe most) will actually do the sign-extension
9443 and this is the conservative approach.
9444
9445 ??? For 2.5, try to tighten up the MD files in this regard
9446 instead of this kludge. */
9447
9448 if (GET_MODE_PRECISION (GET_MODE (x)) < GET_MODE_PRECISION (mode)
9449 && CONST_INT_P (tem)
9450 && INTVAL (tem) > 0
9451 && val_signbit_known_set_p (GET_MODE (x), INTVAL (tem)))
9452 tem = GEN_INT (INTVAL (tem) | ~GET_MODE_MASK (GET_MODE (x)));
9453 #endif
9454 return tem;
9455 }
9456 else if (nonzero_sign_valid && rsp->nonzero_bits)
9457 {
9458 unsigned HOST_WIDE_INT mask = rsp->nonzero_bits;
9459
9460 if (GET_MODE_PRECISION (GET_MODE (x)) < GET_MODE_PRECISION (mode))
9461 /* We don't know anything about the upper bits. */
9462 mask |= GET_MODE_MASK (mode) ^ GET_MODE_MASK (GET_MODE (x));
9463 *nonzero &= mask;
9464 }
9465
9466 return NULL;
9467 }
9468
9469 /* Return the number of bits at the high-order end of X that are known to
9470 be equal to the sign bit. X will be used in mode MODE; if MODE is
9471 VOIDmode, X will be used in its own mode. The returned value will always
9472 be between 1 and the number of bits in MODE. */
9473
9474 static rtx
9475 reg_num_sign_bit_copies_for_combine (const_rtx x, enum machine_mode mode,
9476 const_rtx known_x ATTRIBUTE_UNUSED,
9477 enum machine_mode known_mode
9478 ATTRIBUTE_UNUSED,
9479 unsigned int known_ret ATTRIBUTE_UNUSED,
9480 unsigned int *result)
9481 {
9482 rtx tem;
9483 reg_stat_type *rsp;
9484
9485 rsp = &VEC_index (reg_stat_type, reg_stat, REGNO (x));
9486 if (rsp->last_set_value != 0
9487 && rsp->last_set_mode == mode
9488 && ((rsp->last_set_label >= label_tick_ebb_start
9489 && rsp->last_set_label < label_tick)
9490 || (rsp->last_set_label == label_tick
9491 && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
9492 || (REGNO (x) >= FIRST_PSEUDO_REGISTER
9493 && REG_N_SETS (REGNO (x)) == 1
9494 && !REGNO_REG_SET_P
9495 (DF_LR_IN (ENTRY_BLOCK_PTR->next_bb), REGNO (x)))))
9496 {
9497 *result = rsp->last_set_sign_bit_copies;
9498 return NULL;
9499 }
9500
9501 tem = get_last_value (x);
9502 if (tem != 0)
9503 return tem;
9504
9505 if (nonzero_sign_valid && rsp->sign_bit_copies != 0
9506 && GET_MODE_PRECISION (GET_MODE (x)) == GET_MODE_PRECISION (mode))
9507 *result = rsp->sign_bit_copies;
9508
9509 return NULL;
9510 }
9511 \f
9512 /* Return the number of "extended" bits there are in X, when interpreted
9513 as a quantity in MODE whose signedness is indicated by UNSIGNEDP. For
9514 unsigned quantities, this is the number of high-order zero bits.
9515 For signed quantities, this is the number of copies of the sign bit
9516 minus 1. In both cases, this function returns the number of "spare"
9517 bits. For example, if two quantities for which this function returns
9518 at least 1 are added, the addition is known not to overflow.
9519
9520 This function will always return 0 unless called during combine, which
9521 implies that it must be called from a define_split. */
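/* For example, a value zero-extended from QImode into SImode has nonzero
   bits only in its low eight bits, so with UNSIGNEDP nonzero this
   returns 31 - 7 == 24.  */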
9522
9523 unsigned int
9524 extended_count (const_rtx x, enum machine_mode mode, int unsignedp)
9525 {
9526 if (nonzero_sign_valid == 0)
9527 return 0;
9528
9529 return (unsignedp
9530 ? (HWI_COMPUTABLE_MODE_P (mode)
9531 ? (unsigned int) (GET_MODE_PRECISION (mode) - 1
9532 - floor_log2 (nonzero_bits (x, mode)))
9533 : 0)
9534 : num_sign_bit_copies (x, mode) - 1);
9535 }
9536
9537 /* This function is called from `simplify_shift_const' to merge two
9538 outer operations. Specifically, we have already found that we need
9539 to perform operation *POP0 with constant *PCONST0 at the outermost
9540 position. We would now like to also perform OP1 with constant CONST1
9541 (with *POP0 being done last).
9542
9543 Return 1 if we can do the operation and update *POP0 and *PCONST0 with
9544 the resulting operation. *PCOMP_P is set to 1 if we would need to
9545 complement the innermost operand, otherwise it is unchanged.
9546
9547 MODE is the mode in which the operation will be done. No bits outside
9548 the width of this mode matter. It is assumed that the width of this mode
9549 is smaller than or equal to HOST_BITS_PER_WIDE_INT.
9550
9551 If *POP0 or OP1 are UNKNOWN, it means no operation is required. Only NEG, PLUS,
9552 IOR, XOR, and AND are supported. We may set *POP0 to SET if the proper
9553 result is simply *PCONST0.
9554
9555 If the resulting operation cannot be expressed as one operation, we
9556 return 0 and do not change *POP0, *PCONST0, and *PCOMP_P. */
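/* For example, if *POP0 is IOR and OP1 is AND and both constants are C,
   then ((a & C) | C) == C, so *POP0 becomes SET and the result is simply
   the constant.  */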
9557
9558 static int
9559 merge_outer_ops (enum rtx_code *pop0, HOST_WIDE_INT *pconst0, enum rtx_code op1,
                      HOST_WIDE_INT const1, enum machine_mode mode, int *pcomp_p)
9560 {
9561 enum rtx_code op0 = *pop0;
9562 HOST_WIDE_INT const0 = *pconst0;
9563
9564 const0 &= GET_MODE_MASK (mode);
9565 const1 &= GET_MODE_MASK (mode);
9566
9567 /* If OP0 is an AND, clear unimportant bits in CONST1. */
9568 if (op0 == AND)
9569 const1 &= const0;
9570
9571 /* If OP0 or OP1 is UNKNOWN, this is easy. Similarly if they are the same or
9572 if OP0 is SET. */
9573
9574 if (op1 == UNKNOWN || op0 == SET)
9575 return 1;
9576
9577 else if (op0 == UNKNOWN)
9578 op0 = op1, const0 = const1;
9579
9580 else if (op0 == op1)
9581 {
9582 switch (op0)
9583 {
9584 case AND:
9585 const0 &= const1;
9586 break;
9587 case IOR:
9588 const0 |= const1;
9589 break;
9590 case XOR:
9591 const0 ^= const1;
9592 break;
9593 case PLUS:
9594 const0 += const1;
9595 break;
9596 case NEG:
9597 op0 = UNKNOWN;
9598 break;
9599 default:
9600 break;
9601 }
9602 }
9603
9604 /* Otherwise, if either is a PLUS or NEG, we can't do anything. */
9605 else if (op0 == PLUS || op1 == PLUS || op0 == NEG || op1 == NEG)
9606 return 0;
9607
9608 /* If the two constants aren't the same, we can't do anything. The
9609 remaining six cases can all be done. */
9610 else if (const0 != const1)
9611 return 0;
9612
9613 else
9614 switch (op0)
9615 {
9616 case IOR:
9617 if (op1 == AND)
9618 /* (a & b) | b == b */
9619 op0 = SET;
9620 else /* op1 == XOR */
9621 /* (a ^ b) | b == a | b */
9622 {;}
9623 break;
9624
9625 case XOR:
9626 if (op1 == AND)
9627 /* (a & b) ^ b == (~a) & b */
9628 op0 = AND, *pcomp_p = 1;
9629 else /* op1 == IOR */
9630 /* (a | b) ^ b == a & ~b */
9631 op0 = AND, const0 = ~const0;
9632 break;
9633
9634 case AND:
9635 if (op1 == IOR)
9636 /* (a | b) & b == b */
9637 op0 = SET;
9638 else /* op1 == XOR */
9639 /* (a ^ b) & b == (~a) & b */
9640 *pcomp_p = 1;
9641 break;
9642 default:
9643 break;
9644 }
9645
9646 /* Check for NO-OP cases. */
9647 const0 &= GET_MODE_MASK (mode);
9648 if (const0 == 0
9649 && (op0 == IOR || op0 == XOR || op0 == PLUS))
9650 op0 = UNKNOWN;
9651 else if (const0 == 0 && op0 == AND)
9652 op0 = SET;
9653 else if ((unsigned HOST_WIDE_INT) const0 == GET_MODE_MASK (mode)
9654 && op0 == AND)
9655 op0 = UNKNOWN;
9656
9657 *pop0 = op0;
9658
9659 /* ??? Slightly redundant with the above mask, but not entirely.
9660 Moving this above means we'd have to sign-extend the mode mask
9661 for the final test. */
9662 if (op0 != UNKNOWN && op0 != NEG)
9663 *pconst0 = trunc_int_for_mode (const0, mode);
9664
9665 return 1;
9666 }
9667 \f
9668 /* A helper to simplify_shift_const_1 to determine the mode we can perform
9669 the shift in. The original shift operation CODE is performed on OP in
9670 ORIG_MODE. Return the wider mode MODE if we can perform the operation
9671 in that mode. Return ORIG_MODE otherwise. We can also assume that the
9672 result of the shift is subject to operation OUTER_CODE with operand
9673 OUTER_CONST. */
9674
9675 static enum machine_mode
9676 try_widen_shift_mode (enum rtx_code code, rtx op, int count,
9677 enum machine_mode orig_mode, enum machine_mode mode,
9678 enum rtx_code outer_code, HOST_WIDE_INT outer_const)
9679 {
9680 if (orig_mode == mode)
9681 return mode;
9682 gcc_assert (GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (orig_mode));
9683
9684 /* In general we can't perform in wider mode for right shift and rotate. */
9685 switch (code)
9686 {
9687 case ASHIFTRT:
9688 /* We can still widen if the bits brought in from the left are identical
9689 to the sign bit of ORIG_MODE. */
9690 if (num_sign_bit_copies (op, mode)
9691 > (unsigned) (GET_MODE_PRECISION (mode)
9692 - GET_MODE_PRECISION (orig_mode)))
9693 return mode;
9694 return orig_mode;
9695
9696 case LSHIFTRT:
9697 /* Similarly here but with zero bits. */
9698 if (HWI_COMPUTABLE_MODE_P (mode)
9699 && (nonzero_bits (op, mode) & ~GET_MODE_MASK (orig_mode)) == 0)
9700 return mode;
9701
9702 /* We can also widen if the bits brought in will be masked off. This
9703 operation is performed in ORIG_MODE. */
9704 if (outer_code == AND)
9705 {
9706 int care_bits = low_bitmask_len (orig_mode, outer_const);
9707
9708 if (care_bits >= 0
9709 && GET_MODE_PRECISION (orig_mode) - care_bits >= count)
9710 return mode;
9711 }
9712 /* fall through */
9713
9714 case ROTATE:
9715 return orig_mode;
9716
9717 case ROTATERT:
9718 gcc_unreachable ();
9719
9720 default:
9721 return mode;
9722 }
9723 }
9724
9725 /* Simplify a shift of VAROP by ORIG_COUNT bits. CODE says what kind
9726 of shift. The result of the shift is RESULT_MODE. Return NULL_RTX
9727 if we cannot simplify it. Otherwise, return a simplified value.
9728
9729 The shift is normally computed in the widest mode we find in VAROP, as
9730 long as it isn't a different number of words than RESULT_MODE. Exceptions
9731 are ASHIFTRT and ROTATE, which are always done in their original mode. */
9732
9733 static rtx
9734 simplify_shift_const_1 (enum rtx_code code, enum machine_mode result_mode,
9735 rtx varop, int orig_count)
9736 {
9737 enum rtx_code orig_code = code;
9738 rtx orig_varop = varop;
9739 int count;
9740 enum machine_mode mode = result_mode;
9741 enum machine_mode shift_mode, tmode;
9742 unsigned int mode_words
9743 = (GET_MODE_SIZE (mode) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD;
9744 /* We form (outer_op (code varop count) (outer_const)). */
9745 enum rtx_code outer_op = UNKNOWN;
9746 HOST_WIDE_INT outer_const = 0;
9747 int complement_p = 0;
9748 rtx new_rtx, x;
9749
9750 /* Make sure to truncate the "natural" shift on the way in. We don't
9751 want to do this inside the loop as it makes it more difficult to
9752 combine shifts. */
9753 if (SHIFT_COUNT_TRUNCATED)
9754 orig_count &= GET_MODE_BITSIZE (mode) - 1;
9755
9756 /* If we were given an invalid count, don't do anything except exactly
9757 what was requested. */
9758
9759 if (orig_count < 0 || orig_count >= (int) GET_MODE_PRECISION (mode))
9760 return NULL_RTX;
9761
9762 count = orig_count;
9763
9764 /* Unless one of the branches of the `if' in this loop does a `continue',
9765 we will `break' the loop after the `if'. */
9766
9767 while (count != 0)
9768 {
9769 /* If we have an operand of (clobber (const_int 0)), fail. */
9770 if (GET_CODE (varop) == CLOBBER)
9771 return NULL_RTX;
9772
9773 /* Convert ROTATERT to ROTATE. */
9774 if (code == ROTATERT)
9775 {
9776 unsigned int bitsize = GET_MODE_PRECISION (result_mode);
9777 code = ROTATE;
9778 if (VECTOR_MODE_P (result_mode))
9779 count = bitsize / GET_MODE_NUNITS (result_mode) - count;
9780 else
9781 count = bitsize - count;
9782 }
9783
9784 shift_mode = try_widen_shift_mode (code, varop, count, result_mode,
9785 mode, outer_op, outer_const);
9786
9787 /* Handle cases where the count is greater than the size of the mode
9788 minus 1. For ASHIFT, use the size minus one as the count (this can
9789 occur when simplifying (lshiftrt (ashiftrt ..))). For rotates,
9790 take the count modulo the size. For other shifts, the result is
9791 zero.
9792
9793 Since these shifts are being produced by the compiler by combining
9794 multiple operations, each of which are defined, we know what the
9795 result is supposed to be. */
9796
9797 if (count > (GET_MODE_PRECISION (shift_mode) - 1))
9798 {
9799 if (code == ASHIFTRT)
9800 count = GET_MODE_PRECISION (shift_mode) - 1;
9801 else if (code == ROTATE || code == ROTATERT)
9802 count %= GET_MODE_PRECISION (shift_mode);
9803 else
9804 {
9805 /* We can't simply return zero because there may be an
9806 outer op. */
9807 varop = const0_rtx;
9808 count = 0;
9809 break;
9810 }
9811 }
9812
9813 /* If we discovered we had to complement VAROP, leave. Making a NOT
9814 here would cause an infinite loop. */
9815 if (complement_p)
9816 break;
9817
9818 /* An arithmetic right shift of a quantity known to be -1 or 0
9819 is a no-op. */
9820 if (code == ASHIFTRT
9821 && (num_sign_bit_copies (varop, shift_mode)
9822 == GET_MODE_PRECISION (shift_mode)))
9823 {
9824 count = 0;
9825 break;
9826 }
9827
9828 /* If we are doing an arithmetic right shift and discarding all but
9829 the sign bit copies, this is equivalent to doing a shift by the
9830 bitsize minus one. Convert it into that shift because it will often
9831 allow other simplifications. */
9832
9833 if (code == ASHIFTRT
9834 && (count + num_sign_bit_copies (varop, shift_mode)
9835 >= GET_MODE_PRECISION (shift_mode)))
9836 count = GET_MODE_PRECISION (shift_mode) - 1;
9837
9838 /* We simplify the tests below and elsewhere by converting
9839 ASHIFTRT to LSHIFTRT if we know the sign bit is clear.
9840 `make_compound_operation' will convert it to an ASHIFTRT for
9841 those machines (such as VAX) that don't have an LSHIFTRT. */
9842 if (code == ASHIFTRT
9843 && val_signbit_known_clear_p (shift_mode,
9844 nonzero_bits (varop, shift_mode)))
9845 code = LSHIFTRT;
9846
9847 if (((code == LSHIFTRT
9848 && HWI_COMPUTABLE_MODE_P (shift_mode)
9849 && !(nonzero_bits (varop, shift_mode) >> count))
9850 || (code == ASHIFT
9851 && HWI_COMPUTABLE_MODE_P (shift_mode)
9852 && !((nonzero_bits (varop, shift_mode) << count)
9853 & GET_MODE_MASK (shift_mode))))
9854 && !side_effects_p (varop))
9855 varop = const0_rtx;
9856
9857 switch (GET_CODE (varop))
9858 {
9859 case SIGN_EXTEND:
9860 case ZERO_EXTEND:
9861 case SIGN_EXTRACT:
9862 case ZERO_EXTRACT:
9863 new_rtx = expand_compound_operation (varop);
9864 if (new_rtx != varop)
9865 {
9866 varop = new_rtx;
9867 continue;
9868 }
9869 break;
9870
9871 case MEM:
9872 /* If we have (xshiftrt (mem ...) C) and C is MODE_WIDTH
9873 minus the width of a smaller mode, we can do this with a
9874 SIGN_EXTEND or ZERO_EXTEND from the narrower memory location. */
9875 if ((code == ASHIFTRT || code == LSHIFTRT)
9876 && ! mode_dependent_address_p (XEXP (varop, 0),
9877 MEM_ADDR_SPACE (varop))
9878 && ! MEM_VOLATILE_P (varop)
9879 && (tmode = mode_for_size (GET_MODE_BITSIZE (mode) - count,
9880 MODE_INT, 1)) != BLKmode)
9881 {
9882 new_rtx = adjust_address_nv (varop, tmode,
9883 BYTES_BIG_ENDIAN ? 0
9884 : count / BITS_PER_UNIT);
9885
9886 varop = gen_rtx_fmt_e (code == ASHIFTRT ? SIGN_EXTEND
9887 : ZERO_EXTEND, mode, new_rtx);
9888 count = 0;
9889 continue;
9890 }
9891 break;
9892
9893 case SUBREG:
9894 /* If VAROP is a SUBREG, strip it as long as the inner operand has
9895 the same number of words as what we've seen so far. Then store
9896 the widest mode in MODE. */
9897 if (subreg_lowpart_p (varop)
9898 && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (varop)))
9899 > GET_MODE_SIZE (GET_MODE (varop)))
9900 && (unsigned int) ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (varop)))
9901 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
9902 == mode_words
9903 && GET_MODE_CLASS (GET_MODE (varop)) == MODE_INT
9904 && GET_MODE_CLASS (GET_MODE (SUBREG_REG (varop))) == MODE_INT)
9905 {
9906 varop = SUBREG_REG (varop);
9907 if (GET_MODE_SIZE (GET_MODE (varop)) > GET_MODE_SIZE (mode))
9908 mode = GET_MODE (varop);
9909 continue;
9910 }
9911 break;
9912
9913 case MULT:
9914 /* Some machines use MULT instead of ASHIFT because MULT
9915 is cheaper. But it is still better on those machines to
9916 merge two shifts into one. */
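/* E.g., (mult X 8) is rewritten here as (ashift X 3). */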
9917 if (CONST_INT_P (XEXP (varop, 1))
9918 && exact_log2 (UINTVAL (XEXP (varop, 1))) >= 0)
9919 {
9920 varop
9921 = simplify_gen_binary (ASHIFT, GET_MODE (varop),
9922 XEXP (varop, 0),
9923 GEN_INT (exact_log2 (
9924 UINTVAL (XEXP (varop, 1)))));
9925 continue;
9926 }
9927 break;
9928
9929 case UDIV:
9930 /* Similar, for when divides are cheaper. */
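/* E.g., (udiv X 8) is rewritten here as (lshiftrt X 3). */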
9931 if (CONST_INT_P (XEXP (varop, 1))
9932 && exact_log2 (UINTVAL (XEXP (varop, 1))) >= 0)
9933 {
9934 varop
9935 = simplify_gen_binary (LSHIFTRT, GET_MODE (varop),
9936 XEXP (varop, 0),
9937 GEN_INT (exact_log2 (
9938 UINTVAL (XEXP (varop, 1)))));
9939 continue;
9940 }
9941 break;
9942
9943 case ASHIFTRT:
9944 /* If we are extracting just the sign bit of an arithmetic
9945 right shift, that shift is not needed. However, the sign
9946 bit of a wider mode may be different from what would be
9947 interpreted as the sign bit in a narrower mode, so, if
9948 the result is narrower, don't discard the shift. */
9949 if (code == LSHIFTRT
9950 && count == (GET_MODE_BITSIZE (result_mode) - 1)
9951 && (GET_MODE_BITSIZE (result_mode)
9952 >= GET_MODE_BITSIZE (GET_MODE (varop))))
9953 {
9954 varop = XEXP (varop, 0);
9955 continue;
9956 }
9957
9958 /* ... fall through ... */
9959
9960 case LSHIFTRT:
9961 case ASHIFT:
9962 case ROTATE:
9963 /* Here we have two nested shifts. The result is usually the
9964 AND of a new shift with a mask. We compute the result below. */
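/* E.g., in SImode, (ashift (lshiftrt X 2) 3) becomes
   (and (ashift X 1) -8): the counts are combined and the bits that
   the inner shift cleared are masked off afterwards. */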
9965 if (CONST_INT_P (XEXP (varop, 1))
9966 && INTVAL (XEXP (varop, 1)) >= 0
9967 && INTVAL (XEXP (varop, 1)) < GET_MODE_PRECISION (GET_MODE (varop))
9968 && HWI_COMPUTABLE_MODE_P (result_mode)
9969 && HWI_COMPUTABLE_MODE_P (mode)
9970 && !VECTOR_MODE_P (result_mode))
9971 {
9972 enum rtx_code first_code = GET_CODE (varop);
9973 unsigned int first_count = INTVAL (XEXP (varop, 1));
9974 unsigned HOST_WIDE_INT mask;
9975 rtx mask_rtx;
9976
9977 /* We have one common special case. We can't do any merging if
9978 the inner code is an ASHIFTRT of a smaller mode. However, if
9979 we have (ashift:M1 (subreg:M1 (ashiftrt:M2 FOO C1) 0) C2)
9980 with C2 == GET_MODE_BITSIZE (M1) - GET_MODE_BITSIZE (M2),
9981 we can convert it to
9982 (ashiftrt:M1 (ashift:M1 (and:M1 (subreg:M1 FOO 0) C3) C2) C1).
9983 This simplifies certain SIGN_EXTEND operations. */
9984 if (code == ASHIFT && first_code == ASHIFTRT
9985 && count == (GET_MODE_PRECISION (result_mode)
9986 - GET_MODE_PRECISION (GET_MODE (varop))))
9987 {
9988 /* C3 has the low-order C1 bits zero. */
9989
9990 mask = GET_MODE_MASK (mode)
9991 & ~(((unsigned HOST_WIDE_INT) 1 << first_count) - 1);
9992
9993 varop = simplify_and_const_int (NULL_RTX, result_mode,
9994 XEXP (varop, 0), mask);
9995 varop = simplify_shift_const (NULL_RTX, ASHIFT, result_mode,
9996 varop, count);
9997 count = first_count;
9998 code = ASHIFTRT;
9999 continue;
10000 }
10001
10002 /* If this was (ashiftrt (ashift foo C1) C2) and FOO has more
10003 than C1 high-order bits equal to the sign bit, we can convert
10004 this to either an ASHIFT or an ASHIFTRT depending on the
10005 two counts.
10006
10007 We cannot do this if VAROP's mode is not SHIFT_MODE. */
10008
10009 if (code == ASHIFTRT && first_code == ASHIFT
10010 && GET_MODE (varop) == shift_mode
10011 && (num_sign_bit_copies (XEXP (varop, 0), shift_mode)
10012 > first_count))
10013 {
10014 varop = XEXP (varop, 0);
10015 count -= first_count;
10016 if (count < 0)
10017 {
10018 count = -count;
10019 code = ASHIFT;
10020 }
10021
10022 continue;
10023 }
10024
10025 /* There are some cases we can't do. If CODE is ASHIFTRT,
10026 we can only do this if FIRST_CODE is also ASHIFTRT.
10027
10028 We can't do the case when CODE is ROTATE and FIRST_CODE is
10029 ASHIFTRT.
10030
10031 If the mode of this shift is not the mode of the outer shift,
10032 we can't do this if either shift is a right shift or ROTATE.
10033
10034 Finally, we can't do any of these if the mode is too wide
10035 unless the codes are the same.
10036
10037 Handle the case where the shift codes are the same
10038 first. */
10039
10040 if (code == first_code)
10041 {
10042 if (GET_MODE (varop) != result_mode
10043 && (code == ASHIFTRT || code == LSHIFTRT
10044 || code == ROTATE))
10045 break;
10046
10047 count += first_count;
10048 varop = XEXP (varop, 0);
10049 continue;
10050 }
10051
10052 if (code == ASHIFTRT
10053 || (code == ROTATE && first_code == ASHIFTRT)
10054 || GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT
10055 || (GET_MODE (varop) != result_mode
10056 && (first_code == ASHIFTRT || first_code == LSHIFTRT
10057 || first_code == ROTATE
10058 || code == ROTATE)))
10059 break;
10060
10061 /* To compute the mask to apply after the shift, shift the
10062 nonzero bits of the inner shift the same way the
10063 outer shift will. */
10064
10065 mask_rtx = GEN_INT (nonzero_bits (varop, GET_MODE (varop)));
10066
10067 mask_rtx
10068 = simplify_const_binary_operation (code, result_mode, mask_rtx,
10069 GEN_INT (count));
10070
10071 /* Give up if we can't compute an outer operation to use. */
10072 if (mask_rtx == 0
10073 || !CONST_INT_P (mask_rtx)
10074 || ! merge_outer_ops (&outer_op, &outer_const, AND,
10075 INTVAL (mask_rtx),
10076 result_mode, &complement_p))
10077 break;
10078
10079 /* If the shifts are in the same direction, we add the
10080 counts. Otherwise, we subtract them. */
10081 if ((code == ASHIFTRT || code == LSHIFTRT)
10082 == (first_code == ASHIFTRT || first_code == LSHIFTRT))
10083 count += first_count;
10084 else
10085 count -= first_count;
10086
10087 /* If COUNT is positive, the new shift is usually CODE,
10088 except for the two exceptions below, in which case it is
10089 FIRST_CODE. If the count is negative, FIRST_CODE should
10090 always be used. */
10091 if (count > 0
10092 && ((first_code == ROTATE && code == ASHIFT)
10093 || (first_code == ASHIFTRT && code == LSHIFTRT)))
10094 code = first_code;
10095 else if (count < 0)
10096 code = first_code, count = -count;
10097
10098 varop = XEXP (varop, 0);
10099 continue;
10100 }
10101
10102 /* If we have (A << B << C) for any shift, we can convert this to
10103 (A << C << B). This wins if A is a constant. Only try this if
10104 B is not a constant. */
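/* E.g., (ashift (ashift (const_int 3) B) 2) becomes
   (ashift (const_int 12) B), since the two shifts can be applied in
   either order. */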
10105
10106 else if (GET_CODE (varop) == code
10107 && CONST_INT_P (XEXP (varop, 0))
10108 && !CONST_INT_P (XEXP (varop, 1)))
10109 {
10110 rtx new_rtx = simplify_const_binary_operation (code, mode,
10111 XEXP (varop, 0),
10112 GEN_INT (count));
10113 varop = gen_rtx_fmt_ee (code, mode, new_rtx, XEXP (varop, 1));
10114 count = 0;
10115 continue;
10116 }
10117 break;
10118
10119 case NOT:
10120 if (VECTOR_MODE_P (mode))
10121 break;
10122
10123 /* Make this fit the case below. */
10124 varop = gen_rtx_XOR (mode, XEXP (varop, 0), constm1_rtx);
10125 continue;
10126
10127 case IOR:
10128 case AND:
10129 case XOR:
10130 /* If we have (xshiftrt (ior (plus X (const_int -1)) X) C)
10131 with C the size of VAROP - 1 and the shift is logical if
10132 STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
10133 we have an (le X 0) operation. If we have an arithmetic shift
10134 and STORE_FLAG_VALUE is 1 or we have a logical shift with
10135 STORE_FLAG_VALUE of -1, we have a (neg (le X 0)) operation. */
10136
10137 if (GET_CODE (varop) == IOR && GET_CODE (XEXP (varop, 0)) == PLUS
10138 && XEXP (XEXP (varop, 0), 1) == constm1_rtx
10139 && (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
10140 && (code == LSHIFTRT || code == ASHIFTRT)
10141 && count == (GET_MODE_PRECISION (GET_MODE (varop)) - 1)
10142 && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
10143 {
10144 count = 0;
10145 varop = gen_rtx_LE (GET_MODE (varop), XEXP (varop, 1),
10146 const0_rtx);
10147
10148 if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
10149 varop = gen_rtx_NEG (GET_MODE (varop), varop);
10150
10151 continue;
10152 }
10153
10154 /* If we have (shift (logical)), move the logical to the outside
10155 to allow it to possibly combine with another logical and the
10156 shift to combine with another shift. This also canonicalizes to
10157 what a ZERO_EXTRACT looks like. Also, some machines have
10158 (and (shift)) insns. */
10159
10160 if (CONST_INT_P (XEXP (varop, 1))
10161 /* We can't do this if we have (ashiftrt (xor)) and the
10162 constant has its sign bit set in shift_mode. */
10163 && !(code == ASHIFTRT && GET_CODE (varop) == XOR
10164 && 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
10165 shift_mode))
10166 && (new_rtx = simplify_const_binary_operation (code, result_mode,
10167 XEXP (varop, 1),
10168 GEN_INT (count))) != 0
10169 && CONST_INT_P (new_rtx)
10170 && merge_outer_ops (&outer_op, &outer_const, GET_CODE (varop),
10171 INTVAL (new_rtx), result_mode, &complement_p))
10172 {
10173 varop = XEXP (varop, 0);
10174 continue;
10175 }
10176
10177 /* If we can't do that, try to simplify the shift in each arm of the
10178 logical expression, make a new logical expression, and apply
10179 the inverse distributive law. This also can't be done
10180 for some (ashiftrt (xor)). */
10181 if (CONST_INT_P (XEXP (varop, 1))
10182 && !(code == ASHIFTRT && GET_CODE (varop) == XOR
10183 && 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
10184 shift_mode)))
10185 {
10186 rtx lhs = simplify_shift_const (NULL_RTX, code, shift_mode,
10187 XEXP (varop, 0), count);
10188 rtx rhs = simplify_shift_const (NULL_RTX, code, shift_mode,
10189 XEXP (varop, 1), count);
10190
10191 varop = simplify_gen_binary (GET_CODE (varop), shift_mode,
10192 lhs, rhs);
10193 varop = apply_distributive_law (varop);
10194
10195 count = 0;
10196 continue;
10197 }
10198 break;
10199
10200 case EQ:
10201 /* Convert (lshiftrt (eq FOO 0) C) to (xor FOO 1) if STORE_FLAG_VALUE
10202 says that the sign bit can be tested, FOO has mode MODE, C is
10203 GET_MODE_PRECISION (MODE) - 1, and only the low-order bit of FOO
10204 may be nonzero. */
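/* E.g., with STORE_FLAG_VALUE == -1 and FOO known to be 0 or 1,
   (eq FOO 0) is -1 when FOO is 0 and 0 when FOO is 1, so shifting it
   right logically by the mode width minus 1 yields (xor FOO 1). */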
10205 if (code == LSHIFTRT
10206 && XEXP (varop, 1) == const0_rtx
10207 && GET_MODE (XEXP (varop, 0)) == result_mode
10208 && count == (GET_MODE_PRECISION (result_mode) - 1)
10209 && HWI_COMPUTABLE_MODE_P (result_mode)
10210 && STORE_FLAG_VALUE == -1
10211 && nonzero_bits (XEXP (varop, 0), result_mode) == 1
10212 && merge_outer_ops (&outer_op, &outer_const, XOR, 1, result_mode,
10213 &complement_p))
10214 {
10215 varop = XEXP (varop, 0);
10216 count = 0;
10217 continue;
10218 }
10219 break;
10220
10221 case NEG:
10222 /* (lshiftrt (neg A) C) where A is either 0 or 1 and C is one less
10223 than the number of bits in the mode is equivalent to A. */
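/* E.g., in QImode with A known to be 0 or 1, (neg 1) is 0xff and
   0xff >> 7 is 1, while (neg 0) >> 7 is 0; either way the result
   is A. */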
10224 if (code == LSHIFTRT
10225 && count == (GET_MODE_PRECISION (result_mode) - 1)
10226 && nonzero_bits (XEXP (varop, 0), result_mode) == 1)
10227 {
10228 varop = XEXP (varop, 0);
10229 count = 0;
10230 continue;
10231 }
10232
10233 /* NEG commutes with ASHIFT since it is multiplication. Move the
10234 NEG outside to allow shifts to combine. */
10235 if (code == ASHIFT
10236 && merge_outer_ops (&outer_op, &outer_const, NEG, 0, result_mode,
10237 &complement_p))
10238 {
10239 varop = XEXP (varop, 0);
10240 continue;
10241 }
10242 break;
10243
10244 case PLUS:
10245 /* (lshiftrt (plus A -1) C) where A is either 0 or 1 and C
10246 is one less than the number of bits in the mode is
10247 equivalent to (xor A 1). */
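/* E.g., in QImode with A known to be 0 or 1, (0 + -1) >> 7 is 1
   and (1 + -1) >> 7 is 0, i.e. (xor A 1). */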
10248 if (code == LSHIFTRT
10249 && count == (GET_MODE_PRECISION (result_mode) - 1)
10250 && XEXP (varop, 1) == constm1_rtx
10251 && nonzero_bits (XEXP (varop, 0), result_mode) == 1
10252 && merge_outer_ops (&outer_op, &outer_const, XOR, 1, result_mode,
10253 &complement_p))
10254 {
10255 count = 0;
10256 varop = XEXP (varop, 0);
10257 continue;
10258 }
10259
10260 /* If we have (xshiftrt (plus FOO BAR) C), and the only bits
10261 that might be nonzero in BAR are those being shifted out and those
10262 bits are known zero in FOO, we can replace the PLUS with FOO.
10263 Similarly in the other operand order. This code occurs when
10264 we are computing the size of a variable-size array. */
10265
10266 if ((code == ASHIFTRT || code == LSHIFTRT)
10267 && count < HOST_BITS_PER_WIDE_INT
10268 && nonzero_bits (XEXP (varop, 1), result_mode) >> count == 0
10269 && (nonzero_bits (XEXP (varop, 1), result_mode)
10270 & nonzero_bits (XEXP (varop, 0), result_mode)) == 0)
10271 {
10272 varop = XEXP (varop, 0);
10273 continue;
10274 }
10275 else if ((code == ASHIFTRT || code == LSHIFTRT)
10276 && count < HOST_BITS_PER_WIDE_INT
10277 && HWI_COMPUTABLE_MODE_P (result_mode)
10278 && 0 == (nonzero_bits (XEXP (varop, 0), result_mode)
10279 >> count)
10280 && 0 == (nonzero_bits (XEXP (varop, 0), result_mode)
10281 & nonzero_bits (XEXP (varop, 1),
10282 result_mode)))
10283 {
10284 varop = XEXP (varop, 1);
10285 continue;
10286 }
10287
10288 /* (ashift (plus foo C) N) is (plus (ashift foo N) C'). */
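/* E.g., (ashift (plus FOO 3) 2) becomes (plus (ashift FOO 2) 12);
   C' is C shifted left by N. */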
10289 if (code == ASHIFT
10290 && CONST_INT_P (XEXP (varop, 1))
10291 && (new_rtx = simplify_const_binary_operation (ASHIFT, result_mode,
10292 XEXP (varop, 1),
10293 GEN_INT (count))) != 0
10294 && CONST_INT_P (new_rtx)
10295 && merge_outer_ops (&outer_op, &outer_const, PLUS,
10296 INTVAL (new_rtx), result_mode, &complement_p))
10297 {
10298 varop = XEXP (varop, 0);
10299 continue;
10300 }
10301
10302 /* Check for 'PLUS signbit', which is the canonical form of 'XOR
10303 signbit', and attempt to change the PLUS to an XOR and move it to
10304 the outer operation, as is done above in the AND/IOR/XOR case
10305 for (shift (logical)). See the logical handling above for the
10306 reasoning. */
10307 if (code == LSHIFTRT
10308 && CONST_INT_P (XEXP (varop, 1))
10309 && mode_signbit_p (result_mode, XEXP (varop, 1))
10310 && (new_rtx = simplify_const_binary_operation (code, result_mode,
10311 XEXP (varop, 1),
10312 GEN_INT (count))) != 0
10313 && CONST_INT_P (new_rtx)
10314 && merge_outer_ops (&outer_op, &outer_const, XOR,
10315 INTVAL (new_rtx), result_mode, &complement_p))
10316 {
10317 varop = XEXP (varop, 0);
10318 continue;
10319 }
10320
10321 break;
10322
10323 case MINUS:
10324 /* If we have (xshiftrt (minus (ashiftrt X C) X) C)
10325 with C the size of VAROP - 1 and the shift is logical if
10326 STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
10327 we have a (gt X 0) operation. If the shift is arithmetic with
10328 STORE_FLAG_VALUE of 1 or logical with STORE_FLAG_VALUE == -1,
10329 we have a (neg (gt X 0)) operation. */
10330
10331 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
10332 && GET_CODE (XEXP (varop, 0)) == ASHIFTRT
10333 && count == (GET_MODE_PRECISION (GET_MODE (varop)) - 1)
10334 && (code == LSHIFTRT || code == ASHIFTRT)
10335 && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
10336 && INTVAL (XEXP (XEXP (varop, 0), 1)) == count
10337 && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
10338 {
10339 count = 0;
10340 varop = gen_rtx_GT (GET_MODE (varop), XEXP (varop, 1),
10341 const0_rtx);
10342
10343 if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
10344 varop = gen_rtx_NEG (GET_MODE (varop), varop);
10345
10346 continue;
10347 }
10348 break;
10349
10350 case TRUNCATE:
10351 /* Change (lshiftrt (truncate (lshiftrt))) to (truncate (lshiftrt))
10352 if the truncate does not affect the value. */
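/* E.g., (lshiftrt:SI (truncate:SI (lshiftrt:DI X 32)) 3) becomes
   (truncate:SI (lshiftrt:DI X 35)), since the truncation only drops
   bits that are already zero. */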
10353 if (code == LSHIFTRT
10354 && GET_CODE (XEXP (varop, 0)) == LSHIFTRT
10355 && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
10356 && (INTVAL (XEXP (XEXP (varop, 0), 1))
10357 >= (GET_MODE_PRECISION (GET_MODE (XEXP (varop, 0)))
10358 - GET_MODE_PRECISION (GET_MODE (varop)))))
10359 {
10360 rtx varop_inner = XEXP (varop, 0);
10361
10362 varop_inner
10363 = gen_rtx_LSHIFTRT (GET_MODE (varop_inner),
10364 XEXP (varop_inner, 0),
10365 GEN_INT
10366 (count + INTVAL (XEXP (varop_inner, 1))));
10367 varop = gen_rtx_TRUNCATE (GET_MODE (varop), varop_inner);
10368 count = 0;
10369 continue;
10370 }
10371 break;
10372
10373 default:
10374 break;
10375 }
10376
10377 break;
10378 }
10379
10380 shift_mode = try_widen_shift_mode (code, varop, count, result_mode, mode,
10381 outer_op, outer_const);
10382
10383 /* We have now finished analyzing the shift. The result should be
10384 a shift of type CODE with SHIFT_MODE shifting VAROP COUNT places. If
10385 OUTER_OP is non-UNKNOWN, it is an operation that needs to be applied
10386 to the result of the shift. OUTER_CONST is the relevant constant,
10387 but we must turn off all bits turned off in the shift. */
10388
10389 if (outer_op == UNKNOWN
10390 && orig_code == code && orig_count == count
10391 && varop == orig_varop
10392 && shift_mode == GET_MODE (varop))
10393 return NULL_RTX;
10394
10395 /* Make a SUBREG if necessary. If we can't make it, fail. */
10396 varop = gen_lowpart (shift_mode, varop);
10397 if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
10398 return NULL_RTX;
10399
10400 /* If we have an outer operation and we just made a shift, it is
10401 possible that we could have simplified the shift were it not
10402 for the outer operation. So try to do the simplification
10403 recursively. */
10404
10405 if (outer_op != UNKNOWN)
10406 x = simplify_shift_const_1 (code, shift_mode, varop, count);
10407 else
10408 x = NULL_RTX;
10409
10410 if (x == NULL_RTX)
10411 x = simplify_gen_binary (code, shift_mode, varop, GEN_INT (count));
10412
10413 /* If we were doing an LSHIFTRT in a wider mode than it was originally,
10414 turn off all the bits that the shift would have turned off. */
10415 if (orig_code == LSHIFTRT && result_mode != shift_mode)
10416 x = simplify_and_const_int (NULL_RTX, shift_mode, x,
10417 GET_MODE_MASK (result_mode) >> orig_count);
10418
10419 /* Do the remainder of the processing in RESULT_MODE. */
10420 x = gen_lowpart_or_truncate (result_mode, x);
10421
10422 /* If COMPLEMENT_P is set, we have to complement X before doing the outer
10423 operation. */
10424 if (complement_p)
10425 x = simplify_gen_unary (NOT, result_mode, x, result_mode);
10426
10427 if (outer_op != UNKNOWN)
10428 {
10429 if (GET_RTX_CLASS (outer_op) != RTX_UNARY
10430 && GET_MODE_PRECISION (result_mode) < HOST_BITS_PER_WIDE_INT)
10431 outer_const = trunc_int_for_mode (outer_const, result_mode);
10432
10433 if (outer_op == AND)
10434 x = simplify_and_const_int (NULL_RTX, result_mode, x, outer_const);
10435 else if (outer_op == SET)
10436 {
10437 /* This means that we have determined that the result is
10438 equivalent to a constant. This should be rare. */
10439 if (!side_effects_p (x))
10440 x = GEN_INT (outer_const);
10441 }
10442 else if (GET_RTX_CLASS (outer_op) == RTX_UNARY)
10443 x = simplify_gen_unary (outer_op, result_mode, x, result_mode);
10444 else
10445 x = simplify_gen_binary (outer_op, result_mode, x,
10446 GEN_INT (outer_const));
10447 }
10448
10449 return x;
10450 }
10451
10452 /* Simplify a shift of VAROP by COUNT bits. CODE says what kind of shift.
10453 The result of the shift is RESULT_MODE. If we cannot simplify it,
10454 return X or, if it is NULL, synthesize the expression with
10455 simplify_gen_binary. Otherwise, return a simplified value.
10456
10457 The shift is normally computed in the widest mode we find in VAROP, as
10458 long as it isn't a different number of words than RESULT_MODE. Exceptions
10459 are ASHIFTRT and ROTATE, which are always done in their original mode. */
10460
10461 static rtx
10462 simplify_shift_const (rtx x, enum rtx_code code, enum machine_mode result_mode,
10463 rtx varop, int count)
10464 {
10465 rtx tem = simplify_shift_const_1 (code, result_mode, varop, count);
10466 if (tem)
10467 return tem;
10468
10469 if (!x)
10470 x = simplify_gen_binary (code, GET_MODE (varop), varop, GEN_INT (count));
10471 if (GET_MODE (x) != result_mode)
10472 x = gen_lowpart (result_mode, x);
10473 return x;
10474 }
10475
10476 \f
10477 /* Like recog, but we receive the address of a pointer to a new pattern.
10478 We try to match the rtx that the pointer points to.
10479 If that fails, we may try to modify or replace the pattern,
10480 storing the replacement into the same pointer object.
10481
10482 Modifications include deletion or addition of CLOBBERs.
10483
10484 PNOTES is a pointer to a location where any REG_UNUSED notes added for
10485 the CLOBBERs are placed.
10486
10487 The value is the final insn code from the pattern ultimately matched,
10488 or -1. */
10489
10490 static int
10491 recog_for_combine (rtx *pnewpat, rtx insn, rtx *pnotes)
10492 {
10493 rtx pat = *pnewpat;
10494 rtx pat_without_clobbers;
10495 int insn_code_number;
10496 int num_clobbers_to_add = 0;
10497 int i;
10498 rtx notes = NULL_RTX;
10499 rtx old_notes, old_pat;
10500 int old_icode;
10501
10502 /* If PAT is a PARALLEL, check to see if it contains the CLOBBER
10503 we use to indicate that something didn't match. If we find such a
10504 thing, force rejection. */
10505 if (GET_CODE (pat) == PARALLEL)
10506 for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
10507 if (GET_CODE (XVECEXP (pat, 0, i)) == CLOBBER
10508 && XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
10509 return -1;
10510
10511 old_pat = PATTERN (insn);
10512 old_notes = REG_NOTES (insn);
10513 PATTERN (insn) = pat;
10514 REG_NOTES (insn) = NULL_RTX;
10515
10516 insn_code_number = recog (pat, insn, &num_clobbers_to_add);
10517 if (dump_file && (dump_flags & TDF_DETAILS))
10518 {
10519 if (insn_code_number < 0)
10520 fputs ("Failed to match this instruction:\n", dump_file);
10521 else
10522 fputs ("Successfully matched this instruction:\n", dump_file);
10523 print_rtl_single (dump_file, pat);
10524 }
10525
10526 /* If the pattern wasn't recognized, we may previously have had an insn
10527 that clobbered some register as a side effect, but the combined
10528 insn doesn't need to do that. So try once more without the clobbers
10529 unless this represents an ASM insn. */
10530
10531 if (insn_code_number < 0 && ! check_asm_operands (pat)
10532 && GET_CODE (pat) == PARALLEL)
10533 {
10534 int pos;
10535
10536 for (pos = 0, i = 0; i < XVECLEN (pat, 0); i++)
10537 if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER)
10538 {
10539 if (i != pos)
10540 SUBST (XVECEXP (pat, 0, pos), XVECEXP (pat, 0, i));
10541 pos++;
10542 }
10543
10544 SUBST_INT (XVECLEN (pat, 0), pos);
10545
10546 if (pos == 1)
10547 pat = XVECEXP (pat, 0, 0);
10548
10549 PATTERN (insn) = pat;
10550 insn_code_number = recog (pat, insn, &num_clobbers_to_add);
10551 if (dump_file && (dump_flags & TDF_DETAILS))
10552 {
10553 if (insn_code_number < 0)
10554 fputs ("Failed to match this instruction:\n", dump_file);
10555 else
10556 fputs ("Successfully matched this instruction:\n", dump_file);
10557 print_rtl_single (dump_file, pat);
10558 }
10559 }
10560
10561 pat_without_clobbers = pat;
10562
10563 PATTERN (insn) = old_pat;
10564 REG_NOTES (insn) = old_notes;
10565
10566 /* Recognize all noop sets; these will be killed by a follow-up pass. */
10567 if (insn_code_number < 0 && GET_CODE (pat) == SET && set_noop_p (pat))
10568 insn_code_number = NOOP_MOVE_INSN_CODE, num_clobbers_to_add = 0;
10569
10570 /* If we had any clobbers to add, make a new pattern that contains
10571 them. Then check to make sure that all of them are dead. */
10572 if (num_clobbers_to_add)
10573 {
10574 rtx newpat = gen_rtx_PARALLEL (VOIDmode,
10575 rtvec_alloc (GET_CODE (pat) == PARALLEL
10576 ? (XVECLEN (pat, 0)
10577 + num_clobbers_to_add)
10578 : num_clobbers_to_add + 1));
10579
10580 if (GET_CODE (pat) == PARALLEL)
10581 for (i = 0; i < XVECLEN (pat, 0); i++)
10582 XVECEXP (newpat, 0, i) = XVECEXP (pat, 0, i);
10583 else
10584 XVECEXP (newpat, 0, 0) = pat;
10585
10586 add_clobbers (newpat, insn_code_number);
10587
10588 for (i = XVECLEN (newpat, 0) - num_clobbers_to_add;
10589 i < XVECLEN (newpat, 0); i++)
10590 {
10591 if (REG_P (XEXP (XVECEXP (newpat, 0, i), 0))
10592 && ! reg_dead_at_p (XEXP (XVECEXP (newpat, 0, i), 0), insn))
10593 return -1;
10594 if (GET_CODE (XEXP (XVECEXP (newpat, 0, i), 0)) != SCRATCH)
10595 {
10596 gcc_assert (REG_P (XEXP (XVECEXP (newpat, 0, i), 0)));
10597 notes = alloc_reg_note (REG_UNUSED,
10598 XEXP (XVECEXP (newpat, 0, i), 0), notes);
10599 }
10600 }
10601 pat = newpat;
10602 }
10603
10604 if (insn_code_number >= 0
10605 && insn_code_number != NOOP_MOVE_INSN_CODE)
10606 {
10607 old_pat = PATTERN (insn);
10608 old_notes = REG_NOTES (insn);
10609 old_icode = INSN_CODE (insn);
10610 PATTERN (insn) = pat;
10611 REG_NOTES (insn) = notes;
10612
10613 /* Allow targets to reject combined insn. */
10614 if (!targetm.legitimate_combined_insn (insn))
10615 {
10616 if (dump_file && (dump_flags & TDF_DETAILS))
10617 fputs ("Instruction not appropriate for target.\n",
10618 dump_file);
10619
10620 /* Callers expect recog_for_combine to strip
10621 clobbers from the pattern on failure. */
10622 pat = pat_without_clobbers;
10623 notes = NULL_RTX;
10624
10625 insn_code_number = -1;
10626 }
10627
10628 PATTERN (insn) = old_pat;
10629 REG_NOTES (insn) = old_notes;
10630 INSN_CODE (insn) = old_icode;
10631 }
10632
10633 *pnewpat = pat;
10634 *pnotes = notes;
10635
10636 return insn_code_number;
10637 }
10638 \f
10639 /* Like gen_lowpart_general but for use by combine. In combine it
10640 is not possible to create any new pseudoregs. However, it is
10641 safe to create invalid memory addresses, because combine will
10642 try to recognize them and all they will do is make the combine
10643 attempt fail.
10644
10645 If for some reason this cannot do its job, an rtx
10646 (clobber (const_int 0)) is returned.
10647 An insn containing that will not be recognized. */
10648
10649 static rtx
10650 gen_lowpart_for_combine (enum machine_mode omode, rtx x)
10651 {
10652 enum machine_mode imode = GET_MODE (x);
10653 unsigned int osize = GET_MODE_SIZE (omode);
10654 unsigned int isize = GET_MODE_SIZE (imode);
10655 rtx result;
10656
10657 if (omode == imode)
10658 return x;
10659
10660 /* We can only support MODE being wider than a word if X is a
10661 constant integer or has a mode the same size. */
10662 if (GET_MODE_SIZE (omode) > UNITS_PER_WORD
10663 && ! (CONST_SCALAR_INT_P (x) || isize == osize))
10664 goto fail;
10665
10666 /* X might be a paradoxical (subreg (mem)). In that case, gen_lowpart
10667 won't know what to do. So we will strip off the SUBREG here and
10668 process normally. */
10669 if (GET_CODE (x) == SUBREG && MEM_P (SUBREG_REG (x)))
10670 {
10671 x = SUBREG_REG (x);
10672
10673 /* In case we fall through to the address adjustments further
10674 below, update IMODE and ISIZE, the known mode and size of X,
10675 since we just changed X. */
10676 imode = GET_MODE (x);
10677
10678 if (imode == omode)
10679 return x;
10680
10681 isize = GET_MODE_SIZE (imode);
10682 }
10683
10684 result = gen_lowpart_common (omode, x);
10685
10686 if (result)
10687 return result;
10688
10689 if (MEM_P (x))
10690 {
10691 int offset = 0;
10692
10693 /* Refuse to work on a volatile memory ref or one with a mode-dependent
10694 address. */
10695 if (MEM_VOLATILE_P (x)
10696 || mode_dependent_address_p (XEXP (x, 0), MEM_ADDR_SPACE (x)))
10697 goto fail;
10698
10699 /* If we want to refer to something bigger than the original memref,
10700 generate a paradoxical subreg instead. That will force a reload
10701 of the original memref X. */
10702 if (isize < osize)
10703 return gen_rtx_SUBREG (omode, x, 0);
10704
10705 if (WORDS_BIG_ENDIAN)
10706 offset = MAX (isize, UNITS_PER_WORD) - MAX (osize, UNITS_PER_WORD);
10707
10708 /* Adjust the address so that the address-after-the-data is
10709 unchanged. */
10710 if (BYTES_BIG_ENDIAN)
10711 offset -= MIN (UNITS_PER_WORD, osize) - MIN (UNITS_PER_WORD, isize);
10712
10713 return adjust_address_nv (x, omode, offset);
10714 }
10715
10716 /* If X is a comparison operator, rewrite it in a new mode. This
10717 probably won't match, but may allow further simplifications. */
10718 else if (COMPARISON_P (x))
10719 return gen_rtx_fmt_ee (GET_CODE (x), omode, XEXP (x, 0), XEXP (x, 1));
10720
10721 /* If we couldn't simplify X any other way, just enclose it in a
10722 SUBREG. Normally, this SUBREG won't match, but some patterns may
10723 include an explicit SUBREG or we may simplify it further in combine. */
10724 else
10725 {
10726 int offset = 0;
10727 rtx res;
10728
10729 offset = subreg_lowpart_offset (omode, imode);
10730 if (imode == VOIDmode)
10731 {
10732 imode = int_mode_for_mode (omode);
10733 x = gen_lowpart_common (imode, x);
10734 if (x == NULL)
10735 goto fail;
10736 }
10737 res = simplify_gen_subreg (omode, x, imode, offset);
10738 if (res)
10739 return res;
10740 }
10741
10742 fail:
10743 return gen_rtx_CLOBBER (omode, const0_rtx);
10744 }
10745 \f
10746 /* Try to simplify a comparison between OP0 and a constant OP1,
10747 where CODE is the comparison code that will be tested, into a
10748 (CODE OP0 const0_rtx) form.
10749
10750 The result is a possibly different comparison code to use.
10751 *POP1 may be updated. */
10752
10753 static enum rtx_code
10754 simplify_compare_const (enum rtx_code code, rtx op0, rtx *pop1)
10755 {
10756 enum machine_mode mode = GET_MODE (op0);
10757 unsigned int mode_width = GET_MODE_PRECISION (mode);
10758 HOST_WIDE_INT const_op = INTVAL (*pop1);
10759
10760 /* Get the constant we are comparing against and turn off all bits
10761 not on in our mode. */
10762 if (mode != VOIDmode)
10763 const_op = trunc_int_for_mode (const_op, mode);
10764
10765 /* If we are comparing against a constant power of two and the value
10766 being compared can only have that single bit nonzero (e.g., it was
10767 `and'ed with that bit), we can replace this with a comparison
10768 with zero. */
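/* E.g., if OP0 is (and X 8), so that only bit 3 can be nonzero,
   (eq OP0 8) becomes (ne OP0 0) and (ne OP0 8) becomes (eq OP0 0). */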
10769 if (const_op
10770 && (code == EQ || code == NE || code == GE || code == GEU
10771 || code == LT || code == LTU)
10772 && mode_width <= HOST_BITS_PER_WIDE_INT
10773 && exact_log2 (const_op) >= 0
10774 && nonzero_bits (op0, mode) == (unsigned HOST_WIDE_INT) const_op)
10775 {
10776 code = (code == EQ || code == GE || code == GEU ? NE : EQ);
10777 const_op = 0;
10778 }
10779
10780 /* Similarly, if we are comparing a value known to be either -1 or
10781 0 with -1, change it to the opposite comparison against zero. */
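/* E.g., if OP0 is known to be 0 or -1, (eq OP0 -1) becomes
   (ne OP0 0) and (gt OP0 -1) becomes (eq OP0 0). */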
10782 if (const_op == -1
10783 && (code == EQ || code == NE || code == GT || code == LE
10784 || code == GEU || code == LTU)
10785 && num_sign_bit_copies (op0, mode) == mode_width)
10786 {
10787 code = (code == EQ || code == LE || code == GEU ? NE : EQ);
10788 const_op = 0;
10789 }
10790
10791 /* Do some canonicalizations based on the comparison code. We prefer
10792 comparisons against zero and then prefer equality comparisons.
10793 If we can reduce the size of a constant, we will do that too. */
10794 switch (code)
10795 {
10796 case LT:
10797 /* < C is equivalent to <= (C - 1) */
10798 if (const_op > 0)
10799 {
10800 const_op -= 1;
10801 code = LE;
10802 /* ... fall through to LE case below. */
10803 }
10804 else
10805 break;
10806
10807 case LE:
10808 /* <= C is equivalent to < (C + 1); we do this for C < 0 */
10809 if (const_op < 0)
10810 {
10811 const_op += 1;
10812 code = LT;
10813 }
10814
10815 /* If we are doing a <= 0 comparison on a value known to have
10816 a zero sign bit, we can replace this with == 0. */
10817 else if (const_op == 0
10818 && mode_width <= HOST_BITS_PER_WIDE_INT
10819 && (nonzero_bits (op0, mode)
10820 & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
10821 == 0)
10822 code = EQ;
10823 break;
10824
10825 case GE:
10826 /* >= C is equivalent to > (C - 1). */
10827 if (const_op > 0)
10828 {
10829 const_op -= 1;
10830 code = GT;
10831 /* ... fall through to GT below. */
10832 }
10833 else
10834 break;
10835
10836 case GT:
10837 /* > C is equivalent to >= (C + 1); we do this for C < 0. */
10838 if (const_op < 0)
10839 {
10840 const_op += 1;
10841 code = GE;
10842 }
10843
10844 /* If we are doing a > 0 comparison on a value known to have
10845 a zero sign bit, we can replace this with != 0. */
10846 else if (const_op == 0
10847 && mode_width <= HOST_BITS_PER_WIDE_INT
10848 && (nonzero_bits (op0, mode)
10849 & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
10850 == 0)
10851 code = NE;
10852 break;
10853
10854 case LTU:
10855 /* < C is equivalent to <= (C - 1). */
10856 if (const_op > 0)
10857 {
10858 const_op -= 1;
10859 code = LEU;
10860 /* ... fall through ... */
10861 }
10862 /* (unsigned) < 0x80000000 is equivalent to >= 0. */
10863 else if (mode_width <= HOST_BITS_PER_WIDE_INT
10864 && (unsigned HOST_WIDE_INT) const_op
10865 == (unsigned HOST_WIDE_INT) 1 << (mode_width - 1))
10866 {
10867 const_op = 0;
10868 code = GE;
10869 break;
10870 }
10871 else
10872 break;
10873
10874 case LEU:
10875 /* unsigned <= 0 is equivalent to == 0 */
10876 if (const_op == 0)
10877 code = EQ;
10878 /* (unsigned) <= 0x7fffffff is equivalent to >= 0. */
10879 else if (mode_width <= HOST_BITS_PER_WIDE_INT
10880 && (unsigned HOST_WIDE_INT) const_op
10881 == ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)) - 1)
10882 {
10883 const_op = 0;
10884 code = GE;
10885 }
10886 break;
10887
10888 case GEU:
10889 /* >= C is equivalent to > (C - 1). */
10890 if (const_op > 1)
10891 {
10892 const_op -= 1;
10893 code = GTU;
10894 /* ... fall through ... */
10895 }
10896
10897 /* (unsigned) >= 0x80000000 is equivalent to < 0. */
10898 else if (mode_width <= HOST_BITS_PER_WIDE_INT
10899 && (unsigned HOST_WIDE_INT) const_op
10900 == (unsigned HOST_WIDE_INT) 1 << (mode_width - 1))
10901 {
10902 const_op = 0;
10903 code = LT;
10904 break;
10905 }
10906 else
10907 break;
10908
10909 case GTU:
10910 /* unsigned > 0 is equivalent to != 0 */
10911 if (const_op == 0)
10912 code = NE;
10913 /* (unsigned) > 0x7fffffff is equivalent to < 0. */
10914 else if (mode_width <= HOST_BITS_PER_WIDE_INT
10915 && (unsigned HOST_WIDE_INT) const_op
10916 == ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)) - 1)
10917 {
10918 const_op = 0;
10919 code = LT;
10920 }
10921 break;
10922
10923 default:
10924 break;
10925 }
10926
10927 *pop1 = GEN_INT (const_op);
10928 return code;
10929 }
10930 \f
10931 /* Simplify a comparison between *POP0 and *POP1 where CODE is the
10932 comparison code that will be tested.
10933
10934 The result is a possibly different comparison code to use. *POP0 and
10935 *POP1 may be updated.
10936
10937 It is possible that we might detect that a comparison is either always
10938 true or always false. However, we do not perform general constant
10939 folding in combine, so this knowledge isn't useful. Such tautologies
10940 should have been detected earlier. Hence we ignore all such cases. */
10941
10942 static enum rtx_code
10943 simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
10944 {
10945 rtx op0 = *pop0;
10946 rtx op1 = *pop1;
10947 rtx tem, tem1;
10948 int i;
10949 enum machine_mode mode, tmode;
10950
10951 /* Try a few ways of applying the same transformation to both operands. */
10952 while (1)
10953 {
10954 #ifndef WORD_REGISTER_OPERATIONS
10955 /* The test below this one won't handle SIGN_EXTENDs on these machines,
10956 so check specially. */
10957 if (code != GTU && code != GEU && code != LTU && code != LEU
10958 && GET_CODE (op0) == ASHIFTRT && GET_CODE (op1) == ASHIFTRT
10959 && GET_CODE (XEXP (op0, 0)) == ASHIFT
10960 && GET_CODE (XEXP (op1, 0)) == ASHIFT
10961 && GET_CODE (XEXP (XEXP (op0, 0), 0)) == SUBREG
10962 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SUBREG
10963 && (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0)))
10964 == GET_MODE (SUBREG_REG (XEXP (XEXP (op1, 0), 0))))
10965 && CONST_INT_P (XEXP (op0, 1))
10966 && XEXP (op0, 1) == XEXP (op1, 1)
10967 && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
10968 && XEXP (op0, 1) == XEXP (XEXP (op1, 0), 1)
10969 && (INTVAL (XEXP (op0, 1))
10970 == (GET_MODE_PRECISION (GET_MODE (op0))
10971 - (GET_MODE_PRECISION
10972 (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0))))))))
10973 {
10974 op0 = SUBREG_REG (XEXP (XEXP (op0, 0), 0));
10975 op1 = SUBREG_REG (XEXP (XEXP (op1, 0), 0));
10976 }
10977 #endif
10978
10979 /* If both operands are the same constant shift, see if we can ignore the
10980 shift. We can if the shift is a rotate or if the bits shifted out of
10981 this shift are known to be zero for both inputs and if the type of
10982 comparison is compatible with the shift. */
10983 if (GET_CODE (op0) == GET_CODE (op1)
10984 && HWI_COMPUTABLE_MODE_P (GET_MODE(op0))
10985 && ((GET_CODE (op0) == ROTATE && (code == NE || code == EQ))
10986 || ((GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFT)
10987 && (code != GT && code != LT && code != GE && code != LE))
10988 || (GET_CODE (op0) == ASHIFTRT
10989 && (code != GTU && code != LTU
10990 && code != GEU && code != LEU)))
10991 && CONST_INT_P (XEXP (op0, 1))
10992 && INTVAL (XEXP (op0, 1)) >= 0
10993 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
10994 && XEXP (op0, 1) == XEXP (op1, 1))
10995 {
10996 enum machine_mode mode = GET_MODE (op0);
10997 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
10998 int shift_count = INTVAL (XEXP (op0, 1));
10999
11000 if (GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFTRT)
11001 mask &= (mask >> shift_count) << shift_count;
11002 else if (GET_CODE (op0) == ASHIFT)
11003 mask = (mask & (mask << shift_count)) >> shift_count;
11004
11005 if ((nonzero_bits (XEXP (op0, 0), mode) & ~mask) == 0
11006 && (nonzero_bits (XEXP (op1, 0), mode) & ~mask) == 0)
11007 op0 = XEXP (op0, 0), op1 = XEXP (op1, 0);
11008 else
11009 break;
11010 }
11011
11012 /* If both operands are AND's of a paradoxical SUBREG by constant, the
11013 SUBREGs are of the same mode, and, in both cases, the AND would
11014 be redundant if the comparison was done in the narrower mode,
11015 do the comparison in the narrower mode (e.g., we are AND'ing with 1
11016 and the operand's possibly nonzero bits are 0xffffff01; in that case
11017 if we only care about QImode, we don't need the AND). This case
11018 occurs if the output mode of an scc insn is not SImode and
11019 STORE_FLAG_VALUE == 1 (e.g., the 386).
11020
11021 Similarly, check for a case where the AND's are ZERO_EXTEND
11022 operations from some narrower mode even though a SUBREG is not
11023 present. */
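/* E.g., if both operands are (and:SI ... (const_int 255)), the
   comparison can be done on the QImode lowparts instead, using the
   unsigned form of the comparison code. */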
11024
11025 else if (GET_CODE (op0) == AND && GET_CODE (op1) == AND
11026 && CONST_INT_P (XEXP (op0, 1))
11027 && CONST_INT_P (XEXP (op1, 1)))
11028 {
11029 rtx inner_op0 = XEXP (op0, 0);
11030 rtx inner_op1 = XEXP (op1, 0);
11031 HOST_WIDE_INT c0 = INTVAL (XEXP (op0, 1));
11032 HOST_WIDE_INT c1 = INTVAL (XEXP (op1, 1));
11033 int changed = 0;
11034
11035 if (paradoxical_subreg_p (inner_op0)
11036 && GET_CODE (inner_op1) == SUBREG
11037 && (GET_MODE (SUBREG_REG (inner_op0))
11038 == GET_MODE (SUBREG_REG (inner_op1)))
11039 && (GET_MODE_PRECISION (GET_MODE (SUBREG_REG (inner_op0)))
11040 <= HOST_BITS_PER_WIDE_INT)
11041 && (0 == ((~c0) & nonzero_bits (SUBREG_REG (inner_op0),
11042 GET_MODE (SUBREG_REG (inner_op0)))))
11043 && (0 == ((~c1) & nonzero_bits (SUBREG_REG (inner_op1),
11044 GET_MODE (SUBREG_REG (inner_op1))))))
11045 {
11046 op0 = SUBREG_REG (inner_op0);
11047 op1 = SUBREG_REG (inner_op1);
11048
11049 /* The resulting comparison is always unsigned since we masked
11050 off the original sign bit. */
11051 code = unsigned_condition (code);
11052
11053 changed = 1;
11054 }
11055
11056 else if (c0 == c1)
11057 for (tmode = GET_CLASS_NARROWEST_MODE
11058 (GET_MODE_CLASS (GET_MODE (op0)));
11059 tmode != GET_MODE (op0); tmode = GET_MODE_WIDER_MODE (tmode))
11060 if ((unsigned HOST_WIDE_INT) c0 == GET_MODE_MASK (tmode))
11061 {
11062 op0 = gen_lowpart (tmode, inner_op0);
11063 op1 = gen_lowpart (tmode, inner_op1);
11064 code = unsigned_condition (code);
11065 changed = 1;
11066 break;
11067 }
11068
11069 if (! changed)
11070 break;
11071 }
11072
11073 /* If both operands are NOT, we can strip off the outer operation
11074 and adjust the comparison code for swapped operands; similarly for
11075 NEG, except that this must be an equality comparison. */
11076 else if ((GET_CODE (op0) == NOT && GET_CODE (op1) == NOT)
11077 || (GET_CODE (op0) == NEG && GET_CODE (op1) == NEG
11078 && (code == EQ || code == NE)))
11079 op0 = XEXP (op0, 0), op1 = XEXP (op1, 0), code = swap_condition (code);
11080
11081 else
11082 break;
11083 }
11084
11085 /* If the first operand is a constant, swap the operands and adjust the
11086 comparison code appropriately, but don't do this if the second operand
11087 is already a constant integer. */
11088 if (swap_commutative_operands_p (op0, op1))
11089 {
11090 tem = op0, op0 = op1, op1 = tem;
11091 code = swap_condition (code);
11092 }
11093
11094 /* We now enter a loop during which we will try to simplify the comparison.
11095 For the most part, we are only concerned with comparisons with zero,
11096 but some things may really be comparisons with zero but not start
11097 out looking that way. */
11098
11099 while (CONST_INT_P (op1))
11100 {
11101 enum machine_mode mode = GET_MODE (op0);
11102 unsigned int mode_width = GET_MODE_PRECISION (mode);
11103 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
11104 int equality_comparison_p;
11105 int sign_bit_comparison_p;
11106 int unsigned_comparison_p;
11107 HOST_WIDE_INT const_op;
11108
11109 /* We only want to handle integral modes. This catches VOIDmode,
11110 CCmode, and the floating-point modes. An exception is that we
11111 can handle VOIDmode if OP0 is a COMPARE or a comparison
11112 operation. */
11113
11114 if (GET_MODE_CLASS (mode) != MODE_INT
11115 && ! (mode == VOIDmode
11116 && (GET_CODE (op0) == COMPARE || COMPARISON_P (op0))))
11117 break;
11118
11119 /* Try to simplify the compare to constant, possibly changing the
11120 comparison op, and/or changing op1 to zero. */
11121 code = simplify_compare_const (code, op0, &op1);
11122 const_op = INTVAL (op1);
11123
11124 /* Compute some predicates to simplify code below. */
11125
11126 equality_comparison_p = (code == EQ || code == NE);
11127 sign_bit_comparison_p = ((code == LT || code == GE) && const_op == 0);
11128 unsigned_comparison_p = (code == LTU || code == LEU || code == GTU
11129 || code == GEU);
11130
11131 /* If this is a sign bit comparison and we can do arithmetic in
11132 MODE, say that we will only be needing the sign bit of OP0. */
11133 if (sign_bit_comparison_p && HWI_COMPUTABLE_MODE_P (mode))
11134 op0 = force_to_mode (op0, mode,
11135 (unsigned HOST_WIDE_INT) 1
11136 << (GET_MODE_PRECISION (mode) - 1),
11137 0);
11138
11139 /* Now try cases based on the opcode of OP0. If none of the cases
11140 does a "continue", we exit this loop immediately after the
11141 switch. */
11142
11143 switch (GET_CODE (op0))
11144 {
11145 case ZERO_EXTRACT:
11146 /* If we are extracting a single bit from a variable position in
11147 a constant that has only a single bit set and are comparing it
11148 with zero, we can convert this into an equality comparison
11149 between the position and the location of the single bit. */
11150 /* Except we can't if SHIFT_COUNT_TRUNCATED is set, since we might
11151 have already reduced the shift count modulo the word size. */
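/* E.g., (eq (zero_extract (const_int 4) (const_int 1) POS) (const_int 0))
   becomes (ne POS (const_int 2)), since bit 2 is the only bit set in 4;
   POS is adjusted first if BITS_BIG_ENDIAN. */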
11152 if (!SHIFT_COUNT_TRUNCATED
11153 && CONST_INT_P (XEXP (op0, 0))
11154 && XEXP (op0, 1) == const1_rtx
11155 && equality_comparison_p && const_op == 0
11156 && (i = exact_log2 (UINTVAL (XEXP (op0, 0)))) >= 0)
11157 {
11158 if (BITS_BIG_ENDIAN)
11159 i = BITS_PER_WORD - 1 - i;
11160
11161 op0 = XEXP (op0, 2);
11162 op1 = GEN_INT (i);
11163 const_op = i;
11164
11165 /* Result is nonzero iff shift count is equal to I. */
11166 code = reverse_condition (code);
11167 continue;
11168 }
11169
11170 /* ... fall through ... */
11171
11172 case SIGN_EXTRACT:
11173 tem = expand_compound_operation (op0);
11174 if (tem != op0)
11175 {
11176 op0 = tem;
11177 continue;
11178 }
11179 break;
11180
11181 case NOT:
11182 /* If testing for equality, we can take the NOT of the constant. */
11183 if (equality_comparison_p
11184 && (tem = simplify_unary_operation (NOT, mode, op1, mode)) != 0)
11185 {
11186 op0 = XEXP (op0, 0);
11187 op1 = tem;
11188 continue;
11189 }
11190
11191 /* If just looking at the sign bit, reverse the sense of the
11192 comparison. */
11193 if (sign_bit_comparison_p)
11194 {
11195 op0 = XEXP (op0, 0);
11196 code = (code == GE ? LT : GE);
11197 continue;
11198 }
11199 break;
11200
11201 case NEG:
11202 /* If testing for equality, we can take the NEG of the constant. */
11203 if (equality_comparison_p
11204 && (tem = simplify_unary_operation (NEG, mode, op1, mode)) != 0)
11205 {
11206 op0 = XEXP (op0, 0);
11207 op1 = tem;
11208 continue;
11209 }
11210
11211 /* The remaining cases only apply to comparisons with zero. */
11212 if (const_op != 0)
11213 break;
11214
11215 /* When X is ABS or is known positive,
11216 (neg X) is < 0 if and only if X != 0. */
11217
11218 if (sign_bit_comparison_p
11219 && (GET_CODE (XEXP (op0, 0)) == ABS
11220 || (mode_width <= HOST_BITS_PER_WIDE_INT
11221 && (nonzero_bits (XEXP (op0, 0), mode)
11222 & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
11223 == 0)))
11224 {
11225 op0 = XEXP (op0, 0);
11226 code = (code == LT ? NE : EQ);
11227 continue;
11228 }
11229
11230 /* If we have NEG of something whose two high-order bits are the
11231 same, we know that "(-a) < 0" is equivalent to "a > 0". */
11232 if (num_sign_bit_copies (op0, mode) >= 2)
11233 {
11234 op0 = XEXP (op0, 0);
11235 code = swap_condition (code);
11236 continue;
11237 }
11238 break;
11239
11240 case ROTATE:
11241 /* If we are testing equality and our count is a constant, we
11242 can perform the inverse operation on our RHS. */
11243 if (equality_comparison_p && CONST_INT_P (XEXP (op0, 1))
11244 && (tem = simplify_binary_operation (ROTATERT, mode,
11245 op1, XEXP (op0, 1))) != 0)
11246 {
11247 op0 = XEXP (op0, 0);
11248 op1 = tem;
11249 continue;
11250 }
11251
11252 /* If we are doing a < 0 or >= 0 comparison, it means we are testing
11253 a particular bit. Convert it to an AND of a constant of that
11254 bit. This will be converted into a ZERO_EXTRACT. */
11255 if (const_op == 0 && sign_bit_comparison_p
11256 && CONST_INT_P (XEXP (op0, 1))
11257 && mode_width <= HOST_BITS_PER_WIDE_INT)
11258 {
11259 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
11260 ((unsigned HOST_WIDE_INT) 1
11261 << (mode_width - 1
11262 - INTVAL (XEXP (op0, 1)))));
11263 code = (code == LT ? NE : EQ);
11264 continue;
11265 }
11266
11267 /* Fall through. */
11268
11269 case ABS:
11270 /* ABS is ignorable inside an equality comparison with zero. */
11271 if (const_op == 0 && equality_comparison_p)
11272 {
11273 op0 = XEXP (op0, 0);
11274 continue;
11275 }
11276 break;
11277
11278 case SIGN_EXTEND:
11279 /* Can simplify (compare (zero/sign_extend FOO) CONST) to
11280 (compare FOO CONST) if CONST fits in FOO's mode and we
11281 are either testing inequality or have an unsigned
11282 comparison with ZERO_EXTEND or a signed comparison with
11283 SIGN_EXTEND. But don't do it if we don't have a compare
11284 insn of the given mode, since we'd have to revert it
11285 later on, and then we wouldn't know whether to sign- or
11286 zero-extend. */
11287 mode = GET_MODE (XEXP (op0, 0));
11288 if (GET_MODE_CLASS (mode) == MODE_INT
11289 && ! unsigned_comparison_p
11290 && HWI_COMPUTABLE_MODE_P (mode)
11291 && trunc_int_for_mode (const_op, mode) == const_op
11292 && have_insn_for (COMPARE, mode))
11293 {
11294 op0 = XEXP (op0, 0);
11295 continue;
11296 }
11297 break;
11298
11299 case SUBREG:
11300 /* Check for the case where we are comparing A - C1 with C2, that is
11301
11302 (subreg:MODE (plus (A) (-C1))) op (C2)
11303
11304 with C1 a constant, and try to lift the SUBREG, i.e. to do the
11305 comparison in the wider mode. One of the following two conditions
11306 must be true in order for this to be valid:
11307
11308 1. The mode extension results in the same bit pattern being added
11309 on both sides and the comparison is equality or unsigned. As
11310 C2 has been truncated to fit in MODE, the pattern can only be
11311 all 0s or all 1s.
11312
11313 2. The mode extension results in the sign bit being copied on
11314 each side.
11315
11316 The difficulty here is that we have predicates for A but not for
11317 (A - C1) so we need to check that C1 is within proper bounds so
11318 as to perturb A as little as possible. */
11319
11320 if (mode_width <= HOST_BITS_PER_WIDE_INT
11321 && subreg_lowpart_p (op0)
11322 && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op0))) > mode_width
11323 && GET_CODE (SUBREG_REG (op0)) == PLUS
11324 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1)))
11325 {
11326 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
11327 rtx a = XEXP (SUBREG_REG (op0), 0);
11328 HOST_WIDE_INT c1 = -INTVAL (XEXP (SUBREG_REG (op0), 1));
11329
11330 if ((c1 > 0
11331 && (unsigned HOST_WIDE_INT) c1
11332 < (unsigned HOST_WIDE_INT) 1 << (mode_width - 1)
11333 && (equality_comparison_p || unsigned_comparison_p)
11334 /* (A - C1) zero-extends if it is positive and sign-extends
11335 if it is negative, C2 both zero- and sign-extends. */
11336 && ((0 == (nonzero_bits (a, inner_mode)
11337 & ~GET_MODE_MASK (mode))
11338 && const_op >= 0)
11339 /* (A - C1) sign-extends if it is positive and 1-extends
11340 if it is negative, C2 both sign- and 1-extends. */
11341 || (num_sign_bit_copies (a, inner_mode)
11342 > (unsigned int) (GET_MODE_PRECISION (inner_mode)
11343 - mode_width)
11344 && const_op < 0)))
11345 || ((unsigned HOST_WIDE_INT) c1
11346 < (unsigned HOST_WIDE_INT) 1 << (mode_width - 2)
11347 /* (A - C1) always sign-extends, like C2. */
11348 && num_sign_bit_copies (a, inner_mode)
11349 > (unsigned int) (GET_MODE_PRECISION (inner_mode)
11350 - (mode_width - 1))))
11351 {
11352 op0 = SUBREG_REG (op0);
11353 continue;
11354 }
11355 }
11356
11357 /* If the inner mode is narrower and we are extracting the low part,
11358 we can treat the SUBREG as if it were a ZERO_EXTEND. */
11359 if (subreg_lowpart_p (op0)
11360 && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op0))) < mode_width)
11361 /* Fall through */ ;
11362 else
11363 break;
11364
11365 /* ... fall through ... */
11366
11367 case ZERO_EXTEND:
11368 mode = GET_MODE (XEXP (op0, 0));
11369 if (GET_MODE_CLASS (mode) == MODE_INT
11370 && (unsigned_comparison_p || equality_comparison_p)
11371 && HWI_COMPUTABLE_MODE_P (mode)
11372 && (unsigned HOST_WIDE_INT) const_op <= GET_MODE_MASK (mode)
11373 && const_op >= 0
11374 && have_insn_for (COMPARE, mode))
11375 {
11376 op0 = XEXP (op0, 0);
11377 continue;
11378 }
11379 break;
11380
11381 case PLUS:
11382 /* (eq (plus X A) B) -> (eq X (minus B A)). We can only do
11383 this for equality comparisons due to pathological cases involving
11384 overflows. */
11385 if (equality_comparison_p
11386 && 0 != (tem = simplify_binary_operation (MINUS, mode,
11387 op1, XEXP (op0, 1))))
11388 {
11389 op0 = XEXP (op0, 0);
11390 op1 = tem;
11391 continue;
11392 }
11393
11394 /* (plus (abs X) (const_int -1)) is < 0 if and only if X == 0. */
11395 if (const_op == 0 && XEXP (op0, 1) == constm1_rtx
11396 && GET_CODE (XEXP (op0, 0)) == ABS && sign_bit_comparison_p)
11397 {
11398 op0 = XEXP (XEXP (op0, 0), 0);
11399 code = (code == LT ? EQ : NE);
11400 continue;
11401 }
11402 break;
11403
11404 case MINUS:
11405 /* We used to optimize signed comparisons against zero, but that
11406 was incorrect. Unsigned comparisons against zero (GTU, LEU)
11407 arrive here as equality comparisons, or (GEU, LTU) are
11408 optimized away. No need to special-case them. */
11409
11410 /* (eq (minus A B) C) -> (eq A (plus B C)) or
11411 (eq B (minus A C)), whichever simplifies. We can only do
11412 this for equality comparisons due to pathological cases involving
11413 overflows. */
11414 if (equality_comparison_p
11415 && 0 != (tem = simplify_binary_operation (PLUS, mode,
11416 XEXP (op0, 1), op1)))
11417 {
11418 op0 = XEXP (op0, 0);
11419 op1 = tem;
11420 continue;
11421 }
11422
11423 if (equality_comparison_p
11424 && 0 != (tem = simplify_binary_operation (MINUS, mode,
11425 XEXP (op0, 0), op1)))
11426 {
11427 op0 = XEXP (op0, 1);
11428 op1 = tem;
11429 continue;
11430 }
11431
11432 /* The sign bit of (minus (ashiftrt X C) X), where C is the number
11433 of bits in X minus 1, is one iff X > 0. */
11434 if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == ASHIFTRT
11435 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
11436 && UINTVAL (XEXP (XEXP (op0, 0), 1)) == mode_width - 1
11437 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
11438 {
11439 op0 = XEXP (op0, 1);
11440 code = (code == GE ? LE : GT);
11441 continue;
11442 }
11443 break;
11444
11445 case XOR:
11446 /* (eq (xor A B) C) -> (eq A (xor B C)). This is a simplification
11447 if C is zero or B is a constant. */
11448 if (equality_comparison_p
11449 && 0 != (tem = simplify_binary_operation (XOR, mode,
11450 XEXP (op0, 1), op1)))
11451 {
11452 op0 = XEXP (op0, 0);
11453 op1 = tem;
11454 continue;
11455 }
11456 break;
11457
11458 case EQ: case NE:
11459 case UNEQ: case LTGT:
11460 case LT: case LTU: case UNLT: case LE: case LEU: case UNLE:
11461 case GT: case GTU: case UNGT: case GE: case GEU: case UNGE:
11462 case UNORDERED: case ORDERED:
11463 /* We can't do anything if OP0 is a condition code value, rather
11464 than an actual data value. */
11465 if (const_op != 0
11466 || CC0_P (XEXP (op0, 0))
11467 || GET_MODE_CLASS (GET_MODE (XEXP (op0, 0))) == MODE_CC)
11468 break;
11469
11470 /* Get the two operands being compared. */
11471 if (GET_CODE (XEXP (op0, 0)) == COMPARE)
11472 tem = XEXP (XEXP (op0, 0), 0), tem1 = XEXP (XEXP (op0, 0), 1);
11473 else
11474 tem = XEXP (op0, 0), tem1 = XEXP (op0, 1);
11475
11476 /* Check for the cases where we simply want the result of the
11477 earlier test or the opposite of that result. */
11478 if (code == NE || code == EQ
11479 || (val_signbit_known_set_p (GET_MODE (op0), STORE_FLAG_VALUE)
11480 && (code == LT || code == GE)))
11481 {
11482 enum rtx_code new_code;
11483 if (code == LT || code == NE)
11484 new_code = GET_CODE (op0);
11485 else
11486 new_code = reversed_comparison_code (op0, NULL);
11487
11488 if (new_code != UNKNOWN)
11489 {
11490 code = new_code;
11491 op0 = tem;
11492 op1 = tem1;
11493 continue;
11494 }
11495 }
11496 break;
11497
11498 case IOR:
11499 /* The sign bit of (ior (plus X (const_int -1)) X) is nonzero
11500 iff X <= 0. */
11501 if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == PLUS
11502 && XEXP (XEXP (op0, 0), 1) == constm1_rtx
11503 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
11504 {
11505 op0 = XEXP (op0, 1);
11506 code = (code == GE ? GT : LE);
11507 continue;
11508 }
11509 break;
11510
11511 case AND:
11512 /* Convert (and (xshift 1 X) Y) to (and (lshiftrt Y X) 1). This
11513 will be converted to a ZERO_EXTRACT later. */
11514 if (const_op == 0 && equality_comparison_p
11515 && GET_CODE (XEXP (op0, 0)) == ASHIFT
11516 && XEXP (XEXP (op0, 0), 0) == const1_rtx)
11517 {
11518 op0 = gen_rtx_LSHIFTRT (mode, XEXP (op0, 1),
11519 XEXP (XEXP (op0, 0), 1));
11520 op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
11521 continue;
11522 }
11523
11524 /* If we are comparing (and (lshiftrt X C1) C2) for equality with
11525 zero and X is a comparison and C1 and C2 describe only bits set
11526 in STORE_FLAG_VALUE, we can compare with X. */
11527 if (const_op == 0 && equality_comparison_p
11528 && mode_width <= HOST_BITS_PER_WIDE_INT
11529 && CONST_INT_P (XEXP (op0, 1))
11530 && GET_CODE (XEXP (op0, 0)) == LSHIFTRT
11531 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
11532 && INTVAL (XEXP (XEXP (op0, 0), 1)) >= 0
11533 && INTVAL (XEXP (XEXP (op0, 0), 1)) < HOST_BITS_PER_WIDE_INT)
11534 {
11535 mask = ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
11536 << INTVAL (XEXP (XEXP (op0, 0), 1)));
11537 if ((~STORE_FLAG_VALUE & mask) == 0
11538 && (COMPARISON_P (XEXP (XEXP (op0, 0), 0))
11539 || ((tem = get_last_value (XEXP (XEXP (op0, 0), 0))) != 0
11540 && COMPARISON_P (tem))))
11541 {
11542 op0 = XEXP (XEXP (op0, 0), 0);
11543 continue;
11544 }
11545 }
11546
11547 /* If we are doing an equality comparison of an AND of a bit equal
11548 to the sign bit, replace this with a LT or GE comparison of
11549 the underlying value. */
11550 if (equality_comparison_p
11551 && const_op == 0
11552 && CONST_INT_P (XEXP (op0, 1))
11553 && mode_width <= HOST_BITS_PER_WIDE_INT
11554 && ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
11555 == (unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
11556 {
11557 op0 = XEXP (op0, 0);
11558 code = (code == EQ ? GE : LT);
11559 continue;
11560 }
11561
11562 /* If this AND operation is really a ZERO_EXTEND from a narrower
11563 mode, the constant fits within that mode, and this is either an
11564 equality or unsigned comparison, try to do this comparison in
11565 the narrower mode.
11566
11567 Note that in:
11568
11569 (ne:DI (and:DI (reg:DI 4) (const_int 0xffffffff)) (const_int 0))
11570 -> (ne:DI (reg:SI 4) (const_int 0))
11571
11572 unless TRULY_NOOP_TRUNCATION allows it or the register is
11573 	     known to hold a value of the required mode, the
11574 transformation is invalid. */
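	  /* Illustrative example (not from the original sources): assuming
	     SImode-to-QImode truncation is a no-op on the target,
	     (eq (and:SI X (const_int 255)) (const_int 12)) can be done as
	     (eq (subreg:QI X) (const_int 12)).  */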
11575 if ((equality_comparison_p || unsigned_comparison_p)
11576 && CONST_INT_P (XEXP (op0, 1))
11577 && (i = exact_log2 ((UINTVAL (XEXP (op0, 1))
11578 & GET_MODE_MASK (mode))
11579 + 1)) >= 0
11580 && const_op >> i == 0
11581 && (tmode = mode_for_size (i, MODE_INT, 1)) != BLKmode
11582 && (TRULY_NOOP_TRUNCATION_MODES_P (tmode, GET_MODE (op0))
11583 || (REG_P (XEXP (op0, 0))
11584 && reg_truncated_to_mode (tmode, XEXP (op0, 0)))))
11585 {
11586 op0 = gen_lowpart (tmode, XEXP (op0, 0));
11587 continue;
11588 }
11589
11590 /* If this is (and:M1 (subreg:M2 X 0) (const_int C1)) where C1
11591 fits in both M1 and M2 and the SUBREG is either paradoxical
11592 or represents the low part, permute the SUBREG and the AND
11593 and try again. */
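	  /* Illustrative example (not from the original sources): for a
	     lowpart SUBREG, (and:QI (subreg:QI (reg:SI R) 0) (const_int 3))
	     becomes (subreg:QI (and:SI (reg:SI R) (const_int 3)) 0), letting
	     the AND be simplified against what is known about R in SImode.  */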
11594 if (GET_CODE (XEXP (op0, 0)) == SUBREG)
11595 {
11596 unsigned HOST_WIDE_INT c1;
11597 tmode = GET_MODE (SUBREG_REG (XEXP (op0, 0)));
11598 /* Require an integral mode, to avoid creating something like
11599 (AND:SF ...). */
11600 if (SCALAR_INT_MODE_P (tmode)
11601 /* It is unsafe to commute the AND into the SUBREG if the
11602 SUBREG is paradoxical and WORD_REGISTER_OPERATIONS is
11603 not defined. As originally written the upper bits
11604 have a defined value due to the AND operation.
11605 However, if we commute the AND inside the SUBREG then
11606 they no longer have defined values and the meaning of
11607 the code has been changed. */
11608 && (0
11609 #ifdef WORD_REGISTER_OPERATIONS
11610 || (mode_width > GET_MODE_PRECISION (tmode)
11611 && mode_width <= BITS_PER_WORD)
11612 #endif
11613 || (mode_width <= GET_MODE_PRECISION (tmode)
11614 && subreg_lowpart_p (XEXP (op0, 0))))
11615 && CONST_INT_P (XEXP (op0, 1))
11616 && mode_width <= HOST_BITS_PER_WIDE_INT
11617 && HWI_COMPUTABLE_MODE_P (tmode)
11618 && ((c1 = INTVAL (XEXP (op0, 1))) & ~mask) == 0
11619 && (c1 & ~GET_MODE_MASK (tmode)) == 0
11620 && c1 != mask
11621 && c1 != GET_MODE_MASK (tmode))
11622 {
11623 op0 = simplify_gen_binary (AND, tmode,
11624 SUBREG_REG (XEXP (op0, 0)),
11625 gen_int_mode (c1, tmode));
11626 op0 = gen_lowpart (mode, op0);
11627 continue;
11628 }
11629 }
11630
11631 /* Convert (ne (and (not X) 1) 0) to (eq (and X 1) 0). */
11632 if (const_op == 0 && equality_comparison_p
11633 && XEXP (op0, 1) == const1_rtx
11634 && GET_CODE (XEXP (op0, 0)) == NOT)
11635 {
11636 op0 = simplify_and_const_int (NULL_RTX, mode,
11637 XEXP (XEXP (op0, 0), 0), 1);
11638 code = (code == NE ? EQ : NE);
11639 continue;
11640 }
11641
11642 /* Convert (ne (and (lshiftrt (not X)) 1) 0) to
11643 (eq (and (lshiftrt X) 1) 0).
11644 Also handle the case where (not X) is expressed using xor. */
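	  /* Illustrative example (not from the original sources): since
	     (xor X (const_int 4)) flips exactly bit 2,
	     (ne (and (lshiftrt (xor X (const_int 4)) (const_int 2))
	              (const_int 1)) (const_int 0))
	     becomes
	     (eq (and (lshiftrt X (const_int 2)) (const_int 1)) (const_int 0)).  */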
11645 if (const_op == 0 && equality_comparison_p
11646 && XEXP (op0, 1) == const1_rtx
11647 && GET_CODE (XEXP (op0, 0)) == LSHIFTRT)
11648 {
11649 rtx shift_op = XEXP (XEXP (op0, 0), 0);
11650 rtx shift_count = XEXP (XEXP (op0, 0), 1);
11651
11652 if (GET_CODE (shift_op) == NOT
11653 || (GET_CODE (shift_op) == XOR
11654 && CONST_INT_P (XEXP (shift_op, 1))
11655 && CONST_INT_P (shift_count)
11656 && HWI_COMPUTABLE_MODE_P (mode)
11657 && (UINTVAL (XEXP (shift_op, 1))
11658 == (unsigned HOST_WIDE_INT) 1
11659 << INTVAL (shift_count))))
11660 {
11661 op0
11662 = gen_rtx_LSHIFTRT (mode, XEXP (shift_op, 0), shift_count);
11663 op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
11664 code = (code == NE ? EQ : NE);
11665 continue;
11666 }
11667 }
11668 break;
11669
11670 case ASHIFT:
11671 /* If we have (compare (ashift FOO N) (const_int C)) and
11672 the high order N bits of FOO (N+1 if an inequality comparison)
11673 are known to be zero, we can do this by comparing FOO with C
11674 shifted right N bits so long as the low-order N bits of C are
11675 zero. */
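	  /* Illustrative example (not from the original sources): provided the
	     two high-order bits of FOO are known to be zero,
	     (eq (ashift FOO (const_int 2)) (const_int 20)) becomes
	     (eq FOO (const_int 5)).  */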
11676 if (CONST_INT_P (XEXP (op0, 1))
11677 && INTVAL (XEXP (op0, 1)) >= 0
11678 && ((INTVAL (XEXP (op0, 1)) + ! equality_comparison_p)
11679 < HOST_BITS_PER_WIDE_INT)
11680 && (((unsigned HOST_WIDE_INT) const_op
11681 & (((unsigned HOST_WIDE_INT) 1 << INTVAL (XEXP (op0, 1)))
11682 - 1)) == 0)
11683 && mode_width <= HOST_BITS_PER_WIDE_INT
11684 && (nonzero_bits (XEXP (op0, 0), mode)
11685 & ~(mask >> (INTVAL (XEXP (op0, 1))
11686 + ! equality_comparison_p))) == 0)
11687 {
11688 /* We must perform a logical shift, not an arithmetic one,
11689 as we want the top N bits of C to be zero. */
11690 unsigned HOST_WIDE_INT temp = const_op & GET_MODE_MASK (mode);
11691
11692 temp >>= INTVAL (XEXP (op0, 1));
11693 op1 = gen_int_mode (temp, mode);
11694 op0 = XEXP (op0, 0);
11695 continue;
11696 }
11697
11698 /* If we are doing a sign bit comparison, it means we are testing
11699 a particular bit. Convert it to the appropriate AND. */
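	  /* Illustrative example (not from the original sources): in SImode,
	     (lt (ashift X (const_int 3)) (const_int 0)) tests bit 28 of X and
	     becomes (ne (and X (const_int 0x10000000)) (const_int 0)).  */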
11700 if (sign_bit_comparison_p && CONST_INT_P (XEXP (op0, 1))
11701 && mode_width <= HOST_BITS_PER_WIDE_INT)
11702 {
11703 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
11704 ((unsigned HOST_WIDE_INT) 1
11705 << (mode_width - 1
11706 - INTVAL (XEXP (op0, 1)))));
11707 code = (code == LT ? NE : EQ);
11708 continue;
11709 }
11710
11711 	  /* If this is an equality comparison with zero and we are shifting
11712 the low bit to the sign bit, we can convert this to an AND of the
11713 low-order bit. */
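	  /* Illustrative example (not from the original sources): in SImode,
	     (eq (ashift X (const_int 31)) (const_int 0)) becomes
	     (eq (and X (const_int 1)) (const_int 0)).  */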
11714 if (const_op == 0 && equality_comparison_p
11715 && CONST_INT_P (XEXP (op0, 1))
11716 && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
11717 {
11718 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0), 1);
11719 continue;
11720 }
11721 break;
11722
11723 case ASHIFTRT:
11724 /* If this is an equality comparison with zero, we can do this
11725 as a logical shift, which might be much simpler. */
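	  /* Illustrative example (not from the original sources):
	     (eq (ashiftrt X (const_int 5)) (const_int 0)) holds exactly when
	     bits 5 and above of X are zero, so it can be rewritten as
	     (eq (lshiftrt X (const_int 5)) (const_int 0)).  */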
11726 if (equality_comparison_p && const_op == 0
11727 && CONST_INT_P (XEXP (op0, 1)))
11728 {
11729 op0 = simplify_shift_const (NULL_RTX, LSHIFTRT, mode,
11730 XEXP (op0, 0),
11731 INTVAL (XEXP (op0, 1)));
11732 continue;
11733 }
11734
11735 /* If OP0 is a sign extension and CODE is not an unsigned comparison,
11736 do the comparison in a narrower mode. */
11737 if (! unsigned_comparison_p
11738 && CONST_INT_P (XEXP (op0, 1))
11739 && GET_CODE (XEXP (op0, 0)) == ASHIFT
11740 && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
11741 && (tmode = mode_for_size (mode_width - INTVAL (XEXP (op0, 1)),
11742 MODE_INT, 1)) != BLKmode
11743 && (((unsigned HOST_WIDE_INT) const_op
11744 + (GET_MODE_MASK (tmode) >> 1) + 1)
11745 <= GET_MODE_MASK (tmode)))
11746 {
11747 op0 = gen_lowpart (tmode, XEXP (XEXP (op0, 0), 0));
11748 continue;
11749 }
11750
11751 /* Likewise if OP0 is a PLUS of a sign extension with a
11752 constant, which is usually represented with the PLUS
11753 between the shifts. */
11754 if (! unsigned_comparison_p
11755 && CONST_INT_P (XEXP (op0, 1))
11756 && GET_CODE (XEXP (op0, 0)) == PLUS
11757 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
11758 && GET_CODE (XEXP (XEXP (op0, 0), 0)) == ASHIFT
11759 && XEXP (op0, 1) == XEXP (XEXP (XEXP (op0, 0), 0), 1)
11760 && (tmode = mode_for_size (mode_width - INTVAL (XEXP (op0, 1)),
11761 MODE_INT, 1)) != BLKmode
11762 && (((unsigned HOST_WIDE_INT) const_op
11763 + (GET_MODE_MASK (tmode) >> 1) + 1)
11764 <= GET_MODE_MASK (tmode)))
11765 {
11766 rtx inner = XEXP (XEXP (XEXP (op0, 0), 0), 0);
11767 rtx add_const = XEXP (XEXP (op0, 0), 1);
11768 rtx new_const = simplify_gen_binary (ASHIFTRT, GET_MODE (op0),
11769 add_const, XEXP (op0, 1));
11770
11771 op0 = simplify_gen_binary (PLUS, tmode,
11772 gen_lowpart (tmode, inner),
11773 new_const);
11774 continue;
11775 }
11776
11777 /* ... fall through ... */
11778 case LSHIFTRT:
11779 /* If we have (compare (xshiftrt FOO N) (const_int C)) and
11780 the low order N bits of FOO are known to be zero, we can do this
11781 by comparing FOO with C shifted left N bits so long as no
11782 overflow occurs. Even if the low order N bits of FOO aren't known
11783 to be zero, if the comparison is >= or < we can use the same
11784 	 optimization, and for > or <= we can do so by setting all the low
11785 order N bits in the comparison constant. */
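	  /* Illustrative example (not from the original sources): for an
	     unsigned comparison, (ltu (lshiftrt X (const_int 4)) (const_int 3))
	     becomes (ltu X (const_int 48)), since X >> 4 < 3 exactly when
	     X < 48.  */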
11786 if (CONST_INT_P (XEXP (op0, 1))
11787 && INTVAL (XEXP (op0, 1)) > 0
11788 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
11789 && mode_width <= HOST_BITS_PER_WIDE_INT
11790 && (((unsigned HOST_WIDE_INT) const_op
11791 + (GET_CODE (op0) != LSHIFTRT
11792 ? ((GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1)) >> 1)
11793 + 1)
11794 : 0))
11795 <= GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1))))
11796 {
11797 unsigned HOST_WIDE_INT low_bits
11798 = (nonzero_bits (XEXP (op0, 0), mode)
11799 & (((unsigned HOST_WIDE_INT) 1
11800 << INTVAL (XEXP (op0, 1))) - 1));
11801 if (low_bits == 0 || !equality_comparison_p)
11802 {
11803 /* If the shift was logical, then we must make the condition
11804 unsigned. */
11805 if (GET_CODE (op0) == LSHIFTRT)
11806 code = unsigned_condition (code);
11807
11808 const_op <<= INTVAL (XEXP (op0, 1));
11809 if (low_bits != 0
11810 && (code == GT || code == GTU
11811 || code == LE || code == LEU))
11812 const_op
11813 |= (((HOST_WIDE_INT) 1 << INTVAL (XEXP (op0, 1))) - 1);
11814 op1 = GEN_INT (const_op);
11815 op0 = XEXP (op0, 0);
11816 continue;
11817 }
11818 }
11819
11820 /* If we are using this shift to extract just the sign bit, we
11821 can replace this with an LT or GE comparison. */
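	  /* Illustrative example (not from the original sources): in SImode,
	     (ne (lshiftrt X (const_int 31)) (const_int 0)) becomes
	     (lt X (const_int 0)).  */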
11822 if (const_op == 0
11823 && (equality_comparison_p || sign_bit_comparison_p)
11824 && CONST_INT_P (XEXP (op0, 1))
11825 && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
11826 {
11827 op0 = XEXP (op0, 0);
11828 code = (code == NE || code == GT ? LT : GE);
11829 continue;
11830 }
11831 break;
11832
11833 default:
11834 break;
11835 }
11836
11837 break;
11838 }
11839
11840 /* Now make any compound operations involved in this comparison. Then,
11841 check for an outmost SUBREG on OP0 that is not doing anything or is
11842 paradoxical. The latter transformation must only be performed when
11843 it is known that the "extra" bits will be the same in op0 and op1 or
11844 that they don't matter. There are three cases to consider:
11845
11846 1. SUBREG_REG (op0) is a register. In this case the bits are don't
11847 care bits and we can assume they have any convenient value. So
11848 making the transformation is safe.
11849
11850 2. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is not defined.
11851 In this case the upper bits of op0 are undefined. We should not make
11852 the simplification in that case as we do not know the contents of
11853 those bits.
11854
11855 3. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is defined and not
11856 UNKNOWN. In that case we know those bits are zeros or ones. We must
11857 also be sure that they are the same as the upper bits of op1.
11858
11859 We can never remove a SUBREG for a non-equality comparison because
11860 the sign bit is in a different place in the underlying object. */
11861
11862 op0 = make_compound_operation (op0, op1 == const0_rtx ? COMPARE : SET);
11863 op1 = make_compound_operation (op1, SET);
11864
11865 if (GET_CODE (op0) == SUBREG && subreg_lowpart_p (op0)
11866 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT
11867 && GET_MODE_CLASS (GET_MODE (SUBREG_REG (op0))) == MODE_INT
11868 && (code == NE || code == EQ))
11869 {
11870 if (paradoxical_subreg_p (op0))
11871 {
11872 /* For paradoxical subregs, allow case 1 as above. Case 3 isn't
11873 implemented. */
11874 if (REG_P (SUBREG_REG (op0)))
11875 {
11876 op0 = SUBREG_REG (op0);
11877 op1 = gen_lowpart (GET_MODE (op0), op1);
11878 }
11879 }
11880 else if ((GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op0)))
11881 <= HOST_BITS_PER_WIDE_INT)
11882 && (nonzero_bits (SUBREG_REG (op0),
11883 GET_MODE (SUBREG_REG (op0)))
11884 & ~GET_MODE_MASK (GET_MODE (op0))) == 0)
11885 {
11886 tem = gen_lowpart (GET_MODE (SUBREG_REG (op0)), op1);
11887
11888 if ((nonzero_bits (tem, GET_MODE (SUBREG_REG (op0)))
11889 & ~GET_MODE_MASK (GET_MODE (op0))) == 0)
11890 op0 = SUBREG_REG (op0), op1 = tem;
11891 }
11892 }
11893
11894 /* We now do the opposite procedure: Some machines don't have compare
11895 insns in all modes. If OP0's mode is an integer mode smaller than a
11896 word and we can't do a compare in that mode, see if there is a larger
11897 mode for which we can do the compare. There are a number of cases in
11898 which we can use the wider mode. */
11899
11900 mode = GET_MODE (op0);
11901 if (mode != VOIDmode && GET_MODE_CLASS (mode) == MODE_INT
11902 && GET_MODE_SIZE (mode) < UNITS_PER_WORD
11903 && ! have_insn_for (COMPARE, mode))
11904 for (tmode = GET_MODE_WIDER_MODE (mode);
11905 (tmode != VOIDmode && HWI_COMPUTABLE_MODE_P (tmode));
11906 tmode = GET_MODE_WIDER_MODE (tmode))
11907 if (have_insn_for (COMPARE, tmode))
11908 {
11909 int zero_extended;
11910
11911 /* If this is a test for negative, we can make an explicit
11912 test of the sign bit. Test this first so we can use
11913 a paradoxical subreg to extend OP0. */
11914
11915 if (op1 == const0_rtx && (code == LT || code == GE)
11916 && HWI_COMPUTABLE_MODE_P (mode))
11917 {
11918 op0 = simplify_gen_binary (AND, tmode,
11919 gen_lowpart (tmode, op0),
11920 GEN_INT ((unsigned HOST_WIDE_INT) 1
11921 << (GET_MODE_BITSIZE (mode)
11922 - 1)));
11923 code = (code == LT) ? NE : EQ;
11924 break;
11925 }
11926
11927 /* If the only nonzero bits in OP0 and OP1 are those in the
11928 narrower mode and this is an equality or unsigned comparison,
11929 we can use the wider mode. Similarly for sign-extended
11930 values, in which case it is true for all comparisons. */
11931 zero_extended = ((code == EQ || code == NE
11932 || code == GEU || code == GTU
11933 || code == LEU || code == LTU)
11934 && (nonzero_bits (op0, tmode)
11935 & ~GET_MODE_MASK (mode)) == 0
11936 && ((CONST_INT_P (op1)
11937 || (nonzero_bits (op1, tmode)
11938 & ~GET_MODE_MASK (mode)) == 0)));
11939
11940 if (zero_extended
11941 || ((num_sign_bit_copies (op0, tmode)
11942 > (unsigned int) (GET_MODE_PRECISION (tmode)
11943 - GET_MODE_PRECISION (mode)))
11944 && (num_sign_bit_copies (op1, tmode)
11945 > (unsigned int) (GET_MODE_PRECISION (tmode)
11946 - GET_MODE_PRECISION (mode)))))
11947 {
11948 /* If OP0 is an AND and we don't have an AND in MODE either,
11949 make a new AND in the proper mode. */
11950 if (GET_CODE (op0) == AND
11951 && !have_insn_for (AND, mode))
11952 op0 = simplify_gen_binary (AND, tmode,
11953 gen_lowpart (tmode,
11954 XEXP (op0, 0)),
11955 gen_lowpart (tmode,
11956 XEXP (op0, 1)));
11957 else
11958 {
11959 if (zero_extended)
11960 {
11961 op0 = simplify_gen_unary (ZERO_EXTEND, tmode, op0, mode);
11962 op1 = simplify_gen_unary (ZERO_EXTEND, tmode, op1, mode);
11963 }
11964 else
11965 {
11966 op0 = simplify_gen_unary (SIGN_EXTEND, tmode, op0, mode);
11967 op1 = simplify_gen_unary (SIGN_EXTEND, tmode, op1, mode);
11968 }
11969 break;
11970 }
11971 }
11972 }
11973
11974 #ifdef CANONICALIZE_COMPARISON
11975 /* If this machine only supports a subset of valid comparisons, see if we
11976 can convert an unsupported one into a supported one. */
11977 CANONICALIZE_COMPARISON (code, op0, op1);
11978 #endif
11979
11980 *pop0 = op0;
11981 *pop1 = op1;
11982
11983 return code;
11984 }
11985 \f
11986 /* Utility function for record_value_for_reg. Count number of
11987 rtxs in X. */
11988 static int
11989 count_rtxs (rtx x)
11990 {
11991 enum rtx_code code = GET_CODE (x);
11992 const char *fmt;
11993 int i, j, ret = 1;
11994
11995 if (GET_RTX_CLASS (code) == RTX_BIN_ARITH
11996 || GET_RTX_CLASS (code) == RTX_COMM_ARITH)
11997 {
11998 rtx x0 = XEXP (x, 0);
11999 rtx x1 = XEXP (x, 1);
12000
12001 if (x0 == x1)
12002 return 1 + 2 * count_rtxs (x0);
12003
12004 if ((GET_RTX_CLASS (GET_CODE (x1)) == RTX_BIN_ARITH
12005 || GET_RTX_CLASS (GET_CODE (x1)) == RTX_COMM_ARITH)
12006 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
12007 return 2 + 2 * count_rtxs (x0)
12008 	     + count_rtxs (x0 == XEXP (x1, 0)
12009 ? XEXP (x1, 1) : XEXP (x1, 0));
12010
12011 if ((GET_RTX_CLASS (GET_CODE (x0)) == RTX_BIN_ARITH
12012 || GET_RTX_CLASS (GET_CODE (x0)) == RTX_COMM_ARITH)
12013 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
12014 return 2 + 2 * count_rtxs (x1)
12015 	     + count_rtxs (x1 == XEXP (x0, 0)
12016 ? XEXP (x0, 1) : XEXP (x0, 0));
12017 }
12018
12019 fmt = GET_RTX_FORMAT (code);
12020 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
12021 if (fmt[i] == 'e')
12022 ret += count_rtxs (XEXP (x, i));
12023 else if (fmt[i] == 'E')
12024 for (j = 0; j < XVECLEN (x, i); j++)
12025 ret += count_rtxs (XVECEXP (x, i, j));
12026
12027 return ret;
12028 }
12029 \f
12030 /* Utility function for the following routine.  Called when X is part of a value
12031 being stored into last_set_value. Sets last_set_table_tick
12032 for each register mentioned. Similar to mention_regs in cse.c */
12033
12034 static void
12035 update_table_tick (rtx x)
12036 {
12037 enum rtx_code code = GET_CODE (x);
12038 const char *fmt = GET_RTX_FORMAT (code);
12039 int i, j;
12040
12041 if (code == REG)
12042 {
12043 unsigned int regno = REGNO (x);
12044 unsigned int endregno = END_REGNO (x);
12045 unsigned int r;
12046
12047 for (r = regno; r < endregno; r++)
12048 {
12049 reg_stat_type *rsp = &VEC_index (reg_stat_type, reg_stat, r);
12050 rsp->last_set_table_tick = label_tick;
12051 }
12052
12053 return;
12054 }
12055
12056 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
12057 if (fmt[i] == 'e')
12058 {
12059 /* Check for identical subexpressions. If x contains
12060 identical subexpression we only have to traverse one of
12061 them. */
12062 if (i == 0 && ARITHMETIC_P (x))
12063 {
12064 /* Note that at this point x1 has already been
12065 processed. */
12066 rtx x0 = XEXP (x, 0);
12067 rtx x1 = XEXP (x, 1);
12068
12069 /* If x0 and x1 are identical then there is no need to
12070 process x0. */
12071 if (x0 == x1)
12072 break;
12073
12074 /* If x0 is identical to a subexpression of x1 then while
12075 processing x1, x0 has already been processed. Thus we
12076 are done with x. */
12077 if (ARITHMETIC_P (x1)
12078 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
12079 break;
12080
12081 /* If x1 is identical to a subexpression of x0 then we
12082 still have to process the rest of x0. */
12083 if (ARITHMETIC_P (x0)
12084 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
12085 {
12086 update_table_tick (XEXP (x0, x1 == XEXP (x0, 0) ? 1 : 0));
12087 break;
12088 }
12089 }
12090
12091 update_table_tick (XEXP (x, i));
12092 }
12093 else if (fmt[i] == 'E')
12094 for (j = 0; j < XVECLEN (x, i); j++)
12095 update_table_tick (XVECEXP (x, i, j));
12096 }
12097
12098 /* Record that REG is set to VALUE in insn INSN. If VALUE is zero, we
12099 are saying that the register is clobbered and we no longer know its
12100 value. If INSN is zero, don't update reg_stat[].last_set; this is
12101 only permitted with VALUE also zero and is used to invalidate the
12102 register. */
12103
12104 static void
12105 record_value_for_reg (rtx reg, rtx insn, rtx value)
12106 {
12107 unsigned int regno = REGNO (reg);
12108 unsigned int endregno = END_REGNO (reg);
12109 unsigned int i;
12110 reg_stat_type *rsp;
12111
12112 /* If VALUE contains REG and we have a previous value for REG, substitute
12113 the previous value. */
12114 if (value && insn && reg_overlap_mentioned_p (reg, value))
12115 {
12116 rtx tem;
12117
12118 /* Set things up so get_last_value is allowed to see anything set up to
12119 our insn. */
12120 subst_low_luid = DF_INSN_LUID (insn);
12121 tem = get_last_value (reg);
12122
12123 /* If TEM is simply a binary operation with two CLOBBERs as operands,
12124 it isn't going to be useful and will take a lot of time to process,
12125 so just use the CLOBBER. */
12126
12127 if (tem)
12128 {
12129 if (ARITHMETIC_P (tem)
12130 && GET_CODE (XEXP (tem, 0)) == CLOBBER
12131 && GET_CODE (XEXP (tem, 1)) == CLOBBER)
12132 tem = XEXP (tem, 0);
12133 else if (count_occurrences (value, reg, 1) >= 2)
12134 {
12135 /* If there are two or more occurrences of REG in VALUE,
12136 prevent the value from growing too much. */
12137 if (count_rtxs (tem) > MAX_LAST_VALUE_RTL)
12138 tem = gen_rtx_CLOBBER (GET_MODE (tem), const0_rtx);
12139 }
12140
12141 value = replace_rtx (copy_rtx (value), reg, tem);
12142 }
12143 }
12144
12145 /* For each register modified, show we don't know its value, that
12146 we don't know about its bitwise content, that its value has been
12147 updated, and that we don't know the location of the death of the
12148 register. */
12149 for (i = regno; i < endregno; i++)
12150 {
12151 rsp = &VEC_index (reg_stat_type, reg_stat, i);
12152
12153 if (insn)
12154 rsp->last_set = insn;
12155
12156 rsp->last_set_value = 0;
12157 rsp->last_set_mode = VOIDmode;
12158 rsp->last_set_nonzero_bits = 0;
12159 rsp->last_set_sign_bit_copies = 0;
12160 rsp->last_death = 0;
12161 rsp->truncated_to_mode = VOIDmode;
12162 }
12163
12164 /* Mark registers that are being referenced in this value. */
12165 if (value)
12166 update_table_tick (value);
12167
12168 /* Now update the status of each register being set.
12169 If someone is using this register in this block, set this register
12170 to invalid since we will get confused between the two lives in this
12171 basic block. This makes using this register always invalid. In cse, we
12172 scan the table to invalidate all entries using this register, but this
12173 is too much work for us. */
12174
12175 for (i = regno; i < endregno; i++)
12176 {
12177 rsp = &VEC_index (reg_stat_type, reg_stat, i);
12178 rsp->last_set_label = label_tick;
12179 if (!insn
12180 || (value && rsp->last_set_table_tick >= label_tick_ebb_start))
12181 rsp->last_set_invalid = 1;
12182 else
12183 rsp->last_set_invalid = 0;
12184 }
12185
12186 /* The value being assigned might refer to X (like in "x++;"). In that
12187 case, we must replace it with (clobber (const_int 0)) to prevent
12188 infinite loops. */
12189 rsp = &VEC_index (reg_stat_type, reg_stat, regno);
12190 if (value && !get_last_value_validate (&value, insn, label_tick, 0))
12191 {
12192 value = copy_rtx (value);
12193 if (!get_last_value_validate (&value, insn, label_tick, 1))
12194 value = 0;
12195 }
12196
12197 /* For the main register being modified, update the value, the mode, the
12198 nonzero bits, and the number of sign bit copies. */
12199
12200 rsp->last_set_value = value;
12201
12202 if (value)
12203 {
12204 enum machine_mode mode = GET_MODE (reg);
12205 subst_low_luid = DF_INSN_LUID (insn);
12206 rsp->last_set_mode = mode;
12207 if (GET_MODE_CLASS (mode) == MODE_INT
12208 && HWI_COMPUTABLE_MODE_P (mode))
12209 mode = nonzero_bits_mode;
12210 rsp->last_set_nonzero_bits = nonzero_bits (value, mode);
12211 rsp->last_set_sign_bit_copies
12212 = num_sign_bit_copies (value, GET_MODE (reg));
12213 }
12214 }
12215
12216 /* Called via note_stores from record_dead_and_set_regs to handle one
12217 SET or CLOBBER in an insn. DATA is the instruction in which the
12218 set is occurring. */
12219
12220 static void
12221 record_dead_and_set_regs_1 (rtx dest, const_rtx setter, void *data)
12222 {
12223 rtx record_dead_insn = (rtx) data;
12224
12225 if (GET_CODE (dest) == SUBREG)
12226 dest = SUBREG_REG (dest);
12227
12228 if (!record_dead_insn)
12229 {
12230 if (REG_P (dest))
12231 record_value_for_reg (dest, NULL_RTX, NULL_RTX);
12232 return;
12233 }
12234
12235 if (REG_P (dest))
12236 {
12237 /* If we are setting the whole register, we know its value. Otherwise
12238 show that we don't know the value. We can handle SUBREG in
12239 some cases. */
12240 if (GET_CODE (setter) == SET && dest == SET_DEST (setter))
12241 record_value_for_reg (dest, record_dead_insn, SET_SRC (setter));
12242 else if (GET_CODE (setter) == SET
12243 && GET_CODE (SET_DEST (setter)) == SUBREG
12244 && SUBREG_REG (SET_DEST (setter)) == dest
12245 && GET_MODE_PRECISION (GET_MODE (dest)) <= BITS_PER_WORD
12246 && subreg_lowpart_p (SET_DEST (setter)))
12247 record_value_for_reg (dest, record_dead_insn,
12248 gen_lowpart (GET_MODE (dest),
12249 SET_SRC (setter)));
12250 else
12251 record_value_for_reg (dest, record_dead_insn, NULL_RTX);
12252 }
12253 else if (MEM_P (dest)
12254 /* Ignore pushes, they clobber nothing. */
12255 && ! push_operand (dest, GET_MODE (dest)))
12256 mem_last_set = DF_INSN_LUID (record_dead_insn);
12257 }
12258
12259 /* Update the records of when each REG was most recently set or killed
12260 for the things done by INSN. This is the last thing done in processing
12261 INSN in the combiner loop.
12262
12263 We update reg_stat[], in particular fields last_set, last_set_value,
12264 last_set_mode, last_set_nonzero_bits, last_set_sign_bit_copies,
12265 last_death, and also the similar information mem_last_set (which insn
12266 most recently modified memory) and last_call_luid (which insn was the
12267 most recent subroutine call). */
12268
12269 static void
12270 record_dead_and_set_regs (rtx insn)
12271 {
12272 rtx link;
12273 unsigned int i;
12274
12275 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
12276 {
12277 if (REG_NOTE_KIND (link) == REG_DEAD
12278 && REG_P (XEXP (link, 0)))
12279 {
12280 unsigned int regno = REGNO (XEXP (link, 0));
12281 unsigned int endregno = END_REGNO (XEXP (link, 0));
12282
12283 for (i = regno; i < endregno; i++)
12284 {
12285 reg_stat_type *rsp;
12286
12287 rsp = &VEC_index (reg_stat_type, reg_stat, i);
12288 rsp->last_death = insn;
12289 }
12290 }
12291 else if (REG_NOTE_KIND (link) == REG_INC)
12292 record_value_for_reg (XEXP (link, 0), insn, NULL_RTX);
12293 }
12294
12295 if (CALL_P (insn))
12296 {
12297 hard_reg_set_iterator hrsi;
12298 EXECUTE_IF_SET_IN_HARD_REG_SET (regs_invalidated_by_call, 0, i, hrsi)
12299 {
12300 reg_stat_type *rsp;
12301
12302 rsp = &VEC_index (reg_stat_type, reg_stat, i);
12303 rsp->last_set_invalid = 1;
12304 rsp->last_set = insn;
12305 rsp->last_set_value = 0;
12306 rsp->last_set_mode = VOIDmode;
12307 rsp->last_set_nonzero_bits = 0;
12308 rsp->last_set_sign_bit_copies = 0;
12309 rsp->last_death = 0;
12310 rsp->truncated_to_mode = VOIDmode;
12311 }
12312
12313 last_call_luid = mem_last_set = DF_INSN_LUID (insn);
12314
12315 /* We can't combine into a call pattern. Remember, though, that
12316 the return value register is set at this LUID. We could
12317 still replace a register with the return value from the
12318 wrong subroutine call! */
12319 note_stores (PATTERN (insn), record_dead_and_set_regs_1, NULL_RTX);
12320 }
12321 else
12322 note_stores (PATTERN (insn), record_dead_and_set_regs_1, insn);
12323 }
12324
12325 /* If a SUBREG has the promoted bit set, it is in fact a property of the
12326 register present in the SUBREG, so for each such SUBREG go back and
12327 adjust nonzero and sign bit information of the registers that are
12328 known to have some zero/sign bits set.
12329
12330 This is needed because when combine blows the SUBREGs away, the
12331 information on zero/sign bits is lost and further combines can be
12332 missed because of that. */
12333
12334 static void
12335 record_promoted_value (rtx insn, rtx subreg)
12336 {
12337 struct insn_link *links;
12338 rtx set;
12339 unsigned int regno = REGNO (SUBREG_REG (subreg));
12340 enum machine_mode mode = GET_MODE (subreg);
12341
12342 if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT)
12343 return;
12344
12345 for (links = LOG_LINKS (insn); links;)
12346 {
12347 reg_stat_type *rsp;
12348
12349 insn = links->insn;
12350 set = single_set (insn);
12351
12352 if (! set || !REG_P (SET_DEST (set))
12353 || REGNO (SET_DEST (set)) != regno
12354 || GET_MODE (SET_DEST (set)) != GET_MODE (SUBREG_REG (subreg)))
12355 {
12356 links = links->next;
12357 continue;
12358 }
12359
12360 rsp = &VEC_index (reg_stat_type, reg_stat, regno);
12361 if (rsp->last_set == insn)
12362 {
12363 if (SUBREG_PROMOTED_UNSIGNED_P (subreg) > 0)
12364 rsp->last_set_nonzero_bits &= GET_MODE_MASK (mode);
12365 }
12366
12367 if (REG_P (SET_SRC (set)))
12368 {
12369 regno = REGNO (SET_SRC (set));
12370 links = LOG_LINKS (insn);
12371 }
12372 else
12373 break;
12374 }
12375 }
12376
12377 /* Check if X, a register, is known to contain a value already
12378 truncated to MODE. In this case we can use a subreg to refer to
12379 the truncated value even though in the generic case we would need
12380 an explicit truncation. */
12381
12382 static bool
12383 reg_truncated_to_mode (enum machine_mode mode, const_rtx x)
12384 {
12385 reg_stat_type *rsp = &VEC_index (reg_stat_type, reg_stat, REGNO (x));
12386 enum machine_mode truncated = rsp->truncated_to_mode;
12387
12388 if (truncated == 0
12389 || rsp->truncation_label < label_tick_ebb_start)
12390 return false;
12391 if (GET_MODE_SIZE (truncated) <= GET_MODE_SIZE (mode))
12392 return true;
12393 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, truncated))
12394 return true;
12395 return false;
12396 }
12397
12398 /* Callback for for_each_rtx. If *P is a hard reg or a subreg record the mode
12399 that the register is accessed in. For non-TRULY_NOOP_TRUNCATION targets we
12400 might be able to turn a truncate into a subreg using this information.
12401 Return -1 if traversing *P is complete or 0 otherwise. */
12402
12403 static int
12404 record_truncated_value (rtx *p, void *data ATTRIBUTE_UNUSED)
12405 {
12406 rtx x = *p;
12407 enum machine_mode truncated_mode;
12408 reg_stat_type *rsp;
12409
12410 if (GET_CODE (x) == SUBREG && REG_P (SUBREG_REG (x)))
12411 {
12412 enum machine_mode original_mode = GET_MODE (SUBREG_REG (x));
12413 truncated_mode = GET_MODE (x);
12414
12415 if (GET_MODE_SIZE (original_mode) <= GET_MODE_SIZE (truncated_mode))
12416 return -1;
12417
12418 if (TRULY_NOOP_TRUNCATION_MODES_P (truncated_mode, original_mode))
12419 return -1;
12420
12421 x = SUBREG_REG (x);
12422 }
12423 /* ??? For hard-regs we now record everything. We might be able to
12424 optimize this using last_set_mode. */
12425 else if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
12426 truncated_mode = GET_MODE (x);
12427 else
12428 return 0;
12429
12430 rsp = &VEC_index (reg_stat_type, reg_stat, REGNO (x));
12431 if (rsp->truncated_to_mode == 0
12432 || rsp->truncation_label < label_tick_ebb_start
12433 || (GET_MODE_SIZE (truncated_mode)
12434 < GET_MODE_SIZE (rsp->truncated_to_mode)))
12435 {
12436 rsp->truncated_to_mode = truncated_mode;
12437 rsp->truncation_label = label_tick;
12438 }
12439
12440 return -1;
12441 }
12442
12443 /* Callback for note_uses. Find hardregs and subregs of pseudos and
12444    the modes they are used in.  This can help turn TRUNCATEs into
12445 SUBREGs. */
12446
12447 static void
12448 record_truncated_values (rtx *x, void *data ATTRIBUTE_UNUSED)
12449 {
12450 for_each_rtx (x, record_truncated_value, NULL);
12451 }
12452
12453 /* Scan X for promoted SUBREGs. For each one found,
12454 note what it implies to the registers used in it. */
12455
12456 static void
12457 check_promoted_subreg (rtx insn, rtx x)
12458 {
12459 if (GET_CODE (x) == SUBREG
12460 && SUBREG_PROMOTED_VAR_P (x)
12461 && REG_P (SUBREG_REG (x)))
12462 record_promoted_value (insn, x);
12463 else
12464 {
12465 const char *format = GET_RTX_FORMAT (GET_CODE (x));
12466 int i, j;
12467
12468 for (i = 0; i < GET_RTX_LENGTH (GET_CODE (x)); i++)
12469 switch (format[i])
12470 {
12471 case 'e':
12472 check_promoted_subreg (insn, XEXP (x, i));
12473 break;
12474 case 'V':
12475 case 'E':
12476 if (XVEC (x, i) != 0)
12477 for (j = 0; j < XVECLEN (x, i); j++)
12478 check_promoted_subreg (insn, XVECEXP (x, i, j));
12479 break;
12480 }
12481 }
12482 }
12483 \f
12484 /* Verify that all the registers and memory references mentioned in *LOC are
12485 still valid. *LOC was part of a value set in INSN when label_tick was
12486 equal to TICK. Return 0 if some are not. If REPLACE is nonzero, replace
12487 the invalid references with (clobber (const_int 0)) and return 1. This
12488 replacement is useful because we often can get useful information about
12489 the form of a value (e.g., if it was produced by a shift that always
12490 produces -1 or 0) even though we don't know exactly what registers it
12491 was produced from. */
12492
12493 static int
12494 get_last_value_validate (rtx *loc, rtx insn, int tick, int replace)
12495 {
12496 rtx x = *loc;
12497 const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
12498 int len = GET_RTX_LENGTH (GET_CODE (x));
12499 int i, j;
12500
12501 if (REG_P (x))
12502 {
12503 unsigned int regno = REGNO (x);
12504 unsigned int endregno = END_REGNO (x);
12505 unsigned int j;
12506
12507 for (j = regno; j < endregno; j++)
12508 {
12509 reg_stat_type *rsp = &VEC_index (reg_stat_type, reg_stat, j);
12510 if (rsp->last_set_invalid
12511 /* If this is a pseudo-register that was only set once and not
12512 live at the beginning of the function, it is always valid. */
12513 || (! (regno >= FIRST_PSEUDO_REGISTER
12514 && REG_N_SETS (regno) == 1
12515 && (!REGNO_REG_SET_P
12516 (DF_LR_IN (ENTRY_BLOCK_PTR->next_bb), regno)))
12517 && rsp->last_set_label > tick))
12518 {
12519 if (replace)
12520 *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
12521 return replace;
12522 }
12523 }
12524
12525 return 1;
12526 }
12527 /* If this is a memory reference, make sure that there were no stores after
12528 it that might have clobbered the value. We don't have alias info, so we
12529 assume any store invalidates it. Moreover, we only have local UIDs, so
12530 we also assume that there were stores in the intervening basic blocks. */
12531 else if (MEM_P (x) && !MEM_READONLY_P (x)
12532 && (tick != label_tick || DF_INSN_LUID (insn) <= mem_last_set))
12533 {
12534 if (replace)
12535 *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
12536 return replace;
12537 }
12538
12539 for (i = 0; i < len; i++)
12540 {
12541 if (fmt[i] == 'e')
12542 {
12543 /* Check for identical subexpressions. If x contains
12544 identical subexpression we only have to traverse one of
12545 them. */
12546 if (i == 1 && ARITHMETIC_P (x))
12547 {
12548 /* Note that at this point x0 has already been checked
12549 and found valid. */
12550 rtx x0 = XEXP (x, 0);
12551 rtx x1 = XEXP (x, 1);
12552
12553 /* If x0 and x1 are identical then x is also valid. */
12554 if (x0 == x1)
12555 return 1;
12556
12557 /* If x1 is identical to a subexpression of x0 then
12558 while checking x0, x1 has already been checked. Thus
12559 	     it is valid and so is x.  */
12560 if (ARITHMETIC_P (x0)
12561 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
12562 return 1;
12563
12564 /* If x0 is identical to a subexpression of x1 then x is
12565 valid iff the rest of x1 is valid. */
12566 if (ARITHMETIC_P (x1)
12567 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
12568 return
12569 get_last_value_validate (&XEXP (x1,
12570 x0 == XEXP (x1, 0) ? 1 : 0),
12571 insn, tick, replace);
12572 }
12573
12574 if (get_last_value_validate (&XEXP (x, i), insn, tick,
12575 replace) == 0)
12576 return 0;
12577 }
12578 else if (fmt[i] == 'E')
12579 for (j = 0; j < XVECLEN (x, i); j++)
12580 if (get_last_value_validate (&XVECEXP (x, i, j),
12581 insn, tick, replace) == 0)
12582 return 0;
12583 }
12584
12585 /* If we haven't found a reason for it to be invalid, it is valid. */
12586 return 1;
12587 }
12588
12589 /* Get the last value assigned to X, if known. Some registers
12590 in the value may be replaced with (clobber (const_int 0)) if their value
12591    is no longer known reliably.  */
12592
12593 static rtx
12594 get_last_value (const_rtx x)
12595 {
12596 unsigned int regno;
12597 rtx value;
12598 reg_stat_type *rsp;
12599
12600 /* If this is a non-paradoxical SUBREG, get the value of its operand and
12601 then convert it to the desired mode. If this is a paradoxical SUBREG,
12602 we cannot predict what values the "extra" bits might have. */
12603 if (GET_CODE (x) == SUBREG
12604 && subreg_lowpart_p (x)
12605 && !paradoxical_subreg_p (x)
12606 && (value = get_last_value (SUBREG_REG (x))) != 0)
12607 return gen_lowpart (GET_MODE (x), value);
12608
12609 if (!REG_P (x))
12610 return 0;
12611
12612 regno = REGNO (x);
12613 rsp = &VEC_index (reg_stat_type, reg_stat, regno);
12614 value = rsp->last_set_value;
12615
12616 /* If we don't have a value, or if it isn't for this basic block and
12617      it's either a hard register, set more than once, or it's live
12618 at the beginning of the function, return 0.
12619
12620 Because if it's not live at the beginning of the function then the reg
12621 is always set before being used (is never used without being set).
12622 And, if it's set only once, and it's always set before use, then all
12623 uses must have the same last value, even if it's not from this basic
12624 block. */
12625
12626 if (value == 0
12627 || (rsp->last_set_label < label_tick_ebb_start
12628 && (regno < FIRST_PSEUDO_REGISTER
12629 || REG_N_SETS (regno) != 1
12630 || REGNO_REG_SET_P
12631 (DF_LR_IN (ENTRY_BLOCK_PTR->next_bb), regno))))
12632 return 0;
12633
12634 /* If the value was set in a later insn than the ones we are processing,
12635 we can't use it even if the register was only set once. */
12636 if (rsp->last_set_label == label_tick
12637 && DF_INSN_LUID (rsp->last_set) >= subst_low_luid)
12638 return 0;
12639
12640 /* If the value has all its registers valid, return it. */
12641 if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 0))
12642 return value;
12643
12644 /* Otherwise, make a copy and replace any invalid register with
12645 (clobber (const_int 0)). If that fails for some reason, return 0. */
12646
12647 value = copy_rtx (value);
12648 if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 1))
12649 return value;
12650
12651 return 0;
12652 }
12653 \f
12654 /* Return nonzero if expression X refers to a REG or to memory
12655 that is set in an instruction more recent than FROM_LUID. */
12656
12657 static int
12658 use_crosses_set_p (const_rtx x, int from_luid)
12659 {
12660 const char *fmt;
12661 int i;
12662 enum rtx_code code = GET_CODE (x);
12663
12664 if (code == REG)
12665 {
12666 unsigned int regno = REGNO (x);
12667 unsigned endreg = END_REGNO (x);
12668
12669 #ifdef PUSH_ROUNDING
12670 /* Don't allow uses of the stack pointer to be moved,
12671 because we don't know whether the move crosses a push insn. */
12672 if (regno == STACK_POINTER_REGNUM && PUSH_ARGS)
12673 return 1;
12674 #endif
12675 for (; regno < endreg; regno++)
12676 {
12677 reg_stat_type *rsp = &VEC_index (reg_stat_type, reg_stat, regno);
12678 if (rsp->last_set
12679 && rsp->last_set_label == label_tick
12680 && DF_INSN_LUID (rsp->last_set) > from_luid)
12681 return 1;
12682 }
12683 return 0;
12684 }
12685
12686 if (code == MEM && mem_last_set > from_luid)
12687 return 1;
12688
12689 fmt = GET_RTX_FORMAT (code);
12690
12691 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
12692 {
12693 if (fmt[i] == 'E')
12694 {
12695 int j;
12696 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
12697 if (use_crosses_set_p (XVECEXP (x, i, j), from_luid))
12698 return 1;
12699 }
12700 else if (fmt[i] == 'e'
12701 && use_crosses_set_p (XEXP (x, i), from_luid))
12702 return 1;
12703 }
12704 return 0;
12705 }
12706 \f
12707 /* Define three variables used for communication between the following
12708 routines. */
12709
12710 static unsigned int reg_dead_regno, reg_dead_endregno;
12711 static int reg_dead_flag;
12712
12713 /* Function called via note_stores from reg_dead_at_p.
12714
12715 If DEST is within [reg_dead_regno, reg_dead_endregno), set
12716    reg_dead_flag to 1 if X is a CLOBBER and to -1 if it is a SET.  */
12717
12718 static void
12719 reg_dead_at_p_1 (rtx dest, const_rtx x, void *data ATTRIBUTE_UNUSED)
12720 {
12721 unsigned int regno, endregno;
12722
12723 if (!REG_P (dest))
12724 return;
12725
12726 regno = REGNO (dest);
12727 endregno = END_REGNO (dest);
12728 if (reg_dead_endregno > regno && reg_dead_regno < endregno)
12729 reg_dead_flag = (GET_CODE (x) == CLOBBER) ? 1 : -1;
12730 }
12731
12732 /* Return nonzero if REG is known to be dead at INSN.
12733
12734 We scan backwards from INSN. If we hit a REG_DEAD note or a CLOBBER
12735 referencing REG, it is dead. If we hit a SET referencing REG, it is
12736 live. Otherwise, see if it is live or dead at the start of the basic
12737 block we are in. Hard regs marked as being live in NEWPAT_USED_REGS
12738 must be assumed to be always live. */
12739
12740 static int
12741 reg_dead_at_p (rtx reg, rtx insn)
12742 {
12743 basic_block block;
12744 unsigned int i;
12745
12746 /* Set variables for reg_dead_at_p_1. */
12747 reg_dead_regno = REGNO (reg);
12748 reg_dead_endregno = END_REGNO (reg);
12749
12750 reg_dead_flag = 0;
12751
12752 /* Check that reg isn't mentioned in NEWPAT_USED_REGS. For fixed registers
12753 we allow the machine description to decide whether use-and-clobber
12754 patterns are OK. */
12755 if (reg_dead_regno < FIRST_PSEUDO_REGISTER)
12756 {
12757 for (i = reg_dead_regno; i < reg_dead_endregno; i++)
12758 if (!fixed_regs[i] && TEST_HARD_REG_BIT (newpat_used_regs, i))
12759 return 0;
12760 }
12761
12762 /* Scan backwards until we find a REG_DEAD note, SET, CLOBBER, or
12763 beginning of basic block. */
12764 block = BLOCK_FOR_INSN (insn);
12765 for (;;)
12766 {
12767 if (INSN_P (insn))
12768 {
12769 note_stores (PATTERN (insn), reg_dead_at_p_1, NULL);
12770 if (reg_dead_flag)
12771 return reg_dead_flag == 1 ? 1 : 0;
12772
12773 if (find_regno_note (insn, REG_DEAD, reg_dead_regno))
12774 return 1;
12775 }
12776
12777 if (insn == BB_HEAD (block))
12778 break;
12779
12780 insn = PREV_INSN (insn);
12781 }
12782
12783 /* Look at live-in sets for the basic block that we were in. */
12784 for (i = reg_dead_regno; i < reg_dead_endregno; i++)
12785 if (REGNO_REG_SET_P (df_get_live_in (block), i))
12786 return 0;
12787
12788 return 1;
12789 }
12790 \f
12791 /* Note hard registers in X that are used. */
12792
12793 static void
12794 mark_used_regs_combine (rtx x)
12795 {
12796 RTX_CODE code = GET_CODE (x);
12797 unsigned int regno;
12798 int i;
12799
12800 switch (code)
12801 {
12802 case LABEL_REF:
12803 case SYMBOL_REF:
12804 case CONST:
12805 CASE_CONST_ANY:
12806 case PC:
12807 case ADDR_VEC:
12808 case ADDR_DIFF_VEC:
12809 case ASM_INPUT:
12810 #ifdef HAVE_cc0
12811 /* CC0 must die in the insn after it is set, so we don't need to take
12812 special note of it here. */
12813 case CC0:
12814 #endif
12815 return;
12816
12817 case CLOBBER:
12818 /* If we are clobbering a MEM, mark any hard registers inside the
12819 address as used. */
12820 if (MEM_P (XEXP (x, 0)))
12821 mark_used_regs_combine (XEXP (XEXP (x, 0), 0));
12822 return;
12823
12824 case REG:
12825 regno = REGNO (x);
12826 /* A hard reg in a wide mode may really be multiple registers.
12827 If so, mark all of them just like the first. */
12828 if (regno < FIRST_PSEUDO_REGISTER)
12829 {
12830 /* None of this applies to the stack, frame or arg pointers. */
12831 if (regno == STACK_POINTER_REGNUM
12832 #if !HARD_FRAME_POINTER_IS_FRAME_POINTER
12833 || regno == HARD_FRAME_POINTER_REGNUM
12834 #endif
12835 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
12836 || (regno == ARG_POINTER_REGNUM && fixed_regs[regno])
12837 #endif
12838 || regno == FRAME_POINTER_REGNUM)
12839 return;
12840
12841 add_to_hard_reg_set (&newpat_used_regs, GET_MODE (x), regno);
12842 }
12843 return;
12844
12845 case SET:
12846 {
12847 /* If setting a MEM, or a SUBREG of a MEM, then note any hard regs in
12848 the address. */
12849 rtx testreg = SET_DEST (x);
12850
12851 while (GET_CODE (testreg) == SUBREG
12852 || GET_CODE (testreg) == ZERO_EXTRACT
12853 || GET_CODE (testreg) == STRICT_LOW_PART)
12854 testreg = XEXP (testreg, 0);
12855
12856 if (MEM_P (testreg))
12857 mark_used_regs_combine (XEXP (testreg, 0));
12858
12859 mark_used_regs_combine (SET_SRC (x));
12860 }
12861 return;
12862
12863 default:
12864 break;
12865 }
12866
12867 /* Recursively scan the operands of this expression. */
12868
12869 {
12870 const char *fmt = GET_RTX_FORMAT (code);
12871
12872 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
12873 {
12874 if (fmt[i] == 'e')
12875 mark_used_regs_combine (XEXP (x, i));
12876 else if (fmt[i] == 'E')
12877 {
12878 int j;
12879
12880 for (j = 0; j < XVECLEN (x, i); j++)
12881 mark_used_regs_combine (XVECEXP (x, i, j));
12882 }
12883 }
12884 }
12885 }
12886 \f
12887 /* Remove register number REGNO from the dead registers list of INSN.
12888
12889 Return the note used to record the death, if there was one. */
12890
12891 rtx
12892 remove_death (unsigned int regno, rtx insn)
12893 {
12894 rtx note = find_regno_note (insn, REG_DEAD, regno);
12895
12896 if (note)
12897 remove_note (insn, note);
12898
12899 return note;
12900 }
12901
12902 /* For each register (hardware or pseudo) used within expression X, if its
12903 death is in an instruction with luid between FROM_LUID (inclusive) and
12904 TO_INSN (exclusive), put a REG_DEAD note for that register in the
12905 list headed by PNOTES.
12906
12907 That said, don't move registers killed by maybe_kill_insn.
12908
12909 This is done when X is being merged by combination into TO_INSN. These
12910 notes will then be distributed as needed. */
12911
12912 static void
12913 move_deaths (rtx x, rtx maybe_kill_insn, int from_luid, rtx to_insn,
12914 rtx *pnotes)
12915 {
12916 const char *fmt;
12917 int len, i;
12918 enum rtx_code code = GET_CODE (x);
12919
12920 if (code == REG)
12921 {
12922 unsigned int regno = REGNO (x);
12923 rtx where_dead = VEC_index (reg_stat_type, reg_stat, regno).last_death;
12924
12925 /* Don't move the register if it gets killed in between from and to. */
12926 if (maybe_kill_insn && reg_set_p (x, maybe_kill_insn)
12927 && ! reg_referenced_p (x, maybe_kill_insn))
12928 return;
12929
12930 if (where_dead
12931 && BLOCK_FOR_INSN (where_dead) == BLOCK_FOR_INSN (to_insn)
12932 && DF_INSN_LUID (where_dead) >= from_luid
12933 && DF_INSN_LUID (where_dead) < DF_INSN_LUID (to_insn))
12934 {
12935 rtx note = remove_death (regno, where_dead);
12936
12937 /* It is possible for the call above to return 0. This can occur
12938 when last_death points to I2 or I1 that we combined with.
12939 In that case make a new note.
12940
12941 We must also check for the case where X is a hard register
12942 and NOTE is a death note for a range of hard registers
12943 including X. In that case, we must put REG_DEAD notes for
12944 the remaining registers in place of NOTE. */
12945
12946 if (note != 0 && regno < FIRST_PSEUDO_REGISTER
12947 && (GET_MODE_SIZE (GET_MODE (XEXP (note, 0)))
12948 > GET_MODE_SIZE (GET_MODE (x))))
12949 {
12950 unsigned int deadregno = REGNO (XEXP (note, 0));
12951 unsigned int deadend = END_HARD_REGNO (XEXP (note, 0));
12952 unsigned int ourend = END_HARD_REGNO (x);
12953 unsigned int i;
12954
12955 for (i = deadregno; i < deadend; i++)
12956 if (i < regno || i >= ourend)
12957 add_reg_note (where_dead, REG_DEAD, regno_reg_rtx[i]);
12958 }
12959
12960 /* If we didn't find any note, or if we found a REG_DEAD note that
12961 covers only part of the given reg, and we have a multi-reg hard
12962 register, then to be safe we must check for REG_DEAD notes
12963 for each register other than the first. They could have
12964 their own REG_DEAD notes lying around. */
12965 else if ((note == 0
12966 || (note != 0
12967 && (GET_MODE_SIZE (GET_MODE (XEXP (note, 0)))
12968 < GET_MODE_SIZE (GET_MODE (x)))))
12969 && regno < FIRST_PSEUDO_REGISTER
12970 && hard_regno_nregs[regno][GET_MODE (x)] > 1)
12971 {
12972 unsigned int ourend = END_HARD_REGNO (x);
12973 unsigned int i, offset;
12974 rtx oldnotes = 0;
12975
12976 if (note)
12977 offset = hard_regno_nregs[regno][GET_MODE (XEXP (note, 0))];
12978 else
12979 offset = 1;
12980
12981 for (i = regno + offset; i < ourend; i++)
12982 move_deaths (regno_reg_rtx[i],
12983 maybe_kill_insn, from_luid, to_insn, &oldnotes);
12984 }
12985
12986 if (note != 0 && GET_MODE (XEXP (note, 0)) == GET_MODE (x))
12987 {
12988 XEXP (note, 1) = *pnotes;
12989 *pnotes = note;
12990 }
12991 else
12992 *pnotes = alloc_reg_note (REG_DEAD, x, *pnotes);
12993 }
12994
12995 return;
12996 }
12997
12998 else if (GET_CODE (x) == SET)
12999 {
13000 rtx dest = SET_DEST (x);
13001
13002 move_deaths (SET_SRC (x), maybe_kill_insn, from_luid, to_insn, pnotes);
13003
13004 /* In the case of a ZERO_EXTRACT, a STRICT_LOW_PART, or a SUBREG
13005 that accesses one word of a multi-word item, some
13006 	 piece of every register in the expression is used by
13007 this insn, so remove any old death. */
13008 /* ??? So why do we test for equality of the sizes? */
13009
13010 if (GET_CODE (dest) == ZERO_EXTRACT
13011 || GET_CODE (dest) == STRICT_LOW_PART
13012 || (GET_CODE (dest) == SUBREG
13013 && (((GET_MODE_SIZE (GET_MODE (dest))
13014 + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
13015 == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
13016 + UNITS_PER_WORD - 1) / UNITS_PER_WORD))))
13017 {
13018 move_deaths (dest, maybe_kill_insn, from_luid, to_insn, pnotes);
13019 return;
13020 }
13021
13022 /* If this is some other SUBREG, we know it replaces the entire
13023 value, so use that as the destination. */
13024 if (GET_CODE (dest) == SUBREG)
13025 dest = SUBREG_REG (dest);
13026
13027 /* If this is a MEM, adjust deaths of anything used in the address.
13028 For a REG (the only other possibility), the entire value is
13029 being replaced so the old value is not used in this insn. */
13030
13031 if (MEM_P (dest))
13032 move_deaths (XEXP (dest, 0), maybe_kill_insn, from_luid,
13033 to_insn, pnotes);
13034 return;
13035 }
13036
13037 else if (GET_CODE (x) == CLOBBER)
13038 return;
13039
13040 len = GET_RTX_LENGTH (code);
13041 fmt = GET_RTX_FORMAT (code);
13042
13043 for (i = 0; i < len; i++)
13044 {
13045 if (fmt[i] == 'E')
13046 {
13047 int j;
13048 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
13049 move_deaths (XVECEXP (x, i, j), maybe_kill_insn, from_luid,
13050 to_insn, pnotes);
13051 }
13052 else if (fmt[i] == 'e')
13053 move_deaths (XEXP (x, i), maybe_kill_insn, from_luid, to_insn, pnotes);
13054 }
13055 }
13056 \f
13057 /* Return 1 if X is the target of a bit-field assignment in BODY, the
13058 pattern of an insn. X must be a REG. */
13059
13060 static int
13061 reg_bitfield_target_p (rtx x, rtx body)
13062 {
13063 int i;
13064
13065 if (GET_CODE (body) == SET)
13066 {
13067 rtx dest = SET_DEST (body);
13068 rtx target;
13069 unsigned int regno, tregno, endregno, endtregno;
13070
13071 if (GET_CODE (dest) == ZERO_EXTRACT)
13072 target = XEXP (dest, 0);
13073 else if (GET_CODE (dest) == STRICT_LOW_PART)
13074 target = SUBREG_REG (XEXP (dest, 0));
13075 else
13076 return 0;
13077
13078 if (GET_CODE (target) == SUBREG)
13079 target = SUBREG_REG (target);
13080
13081 if (!REG_P (target))
13082 return 0;
13083
13084 tregno = REGNO (target), regno = REGNO (x);
13085 if (tregno >= FIRST_PSEUDO_REGISTER || regno >= FIRST_PSEUDO_REGISTER)
13086 return target == x;
13087
13088 endtregno = end_hard_regno (GET_MODE (target), tregno);
13089 endregno = end_hard_regno (GET_MODE (x), regno);
13090
13091 return endregno > tregno && regno < endtregno;
13092 }
13093
13094 else if (GET_CODE (body) == PARALLEL)
13095 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
13096 if (reg_bitfield_target_p (x, XVECEXP (body, 0, i)))
13097 return 1;
13098
13099 return 0;
13100 }
13101 \f
13102 /* Given a chain of REG_NOTES originally from FROM_INSN, try to place them
13103 as appropriate. I3 and I2 are the insns resulting from the combination
13104 insns including FROM (I2 may be zero).
13105
13106 ELIM_I2 and ELIM_I1 are either zero or registers that we know will
13107 not need REG_DEAD notes because they are being substituted for. This
13108 saves searching in the most common cases.
13109
13110 Each note in the list is either ignored or placed on some insns, depending
13111 on the type of note. */
13112
13113 static void
13114 distribute_notes (rtx notes, rtx from_insn, rtx i3, rtx i2, rtx elim_i2,
13115 rtx elim_i1, rtx elim_i0)
13116 {
13117 rtx note, next_note;
13118 rtx tem;
13119
13120 for (note = notes; note; note = next_note)
13121 {
13122 rtx place = 0, place2 = 0;
13123
13124 next_note = XEXP (note, 1);
13125 switch (REG_NOTE_KIND (note))
13126 {
13127 case REG_BR_PROB:
13128 case REG_BR_PRED:
13129 /* Doesn't matter much where we put this, as long as it's somewhere.
13130 It is preferable to keep these notes on branches, which is most
13131 likely to be i3. */
13132 place = i3;
13133 break;
13134
13135 case REG_NON_LOCAL_GOTO:
13136 if (JUMP_P (i3))
13137 place = i3;
13138 else
13139 {
13140 gcc_assert (i2 && JUMP_P (i2));
13141 place = i2;
13142 }
13143 break;
13144
13145 case REG_EH_REGION:
13146 /* These notes must remain with the call or trapping instruction. */
13147 if (CALL_P (i3))
13148 place = i3;
13149 else if (i2 && CALL_P (i2))
13150 place = i2;
13151 else
13152 {
13153 gcc_assert (cfun->can_throw_non_call_exceptions);
13154 if (may_trap_p (i3))
13155 place = i3;
13156 else if (i2 && may_trap_p (i2))
13157 place = i2;
13158 /* ??? Otherwise assume we've combined things such that we
13159 can now prove that the instructions can't trap. Drop the
13160 note in this case. */
13161 }
13162 break;
13163
13164 case REG_ARGS_SIZE:
13165 /* ??? How to distribute between i3-i1. Assume i3 contains the
13166 entire adjustment. Assert i3 contains at least some adjust. */
13167 if (!noop_move_p (i3))
13168 {
13169 int old_size, args_size = INTVAL (XEXP (note, 0));
13170 /* fixup_args_size_notes looks at REG_NORETURN note,
13171 so ensure the note is placed there first. */
13172 if (CALL_P (i3))
13173 {
13174 rtx *np;
13175 for (np = &next_note; *np; np = &XEXP (*np, 1))
13176 if (REG_NOTE_KIND (*np) == REG_NORETURN)
13177 {
13178 rtx n = *np;
13179 *np = XEXP (n, 1);
13180 XEXP (n, 1) = REG_NOTES (i3);
13181 REG_NOTES (i3) = n;
13182 break;
13183 }
13184 }
13185 old_size = fixup_args_size_notes (PREV_INSN (i3), i3, args_size);
13186 /* emit_call_1 adds for !ACCUMULATE_OUTGOING_ARGS
13187 REG_ARGS_SIZE note to all noreturn calls, allow that here. */
13188 gcc_assert (old_size != args_size
13189 || (CALL_P (i3)
13190 && !ACCUMULATE_OUTGOING_ARGS
13191 && find_reg_note (i3, REG_NORETURN, NULL_RTX)));
13192 }
13193 break;
13194
13195 case REG_NORETURN:
13196 case REG_SETJMP:
13197 case REG_TM:
13198 /* These notes must remain with the call. It should not be
13199 possible for both I2 and I3 to be a call. */
13200 if (CALL_P (i3))
13201 place = i3;
13202 else
13203 {
13204 gcc_assert (i2 && CALL_P (i2));
13205 place = i2;
13206 }
13207 break;
13208
13209 case REG_UNUSED:
13210 /* Any clobbers for i3 may still exist, and so we must process
13211 REG_UNUSED notes from that insn.
13212
13213 Any clobbers from i2 or i1 can only exist if they were added by
13214 recog_for_combine. In that case, recog_for_combine created the
13215 necessary REG_UNUSED notes. Trying to keep any original
13216 REG_UNUSED notes from these insns can cause incorrect output
13217 if it is for the same register as the original i3 dest.
13218 In that case, we will notice that the register is set in i3,
13219 and then add a REG_UNUSED note for the destination of i3, which
13220 is wrong. However, it is possible to have REG_UNUSED notes from
13221 	     i2 or i1 for registers which were both used and clobbered, so
13222 we keep notes from i2 or i1 if they will turn into REG_DEAD
13223 notes. */
13224
13225 /* If this register is set or clobbered in I3, put the note there
13226 unless there is one already. */
13227 if (reg_set_p (XEXP (note, 0), PATTERN (i3)))
13228 {
13229 if (from_insn != i3)
13230 break;
13231
13232 if (! (REG_P (XEXP (note, 0))
13233 ? find_regno_note (i3, REG_UNUSED, REGNO (XEXP (note, 0)))
13234 : find_reg_note (i3, REG_UNUSED, XEXP (note, 0))))
13235 place = i3;
13236 }
13237 /* Otherwise, if this register is used by I3, then this register
13238 now dies here, so we must put a REG_DEAD note here unless there
13239 is one already. */
13240 else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3))
13241 && ! (REG_P (XEXP (note, 0))
13242 ? find_regno_note (i3, REG_DEAD,
13243 REGNO (XEXP (note, 0)))
13244 : find_reg_note (i3, REG_DEAD, XEXP (note, 0))))
13245 {
13246 PUT_REG_NOTE_KIND (note, REG_DEAD);
13247 place = i3;
13248 }
13249 break;
13250
13251 case REG_EQUAL:
13252 case REG_EQUIV:
13253 case REG_NOALIAS:
13254 /* These notes say something about results of an insn. We can
13255 only support them if they used to be on I3 in which case they
13256 remain on I3. Otherwise they are ignored.
13257
13258 If the note refers to an expression that is not a constant, we
13259 must also ignore the note since we cannot tell whether the
13260 equivalence is still true. It might be possible to do
13261 slightly better than this (we only have a problem if I2DEST
13262 or I1DEST is present in the expression), but it doesn't
13263 seem worth the trouble. */
13264
13265 if (from_insn == i3
13266 && (XEXP (note, 0) == 0 || CONSTANT_P (XEXP (note, 0))))
13267 place = i3;
13268 break;
13269
13270 case REG_INC:
13271 /* These notes say something about how a register is used. They must
13272 be present on any use of the register in I2 or I3. */
13273 if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3)))
13274 place = i3;
13275
13276 if (i2 && reg_mentioned_p (XEXP (note, 0), PATTERN (i2)))
13277 {
13278 if (place)
13279 place2 = i2;
13280 else
13281 place = i2;
13282 }
13283 break;
13284
13285 case REG_LABEL_TARGET:
13286 case REG_LABEL_OPERAND:
13287 /* This can show up in several ways -- either directly in the
13288 pattern, or hidden off in the constant pool with (or without?)
13289 a REG_EQUAL note. */
13290 /* ??? Ignore the without-reg_equal-note problem for now. */
13291 if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3))
13292 || ((tem = find_reg_note (i3, REG_EQUAL, NULL_RTX))
13293 && GET_CODE (XEXP (tem, 0)) == LABEL_REF
13294 && XEXP (XEXP (tem, 0), 0) == XEXP (note, 0)))
13295 place = i3;
13296
13297 if (i2
13298 && (reg_mentioned_p (XEXP (note, 0), PATTERN (i2))
13299 || ((tem = find_reg_note (i2, REG_EQUAL, NULL_RTX))
13300 && GET_CODE (XEXP (tem, 0)) == LABEL_REF
13301 && XEXP (XEXP (tem, 0), 0) == XEXP (note, 0))))
13302 {
13303 if (place)
13304 place2 = i2;
13305 else
13306 place = i2;
13307 }
13308
13309 /* For REG_LABEL_TARGET on a JUMP_P, we prefer to put the note
13310 as a JUMP_LABEL or decrement LABEL_NUSES if it's already
13311 there. */
13312 if (place && JUMP_P (place)
13313 && REG_NOTE_KIND (note) == REG_LABEL_TARGET
13314 && (JUMP_LABEL (place) == NULL
13315 || JUMP_LABEL (place) == XEXP (note, 0)))
13316 {
13317 rtx label = JUMP_LABEL (place);
13318
13319 if (!label)
13320 JUMP_LABEL (place) = XEXP (note, 0);
13321 else if (LABEL_P (label))
13322 LABEL_NUSES (label)--;
13323 }
13324
13325 if (place2 && JUMP_P (place2)
13326 && REG_NOTE_KIND (note) == REG_LABEL_TARGET
13327 && (JUMP_LABEL (place2) == NULL
13328 || JUMP_LABEL (place2) == XEXP (note, 0)))
13329 {
13330 rtx label = JUMP_LABEL (place2);
13331
13332 if (!label)
13333 JUMP_LABEL (place2) = XEXP (note, 0);
13334 else if (LABEL_P (label))
13335 LABEL_NUSES (label)--;
13336 place2 = 0;
13337 }
13338 break;
13339
13340 case REG_NONNEG:
13341 /* This note says something about the value of a register prior
13342 to the execution of an insn. It is too much trouble to see
13343 if the note is still correct in all situations. It is better
13344 to simply delete it. */
13345 break;
13346
13347 case REG_DEAD:
13348 /* If we replaced the right hand side of FROM_INSN with a
13349 REG_EQUAL note, the original use of the dying register
13350 will not have been combined into I3 and I2. In such cases,
13351 FROM_INSN is guaranteed to be the first of the combined
13352 instructions, so we simply need to search back before
13353 FROM_INSN for the previous use or set of this register,
13354 then alter the notes there appropriately.
13355
13356 If the register is used as an input in I3, it dies there.
13357 Similarly for I2, if it is nonzero and adjacent to I3.
13358
13359 If the register is not used as an input in either I3 or I2
13360 and it is not one of the registers we were supposed to eliminate,
13361 there are two possibilities. We might have a non-adjacent I2
13362 or we might have somehow eliminated an additional register
13363 from a computation. For example, we might have had A & B where
13364 we discover that B will always be zero. In this case we will
13365 eliminate the reference to A.
13366
13367 In both cases, we must search to see if we can find a previous
13368 use of A and put the death note there. */
13369
13370 if (from_insn
13371 && from_insn == i2mod
13372 && !reg_overlap_mentioned_p (XEXP (note, 0), i2mod_new_rhs))
13373 tem = from_insn;
13374 else
13375 {
13376 if (from_insn
13377 && CALL_P (from_insn)
13378 && find_reg_fusage (from_insn, USE, XEXP (note, 0)))
13379 place = from_insn;
13380 else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3)))
13381 place = i3;
13382 else if (i2 != 0 && next_nonnote_nondebug_insn (i2) == i3
13383 && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
13384 place = i2;
13385 else if ((rtx_equal_p (XEXP (note, 0), elim_i2)
13386 && !(i2mod
13387 && reg_overlap_mentioned_p (XEXP (note, 0),
13388 i2mod_old_rhs)))
13389 || rtx_equal_p (XEXP (note, 0), elim_i1)
13390 || rtx_equal_p (XEXP (note, 0), elim_i0))
13391 break;
13392 tem = i3;
13393 }
13394
13395 if (place == 0)
13396 {
13397 basic_block bb = this_basic_block;
13398
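/* Scan backward from TEM, skipping notes and debug insns and stopping
   at the head of the basic block, until we find an insn that either
   sets or references the register named in the note.  */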
13399 for (tem = PREV_INSN (tem); place == 0; tem = PREV_INSN (tem))
13400 {
13401 if (!NONDEBUG_INSN_P (tem))
13402 {
13403 if (tem == BB_HEAD (bb))
13404 break;
13405 continue;
13406 }
13407
13408 /* If the register is being set at TEM, see if that is all
13409 TEM is doing. If so, delete TEM. Otherwise, make this
13410 into a REG_UNUSED note instead. Don't delete sets to
13411 global register vars. */
13412 if ((REGNO (XEXP (note, 0)) >= FIRST_PSEUDO_REGISTER
13413 || !global_regs[REGNO (XEXP (note, 0))])
13414 && reg_set_p (XEXP (note, 0), PATTERN (tem)))
13415 {
13416 rtx set = single_set (tem);
13417 rtx inner_dest = 0;
13418 #ifdef HAVE_cc0
13419 rtx cc0_setter = NULL_RTX;
13420 #endif
13421
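/* Strip STRICT_LOW_PART, SUBREG and ZERO_EXTRACT wrappers from the
   SET_DEST to find the object that is really being assigned.  */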
13422 if (set != 0)
13423 for (inner_dest = SET_DEST (set);
13424 (GET_CODE (inner_dest) == STRICT_LOW_PART
13425 || GET_CODE (inner_dest) == SUBREG
13426 || GET_CODE (inner_dest) == ZERO_EXTRACT);
13427 inner_dest = XEXP (inner_dest, 0))
13428 ;
13429
13430 /* Verify that it was the set, and not a clobber that
13431 modified the register.
13432
13433 CC0 targets must be careful to maintain setter/user
13434 pairs. If we cannot delete the setter due to side
13435 effects, mark the user with an UNUSED note instead
13436 of deleting it. */
13437
13438 if (set != 0 && ! side_effects_p (SET_SRC (set))
13439 && rtx_equal_p (XEXP (note, 0), inner_dest)
13440 #ifdef HAVE_cc0
13441 && (! reg_mentioned_p (cc0_rtx, SET_SRC (set))
13442 || ((cc0_setter = prev_cc0_setter (tem)) != NULL
13443 && sets_cc0_p (PATTERN (cc0_setter)) > 0))
13444 #endif
13445 )
13446 {
13447 /* Move the notes and links of TEM elsewhere.
13448 This might delete other dead insns recursively.
13449 First set the pattern to something that won't use
13450 any register. */
13451 rtx old_notes = REG_NOTES (tem);
13452
13453 PATTERN (tem) = pc_rtx;
13454 REG_NOTES (tem) = NULL;
13455
13456 distribute_notes (old_notes, tem, tem, NULL_RTX,
13457 NULL_RTX, NULL_RTX, NULL_RTX);
13458 distribute_links (LOG_LINKS (tem));
13459
13460 SET_INSN_DELETED (tem);
13461 if (tem == i2)
13462 i2 = NULL_RTX;
13463
13464 #ifdef HAVE_cc0
13465 /* Delete the setter too. */
13466 if (cc0_setter)
13467 {
13468 PATTERN (cc0_setter) = pc_rtx;
13469 old_notes = REG_NOTES (cc0_setter);
13470 REG_NOTES (cc0_setter) = NULL;
13471
13472 distribute_notes (old_notes, cc0_setter,
13473 cc0_setter, NULL_RTX,
13474 NULL_RTX, NULL_RTX, NULL_RTX);
13475 distribute_links (LOG_LINKS (cc0_setter));
13476
13477 SET_INSN_DELETED (cc0_setter);
13478 if (cc0_setter == i2)
13479 i2 = NULL_RTX;
13480 }
13481 #endif
13482 }
13483 else
13484 {
13485 PUT_REG_NOTE_KIND (note, REG_UNUSED);
13486
13487 /* If there isn't already a REG_UNUSED note, put one
13488 here. Do not place a REG_DEAD note, even if
13489 the register is also used here; that would not
13490 match the algorithm used in lifetime analysis
13491 and can cause the consistency check in the
13492 scheduler to fail. */
13493 if (! find_regno_note (tem, REG_UNUSED,
13494 REGNO (XEXP (note, 0))))
13495 place = tem;
13496 break;
13497 }
13498 }
13499 else if (reg_referenced_p (XEXP (note, 0), PATTERN (tem))
13500 || (CALL_P (tem)
13501 && find_reg_fusage (tem, USE, XEXP (note, 0))))
13502 {
13503 place = tem;
13504
13505 /* If we are doing a 3->2 combination, and we have a
13506 register which formerly died in i3 and was not used
13507 by i2, which now no longer dies in i3 and is used in
13508 i2 but does not die in i2, and place is between i2
13509 and i3, then we may need to move a link from place to
13510 i2. */
13511 if (i2 && DF_INSN_LUID (place) > DF_INSN_LUID (i2)
13512 && from_insn
13513 && DF_INSN_LUID (from_insn) > DF_INSN_LUID (i2)
13514 && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
13515 {
13516 struct insn_link *links = LOG_LINKS (place);
13517 LOG_LINKS (place) = NULL;
13518 distribute_links (links);
13519 }
13520 break;
13521 }
13522
13523 if (tem == BB_HEAD (bb))
13524 break;
13525 }
13526
13527 }
13528
13529 /* If the register is set or already dead at PLACE, we needn't do
13530 anything with this note if it is still a REG_DEAD note.
13531 We check here if it is set at all, not if it is totally replaced,
13532 which is what `dead_or_set_p' checks, so also check for it being
13533 set partially. */
13534
13535 if (place && REG_NOTE_KIND (note) == REG_DEAD)
13536 {
13537 unsigned int regno = REGNO (XEXP (note, 0));
13538 reg_stat_type *rsp = &VEC_index (reg_stat_type, reg_stat, regno);
13539
13540 if (dead_or_set_p (place, XEXP (note, 0))
13541 || reg_bitfield_target_p (XEXP (note, 0), PATTERN (place)))
13542 {
13543 /* Unless the register previously died in PLACE, clear
13544 last_death. [I no longer understand why this is
13545 being done.] */
13546 if (rsp->last_death != place)
13547 rsp->last_death = 0;
13548 place = 0;
13549 }
13550 else
13551 rsp->last_death = place;
13552
13553 /* If this is a death note for a hard reg that is occupying
13554 multiple registers, ensure that we are still using all
13555 parts of the object. If we find a piece of the object
13556 that is unused, we must arrange for an appropriate REG_DEAD
13557 note to be added for it. However, we can't just emit a USE
13558 and tag the note to it, since the register might actually
13559 be dead; so we recurse, and the recursive call then finds
13560 the previous insn that used this register. */
13561
13562 if (place && regno < FIRST_PSEUDO_REGISTER
13563 && hard_regno_nregs[regno][GET_MODE (XEXP (note, 0))] > 1)
13564 {
13565 unsigned int endregno = END_HARD_REGNO (XEXP (note, 0));
13566 int all_used = 1;
13567 unsigned int i;
13568
13569 for (i = regno; i < endregno; i++)
13570 if ((! refers_to_regno_p (i, i + 1, PATTERN (place), 0)
13571 && ! find_regno_fusage (place, USE, i))
13572 || dead_or_set_regno_p (place, i))
13573 all_used = 0;
13574
13575 if (! all_used)
13576 {
13577 /* Put only REG_DEAD notes for pieces that are
13578 not already dead or set. */
13579
13580 for (i = regno; i < endregno;
13581 i += hard_regno_nregs[i][reg_raw_mode[i]])
13582 {
13583 rtx piece = regno_reg_rtx[i];
13584 basic_block bb = this_basic_block;
13585
13586 if (! dead_or_set_p (place, piece)
13587 && ! reg_bitfield_target_p (piece,
13588 PATTERN (place)))
13589 {
13590 rtx new_note = alloc_reg_note (REG_DEAD, piece,
13591 NULL_RTX);
13592
13593 distribute_notes (new_note, place, place,
13594 NULL_RTX, NULL_RTX, NULL_RTX,
13595 NULL_RTX);
13596 }
13597 else if (! refers_to_regno_p (i, i + 1,
13598 PATTERN (place), 0)
13599 && ! find_regno_fusage (place, USE, i))
13600 for (tem = PREV_INSN (place); ;
13601 tem = PREV_INSN (tem))
13602 {
13603 if (!NONDEBUG_INSN_P (tem))
13604 {
13605 if (tem == BB_HEAD (bb))
13606 break;
13607 continue;
13608 }
13609 if (dead_or_set_p (tem, piece)
13610 || reg_bitfield_target_p (piece,
13611 PATTERN (tem)))
13612 {
13613 add_reg_note (tem, REG_UNUSED, piece);
13614 break;
13615 }
13616 }
13617
13618 }
13619
13620 place = 0;
13621 }
13622 }
13623 }
13624 break;
13625
13626 default:
13627 /* Any other notes should not be present at this point in the
13628 compilation. */
13629 gcc_unreachable ();
13630 }
13631
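/* Finally, hook the note onto PLACE's note list; if the note also
   belongs on PLACE2, attach a fresh copy of it there.  */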
13632 if (place)
13633 {
13634 XEXP (note, 1) = REG_NOTES (place);
13635 REG_NOTES (place) = note;
13636 }
13637
13638 if (place2)
13639 add_reg_note (place2, REG_NOTE_KIND (note), XEXP (note, 0));
13640 }
13641 }
13642 \f
13643 /* Similarly to above, distribute the LOG_LINKS that used to be present on
13644 I3, I2, and I1 to new locations. This is also called to add a link
13645 pointing at I3 when I3's destination is changed. */
13646
13647 static void
13648 distribute_links (struct insn_link *links)
13649 {
13650 struct insn_link *link, *next_link;
13651
13652 for (link = links; link; link = next_link)
13653 {
13654 rtx place = 0;
13655 rtx insn;
13656 rtx set, reg;
13657
13658 next_link = link->next;
13659
13660 /* If the insn that this link points to is a NOTE or isn't a single
13661 set, ignore it. In the latter case, it isn't clear what we
13662 can do other than ignore the link, since we can't tell which
13663 register it was for. Such links wouldn't be used by combine
13664 anyway.
13665
13666 It is not possible for the destination of the target of the link to
13667 have been changed by combine. The only way that could happen is if
13668 we were to replace I3, I2, and I1 by I3 and I2; but in that case the
13669 destination of I2 also remains unchanged. */
13670
13671 if (NOTE_P (link->insn)
13672 || (set = single_set (link->insn)) == 0)
13673 continue;
13674
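/* Find the object actually set by the linked insn, stripping any
   SUBREG, ZERO_EXTRACT or STRICT_LOW_PART wrapper.  */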
13675 reg = SET_DEST (set);
13676 while (GET_CODE (reg) == SUBREG || GET_CODE (reg) == ZERO_EXTRACT
13677 || GET_CODE (reg) == STRICT_LOW_PART)
13678 reg = XEXP (reg, 0);
13679
13680 /* A LOG_LINK is defined as being placed on the first insn that uses
13681 a register and points to the insn that sets the register. Start
13682 searching at the next insn after the target of the link and stop
13683 when we reach a set of the register or the end of the basic block.
13684
13685 Note that this correctly handles the link that used to point from
13686 I3 to I2. Also note that not much searching is typically done here
13687 since most links don't point very far away. */
13688
13689 for (insn = NEXT_INSN (link->insn);
13690 (insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR
13691 || BB_HEAD (this_basic_block->next_bb) != insn));
13692 insn = NEXT_INSN (insn))
13693 if (DEBUG_INSN_P (insn))
13694 continue;
13695 else if (INSN_P (insn) && reg_overlap_mentioned_p (reg, PATTERN (insn)))
13696 {
13697 if (reg_referenced_p (reg, PATTERN (insn)))
13698 place = insn;
13699 break;
13700 }
13701 else if (CALL_P (insn)
13702 && find_reg_fusage (insn, USE, reg))
13703 {
13704 place = insn;
13705 break;
13706 }
13707 else if (INSN_P (insn) && reg_set_p (reg, insn))
13708 break;
13709
13710 /* If we found a place to put the link, place it there unless there
13711 is already a link to the same insn as LINK at that point. */
13712
13713 if (place)
13714 {
13715 struct insn_link *link2;
13716
13717 FOR_EACH_LOG_LINK (link2, place)
13718 if (link2->insn == link->insn)
13719 break;
13720
13721 if (link2 == NULL)
13722 {
13723 link->next = LOG_LINKS (place);
13724 LOG_LINKS (place) = link;
13725
13726 /* Set added_links_insn to the earliest insn we added a
13727 link to. */
13728 if (added_links_insn == 0
13729 || DF_INSN_LUID (added_links_insn) > DF_INSN_LUID (place))
13730 added_links_insn = place;
13731 }
13732 }
13733 }
13734 }
13735 \f
13736 /* Subroutine of unmentioned_reg_p and callback from for_each_rtx.
13737 Check whether the expression pointed to by LOC is a register or
13738 memory; if so, return 1 when it is not mentioned in the rtx EXPR.
13739 Otherwise return zero. */
13740
13741 static int
13742 unmentioned_reg_p_1 (rtx *loc, void *expr)
13743 {
13744 rtx x = *loc;
13745
13746 if (x != NULL_RTX
13747 && (REG_P (x) || MEM_P (x))
13748 && ! reg_mentioned_p (x, (rtx) expr))
13749 return 1;
13750 return 0;
13751 }
13752
13753 /* Check for any register or memory mentioned in EQUIV that is not
13754 mentioned in EXPR. This is used to restrict EQUIV to "specializations"
13755 of EXPR where some registers may have been replaced by constants. */
13756
13757 static bool
13758 unmentioned_reg_p (rtx equiv, rtx expr)
13759 {
13760 return for_each_rtx (&equiv, unmentioned_reg_p_1, expr);
13761 }
13762 \f
13763 DEBUG_FUNCTION void
13764 dump_combine_stats (FILE *file)
13765 {
13766 fprintf
13767 (file,
13768 ";; Combiner statistics: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n\n",
13769 combine_attempts, combine_merges, combine_extras, combine_successes);
13770 }
13771
13772 void
13773 dump_combine_total_stats (FILE *file)
13774 {
13775 fprintf
13776 (file,
13777 "\n;; Combiner totals: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n",
13778 total_attempts, total_merges, total_extras, total_successes);
13779 }
13780 \f
13781 static bool
13782 gate_handle_combine (void)
13783 {
13784 return (optimize > 0);
13785 }
13786
13787 /* Try combining insns through substitution. */
13788 static unsigned int
13789 rest_of_handle_combine (void)
13790 {
13791 int rebuild_jump_labels_after_combine;
13792
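/* Set up dataflow: run DCE as part of the LR problem, defer insn
   rescans while combining, and add the note problem so REG_DEAD and
   REG_UNUSED notes are current when combine_instructions runs.  */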
13793 df_set_flags (DF_LR_RUN_DCE + DF_DEFER_INSN_RESCAN);
13794 df_note_add_problem ();
13795 df_analyze ();
13796
13797 regstat_init_n_sets_and_refs ();
13798
13799 rebuild_jump_labels_after_combine
13800 = combine_instructions (get_insns (), max_reg_num ());
13801
13802 /* Combining insns may have turned an indirect jump into a
13803 direct jump. Rebuild the JUMP_LABEL fields of jumping
13804 instructions. */
13805 if (rebuild_jump_labels_after_combine)
13806 {
13807 timevar_push (TV_JUMP);
13808 rebuild_jump_labels (get_insns ());
13809 cleanup_cfg (0);
13810 timevar_pop (TV_JUMP);
13811 }
13812
13813 regstat_free_n_sets_and_refs ();
13814 return 0;
13815 }
13816
13817 struct rtl_opt_pass pass_combine =
13818 {
13819 {
13820 RTL_PASS,
13821 "combine", /* name */
13822 OPTGROUP_NONE, /* optinfo_flags */
13823 gate_handle_combine, /* gate */
13824 rest_of_handle_combine, /* execute */
13825 NULL, /* sub */
13826 NULL, /* next */
13827 0, /* static_pass_number */
13828 TV_COMBINE, /* tv_id */
13829 PROP_cfglayout, /* properties_required */
13830 0, /* properties_provided */
13831 0, /* properties_destroyed */
13832 0, /* todo_flags_start */
13833 TODO_df_finish | TODO_verify_rtl_sharing |
13834 TODO_ggc_collect, /* todo_flags_finish */
13835 }
13836 };