gcc/fwprop.c
1 /* RTL-based forward propagation pass for GNU compiler.
2 Copyright (C) 2005, 2006 Free Software Foundation, Inc.
3 Contributed by Paolo Bonzini and Steven Bosscher.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
20 02110-1301, USA. */
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "toplev.h"
27
28 #include "timevar.h"
29 #include "rtl.h"
30 #include "tm_p.h"
31 #include "emit-rtl.h"
32 #include "insn-config.h"
33 #include "recog.h"
34 #include "flags.h"
35 #include "obstack.h"
36 #include "basic-block.h"
37 #include "output.h"
38 #include "df.h"
39 #include "target.h"
40 #include "cfgloop.h"
41 #include "tree-pass.h"
42
43
44 /* This pass does simple forward propagation and simplification when an
45 operand of an insn can only come from a single def. This pass uses
46 df.c, so it is global. However, we only do limited analysis of
47 available expressions.
48
49 1) The pass tries to propagate the source of the def into the use,
50 and checks if the result is independent of the substituted value.
51 For example, the high word of a (zero_extend:DI (reg:SI M)) is always
52 zero, independent of the source register.
53
 54     In particular, we propagate constants into the use site.  Sometimes
 55     RTL expansion deliberately keeps the constant out of that insn in order
 56     to satisfy a predicate, and the propagated result will then fail to be
 57     recognized; but this happens rarely, and in that case we can still
 58     create a REG_EQUAL note.  For multi-word operations, this
59
60 (set (subreg:SI (reg:DI 120) 0) (const_int 0))
61 (set (subreg:SI (reg:DI 120) 4) (const_int -1))
62 (set (subreg:SI (reg:DI 122) 0)
63 (ior:SI (subreg:SI (reg:DI 119) 0) (subreg:SI (reg:DI 120) 0)))
64 (set (subreg:SI (reg:DI 122) 4)
65 (ior:SI (subreg:SI (reg:DI 119) 4) (subreg:SI (reg:DI 120) 4)))
66
67 can be simplified to the much simpler
68
 69     (set (subreg:SI (reg:DI 122) 0) (subreg:SI (reg:DI 119) 0))
70 (set (subreg:SI (reg:DI 122) 4) (const_int -1))
71
72 This particular propagation is also effective at putting together
73 complex addressing modes. We are more aggressive inside MEMs, in
74 that all definitions are propagated if the use is in a MEM; if the
75 result is a valid memory address we check address_cost to decide
76 whether the substitution is worthwhile.
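
        As a purely illustrative sketch (the register numbers are invented), on
        a target with scaled-index addressing a definition such as

          (set (reg:SI 123) (mult:SI (reg:SI 124) (const_int 4)))

        can be propagated into

          (set (reg:SI 125) (mem:SI (plus:SI (reg:SI 123) (reg:SI 126))))

        giving

          (set (reg:SI 125) (mem:SI (plus:SI (mult:SI (reg:SI 124) (const_int 4))
                                             (reg:SI 126))))

        as long as address_cost does not report the combined address as worse.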
77
 78     2) The pass propagates register copies.  This is not as effective as
 79     the copy propagation done by CSE's canon_reg, which works by walking
 80     the instruction chain, but it can help the other transformations.
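
        As an illustrative sketch (the register numbers are invented), given

          (set (reg:SI 130) (reg:SI 129))
          (set (reg:SI 131) (plus:SI (reg:SI 130) (const_int 4)))

        the copy can be propagated to give

          (set (reg:SI 131) (plus:SI (reg:SI 129) (const_int 4)))

        leaving the copy insn dead when this was its only use.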
81
82 We should consider removing this optimization, and instead reorder the
83 RTL passes, because GCSE does this transformation too. With some luck,
84 the CSE pass at the end of rest_of_handle_gcse could also go away.
85
86 3) The pass looks for paradoxical subregs that are actually unnecessary.
87 Things like this:
88
89 (set (reg:QI 120) (subreg:QI (reg:SI 118) 0))
90 (set (reg:QI 121) (subreg:QI (reg:SI 119) 0))
91 (set (reg:SI 122) (plus:SI (subreg:SI (reg:QI 120) 0)
92 (subreg:SI (reg:QI 121) 0)))
93
94 are very common on machines that can only do word-sized operations.
 95     For each use of a paradoxical subreg (subreg:WIDER (reg:NARROW N) 0),
 96     if reg N has a single def and its source is (subreg:NARROW (reg:WIDER M) 0),
 97     we can replace the paradoxical subreg with simply (reg:WIDER M).  The
98 above will simplify this to
99
100 (set (reg:QI 120) (subreg:QI (reg:SI 118) 0))
101 (set (reg:QI 121) (subreg:QI (reg:SI 119) 0))
102 (set (reg:SI 122) (plus:SI (reg:SI 118) (reg:SI 119)))
103
104 where the first two insns are now dead. */
105
106
107 static struct df *df;
108 static int num_changes;
109
110 \f
111 /* Do not try to replace constant addresses or addresses of local and
112 argument slots. These MEM expressions are made only once and inserted
113 in many instructions, as well as being used to control symbol table
114 output. It is not safe to clobber them.
115
116 There are some uncommon cases where the address is already in a register
117 for some reason, but we cannot take advantage of that because we have
118 no easy way to unshare the MEM. In addition, looking up all stack
119 addresses is costly. */
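
/* For instance, an address written schematically as
   (plus:SI (reg:SI FRAME_POINTER_REGNUM) (const_int -4)) refers to a local
   variable slot; can_simplify_addr returns false for it, so the pass leaves
   it untouched.  */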
120
121 static bool
122 can_simplify_addr (rtx addr)
123 {
124 rtx reg;
125
126 if (CONSTANT_ADDRESS_P (addr))
127 return false;
128
129 if (GET_CODE (addr) == PLUS)
130 reg = XEXP (addr, 0);
131 else
132 reg = addr;
133
134 return (!REG_P (reg)
135 || (REGNO (reg) != FRAME_POINTER_REGNUM
136 && REGNO (reg) != HARD_FRAME_POINTER_REGNUM
137 && REGNO (reg) != ARG_POINTER_REGNUM));
138 }
139
 140 /* Canonicalize address X in place, so that every multiplication is
 141    represented as MULT rather than as a multiply by a power of 2
 142    expressed with ASHIFT.
143
144 Every ASHIFT we find has been made by simplify_gen_binary and was not
145 there before, so it is not shared. So we can do this in place. */
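
/* As an illustrative fragment, an address of the form

     (plus:SI (ashift:SI (reg:SI 100) (const_int 2)) (reg:SI 101))

   is rewritten in place into

     (plus:SI (mult:SI (reg:SI 100) (const_int 4)) (reg:SI 101))

   which is the MULT form conventionally used inside memory addresses.  */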
146
147 static void
148 canonicalize_address (rtx x)
149 {
150 for (;;)
151 switch (GET_CODE (x))
152 {
153 case ASHIFT:
154 if (GET_CODE (XEXP (x, 1)) == CONST_INT
155 && INTVAL (XEXP (x, 1)) < GET_MODE_BITSIZE (GET_MODE (x))
156 && INTVAL (XEXP (x, 1)) >= 0)
157 {
158 HOST_WIDE_INT shift = INTVAL (XEXP (x, 1));
159 PUT_CODE (x, MULT);
160 XEXP (x, 1) = gen_int_mode ((HOST_WIDE_INT) 1 << shift,
161 GET_MODE (x));
162 }
163
164 x = XEXP (x, 0);
165 break;
166
167 case PLUS:
168 if (GET_CODE (XEXP (x, 0)) == PLUS
169 || GET_CODE (XEXP (x, 0)) == ASHIFT
170 || GET_CODE (XEXP (x, 0)) == CONST)
171 canonicalize_address (XEXP (x, 0));
172
173 x = XEXP (x, 1);
174 break;
175
176 case CONST:
177 x = XEXP (x, 0);
178 break;
179
180 default:
181 return;
182 }
183 }
184
185 /* OLD is a memory address. Return whether it is good to use NEW instead,
186 for a memory access in the given MODE. */
187
188 static bool
189 should_replace_address (rtx old, rtx new, enum machine_mode mode)
190 {
191 int gain;
192
193 if (rtx_equal_p (old, new) || !memory_address_p (mode, new))
194 return false;
195
196 /* Copy propagation is always ok. */
197 if (REG_P (old) && REG_P (new))
198 return true;
199
200 /* Prefer the new address if it is less expensive. */
201 gain = address_cost (old, mode) - address_cost (new, mode);
202
203 /* If the addresses have equivalent cost, prefer the new address
204 if it has the highest `rtx_cost'. That has the potential of
205 eliminating the most insns without additional costs, and it
206 is the same that cse.c used to do. */
207 if (gain == 0)
208 gain = rtx_cost (new, SET) - rtx_cost (old, SET);
209
210 return (gain > 0);
211 }
212
213 /* Replace all occurrences of OLD in *PX with NEW and try to simplify the
214 resulting expression. Replace *PX with a new RTL expression if an
215 occurrence of OLD was found.
216
217 If CAN_APPEAR is true, we always return true; if it is false, we
 218    can return false if, for at least one occurrence of OLD, we failed to
219 collapse the result to a constant. For example, (mult:M (reg:M A)
220 (minus:M (reg:M B) (reg:M A))) may collapse to zero if replacing
221 (reg:M B) with (reg:M A).
222
223 CAN_APPEAR is disregarded inside MEMs: in that case, we always return
224 true if the simplification is a cheaper and valid memory address.
225
226 This is only a wrapper around simplify-rtx.c: do not add any pattern
227 matching code here. (The sole exception is the handling of LO_SUM, but
228 that is because there is no simplify_gen_* function for LO_SUM). */
229
230 static bool
231 propagate_rtx_1 (rtx *px, rtx old, rtx new, bool can_appear)
232 {
233 rtx x = *px, tem = NULL_RTX, op0, op1, op2;
234 enum rtx_code code = GET_CODE (x);
235 enum machine_mode mode = GET_MODE (x);
236 enum machine_mode op_mode;
237 bool valid_ops = true;
238
 239   /* If X is OLD, replace it with NEW.  Otherwise, if this is an expression,
 240      try to build a new expression from recursive substitution. */
241
242 if (x == old)
243 {
244 *px = new;
245 return can_appear;
246 }
247
248 switch (GET_RTX_CLASS (code))
249 {
250 case RTX_UNARY:
251 op0 = XEXP (x, 0);
252 op_mode = GET_MODE (op0);
253 valid_ops &= propagate_rtx_1 (&op0, old, new, can_appear);
254 if (op0 == XEXP (x, 0))
255 return true;
256 tem = simplify_gen_unary (code, mode, op0, op_mode);
257 break;
258
259 case RTX_BIN_ARITH:
260 case RTX_COMM_ARITH:
261 op0 = XEXP (x, 0);
262 op1 = XEXP (x, 1);
263 valid_ops &= propagate_rtx_1 (&op0, old, new, can_appear);
264 valid_ops &= propagate_rtx_1 (&op1, old, new, can_appear);
265 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
266 return true;
267 tem = simplify_gen_binary (code, mode, op0, op1);
268 break;
269
270 case RTX_COMPARE:
271 case RTX_COMM_COMPARE:
272 op0 = XEXP (x, 0);
273 op1 = XEXP (x, 1);
274 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
275 valid_ops &= propagate_rtx_1 (&op0, old, new, can_appear);
276 valid_ops &= propagate_rtx_1 (&op1, old, new, can_appear);
277 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
278 return true;
279 tem = simplify_gen_relational (code, mode, op_mode, op0, op1);
280 break;
281
282 case RTX_TERNARY:
283 case RTX_BITFIELD_OPS:
284 op0 = XEXP (x, 0);
285 op1 = XEXP (x, 1);
286 op2 = XEXP (x, 2);
287 op_mode = GET_MODE (op0);
288 valid_ops &= propagate_rtx_1 (&op0, old, new, can_appear);
289 valid_ops &= propagate_rtx_1 (&op1, old, new, can_appear);
290 valid_ops &= propagate_rtx_1 (&op2, old, new, can_appear);
291 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
292 return true;
293 if (op_mode == VOIDmode)
294 op_mode = GET_MODE (op0);
295 tem = simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
296 break;
297
298 case RTX_EXTRA:
299 /* The only case we try to handle is a SUBREG. */
300 if (code == SUBREG)
301 {
302 op0 = XEXP (x, 0);
303 valid_ops &= propagate_rtx_1 (&op0, old, new, can_appear);
304 if (op0 == XEXP (x, 0))
305 return true;
306 tem = simplify_gen_subreg (mode, op0, GET_MODE (SUBREG_REG (x)),
307 SUBREG_BYTE (x));
308 }
309 break;
310
311 case RTX_OBJ:
312 if (code == MEM && x != new)
313 {
314 rtx new_op0;
315 op0 = XEXP (x, 0);
316
317 /* There are some addresses that we cannot work on. */
318 if (!can_simplify_addr (op0))
319 return true;
320
321 op0 = new_op0 = targetm.delegitimize_address (op0);
322 valid_ops &= propagate_rtx_1 (&new_op0, old, new, true);
323
 324       /* Dismiss transformations that we do not want to carry out. */
325 if (!valid_ops
326 || new_op0 == op0
327 || GET_MODE (new_op0) != GET_MODE (op0))
328 return true;
329
330 canonicalize_address (new_op0);
331
332 /* Copy propagations are always ok. Otherwise check the costs. */
333 if (!(REG_P (old) && REG_P (new))
334 && !should_replace_address (op0, new_op0, GET_MODE (x)))
335 return true;
336
337 tem = replace_equiv_address_nv (x, new_op0);
338 }
339
340 else if (code == LO_SUM)
341 {
342 op0 = XEXP (x, 0);
343 op1 = XEXP (x, 1);
344
345 /* The only simplification we do attempts to remove references to op0
346 or make it constant -- in both cases, op0's invalidity will not
347 make the result invalid. */
348 propagate_rtx_1 (&op0, old, new, true);
349 valid_ops &= propagate_rtx_1 (&op1, old, new, can_appear);
350 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
351 return true;
352
353 /* (lo_sum (high x) x) -> x */
354 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
355 tem = op1;
356 else
357 tem = gen_rtx_LO_SUM (mode, op0, op1);
358
359 /* OP1 is likely not a legitimate address, otherwise there would have
 360          been no LO_SUM.  We want it to disappear if it is invalid; return
 361          false in that case. */
362 return memory_address_p (mode, tem);
363 }
364
365 else if (code == REG)
366 {
367 if (rtx_equal_p (x, old))
368 {
369 *px = new;
370 return can_appear;
371 }
372 }
373 break;
374
375 default:
376 break;
377 }
378
379 /* No change, no trouble. */
380 if (tem == NULL_RTX)
381 return true;
382
383 *px = tem;
384
385 /* The replacement we made so far is valid, if all of the recursive
386 replacements were valid, or we could simplify everything to
387 a constant. */
388 return valid_ops || can_appear || CONSTANT_P (tem);
389 }
390
391 /* Replace all occurrences of OLD in X with NEW and try to simplify the
 392    resulting expression (in mode MODE).  Return the simplified expression on
 393    success, or NULL_RTX if no acceptable replacement was found.
394
395 Simplifications where occurrences of NEW collapse to a constant are always
396 accepted. All simplifications are accepted if NEW is a pseudo too.
397 Otherwise, we accept simplifications that have a lower or equal cost. */
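
/* As a purely illustrative example, substituting (reg:SI 119) for
   (reg:SI 120) in (minus:SI (reg:SI 120) (reg:SI 119)) collapses the
   expression to (const_int 0); collapses to a constant like this are
   always accepted.  */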
398
399 static rtx
400 propagate_rtx (rtx x, enum machine_mode mode, rtx old, rtx new)
401 {
402 rtx tem;
403 bool collapsed;
404
405 if (REG_P (new) && REGNO (new) < FIRST_PSEUDO_REGISTER)
406 return NULL_RTX;
407
408 new = copy_rtx (new);
409
410 tem = x;
411 collapsed = propagate_rtx_1 (&tem, old, new, REG_P (new) || CONSTANT_P (new));
412 if (tem == x || !collapsed)
413 return NULL_RTX;
414
415 /* gen_lowpart_common will not be able to process VOIDmode entities other
416 than CONST_INTs. */
417 if (GET_MODE (tem) == VOIDmode && GET_CODE (tem) != CONST_INT)
418 return NULL_RTX;
419
420 if (GET_MODE (tem) == VOIDmode)
421 tem = rtl_hooks.gen_lowpart_no_emit (mode, tem);
422 else
423 gcc_assert (GET_MODE (tem) == mode);
424
425 return tem;
426 }
427
428
429 \f
430
 431 /* Return true if the register referenced by REF is killed
 432    between FROM and (but not including) TO. */
433
434 static bool
435 local_ref_killed_between_p (struct df_ref * ref, rtx from, rtx to)
436 {
437 rtx insn;
438 struct df_ref *def;
439
440 for (insn = from; insn != to; insn = NEXT_INSN (insn))
441 {
442 if (!INSN_P (insn))
443 continue;
444
445 def = DF_INSN_DEFS (df, insn);
446 while (def)
447 {
448 if (DF_REF_REGNO (ref) == DF_REF_REGNO (def))
449 return true;
450 def = def->next_ref;
451 }
452 }
453 return false;
454 }
455
456
 457 /* Check whether the definition of USE's register in DEF_INSN is still
 458    available at TARGET_INSN; return true if it may be killed in between.
 459    A full answer would require computing available expressions; we check
 460    only these restricted conditions:
 461    - if it is the sole definition of its register, go ahead;
 462    - in the same basic block, check that no other definition kills it;
 463    - if TARGET_INSN's block has DEF_INSN's block as its sole predecessor,
 464      check if the definition is killed after DEF_INSN or before TARGET_INSN. */
465 static bool
466 use_killed_between (struct df_ref *use, rtx def_insn, rtx target_insn)
467 {
468 basic_block def_bb = BLOCK_FOR_INSN (def_insn);
469 basic_block target_bb = BLOCK_FOR_INSN (target_insn);
470 int regno;
471 struct df_ref * def;
472
473 /* In some obscure situations we can have a def reaching a use
474 that is _before_ the def. In other words the def does not
475 dominate the use even though the use and def are in the same
476 basic block. This can happen when a register may be used
477 uninitialized in a loop. In such cases, we must assume that
478 DEF is not available. */
479 if (def_bb == target_bb
480 ? DF_INSN_LUID (df, def_insn) >= DF_INSN_LUID (df, target_insn)
481 : !dominated_by_p (CDI_DOMINATORS, target_bb, def_bb))
482 return true;
483
484 /* Check if the reg in USE has only one definition. We already
 485      know that this definition reaches the use, or we wouldn't be here. */
486 regno = DF_REF_REGNO (use);
487 def = DF_REG_DEF_GET (df, regno)->reg_chain;
488 if (def && (def->next_reg == NULL))
489 return false;
490
491 /* Check locally if we are in the same basic block. */
492 if (def_bb == target_bb)
493 return local_ref_killed_between_p (use, def_insn, target_insn);
494
495 /* Finally, if DEF_BB is the sole predecessor of TARGET_BB. */
496 if (single_pred_p (target_bb)
497 && single_pred (target_bb) == def_bb)
498 {
499 struct df_ref *x;
500
501 /* See if USE is killed between DEF_INSN and the last insn in the
502 basic block containing DEF_INSN. */
503 x = df_bb_regno_last_def_find (df, def_bb, regno);
504 if (x && DF_INSN_LUID (df, x->insn) >= DF_INSN_LUID (df, def_insn))
505 return true;
506
507 /* See if USE is killed between TARGET_INSN and the first insn in the
508 basic block containing TARGET_INSN. */
509 x = df_bb_regno_first_def_find (df, target_bb, regno);
510 if (x && DF_INSN_LUID (df, x->insn) < DF_INSN_LUID (df, target_insn))
511 return true;
512
513 return false;
514 }
515
516 /* Otherwise assume the worst case. */
517 return true;
518 }
519
520
521 /* for_each_rtx traversal function that returns 1 if BODY points to
522 a non-constant mem. */
523
524 static int
525 varying_mem_p (rtx *body, void *data ATTRIBUTE_UNUSED)
526 {
527 rtx x = *body;
528 return MEM_P (x) && !MEM_READONLY_P (x);
529 }
530
531 /* Check if all uses in DEF_INSN can be used in TARGET_INSN. This
532 would require full computation of available expressions;
533 we check only restricted conditions, see use_killed_between. */
534 static bool
535 all_uses_available_at (rtx def_insn, rtx target_insn)
536 {
537 struct df_ref * use;
538 rtx def_set = single_set (def_insn);
539
540 gcc_assert (def_set);
541
542 /* If target_insn comes right after def_insn, which is very common
543 for addresses, we can use a quicker test. */
544 if (NEXT_INSN (def_insn) == target_insn
545 && REG_P (SET_DEST (def_set)))
546 {
547 rtx def_reg = SET_DEST (def_set);
548
549 /* If the insn uses the reg that it defines, the substitution is
550 invalid. */
551 for (use = DF_INSN_USES (df, def_insn); use; use = use->next_ref)
552 if (rtx_equal_p (use->reg, def_reg))
553 return false;
554 }
555 else
556 {
557 /* Look at all the uses of DEF_INSN, and see if they are not
558 killed between DEF_INSN and TARGET_INSN. */
559 for (use = DF_INSN_USES (df, def_insn); use; use = use->next_ref)
560 if (use_killed_between (use, def_insn, target_insn))
561 return false;
562 }
563
564 /* We don't do any analysis of memories or aliasing. Reject any
565 instruction that involves references to non-constant memory. */
566 return !for_each_rtx (&SET_SRC (def_set), varying_mem_p, NULL);
567 }
568
569 \f
570 struct find_occurrence_data
571 {
572 rtx find;
573 rtx *retval;
574 };
575
576 /* Callback for for_each_rtx, used in find_occurrence.
577 See if PX is the rtx we have to find. Return 1 to stop for_each_rtx
578 if successful, or 0 to continue traversing otherwise. */
579
580 static int
581 find_occurrence_callback (rtx *px, void *data)
582 {
583 struct find_occurrence_data *fod = (struct find_occurrence_data *) data;
584 rtx x = *px;
585 rtx find = fod->find;
586
587 if (x == find)
588 {
589 fod->retval = px;
590 return 1;
591 }
592
593 return 0;
594 }
595
596 /* Return a pointer to one of the occurrences of register FIND in *PX. */
597
598 static rtx *
599 find_occurrence (rtx *px, rtx find)
600 {
601 struct find_occurrence_data data;
602
603 gcc_assert (REG_P (find)
604 || (GET_CODE (find) == SUBREG
605 && REG_P (SUBREG_REG (find))));
606
607 data.find = find;
608 data.retval = NULL;
609 for_each_rtx (px, find_occurrence_callback, &data);
610 return data.retval;
611 }
612
613 \f
614 /* Inside INSN, the expression rooted at *LOC has been changed, moving some
615 uses from ORIG_USES. Find those that are present, and create new items
616 in the data flow object of the pass. Mark any new uses as having the
617 given TYPE. */
618 static void
619 update_df (rtx insn, rtx *loc, struct df_ref *orig_uses, enum df_ref_type type,
620 int new_flags)
621 {
622 struct df_ref *use;
623
624 /* Add a use for the registers that were propagated. */
625 for (use = orig_uses; use; use = use->next_ref)
626 {
627 struct df_ref *orig_use = use, *new_use;
628 rtx *new_loc = find_occurrence (loc, DF_REF_REG (orig_use));
629
630 if (!new_loc)
631 continue;
632
633 /* Add a new insn use. Use the original type, because it says if the
634 use was within a MEM. */
635 new_use = df_ref_create (df, DF_REF_REG (orig_use), new_loc,
636 insn, BLOCK_FOR_INSN (insn),
637 type, DF_REF_FLAGS (orig_use) | new_flags);
638
639 /* Set up the use-def chain. */
640 df_chain_copy (df->problems_by_index[DF_CHAIN],
641 new_use, DF_REF_CHAIN (orig_use));
642 }
643 }
644
645
646 /* Try substituting NEW into LOC, which originated from forward propagation
647 of USE's value from DEF_INSN. SET_REG_EQUAL says whether we are
648 substituting the whole SET_SRC, so we can set a REG_EQUAL note if the
649 new insn is not recognized. Return whether the substitution was
650 performed. */
651
652 static bool
653 try_fwprop_subst (struct df_ref *use, rtx *loc, rtx new, rtx def_insn, bool set_reg_equal)
654 {
655 rtx insn = DF_REF_INSN (use);
656 enum df_ref_type type = DF_REF_TYPE (use);
657 int flags = DF_REF_FLAGS (use);
658
659 if (dump_file)
660 {
661 fprintf (dump_file, "\nIn insn %d, replacing\n ", INSN_UID (insn));
662 print_inline_rtx (dump_file, *loc, 2);
663 fprintf (dump_file, "\n with ");
664 print_inline_rtx (dump_file, new, 2);
665 fprintf (dump_file, "\n");
666 }
667
668 if (validate_change (insn, loc, new, false))
669 {
670 num_changes++;
671 if (dump_file)
672 fprintf (dump_file, "Changed insn %d\n", INSN_UID (insn));
673
674 /* Unlink the use that we changed. */
675 df_ref_remove (df, use);
676 if (!CONSTANT_P (new))
677 update_df (insn, loc, DF_INSN_USES (df, def_insn), type, flags);
678
679 return true;
680 }
681 else
682 {
683 if (dump_file)
684 fprintf (dump_file, "Changes to insn %d not recognized\n",
685 INSN_UID (insn));
686
 687       /* We can also record a simplified value in a REG_EQUAL note, making a
 688          new one if one does not already exist. */
689 if (set_reg_equal)
690 {
691 if (dump_file)
692 fprintf (dump_file, " Setting REG_EQUAL note\n");
693
694 set_unique_reg_note (insn, REG_EQUAL, copy_rtx (new));
695
696 /* ??? Is this still necessary if we add the note through
697 set_unique_reg_note? */
698 if (!CONSTANT_P (new))
699 update_df (insn, loc, DF_INSN_USES (df, def_insn),
700 type, DF_REF_IN_NOTE);
701 }
702
703 return false;
704 }
705 }
706
707
708 /* If USE is a paradoxical subreg, see if it can be replaced by a pseudo. */
709
710 static bool
711 forward_propagate_subreg (struct df_ref *use, rtx def_insn, rtx def_set)
712 {
713 rtx use_reg = DF_REF_REG (use);
714 rtx use_insn, src;
715
716 /* Only consider paradoxical subregs... */
717 enum machine_mode use_mode = GET_MODE (use_reg);
718 if (GET_CODE (use_reg) != SUBREG
719 || !REG_P (SET_DEST (def_set))
720 || GET_MODE_SIZE (use_mode)
721 <= GET_MODE_SIZE (GET_MODE (SUBREG_REG (use_reg))))
722 return false;
723
724 /* If this is a paradoxical SUBREG, we have no idea what value the
 725      extra bits would have.  However, if the operand is equivalent to
 726      a SUBREG whose inner operand has the same mode as ours, and all the
 727      modes involved fit within a word, we can just use that inner operand,
 728      because these SUBREGs merely say how to treat the register. */
729 use_insn = DF_REF_INSN (use);
730 src = SET_SRC (def_set);
731 if (GET_CODE (src) == SUBREG
732 && REG_P (SUBREG_REG (src))
733 && GET_MODE (SUBREG_REG (src)) == use_mode
734 && subreg_lowpart_p (src)
735 && all_uses_available_at (def_insn, use_insn))
736 return try_fwprop_subst (use, DF_REF_LOC (use), SUBREG_REG (src),
737 def_insn, false);
738 else
739 return false;
740 }
741
742 /* Try to replace USE with SRC (defined in DEF_INSN) and simplify the
743 result. */
744
745 static bool
746 forward_propagate_and_simplify (struct df_ref *use, rtx def_insn, rtx def_set)
747 {
748 rtx use_insn = DF_REF_INSN (use);
749 rtx use_set = single_set (use_insn);
750 rtx src, reg, new, *loc;
751 bool set_reg_equal;
752 enum machine_mode mode;
753
754 if (!use_set)
755 return false;
756
757 /* Do not propagate into PC, CC0, etc. */
758 if (GET_MODE (SET_DEST (use_set)) == VOIDmode)
759 return false;
760
 761   /* If both the def and the use are subregs, check that they match. */
762 reg = DF_REF_REG (use);
763 if (GET_CODE (reg) == SUBREG
764 && GET_CODE (SET_DEST (def_set)) == SUBREG
765 && (SUBREG_BYTE (SET_DEST (def_set)) != SUBREG_BYTE (reg)
766 || GET_MODE (SET_DEST (def_set)) != GET_MODE (reg)))
767 return false;
768
769 /* Check if the def had a subreg, but the use has the whole reg. */
770 if (REG_P (reg) && GET_CODE (SET_DEST (def_set)) == SUBREG)
771 return false;
772
 773   /* Check if the use has a subreg, but the def had the whole reg.  Unlike the
 774      previous case, this optimization is possible and indeed often useful. */
775 if (GET_CODE (reg) == SUBREG && REG_P (SET_DEST (def_set)))
776 reg = SUBREG_REG (reg);
777
778 /* Check if the substitution is valid (last, because it's the most
779 expensive check!). */
780 src = SET_SRC (def_set);
781 if (!CONSTANT_P (src) && !all_uses_available_at (def_insn, use_insn))
782 return false;
783
784 /* Check if the def is loading something from the constant pool; in this
 785      case we would undo optimizations such as compress_float_constant.
786 Still, we can set a REG_EQUAL note. */
787 if (MEM_P (src) && MEM_READONLY_P (src))
788 {
789 rtx x = avoid_constant_pool_reference (src);
790 if (x != src)
791 {
792 rtx note = find_reg_note (use_insn, REG_EQUAL, NULL_RTX);
793 rtx old = note ? XEXP (note, 0) : SET_SRC (use_set);
794 rtx new = simplify_replace_rtx (old, src, x);
795 if (old != new)
796 set_unique_reg_note (use_insn, REG_EQUAL, copy_rtx (new));
797 }
798 return false;
799 }
800
801 /* Else try simplifying. */
802
803 if (DF_REF_TYPE (use) == DF_REF_REG_MEM_STORE)
804 {
805 loc = &SET_DEST (use_set);
806 set_reg_equal = false;
807 }
808 else
809 {
810 rtx note = find_reg_note (use_insn, REG_EQUAL, NULL_RTX);
811 if (DF_REF_FLAGS (use) & DF_REF_IN_NOTE)
812 loc = &XEXP (note, 0);
813 else
814 loc = &SET_SRC (use_set);
815
816 /* Do not replace an existing REG_EQUAL note if the insn is not
817 recognized. Either we're already replacing in the note, or
818 we'll separately try plugging the definition in the note and
819 simplifying. */
820 set_reg_equal = (note == NULL_RTX);
821 }
822
823 if (GET_MODE (*loc) == VOIDmode)
824 mode = GET_MODE (SET_DEST (use_set));
825 else
826 mode = GET_MODE (*loc);
827
828 new = propagate_rtx (*loc, mode, reg, src);
829
830 if (!new)
831 return false;
832
833 return try_fwprop_subst (use, loc, new, def_insn, set_reg_equal);
834 }
835
836
837 /* Given a use USE of an insn, if it has a single reaching
838 definition, try to forward propagate it into that insn. */
839
840 static void
841 forward_propagate_into (struct df_ref *use)
842 {
843 struct df_link *defs;
844 struct df_ref *def;
845 rtx def_insn, def_set, use_insn;
846 rtx parent;
847
848 if (DF_REF_FLAGS (use) & DF_REF_READ_WRITE)
849 return;
850 if (DF_REF_FLAGS (use) & DF_REF_ARTIFICIAL)
851 return;
852
853 /* Only consider uses that have a single definition. */
854 defs = DF_REF_CHAIN (use);
855 if (!defs || defs->next)
856 return;
857
858 def = defs->ref;
859 if (DF_REF_FLAGS (def) & DF_REF_READ_WRITE)
860 return;
861 if (DF_REF_FLAGS (def) & DF_REF_ARTIFICIAL)
862 return;
863
864 /* Do not propagate loop invariant definitions inside the loop if
865 we are going to unroll. */
866 if (current_loops
867 && DF_REF_BB (def)->loop_father != DF_REF_BB (use)->loop_father)
868 return;
869
870 /* Check if the use is still present in the insn! */
871 use_insn = DF_REF_INSN (use);
872 if (DF_REF_FLAGS (use) & DF_REF_IN_NOTE)
873 parent = find_reg_note (use_insn, REG_EQUAL, NULL_RTX);
874 else
875 parent = PATTERN (use_insn);
876
877 if (!loc_mentioned_in_p (DF_REF_LOC (use), parent))
878 return;
879
880 def_insn = DF_REF_INSN (def);
881 def_set = single_set (def_insn);
882 if (!def_set)
883 return;
884
 885   /* Only try one kind of propagation per call.  If both are possible,
 886      the other will be tried in a later iteration. */
887 if (!forward_propagate_and_simplify (use, def_insn, def_set))
888 forward_propagate_subreg (use, def_insn, def_set);
889 }
890
891 \f
892 static void
893 fwprop_init (void)
894 {
895 num_changes = 0;
896 calculate_dominance_info (CDI_DOMINATORS);
897
898 /* We do not always want to propagate into loops, so we have to find
899 loops and be careful about them. But we have to call flow_loops_find
900 before df_analyze, because flow_loops_find may introduce new jump
901 insns (sadly) if we are not working in cfglayout mode. */
902 if (flag_rerun_cse_after_loop && (flag_unroll_loops || flag_peel_loops))
903 loop_optimizer_init (0);
904
905 /* Now set up the dataflow problem (we only want use-def chains) and
906 put the dataflow solver to work. */
907 df = df_init (DF_HARD_REGS | DF_SUBREGS | DF_EQUIV_NOTES);
908 df_chain_add_problem (df, DF_UD_CHAIN);
909 df_analyze (df);
910 df_dump (df, dump_file);
911 }
912
913 static void
914 fwprop_done (void)
915 {
916 df_finish (df);
917
918 if (flag_rerun_cse_after_loop && (flag_unroll_loops || flag_peel_loops))
919 loop_optimizer_finalize ();
920
921 free_dominance_info (CDI_DOMINATORS);
922 cleanup_cfg (0);
923 delete_trivially_dead_insns (get_insns (), max_reg_num ());
924
925 if (dump_file)
926 fprintf (dump_file,
927 "\nNumber of successful forward propagations: %d\n\n",
928 num_changes);
929 }
930
931
932
933 /* Main entry point. */
934
935 static bool
936 gate_fwprop (void)
937 {
938 return optimize > 0 && flag_forward_propagate;
939 }
940
941 static unsigned int
942 fwprop (void)
943 {
944 unsigned i;
945
946 fwprop_init ();
947
948 /* Go through all the uses. update_df will create new ones at the
949 end, and we'll go through them as well.
950
951 Do not forward propagate addresses into loops until after unrolling.
952 CSE did so because it was able to fix its own mess, but we are not. */
953
954 df_reorganize_refs (&df->use_info);
955 for (i = 0; i < DF_USES_SIZE (df); i++)
956 {
957 struct df_ref *use = DF_USES_GET (df, i);
958 if (use)
959 if (!current_loops
960 || DF_REF_TYPE (use) == DF_REF_REG_USE
961 || DF_REF_BB (use)->loop_father == NULL)
962 forward_propagate_into (use);
963 }
964
965 fwprop_done ();
966
967 return 0;
968 }
969
970 struct tree_opt_pass pass_rtl_fwprop =
971 {
972 "fwprop1", /* name */
973 gate_fwprop, /* gate */
974 fwprop, /* execute */
975 NULL, /* sub */
976 NULL, /* next */
977 0, /* static_pass_number */
978 TV_FWPROP, /* tv_id */
979 0, /* properties_required */
980 0, /* properties_provided */
981 0, /* properties_destroyed */
982 0, /* todo_flags_start */
983 TODO_dump_func, /* todo_flags_finish */
984 0 /* letter */
985 };
986
987 static bool
988 gate_fwprop_addr (void)
989 {
990 return optimize > 0 && flag_forward_propagate && flag_rerun_cse_after_loop
991 && (flag_unroll_loops || flag_peel_loops);
992 }
993
994 static unsigned int
995 fwprop_addr (void)
996 {
997 unsigned i;
998 fwprop_init ();
999
1000 /* Go through all the uses. update_df will create new ones at the
1001 end, and we'll go through them as well. */
1002 df_reorganize_refs (&df->use_info);
1003 for (i = 0; i < DF_USES_SIZE (df); i++)
1004 {
1005 struct df_ref *use = DF_USES_GET (df, i);
1006 if (use)
1007 if (DF_REF_TYPE (use) != DF_REF_REG_USE
1008 && DF_REF_BB (use)->loop_father != NULL)
1009 forward_propagate_into (use);
1010 }
1011
1012 fwprop_done ();
1013
1014 return 0;
1015 }
1016
1017 struct tree_opt_pass pass_rtl_fwprop_addr =
1018 {
1019 "fwprop2", /* name */
1020 gate_fwprop_addr, /* gate */
1021 fwprop_addr, /* execute */
1022 NULL, /* sub */
1023 NULL, /* next */
1024 0, /* static_pass_number */
1025 TV_FWPROP, /* tv_id */
1026 0, /* properties_required */
1027 0, /* properties_provided */
1028 0, /* properties_destroyed */
1029 0, /* todo_flags_start */
1030 TODO_dump_func, /* todo_flags_finish */
1031 0 /* letter */
1032 };