df.h (struct df_ref): Replace 'insn' field with 'insn_info' field.
[gcc.git] / gcc / fwprop.c
1 /* RTL-based forward propagation pass for GNU compiler.
2 Copyright (C) 2005, 2006, 2007 Free Software Foundation, Inc.
3 Contributed by Paolo Bonzini and Steven Bosscher.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "toplev.h"
26
27 #include "timevar.h"
28 #include "rtl.h"
29 #include "tm_p.h"
30 #include "emit-rtl.h"
31 #include "insn-config.h"
32 #include "recog.h"
33 #include "flags.h"
34 #include "obstack.h"
35 #include "basic-block.h"
36 #include "output.h"
37 #include "df.h"
38 #include "target.h"
39 #include "cfgloop.h"
40 #include "tree-pass.h"
41
42
43 /* This pass does simple forward propagation and simplification when an
44 operand of an insn can only come from a single def. This pass uses
45 df.c, so it is global. However, we only do limited analysis of
46 available expressions.
47
48 1) The pass tries to propagate the source of the def into the use,
49 and checks if the result is independent of the substituted value.
50 For example, the high word of a (zero_extend:DI (reg:SI M)) is always
51 zero, independent of the source register.
52
53 In particular, we propagate constants into the use site. Sometimes
54 RTL expansion did not put the constant in the same insn on purpose,
55 to satisfy a predicate, and the result will fail to be recognized;
56 but this happens rarely and in this case we can still create a
57 REG_EQUAL note. For multi-word operations, this
58
59 (set (subreg:SI (reg:DI 120) 0) (const_int 0))
60 (set (subreg:SI (reg:DI 120) 4) (const_int -1))
61 (set (subreg:SI (reg:DI 122) 0)
62 (ior:SI (subreg:SI (reg:DI 119) 0) (subreg:SI (reg:DI 120) 0)))
63 (set (subreg:SI (reg:DI 122) 4)
64 (ior:SI (subreg:SI (reg:DI 119) 4) (subreg:SI (reg:DI 120) 4)))
65
66 can be simplified to the much simpler
67
68 (set (subreg:SI (reg:DI 122) 0) (subreg:SI (reg:DI 119)))
69 (set (subreg:SI (reg:DI 122) 4) (const_int -1))
70
71 This particular propagation is also effective at putting together
72 complex addressing modes. We are more aggressive inside MEMs, in
73 that all definitions are propagated if the use is in a MEM; if the
74 result is a valid memory address we check address_cost to decide
75 whether the substitution is worthwhile.
76
77 2) The pass propagates register copies. This is not as effective as
      the copy propagation done by CSE's canon_reg, which works by walking
      the instruction chain; nevertheless, it can help the other transformations.
80
81 We should consider removing this optimization, and instead reorder the
82 RTL passes, because GCSE does this transformation too. With some luck,
83 the CSE pass at the end of rest_of_handle_gcse could also go away.
84
85 3) The pass looks for paradoxical subregs that are actually unnecessary.
86 Things like this:
87
88 (set (reg:QI 120) (subreg:QI (reg:SI 118) 0))
89 (set (reg:QI 121) (subreg:QI (reg:SI 119) 0))
90 (set (reg:SI 122) (plus:SI (subreg:SI (reg:QI 120) 0)
91 (subreg:SI (reg:QI 121) 0)))
92
93 are very common on machines that can only do word-sized operations.
94 For each use of a paradoxical subreg (subreg:WIDER (reg:NARROW N) 0),
95 if it has a single def and it is (subreg:NARROW (reg:WIDE M) 0),
96 we can replace the paradoxical subreg with simply (reg:WIDE M). The
97 above will simplify this to
98
99 (set (reg:QI 120) (subreg:QI (reg:SI 118) 0))
100 (set (reg:QI 121) (subreg:QI (reg:SI 119) 0))
101 (set (reg:SI 122) (plus:SI (reg:SI 118) (reg:SI 119)))
102
103 where the first two insns are now dead. */
104
105
106 static int num_changes;
107
108 \f
109 /* Do not try to replace constant addresses or addresses of local and
110 argument slots. These MEM expressions are made only once and inserted
111 in many instructions, as well as being used to control symbol table
112 output. It is not safe to clobber them.
113
114 There are some uncommon cases where the address is already in a register
115 for some reason, but we cannot take advantage of that because we have
116 no easy way to unshare the MEM. In addition, looking up all stack
117 addresses is costly. */
118
119 static bool
120 can_simplify_addr (rtx addr)
121 {
122 rtx reg;
123
124 if (CONSTANT_ADDRESS_P (addr))
125 return false;
126
127 if (GET_CODE (addr) == PLUS)
128 reg = XEXP (addr, 0);
129 else
130 reg = addr;
131
132 return (!REG_P (reg)
133 || (REGNO (reg) != FRAME_POINTER_REGNUM
134 && REGNO (reg) != HARD_FRAME_POINTER_REGNUM
135 && REGNO (reg) != ARG_POINTER_REGNUM));
136 }
137
138 /* Returns a canonical version of X for the address, from the point of view,
139 that all multiplications are represented as MULT instead of the multiply
140 by a power of 2 being represented as ASHIFT.
141
142 Every ASHIFT we find has been made by simplify_gen_binary and was not
143 there before, so it is not shared. So we can do this in place. */
144
145 static void
146 canonicalize_address (rtx x)
147 {
148 for (;;)
149 switch (GET_CODE (x))
150 {
151 case ASHIFT:
152 if (GET_CODE (XEXP (x, 1)) == CONST_INT
153 && INTVAL (XEXP (x, 1)) < GET_MODE_BITSIZE (GET_MODE (x))
154 && INTVAL (XEXP (x, 1)) >= 0)
155 {
156 HOST_WIDE_INT shift = INTVAL (XEXP (x, 1));
157 PUT_CODE (x, MULT);
158 XEXP (x, 1) = gen_int_mode ((HOST_WIDE_INT) 1 << shift,
159 GET_MODE (x));
160 }
161
162 x = XEXP (x, 0);
163 break;
164
165 case PLUS:
166 if (GET_CODE (XEXP (x, 0)) == PLUS
167 || GET_CODE (XEXP (x, 0)) == ASHIFT
168 || GET_CODE (XEXP (x, 0)) == CONST)
169 canonicalize_address (XEXP (x, 0));
170
171 x = XEXP (x, 1);
172 break;
173
174 case CONST:
175 x = XEXP (x, 0);
176 break;
177
178 default:
179 return;
180 }
181 }
182
183 /* OLD is a memory address. Return whether it is good to use NEW instead,
184 for a memory access in the given MODE. */
185
186 static bool
187 should_replace_address (rtx old, rtx new, enum machine_mode mode)
188 {
189 int gain;
190
191 if (rtx_equal_p (old, new) || !memory_address_p (mode, new))
192 return false;
193
194 /* Copy propagation is always ok. */
195 if (REG_P (old) && REG_P (new))
196 return true;
197
198 /* Prefer the new address if it is less expensive. */
199 gain = address_cost (old, mode) - address_cost (new, mode);
200
201 /* If the addresses have equivalent cost, prefer the new address
202 if it has the highest `rtx_cost'. That has the potential of
203 eliminating the most insns without additional costs, and it
204 is the same that cse.c used to do. */
205 if (gain == 0)
206 gain = rtx_cost (new, SET) - rtx_cost (old, SET);
207
208 return (gain > 0);
209 }
210
211
212 /* Flags for the last parameter of propagate_rtx_1. */
213
214 enum {
215 /* If PR_CAN_APPEAR is true, propagate_rtx_1 always returns true;
216 if it is false, propagate_rtx_1 returns false if, for at least
217 one occurrence OLD, it failed to collapse the result to a constant.
218 For example, (mult:M (reg:M A) (minus:M (reg:M B) (reg:M A))) may
219 collapse to zero if replacing (reg:M B) with (reg:M A).
220
221 PR_CAN_APPEAR is disregarded inside MEMs: in that case,
222 propagate_rtx_1 just tries to make cheaper and valid memory
223 addresses. */
224 PR_CAN_APPEAR = 1,
225
226 /* If PR_HANDLE_MEM is not set, propagate_rtx_1 won't attempt any replacement
227 outside memory addresses. This is needed because propagate_rtx_1 does
228 not do any analysis on memory; thus it is very conservative and in general
229 it will fail if non-read-only MEMs are found in the source expression.
230
231 PR_HANDLE_MEM is set when the source of the propagation was not
232 another MEM. Then, it is safe not to treat non-read-only MEMs as
233 ``opaque'' objects. */
234 PR_HANDLE_MEM = 2
235 };
236
237
238 /* Replace all occurrences of OLD in *PX with NEW and try to simplify the
239 resulting expression. Replace *PX with a new RTL expression if an
240 occurrence of OLD was found.
241
242 This is only a wrapper around simplify-rtx.c: do not add any pattern
243 matching code here. (The sole exception is the handling of LO_SUM, but
244 that is because there is no simplify_gen_* function for LO_SUM). */
245
246 static bool
247 propagate_rtx_1 (rtx *px, rtx old, rtx new, int flags)
248 {
249 rtx x = *px, tem = NULL_RTX, op0, op1, op2;
250 enum rtx_code code = GET_CODE (x);
251 enum machine_mode mode = GET_MODE (x);
252 enum machine_mode op_mode;
253 bool can_appear = (flags & PR_CAN_APPEAR) != 0;
254 bool valid_ops = true;
255
256 if (!(flags & PR_HANDLE_MEM) && MEM_P (x) && !MEM_READONLY_P (x))
257 {
258 /* If unsafe, change MEMs to CLOBBERs or SCRATCHes (to preserve whether
259 they have side effects or not). */
260 *px = (side_effects_p (x)
261 ? gen_rtx_CLOBBER (GET_MODE (x), const0_rtx)
262 : gen_rtx_SCRATCH (GET_MODE (x)));
263 return false;
264 }
265
266 /* If X is OLD_RTX, return NEW_RTX. But not if replacing only within an
267 address, and we are *not* inside one. */
268 if (x == old)
269 {
270 *px = new;
271 return can_appear;
272 }
273
274 /* If this is an expression, try recursive substitution. */
275 switch (GET_RTX_CLASS (code))
276 {
277 case RTX_UNARY:
278 op0 = XEXP (x, 0);
279 op_mode = GET_MODE (op0);
280 valid_ops &= propagate_rtx_1 (&op0, old, new, flags);
281 if (op0 == XEXP (x, 0))
282 return true;
283 tem = simplify_gen_unary (code, mode, op0, op_mode);
284 break;
285
286 case RTX_BIN_ARITH:
287 case RTX_COMM_ARITH:
288 op0 = XEXP (x, 0);
289 op1 = XEXP (x, 1);
290 valid_ops &= propagate_rtx_1 (&op0, old, new, flags);
291 valid_ops &= propagate_rtx_1 (&op1, old, new, flags);
292 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
293 return true;
294 tem = simplify_gen_binary (code, mode, op0, op1);
295 break;
296
297 case RTX_COMPARE:
298 case RTX_COMM_COMPARE:
299 op0 = XEXP (x, 0);
300 op1 = XEXP (x, 1);
301 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
302 valid_ops &= propagate_rtx_1 (&op0, old, new, flags);
303 valid_ops &= propagate_rtx_1 (&op1, old, new, flags);
304 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
305 return true;
306 tem = simplify_gen_relational (code, mode, op_mode, op0, op1);
307 break;
308
309 case RTX_TERNARY:
310 case RTX_BITFIELD_OPS:
311 op0 = XEXP (x, 0);
312 op1 = XEXP (x, 1);
313 op2 = XEXP (x, 2);
314 op_mode = GET_MODE (op0);
315 valid_ops &= propagate_rtx_1 (&op0, old, new, flags);
316 valid_ops &= propagate_rtx_1 (&op1, old, new, flags);
317 valid_ops &= propagate_rtx_1 (&op2, old, new, flags);
318 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
319 return true;
320 if (op_mode == VOIDmode)
321 op_mode = GET_MODE (op0);
322 tem = simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
323 break;
324
325 case RTX_EXTRA:
326 /* The only case we try to handle is a SUBREG. */
327 if (code == SUBREG)
328 {
329 op0 = XEXP (x, 0);
330 valid_ops &= propagate_rtx_1 (&op0, old, new, flags);
331 if (op0 == XEXP (x, 0))
332 return true;
333 tem = simplify_gen_subreg (mode, op0, GET_MODE (SUBREG_REG (x)),
334 SUBREG_BYTE (x));
335 }
336 break;
337
338 case RTX_OBJ:
339 if (code == MEM && x != new)
340 {
341 rtx new_op0;
342 op0 = XEXP (x, 0);
343
344 /* There are some addresses that we cannot work on. */
345 if (!can_simplify_addr (op0))
346 return true;
347
348 op0 = new_op0 = targetm.delegitimize_address (op0);
349 valid_ops &= propagate_rtx_1 (&new_op0, old, new,
350 flags | PR_CAN_APPEAR);
351
352 /* Dismiss transformation that we do not want to carry on. */
353 if (!valid_ops
354 || new_op0 == op0
355 || !(GET_MODE (new_op0) == GET_MODE (op0)
356 || GET_MODE (new_op0) == VOIDmode))
357 return true;
358
359 canonicalize_address (new_op0);
360
361 /* Copy propagations are always ok. Otherwise check the costs. */
362 if (!(REG_P (old) && REG_P (new))
363 && !should_replace_address (op0, new_op0, GET_MODE (x)))
364 return true;
365
366 tem = replace_equiv_address_nv (x, new_op0);
367 }
368
369 else if (code == LO_SUM)
370 {
371 op0 = XEXP (x, 0);
372 op1 = XEXP (x, 1);
373
374 /* The only simplification we do attempts to remove references to op0
375 or make it constant -- in both cases, op0's invalidity will not
376 make the result invalid. */
377 propagate_rtx_1 (&op0, old, new, flags | PR_CAN_APPEAR);
378 valid_ops &= propagate_rtx_1 (&op1, old, new, flags);
379 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
380 return true;
381
382 /* (lo_sum (high x) x) -> x */
383 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
384 tem = op1;
385 else
386 tem = gen_rtx_LO_SUM (mode, op0, op1);
387
388 /* OP1 is likely not a legitimate address, otherwise there would have
389 been no LO_SUM. We want it to disappear if it is invalid, return
390 false in that case. */
391 return memory_address_p (mode, tem);
392 }
393
394 else if (code == REG)
395 {
396 if (rtx_equal_p (x, old))
397 {
398 *px = new;
399 return can_appear;
400 }
401 }
402 break;
403
404 default:
405 break;
406 }
407
408 /* No change, no trouble. */
409 if (tem == NULL_RTX)
410 return true;
411
412 *px = tem;
413
414 /* The replacement we made so far is valid, if all of the recursive
415 replacements were valid, or we could simplify everything to
416 a constant. */
417 return valid_ops || can_appear || CONSTANT_P (tem);
418 }
419
420
421 /* for_each_rtx traversal function that returns 1 if BODY points to
422 a non-constant mem. */
423
424 static int
425 varying_mem_p (rtx *body, void *data ATTRIBUTE_UNUSED)
426 {
427 rtx x = *body;
428 return MEM_P (x) && !MEM_READONLY_P (x);
429 }
430
431
432 /* Replace all occurrences of OLD in X with NEW and try to simplify the
433 resulting expression (in mode MODE). Return a new expression if it is
434 a constant, otherwise X.
435
436 Simplifications where occurrences of NEW collapse to a constant are always
437 accepted. All simplifications are accepted if NEW is a pseudo too.
438 Otherwise, we accept simplifications that have a lower or equal cost. */
439
440 static rtx
441 propagate_rtx (rtx x, enum machine_mode mode, rtx old, rtx new)
442 {
443 rtx tem;
444 bool collapsed;
445 int flags;
446
447 if (REG_P (new) && REGNO (new) < FIRST_PSEUDO_REGISTER)
448 return NULL_RTX;
449
450 flags = 0;
451 if (REG_P (new) || CONSTANT_P (new))
452 flags |= PR_CAN_APPEAR;
453 if (!for_each_rtx (&new, varying_mem_p, NULL))
454 flags |= PR_HANDLE_MEM;
455
456 tem = x;
457 collapsed = propagate_rtx_1 (&tem, old, copy_rtx (new), flags);
458 if (tem == x || !collapsed)
459 return NULL_RTX;
460
461 /* gen_lowpart_common will not be able to process VOIDmode entities other
462 than CONST_INTs. */
463 if (GET_MODE (tem) == VOIDmode && GET_CODE (tem) != CONST_INT)
464 return NULL_RTX;
465
466 if (GET_MODE (tem) == VOIDmode)
467 tem = rtl_hooks.gen_lowpart_no_emit (mode, tem);
468 else
469 gcc_assert (GET_MODE (tem) == mode);
470
471 return tem;
472 }
473
474
475 \f
476
477 /* Return true if the register from reference REF is killed
478 between FROM to (but not including) TO. */
479
480 static bool
481 local_ref_killed_between_p (struct df_ref * ref, rtx from, rtx to)
482 {
483 rtx insn;
484
485 for (insn = from; insn != to; insn = NEXT_INSN (insn))
486 {
487 struct df_ref **def_rec;
488 if (!INSN_P (insn))
489 continue;
490
491 for (def_rec = DF_INSN_DEFS (insn); *def_rec; def_rec++)
492 {
493 struct df_ref *def = *def_rec;
494 if (DF_REF_REGNO (ref) == DF_REF_REGNO (def))
495 return true;
496 }
497 }
498 return false;
499 }
500
501
502 /* Check if the given DEF is available in INSN. This would require full
503 computation of available expressions; we check only restricted conditions:
504 - if DEF is the sole definition of its register, go ahead;
505 - in the same basic block, we check for no definitions killing the
506 definition of DEF_INSN;
507 - if USE's basic block has DEF's basic block as the sole predecessor,
508 we check if the definition is killed after DEF_INSN or before
509 TARGET_INSN insn, in their respective basic blocks. */
510 static bool
511 use_killed_between (struct df_ref *use, rtx def_insn, rtx target_insn)
512 {
513 basic_block def_bb = BLOCK_FOR_INSN (def_insn);
514 basic_block target_bb = BLOCK_FOR_INSN (target_insn);
515 int regno;
516 struct df_ref * def;
517
518 /* In some obscure situations we can have a def reaching a use
519 that is _before_ the def. In other words the def does not
520 dominate the use even though the use and def are in the same
521 basic block. This can happen when a register may be used
522 uninitialized in a loop. In such cases, we must assume that
523 DEF is not available. */
524 if (def_bb == target_bb
525 ? DF_INSN_LUID (def_insn) >= DF_INSN_LUID (target_insn)
526 : !dominated_by_p (CDI_DOMINATORS, target_bb, def_bb))
527 return true;
528
529 /* Check if the reg in USE has only one definition. We already
530 know that this definition reaches use, or we wouldn't be here. */
531 regno = DF_REF_REGNO (use);
532 def = DF_REG_DEF_CHAIN (regno);
533 if (def && (def->next_reg == NULL))
534 return false;
535
536 /* Check locally if we are in the same basic block. */
537 if (def_bb == target_bb)
538 return local_ref_killed_between_p (use, def_insn, target_insn);
539
540 /* Finally, if DEF_BB is the sole predecessor of TARGET_BB. */
541 if (single_pred_p (target_bb)
542 && single_pred (target_bb) == def_bb)
543 {
544 struct df_ref *x;
545
546 /* See if USE is killed between DEF_INSN and the last insn in the
547 basic block containing DEF_INSN. */
548 x = df_bb_regno_last_def_find (def_bb, regno);
549 if (x && DF_INSN_LUID (DF_REF_INSN (x)) >= DF_INSN_LUID (def_insn))
550 return true;
551
552 /* See if USE is killed between TARGET_INSN and the first insn in the
553 basic block containing TARGET_INSN. */
554 x = df_bb_regno_first_def_find (target_bb, regno);
555 if (x && DF_INSN_LUID (DF_REF_INSN (x)) < DF_INSN_LUID (target_insn))
556 return true;
557
558 return false;
559 }
560
561 /* Otherwise assume the worst case. */
562 return true;
563 }
564
565
566 /* Check if all uses in DEF_INSN can be used in TARGET_INSN. This
567 would require full computation of available expressions;
568 we check only restricted conditions, see use_killed_between. */
569 static bool
570 all_uses_available_at (rtx def_insn, rtx target_insn)
571 {
572 struct df_ref **use_rec;
573 struct df_insn_info *insn_info = DF_INSN_INFO_GET (def_insn);
574 rtx def_set = single_set (def_insn);
575
576 gcc_assert (def_set);
577
578 /* If target_insn comes right after def_insn, which is very common
579 for addresses, we can use a quicker test. */
580 if (NEXT_INSN (def_insn) == target_insn
581 && REG_P (SET_DEST (def_set)))
582 {
583 rtx def_reg = SET_DEST (def_set);
584
585 /* If the insn uses the reg that it defines, the substitution is
586 invalid. */
587 for (use_rec = DF_INSN_INFO_USES (insn_info); *use_rec; use_rec++)
588 {
589 struct df_ref *use = *use_rec;
590 if (rtx_equal_p (DF_REF_REG (use), def_reg))
591 return false;
592 }
593 for (use_rec = DF_INSN_INFO_EQ_USES (insn_info); *use_rec; use_rec++)
594 {
595 struct df_ref *use = *use_rec;
596 if (rtx_equal_p (use->reg, def_reg))
597 return false;
598 }
599 }
600 else
601 {
602 /* Look at all the uses of DEF_INSN, and see if they are not
603 killed between DEF_INSN and TARGET_INSN. */
604 for (use_rec = DF_INSN_INFO_USES (insn_info); *use_rec; use_rec++)
605 {
606 struct df_ref *use = *use_rec;
607 if (use_killed_between (use, def_insn, target_insn))
608 return false;
609 }
610 for (use_rec = DF_INSN_INFO_EQ_USES (insn_info); *use_rec; use_rec++)
611 {
612 struct df_ref *use = *use_rec;
613 if (use_killed_between (use, def_insn, target_insn))
614 return false;
615 }
616 }
617
618 return true;
619 }
620
621 \f
622 struct find_occurrence_data
623 {
624 rtx find;
625 rtx *retval;
626 };
627
628 /* Callback for for_each_rtx, used in find_occurrence.
629 See if PX is the rtx we have to find. Return 1 to stop for_each_rtx
630 if successful, or 0 to continue traversing otherwise. */
631
632 static int
633 find_occurrence_callback (rtx *px, void *data)
634 {
635 struct find_occurrence_data *fod = (struct find_occurrence_data *) data;
636 rtx x = *px;
637 rtx find = fod->find;
638
639 if (x == find)
640 {
641 fod->retval = px;
642 return 1;
643 }
644
645 return 0;
646 }
647
648 /* Return a pointer to one of the occurrences of register FIND in *PX. */
649
650 static rtx *
651 find_occurrence (rtx *px, rtx find)
652 {
653 struct find_occurrence_data data;
654
655 gcc_assert (REG_P (find)
656 || (GET_CODE (find) == SUBREG
657 && REG_P (SUBREG_REG (find))));
658
659 data.find = find;
660 data.retval = NULL;
661 for_each_rtx (px, find_occurrence_callback, &data);
662 return data.retval;
663 }
664
665 \f
666 /* Inside INSN, the expression rooted at *LOC has been changed, moving some
667 uses from USE_VEC. Find those that are present, and create new items
668 in the data flow object of the pass. Mark any new uses as having the
669 given TYPE. */
670 static void
671 update_df (rtx insn, rtx *loc, struct df_ref **use_rec, enum df_ref_type type,
672 int new_flags)
673 {
674 bool changed = false;
675
676 /* Add a use for the registers that were propagated. */
677 while (*use_rec)
678 {
679 struct df_ref *use = *use_rec;
680 struct df_ref *orig_use = use, *new_use;
681 int width = -1;
682 int offset = -1;
683 enum machine_mode mode = 0;
684 rtx *new_loc = find_occurrence (loc, DF_REF_REG (orig_use));
685 use_rec++;
686
687 if (!new_loc)
688 continue;
689
690 if (DF_REF_FLAGS_IS_SET (orig_use, DF_REF_SIGN_EXTRACT | DF_REF_ZERO_EXTRACT))
691 {
692 width = DF_REF_EXTRACT_WIDTH (orig_use);
693 offset = DF_REF_EXTRACT_OFFSET (orig_use);
694 mode = DF_REF_EXTRACT_MODE (orig_use);
695 }
696
697 /* Add a new insn use. Use the original type, because it says if the
698 use was within a MEM. */
699 new_use = df_ref_create (DF_REF_REG (orig_use), new_loc,
700 insn, BLOCK_FOR_INSN (insn),
701 type, DF_REF_FLAGS (orig_use) | new_flags,
702 width, offset, mode);
703
704 /* Set up the use-def chain. */
705 df_chain_copy (new_use, DF_REF_CHAIN (orig_use));
706 changed = true;
707 }
708 if (changed)
709 df_insn_rescan (insn);
710 }
711
712
713 /* Try substituting NEW into LOC, which originated from forward propagation
714 of USE's value from DEF_INSN. SET_REG_EQUAL says whether we are
715 substituting the whole SET_SRC, so we can set a REG_EQUAL note if the
716 new insn is not recognized. Return whether the substitution was
717 performed. */
718
719 static bool
720 try_fwprop_subst (struct df_ref *use, rtx *loc, rtx new, rtx def_insn, bool set_reg_equal)
721 {
722 rtx insn = DF_REF_INSN (use);
723 enum df_ref_type type = DF_REF_TYPE (use);
724 int flags = DF_REF_FLAGS (use);
725 rtx set = single_set (insn);
726 int old_cost = rtx_cost (SET_SRC (set), SET);
727 bool ok;
728
729 if (dump_file)
730 {
731 fprintf (dump_file, "\nIn insn %d, replacing\n ", INSN_UID (insn));
732 print_inline_rtx (dump_file, *loc, 2);
733 fprintf (dump_file, "\n with ");
734 print_inline_rtx (dump_file, new, 2);
735 fprintf (dump_file, "\n");
736 }
737
738 validate_unshare_change (insn, loc, new, true);
739 if (!verify_changes (0))
740 {
741 if (dump_file)
742 fprintf (dump_file, "Changes to insn %d not recognized\n",
743 INSN_UID (insn));
744 ok = false;
745 }
746
747 else if (DF_REF_TYPE (use) == DF_REF_REG_USE
748 && rtx_cost (SET_SRC (set), SET) > old_cost)
749 {
750 if (dump_file)
751 fprintf (dump_file, "Changes to insn %d not profitable\n",
752 INSN_UID (insn));
753 ok = false;
754 }
755
756 else
757 {
758 if (dump_file)
759 fprintf (dump_file, "Changed insn %d\n", INSN_UID (insn));
760 ok = true;
761 }
762
763 if (ok)
764 {
765 confirm_change_group ();
766 num_changes++;
767
768 df_ref_remove (use);
769 if (!CONSTANT_P (new))
770 {
771 struct df_insn_info *insn_info = DF_INSN_INFO_GET (def_insn);
772 update_df (insn, loc, DF_INSN_INFO_USES (insn_info), type, flags);
773 update_df (insn, loc, DF_INSN_INFO_EQ_USES (insn_info), type, flags);
774 }
775 }
776 else
777 {
778 cancel_changes (0);
779
780 /* Can also record a simplified value in a REG_EQUAL note,
781 making a new one if one does not already exist. */
782 if (set_reg_equal)
783 {
784 if (dump_file)
785 fprintf (dump_file, " Setting REG_EQUAL note\n");
786
787 set_unique_reg_note (insn, REG_EQUAL, copy_rtx (new));
788
789 /* ??? Is this still necessary if we add the note through
790 set_unique_reg_note? */
791 if (!CONSTANT_P (new))
792 {
793 struct df_insn_info *insn_info = DF_INSN_INFO_GET (def_insn);
794 update_df (insn, loc, DF_INSN_INFO_USES (insn_info),
795 type, DF_REF_IN_NOTE);
796 update_df (insn, loc, DF_INSN_INFO_EQ_USES (insn_info),
797 type, DF_REF_IN_NOTE);
798 }
799 }
800 }
801
802 return ok;
803 }
804
805
806 /* If USE is a paradoxical subreg, see if it can be replaced by a pseudo. */
807
808 static bool
809 forward_propagate_subreg (struct df_ref *use, rtx def_insn, rtx def_set)
810 {
811 rtx use_reg = DF_REF_REG (use);
812 rtx use_insn, src;
813
814 /* Only consider paradoxical subregs... */
815 enum machine_mode use_mode = GET_MODE (use_reg);
816 if (GET_CODE (use_reg) != SUBREG
817 || !REG_P (SET_DEST (def_set))
818 || GET_MODE_SIZE (use_mode)
819 <= GET_MODE_SIZE (GET_MODE (SUBREG_REG (use_reg))))
820 return false;
821
822 /* If this is a paradoxical SUBREG, we have no idea what value the
823 extra bits would have. However, if the operand is equivalent to
824 a SUBREG whose operand is the same as our mode, and all the modes
825 are within a word, we can just use the inner operand because
826 these SUBREGs just say how to treat the register. */
827 use_insn = DF_REF_INSN (use);
828 src = SET_SRC (def_set);
829 if (GET_CODE (src) == SUBREG
830 && REG_P (SUBREG_REG (src))
831 && GET_MODE (SUBREG_REG (src)) == use_mode
832 && subreg_lowpart_p (src)
833 && all_uses_available_at (def_insn, use_insn))
834 return try_fwprop_subst (use, DF_REF_LOC (use), SUBREG_REG (src),
835 def_insn, false);
836 else
837 return false;
838 }
839
840 /* Try to replace USE with SRC (defined in DEF_INSN) and simplify the
841 result. */
842
843 static bool
844 forward_propagate_and_simplify (struct df_ref *use, rtx def_insn, rtx def_set)
845 {
846 rtx use_insn = DF_REF_INSN (use);
847 rtx use_set = single_set (use_insn);
848 rtx src, reg, new, *loc;
849 bool set_reg_equal;
850 enum machine_mode mode;
851
852 if (!use_set)
853 return false;
854
855 /* Do not propagate into PC, CC0, etc. */
856 if (GET_MODE (SET_DEST (use_set)) == VOIDmode)
857 return false;
858
859 /* If def and use are subreg, check if they match. */
860 reg = DF_REF_REG (use);
861 if (GET_CODE (reg) == SUBREG
862 && GET_CODE (SET_DEST (def_set)) == SUBREG
863 && (SUBREG_BYTE (SET_DEST (def_set)) != SUBREG_BYTE (reg)
864 || GET_MODE (SET_DEST (def_set)) != GET_MODE (reg)))
865 return false;
866
867 /* Check if the def had a subreg, but the use has the whole reg. */
868 if (REG_P (reg) && GET_CODE (SET_DEST (def_set)) == SUBREG)
869 return false;
870
871 /* Check if the use has a subreg, but the def had the whole reg. Unlike the
872 previous case, the optimization is possible and often useful indeed. */
873 if (GET_CODE (reg) == SUBREG && REG_P (SET_DEST (def_set)))
874 reg = SUBREG_REG (reg);
875
876 /* Check if the substitution is valid (last, because it's the most
877 expensive check!). */
878 src = SET_SRC (def_set);
879 if (!CONSTANT_P (src) && !all_uses_available_at (def_insn, use_insn))
880 return false;
881
882 /* Check if the def is loading something from the constant pool; in this
883 case we would undo optimization such as compress_float_constant.
884 Still, we can set a REG_EQUAL note. */
885 if (MEM_P (src) && MEM_READONLY_P (src))
886 {
887 rtx x = avoid_constant_pool_reference (src);
888 if (x != src)
889 {
890 rtx note = find_reg_note (use_insn, REG_EQUAL, NULL_RTX);
891 rtx old = note ? XEXP (note, 0) : SET_SRC (use_set);
892 rtx new = simplify_replace_rtx (old, src, x);
893 if (old != new)
894 set_unique_reg_note (use_insn, REG_EQUAL, copy_rtx (new));
895 }
896 return false;
897 }
898
899 /* Else try simplifying. */
900
901 if (DF_REF_TYPE (use) == DF_REF_REG_MEM_STORE)
902 {
903 loc = &SET_DEST (use_set);
904 set_reg_equal = false;
905 }
906 else
907 {
908 rtx note = find_reg_note (use_insn, REG_EQUAL, NULL_RTX);
909 if (DF_REF_FLAGS (use) & DF_REF_IN_NOTE)
910 loc = &XEXP (note, 0);
911 else
912 loc = &SET_SRC (use_set);
913
914 /* Do not replace an existing REG_EQUAL note if the insn is not
915 recognized. Either we're already replacing in the note, or
916 we'll separately try plugging the definition in the note and
917 simplifying. */
918 set_reg_equal = (note == NULL_RTX);
919 }
920
921 if (GET_MODE (*loc) == VOIDmode)
922 mode = GET_MODE (SET_DEST (use_set));
923 else
924 mode = GET_MODE (*loc);
925
926 new = propagate_rtx (*loc, mode, reg, src);
927
928 if (!new)
929 return false;
930
931 return try_fwprop_subst (use, loc, new, def_insn, set_reg_equal);
932 }
933
934
935 /* Given a use USE of an insn, if it has a single reaching
936 definition, try to forward propagate it into that insn. */
937
938 static void
939 forward_propagate_into (struct df_ref *use)
940 {
941 struct df_link *defs;
942 struct df_ref *def;
943 rtx def_insn, def_set, use_insn;
944 rtx parent;
945
946 if (DF_REF_FLAGS (use) & DF_REF_READ_WRITE)
947 return;
948 if (DF_REF_IS_ARTIFICIAL (use))
949 return;
950
951 /* Only consider uses that have a single definition. */
952 defs = DF_REF_CHAIN (use);
953 if (!defs || defs->next)
954 return;
955
956 def = defs->ref;
957 if (DF_REF_FLAGS (def) & DF_REF_READ_WRITE)
958 return;
959 if (DF_REF_IS_ARTIFICIAL (def))
960 return;
961
962 /* Do not propagate loop invariant definitions inside the loop. */
963 if (DF_REF_BB (def)->loop_father != DF_REF_BB (use)->loop_father)
964 return;
965
966 /* Check if the use is still present in the insn! */
967 use_insn = DF_REF_INSN (use);
968 if (DF_REF_FLAGS (use) & DF_REF_IN_NOTE)
969 parent = find_reg_note (use_insn, REG_EQUAL, NULL_RTX);
970 else
971 parent = PATTERN (use_insn);
972
973 if (!reg_mentioned_p (DF_REF_REG (use), parent))
974 return;
975
976 def_insn = DF_REF_INSN (def);
977 if (multiple_sets (def_insn))
978 return;
979 def_set = single_set (def_insn);
980 if (!def_set)
981 return;
982
983 /* Only try one kind of propagation. If two are possible, we'll
984 do it on the following iterations. */
985 if (!forward_propagate_and_simplify (use, def_insn, def_set))
986 forward_propagate_subreg (use, def_insn, def_set);
987 }
988
989 \f
990 static void
991 fwprop_init (void)
992 {
993 num_changes = 0;
994 calculate_dominance_info (CDI_DOMINATORS);
995
996 /* We do not always want to propagate into loops, so we have to find
997 loops and be careful about them. But we have to call flow_loops_find
998 before df_analyze, because flow_loops_find may introduce new jump
999 insns (sadly) if we are not working in cfglayout mode. */
1000 loop_optimizer_init (0);
1001
1002 /* Now set up the dataflow problem (we only want use-def chains) and
1003 put the dataflow solver to work. */
1004 df_set_flags (DF_EQ_NOTES);
1005 df_chain_add_problem (DF_UD_CHAIN);
1006 df_analyze ();
1007 df_maybe_reorganize_use_refs (DF_REF_ORDER_BY_INSN_WITH_NOTES);
1008 df_set_flags (DF_DEFER_INSN_RESCAN);
1009 }
1010
1011 static void
1012 fwprop_done (void)
1013 {
1014 loop_optimizer_finalize ();
1015
1016 free_dominance_info (CDI_DOMINATORS);
1017 cleanup_cfg (0);
1018 delete_trivially_dead_insns (get_insns (), max_reg_num ());
1019
1020 if (dump_file)
1021 fprintf (dump_file,
1022 "\nNumber of successful forward propagations: %d\n\n",
1023 num_changes);
1024 }
1025
1026
1027
1028 /* Main entry point. */
1029
1030 static bool
1031 gate_fwprop (void)
1032 {
1033 return optimize > 0 && flag_forward_propagate;
1034 }
1035
1036 static unsigned int
1037 fwprop (void)
1038 {
1039 unsigned i;
1040
1041 fwprop_init ();
1042
1043 /* Go through all the uses. update_df will create new ones at the
1044 end, and we'll go through them as well.
1045
1046 Do not forward propagate addresses into loops until after unrolling.
1047 CSE did so because it was able to fix its own mess, but we are not. */
1048
1049 for (i = 0; i < DF_USES_TABLE_SIZE (); i++)
1050 {
1051 struct df_ref *use = DF_USES_GET (i);
1052 if (use)
1053 if (DF_REF_TYPE (use) == DF_REF_REG_USE
1054 || DF_REF_BB (use)->loop_father == NULL)
1055 forward_propagate_into (use);
1056 }
1057
1058 fwprop_done ();
1059 return 0;
1060 }
1061
1062 struct rtl_opt_pass pass_rtl_fwprop =
1063 {
1064 {
1065 RTL_PASS,
1066 "fwprop1", /* name */
1067 gate_fwprop, /* gate */
1068 fwprop, /* execute */
1069 NULL, /* sub */
1070 NULL, /* next */
1071 0, /* static_pass_number */
1072 TV_FWPROP, /* tv_id */
1073 0, /* properties_required */
1074 0, /* properties_provided */
1075 0, /* properties_destroyed */
1076 0, /* todo_flags_start */
1077 TODO_df_finish | TODO_verify_rtl_sharing |
1078 TODO_dump_func /* todo_flags_finish */
1079 }
1080 };
1081
1082 static unsigned int
1083 fwprop_addr (void)
1084 {
1085 unsigned i;
1086 fwprop_init ();
1087
1088 /* Go through all the uses. update_df will create new ones at the
1089 end, and we'll go through them as well. */
1090 df_set_flags (DF_DEFER_INSN_RESCAN);
1091
1092 for (i = 0; i < DF_USES_TABLE_SIZE (); i++)
1093 {
1094 struct df_ref *use = DF_USES_GET (i);
1095 if (use)
1096 if (DF_REF_TYPE (use) != DF_REF_REG_USE
1097 && DF_REF_BB (use)->loop_father != NULL)
1098 forward_propagate_into (use);
1099 }
1100
1101 fwprop_done ();
1102
1103 return 0;
1104 }
1105
1106 struct rtl_opt_pass pass_rtl_fwprop_addr =
1107 {
1108 {
1109 RTL_PASS,
1110 "fwprop2", /* name */
1111 gate_fwprop, /* gate */
1112 fwprop_addr, /* execute */
1113 NULL, /* sub */
1114 NULL, /* next */
1115 0, /* static_pass_number */
1116 TV_FWPROP, /* tv_id */
1117 0, /* properties_required */
1118 0, /* properties_provided */
1119 0, /* properties_destroyed */
1120 0, /* todo_flags_start */
1121 TODO_df_finish | TODO_verify_rtl_sharing |
1122 TODO_dump_func /* todo_flags_finish */
1123 }
1124 };