/* Conditional constant propagation pass for the GNU compiler.
   Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009,
   2010, 2011, 2012 Free Software Foundation, Inc.
   Adapted from original RTL SSA-CCP by Daniel Berlin <dberlin@dberlin.org>
   Adapted to GIMPLE trees by Diego Novillo <dnovillo@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* Conditional constant propagation (CCP) is based on the SSA
   propagation engine (tree-ssa-propagate.c).  Constant assignments of
   the form VAR = CST are propagated from the assignments into uses of
   VAR, which in turn may generate new constants.  The simulation uses
   a four level lattice to keep track of constant values associated
   with SSA names.  Given an SSA name V_i, it may take one of the
   following values:

	UNINITIALIZED   ->  the initial state of the value.  This value
			    is replaced with a correct initial value
			    the first time the value is used, so the
			    rest of the pass does not need to care about
			    it.  Using this value simplifies initialization
			    of the pass, and prevents us from needlessly
			    scanning statements that are never reached.

	UNDEFINED       ->  V_i is a local variable whose definition
			    has not been processed yet.  Therefore we
			    don't yet know if its value is a constant
			    or not.

	CONSTANT        ->  V_i has been found to hold a constant
			    value C.

	VARYING         ->  V_i cannot take a constant value, or if it
			    does, it is not possible to determine it
			    at compile time.

   The core of SSA-CCP is in ccp_visit_stmt and ccp_visit_phi_node:

   1- In ccp_visit_stmt, we are interested in assignments whose RHS
      evaluates into a constant and conditional jumps whose predicate
      evaluates into a boolean true or false.  When an assignment of
      the form V_i = CONST is found, V_i's lattice value is set to
      CONSTANT and CONST is associated with it.  This causes the
      propagation engine to add all the SSA edges coming out the
      assignment into the worklists, so that statements that use V_i
      can be visited.

      If the statement is a conditional with a constant predicate, we
      mark the outgoing edges as executable or not executable
      depending on the predicate's value.  This is then used when
      visiting PHI nodes to know when a PHI argument can be ignored.

   2- In ccp_visit_phi_node, if all the PHI arguments evaluate to the
      same constant C, then the LHS of the PHI is set to C.  This
      evaluation is known as the "meet operation".  Since one of the
      goals of this evaluation is to optimistically return constant
      values as often as possible, it uses two main short cuts:

      - If an argument is flowing in through a non-executable edge, it
	is ignored.  This is useful in cases like this:

			if (PRED)
			  a_9 = 3;
			else
			  a_10 = 100;
			a_11 = PHI (a_9, a_10)

	If PRED is known to always evaluate to false, then we can
	assume that a_11 will always take its value from a_10, meaning
	that instead of considering it VARYING (a_9 and a_10 have
	different values), we can consider it CONSTANT 100.

      - If an argument has an UNDEFINED value, then it does not affect
	the outcome of the meet operation.  If a variable V_i has an
	UNDEFINED value, it means that either its defining statement
	hasn't been visited yet or V_i has no defining statement, in
	which case the original symbol 'V' is being used
	uninitialized.  Since 'V' is a local variable, the compiler
	may assume any initial value for it.

   After propagation, every variable V_i that ends up with a lattice
   value of CONSTANT will have the associated constant value in the
   array CONST_VAL[i].VALUE.  That is fed into substitute_and_fold for
   final substitution and folding.

   References:

     Constant propagation with conditional branches,
     Wegman and Zadeck, ACM TOPLAS 13(2):181-210.

     Building an Optimizing Compiler,
     Robert Morgan, Butterworth-Heinemann, 1998, Section 8.9.

     Advanced Compiler Design and Implementation,
     Steven Muchnick, Morgan Kaufmann, 1997, Section 12.6  */
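
/* An added worked example of the engine in action (illustrative only,
   not part of the original documentation).  Given the GIMPLE

	x_1 = 4;
	y_2 = x_1 + 6;
	if (y_2 > 20)
	  a_9 = 3;
	else
	  a_10 = 100;
	a_11 = PHI (a_9, a_10)

   visiting x_1 = 4 makes x_1 CONSTANT 4, which lets y_2 = x_1 + 6
   evaluate to CONSTANT 10.  The predicate y_2 > 20 then folds to
   false, so only the else edge is marked executable; the PHI meet
   sees a single executable argument and a_11 becomes CONSTANT 100
   rather than VARYING.  */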
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "flags.h"
#include "tm_p.h"
#include "basic-block.h"
#include "output.h"
#include "function.h"
#include "tree-pretty-print.h"
#include "gimple-pretty-print.h"
#include "timevar.h"
#include "tree-dump.h"
#include "tree-flow.h"
#include "tree-pass.h"
#include "tree-ssa-propagate.h"
#include "value-prof.h"
#include "langhooks.h"
#include "target.h"
#include "diagnostic-core.h"
#include "dbgcnt.h"
#include "params.h"
#include "gimple-fold.h"
/* Possible lattice values.  */
typedef enum
{
  UNINITIALIZED,
  UNDEFINED,
  CONSTANT,
  VARYING
} ccp_lattice_t;

struct prop_value_d
{
    /* Lattice value.  */
    ccp_lattice_t lattice_val;

    /* Propagated value.  */
    tree value;

    /* Mask that applies to the propagated value during CCP.  For
       X with a CONSTANT lattice value X & ~mask == value & ~mask.  */
    double_int mask;
};

typedef struct prop_value_d prop_value_t;
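
/* An illustrative example of the value/mask encoding (added, not from
   the original sources): a CONSTANT lattice entry with value == 4 and
   mask == 3 describes an integer whose low two bits are unknown but
   which satisfies X & ~3 == 4 & ~3, i.e. one of 4, 5, 6 or 7.  A zero
   mask means the value is fully known; an all-ones mask carries no
   bit information at all.  */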
/* Array of propagated constant values.  After propagation,
   CONST_VAL[I].VALUE holds the constant value for SSA_NAME(I).  If
   the constant is held in an SSA name representing a memory store
   (i.e., a VDEF), CONST_VAL[I].MEM_REF will contain the actual
   memory reference used to store (i.e., the LHS of the assignment
   doing the store).  */
static prop_value_t *const_val;

static void canonicalize_float_value (prop_value_t *);
static bool ccp_fold_stmt (gimple_stmt_iterator *);
/* Dump constant propagation value VAL to file OUTF prefixed by PREFIX.  */

static void
dump_lattice_value (FILE *outf, const char *prefix, prop_value_t val)
{
  switch (val.lattice_val)
    {
    case UNINITIALIZED:
      fprintf (outf, "%sUNINITIALIZED", prefix);
      break;
    case UNDEFINED:
      fprintf (outf, "%sUNDEFINED", prefix);
      break;
    case VARYING:
      fprintf (outf, "%sVARYING", prefix);
      break;
    case CONSTANT:
      if (TREE_CODE (val.value) != INTEGER_CST
	  || double_int_zero_p (val.mask))
	{
	  fprintf (outf, "%sCONSTANT ", prefix);
	  print_generic_expr (outf, val.value, dump_flags);
	}
      else
	{
	  double_int cval = double_int_and_not (tree_to_double_int (val.value),
						val.mask);
	  fprintf (outf, "%sCONSTANT " HOST_WIDE_INT_PRINT_DOUBLE_HEX,
		   prefix, cval.high, cval.low);
	  fprintf (outf, " (" HOST_WIDE_INT_PRINT_DOUBLE_HEX ")",
		   val.mask.high, val.mask.low);
	}
      break;
    default:
      gcc_unreachable ();
    }
}
/* Print lattice value VAL to stderr.  */

void debug_lattice_value (prop_value_t val);

DEBUG_FUNCTION void
debug_lattice_value (prop_value_t val)
{
  dump_lattice_value (stderr, "", val);
  fprintf (stderr, "\n");
}
/* Compute a default value for variable VAR and store it in the
   CONST_VAL array.  The following rules are used to get default
   values:

   1- Global and static variables that are declared constant are
      considered CONSTANT.

   2- Any other value is considered UNDEFINED.  This is useful when
      considering PHI nodes.  PHI arguments that are undefined do not
      change the constant value of the PHI node, which allows for more
      constants to be propagated.

   3- Variables defined by statements other than assignments and PHI
      nodes are considered VARYING.

   4- Initial values of variables that are not GIMPLE registers are
      considered VARYING.  */

static prop_value_t
get_default_value (tree var)
{
  tree sym = SSA_NAME_VAR (var);
  prop_value_t val = { UNINITIALIZED, NULL_TREE, { 0, 0 } };
  gimple stmt;

  stmt = SSA_NAME_DEF_STMT (var);

  if (gimple_nop_p (stmt))
    {
      /* Variables defined by an empty statement are those used
	 before being initialized.  If VAR is a local variable, we
	 can assume initially that it is UNDEFINED, otherwise we must
	 consider it VARYING.  */
      if (is_gimple_reg (sym)
	  && TREE_CODE (sym) == VAR_DECL)
	val.lattice_val = UNDEFINED;
      else
	{
	  val.lattice_val = VARYING;
	  val.mask = double_int_minus_one;
	}
    }
  else if (is_gimple_assign (stmt)
	   /* Value-returning GIMPLE_CALL statements assign to
	      a variable, and are treated similarly to GIMPLE_ASSIGN.  */
	   || (is_gimple_call (stmt)
	       && gimple_call_lhs (stmt) != NULL_TREE)
	   || gimple_code (stmt) == GIMPLE_PHI)
    {
      tree cst;
      if (gimple_assign_single_p (stmt)
	  && DECL_P (gimple_assign_rhs1 (stmt))
	  && (cst = get_symbol_constant_value (gimple_assign_rhs1 (stmt))))
	{
	  val.lattice_val = CONSTANT;
	  val.value = cst;
	}
      else
	/* Any other variable defined by an assignment or a PHI node
	   is considered UNDEFINED.  */
	val.lattice_val = UNDEFINED;
    }
  else
    {
      /* Otherwise, VAR will never take on a constant value.  */
      val.lattice_val = VARYING;
      val.mask = double_int_minus_one;
    }

  return val;
}
/* Get the constant value associated with variable VAR.  */

static inline prop_value_t *
get_value (tree var)
{
  prop_value_t *val;

  if (const_val == NULL)
    return NULL;

  val = &const_val[SSA_NAME_VERSION (var)];
  if (val->lattice_val == UNINITIALIZED)
    *val = get_default_value (var);

  canonicalize_float_value (val);

  return val;
}
/* Return the constant tree value associated with VAR.  */

static inline tree
get_constant_value (tree var)
{
  prop_value_t *val;
  if (TREE_CODE (var) != SSA_NAME)
    {
      if (is_gimple_min_invariant (var))
	return var;
      return NULL_TREE;
    }
  val = get_value (var);
  if (val
      && val->lattice_val == CONSTANT
      && (TREE_CODE (val->value) != INTEGER_CST
	  || double_int_zero_p (val->mask)))
    return val->value;
  return NULL_TREE;
}
/* Sets the value associated with VAR to VARYING.  */

static inline void
set_value_varying (tree var)
{
  prop_value_t *val = &const_val[SSA_NAME_VERSION (var)];

  val->lattice_val = VARYING;
  val->value = NULL_TREE;
  val->mask = double_int_minus_one;
}
/* For float types, modify the value of VAL to make ccp work correctly
   for non-standard values (-0, NaN):

   If HONOR_SIGNED_ZEROS is false, and VAL = -0, we canonicalize it to 0.
   If HONOR_NANS is false, and VAL is NaN, we canonicalize it to UNDEFINED.
   This is to fix the following problem (see PR 29921): Suppose we have
   a statement computing x from y, and we set value of y to NaN.  This
   causes value of x to be set to NaN.  When we later determine that y
   is in fact VARYING, fold uses the fact that HONOR_NANS is false, and
   we try to change the value of x to 0, causing an ICE.  With
   HONOR_NANS being false, the real appearance of NaN would cause
   undefined behavior, though, so claiming that y (and x) are UNDEFINED
   initially is correct.  */

static void
canonicalize_float_value (prop_value_t *val)
{
  enum machine_mode mode;
  tree type;
  REAL_VALUE_TYPE d;

  if (val->lattice_val != CONSTANT
      || TREE_CODE (val->value) != REAL_CST)
    return;

  d = TREE_REAL_CST (val->value);
  type = TREE_TYPE (val->value);
  mode = TYPE_MODE (type);

  if (!HONOR_SIGNED_ZEROS (mode)
      && REAL_VALUE_MINUS_ZERO (d))
    {
      val->value = build_real (type, dconst0);
      return;
    }

  if (!HONOR_NANS (mode)
      && REAL_VALUE_ISNAN (d))
    {
      val->lattice_val = UNDEFINED;
      val->value = NULL;
      return;
    }
}
/* Return whether the lattice transition is valid.  */

static bool
valid_lattice_transition (prop_value_t old_val, prop_value_t new_val)
{
  /* Lattice transitions must always be monotonically increasing in
     value.  */
  if (old_val.lattice_val < new_val.lattice_val)
    return true;

  if (old_val.lattice_val != new_val.lattice_val)
    return false;

  if (!old_val.value && !new_val.value)
    return true;

  /* Now both lattice values are CONSTANT.  */

  /* Allow transitioning from &x to &x & ~3.  */
  if (TREE_CODE (old_val.value) != INTEGER_CST
      && TREE_CODE (new_val.value) == INTEGER_CST)
    return true;

  /* Bit-lattices have to agree in the still valid bits.  */
  if (TREE_CODE (old_val.value) == INTEGER_CST
      && TREE_CODE (new_val.value) == INTEGER_CST)
    return double_int_equal_p
	     (double_int_and_not (tree_to_double_int (old_val.value),
				  new_val.mask),
	      double_int_and_not (tree_to_double_int (new_val.value),
				  new_val.mask));

  /* Otherwise constant values have to agree.  */
  return operand_equal_p (old_val.value, new_val.value, 0);
}
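
/* Illustration of the rules above (an added note): transitions may
   only move down the lattice, e.g. UNDEFINED -> CONSTANT -> VARYING.
   Between two INTEGER_CST CONSTANTs only the still-valid bits must
   agree, so value 4 with mask 0 may legally become value 6 with mask
   2: bit 1 turned unknown, and 4 and 6 agree on every other bit.  */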
/* Set the value for variable VAR to NEW_VAL.  Return true if the new
   value is different from VAR's previous value.  */

static bool
set_lattice_value (tree var, prop_value_t new_val)
{
  /* We can deal with old UNINITIALIZED values just fine here.  */
  prop_value_t *old_val = &const_val[SSA_NAME_VERSION (var)];

  canonicalize_float_value (&new_val);

  /* We have to be careful to not go up the bitwise lattice
     represented by the mask.
     ??? This doesn't seem to be the best place to enforce this.  */
  if (new_val.lattice_val == CONSTANT
      && old_val->lattice_val == CONSTANT
      && TREE_CODE (new_val.value) == INTEGER_CST
      && TREE_CODE (old_val->value) == INTEGER_CST)
    {
      double_int diff;
      diff = double_int_xor (tree_to_double_int (new_val.value),
			     tree_to_double_int (old_val->value));
      new_val.mask = double_int_ior (new_val.mask,
				     double_int_ior (old_val->mask, diff));
    }

  gcc_assert (valid_lattice_transition (*old_val, new_val));

  /* If *OLD_VAL and NEW_VAL are the same, return false to inform the
     caller that this was a non-transition.  */
  if (old_val->lattice_val != new_val.lattice_val
      || (new_val.lattice_val == CONSTANT
	  && TREE_CODE (new_val.value) == INTEGER_CST
	  && (TREE_CODE (old_val->value) != INTEGER_CST
	      || !double_int_equal_p (new_val.mask, old_val->mask))))
    {
      /* ??? We would like to delay creation of INTEGER_CSTs from
	 partially constants here.  */

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  dump_lattice_value (dump_file, "Lattice value changed to ", new_val);
	  fprintf (dump_file, ".  Adding SSA edges to worklist.\n");
	}

      *old_val = new_val;

      gcc_assert (new_val.lattice_val != UNINITIALIZED);
      return true;
    }

  return false;
}
static prop_value_t get_value_for_expr (tree, bool);
static prop_value_t bit_value_binop (enum tree_code, tree, tree, tree);
static void bit_value_binop_1 (enum tree_code, tree, double_int *, double_int *,
			       tree, double_int, double_int,
			       tree, double_int, double_int);
/* Return a double_int that can be used for bitwise simplifications
   from VAL.  */

static double_int
value_to_double_int (prop_value_t val)
{
  if (val.value
      && TREE_CODE (val.value) == INTEGER_CST)
    return tree_to_double_int (val.value);

  return double_int_zero;
}
/* Return the value for the address expression EXPR based on alignment
   information.  */

static prop_value_t
get_value_from_alignment (tree expr)
{
  tree type = TREE_TYPE (expr);
  prop_value_t val;
  unsigned HOST_WIDE_INT bitpos;
  unsigned int align;

  gcc_assert (TREE_CODE (expr) == ADDR_EXPR);

  align = get_object_alignment_1 (TREE_OPERAND (expr, 0), &bitpos);
  val.mask
    = double_int_and_not (POINTER_TYPE_P (type) || TYPE_UNSIGNED (type)
			  ? double_int_mask (TYPE_PRECISION (type))
			  : double_int_minus_one,
			  uhwi_to_double_int (align / BITS_PER_UNIT - 1));
  val.lattice_val = double_int_minus_one_p (val.mask) ? VARYING : CONSTANT;
  if (val.lattice_val == CONSTANT)
    val.value
      = double_int_to_tree (type, uhwi_to_double_int (bitpos / BITS_PER_UNIT));
  else
    val.value = NULL_TREE;

  return val;
}
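
/* An example of the encoding produced above (added; the alignment is
   assumed for illustration): for EXPR == &a with 'a' known to be
   16-byte aligned at bit position 0, the mask has its low four bits
   clear and all higher bits set, and the value is 0.  That encodes
   "the low four bits of the address are zero", i.e. the address is a
   multiple of 16.  */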
/* Return the value for the tree operand EXPR.  If FOR_BITS_P is true
   return constant bits extracted from alignment information for
   invariant addresses.  */

static prop_value_t
get_value_for_expr (tree expr, bool for_bits_p)
{
  prop_value_t val;

  if (TREE_CODE (expr) == SSA_NAME)
    {
      val = *get_value (expr);
      if (for_bits_p
	  && val.lattice_val == CONSTANT
	  && TREE_CODE (val.value) == ADDR_EXPR)
	val = get_value_from_alignment (val.value);
    }
  else if (is_gimple_min_invariant (expr)
	   && (!for_bits_p || TREE_CODE (expr) != ADDR_EXPR))
    {
      val.lattice_val = CONSTANT;
      val.value = expr;
      val.mask = double_int_zero;
      canonicalize_float_value (&val);
    }
  else if (TREE_CODE (expr) == ADDR_EXPR)
    val = get_value_from_alignment (expr);
  else
    {
      val.lattice_val = VARYING;
      val.mask = double_int_minus_one;
      val.value = NULL_TREE;
    }

  return val;
}
/* Return the likely CCP lattice value for STMT.

   If STMT has no operands, then return CONSTANT.

   Else if undefinedness of operands of STMT cause its value to be
   undefined, then return UNDEFINED.

   Else if any operands of STMT are constants, then return CONSTANT.

   Else return VARYING.  */

static ccp_lattice_t
likely_value (gimple stmt)
{
  bool has_constant_operand, has_undefined_operand, all_undefined_operands;
  tree use;
  ssa_op_iter iter;
  unsigned i;

  enum gimple_code code = gimple_code (stmt);

  /* This function appears to be called only for assignments, calls,
     conditionals, and switches, due to the logic in visit_stmt.  */
  gcc_assert (code == GIMPLE_ASSIGN
	      || code == GIMPLE_CALL
	      || code == GIMPLE_COND
	      || code == GIMPLE_SWITCH);

  /* If the statement has volatile operands, it won't fold to a
     constant value.  */
  if (gimple_has_volatile_ops (stmt))
    return VARYING;

  /* Arrive here for more complex cases.  */
  has_constant_operand = false;
  has_undefined_operand = false;
  all_undefined_operands = true;
  FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
    {
      prop_value_t *val = get_value (use);

      if (val->lattice_val == UNDEFINED)
	has_undefined_operand = true;
      else
	all_undefined_operands = false;

      if (val->lattice_val == CONSTANT)
	has_constant_operand = true;
    }

  /* There may be constants in regular rhs operands.  For calls we
     have to ignore lhs, fndecl and static chain, otherwise only
     the lhs.  */
  for (i = (is_gimple_call (stmt) ? 2 : 0) + gimple_has_lhs (stmt);
       i < gimple_num_ops (stmt); ++i)
    {
      tree op = gimple_op (stmt, i);
      if (!op || TREE_CODE (op) == SSA_NAME)
	continue;
      if (is_gimple_min_invariant (op))
	has_constant_operand = true;
    }

  if (has_constant_operand)
    all_undefined_operands = false;

  /* If the operation combines operands like COMPLEX_EXPR make sure to
     not mark the result UNDEFINED if only one part of the result is
     undefined.  */
  if (has_undefined_operand && all_undefined_operands)
    return UNDEFINED;
  else if (code == GIMPLE_ASSIGN && has_undefined_operand)
    {
      switch (gimple_assign_rhs_code (stmt))
	{
	/* Unary operators are handled with all_undefined_operands.  */
	case PLUS_EXPR:
	case MINUS_EXPR:
	case POINTER_PLUS_EXPR:
	  /* Not MIN_EXPR, MAX_EXPR.  One VARYING operand may be selected.
	     Not bitwise operators, one VARYING operand may specify the
	     result completely.  Not logical operators for the same reason.
	     Not COMPLEX_EXPR as one VARYING operand makes the result partly
	     not UNDEFINED.  Not *DIV_EXPR, comparisons and shifts because
	     the undefined operand may be promoted.  */
	  return UNDEFINED;

	default:
	  ;
	}
    }
  /* If there was an UNDEFINED operand but the result may be not UNDEFINED
     fall back to CONSTANT.  During iteration UNDEFINED may still drop
     to CONSTANT.  */
  if (has_undefined_operand)
    return CONSTANT;

  /* We do not consider virtual operands here -- load from read-only
     memory may have only VARYING virtual operands, but still be
     constant.  */
  if (has_constant_operand
      || gimple_references_memory_p (stmt))
    return CONSTANT;

  return VARYING;
}
/* Returns true if STMT cannot be constant.  */

static bool
surely_varying_stmt_p (gimple stmt)
{
  /* If the statement has operands that we cannot handle, it cannot be
     constant.  */
  if (gimple_has_volatile_ops (stmt))
    return true;

  /* If it is a call and does not return a value or is not a
     builtin and not an indirect call, it is varying.  */
  if (is_gimple_call (stmt))
    {
      tree fndecl;
      if (!gimple_call_lhs (stmt)
	  || ((fndecl = gimple_call_fndecl (stmt)) != NULL_TREE
	      && !DECL_BUILT_IN (fndecl)))
	return true;
    }

  /* Any other store operation is not interesting.  */
  else if (gimple_vdef (stmt))
    return true;

  /* Anything other than assignments and conditional jumps are not
     interesting for CCP.  */
  if (gimple_code (stmt) != GIMPLE_ASSIGN
      && gimple_code (stmt) != GIMPLE_COND
      && gimple_code (stmt) != GIMPLE_SWITCH
      && gimple_code (stmt) != GIMPLE_CALL)
    return true;

  return false;
}
/* Initialize local data structures for CCP.  */

static void
ccp_initialize (void)
{
  basic_block bb;

  const_val = XCNEWVEC (prop_value_t, num_ssa_names);

  /* Initialize simulation flags for PHI nodes and statements.  */
  FOR_EACH_BB (bb)
    {
      gimple_stmt_iterator i;

      for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i))
	{
	  gimple stmt = gsi_stmt (i);
	  bool is_varying;

	  /* If the statement is a control insn, then we do not
	     want to avoid simulating the statement once.  Failure
	     to do so means that those edges will never get added.  */
	  if (stmt_ends_bb_p (stmt))
	    is_varying = false;
	  else
	    is_varying = surely_varying_stmt_p (stmt);

	  if (is_varying)
	    {
	      tree def;
	      ssa_op_iter iter;

	      /* If the statement will not produce a constant, mark
		 all its outputs VARYING.  */
	      FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
		set_value_varying (def);
	    }
	  prop_set_simulate_again (stmt, !is_varying);
	}
    }

  /* Now process PHI nodes.  We never clear the simulate_again flag on
     phi nodes, since we do not know which edges are executable yet,
     except for phi nodes for virtual operands when we do not do store ccp.  */
  FOR_EACH_BB (bb)
    {
      gimple_stmt_iterator i;

      for (i = gsi_start_phis (bb); !gsi_end_p (i); gsi_next (&i))
	{
	  gimple phi = gsi_stmt (i);

	  if (!is_gimple_reg (gimple_phi_result (phi)))
	    prop_set_simulate_again (phi, false);
	  else
	    prop_set_simulate_again (phi, true);
	}
    }
}
/* Debug count support.  Reset the values of ssa names
   VARYING when the total number ssa names analyzed is
   beyond the debug count specified.  */

static void
do_dbg_cnt (void)
{
  unsigned i;
  for (i = 0; i < num_ssa_names; i++)
    {
      if (!dbg_cnt (ccp))
	{
	  const_val[i].lattice_val = VARYING;
	  const_val[i].mask = double_int_minus_one;
	  const_val[i].value = NULL_TREE;
	}
    }
}
/* Do final substitution of propagated values, cleanup the flowgraph and
   free allocated storage.

   Return TRUE when something was optimized.  */

static bool
ccp_finalize (void)
{
  bool something_changed;
  unsigned i;

  do_dbg_cnt ();

  /* Derive alignment and misalignment information from partially
     constant pointers in the lattice.  */
  for (i = 1; i < num_ssa_names; ++i)
    {
      tree name = ssa_name (i);
      prop_value_t *val;
      struct ptr_info_def *pi;
      unsigned int tem, align;

      if (!name
	  || !POINTER_TYPE_P (TREE_TYPE (name)))
	continue;

      val = get_value (name);
      if (val->lattice_val != CONSTANT
	  || TREE_CODE (val->value) != INTEGER_CST)
	continue;

      /* Trailing constant bits specify the alignment, trailing value
	 bits the misalignment.  */
      tem = val->mask.low;
      align = (tem & -tem);
      if (align > 1)
	{
	  pi = get_ptr_info (name);
	  pi->align = align;
	  pi->misalign = TREE_INT_CST_LOW (val->value) & (align - 1);
	}
    }

  /* Perform substitutions based on the known constant values.  */
  something_changed = substitute_and_fold (get_constant_value,
					   ccp_fold_stmt, true);

  free (const_val);
  const_val = NULL;
  return something_changed;
}
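
/* A worked example of the alignment derivation above (added note):
   for a pointer whose lattice value has mask == ~7 and value == 4,
   TEM has the low three bits clear, so ALIGN == (tem & -tem) == 8 and
   MISALIGN == 4 & 7 == 4; the pointer is known to equal 4 modulo 8.  */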
/* Compute the meet operator between *VAL1 and *VAL2.  Store the result
   in VAL1.

		any  M UNDEFINED   = any
		any  M VARYING     = VARYING
		Ci   M Cj	   = Ci		if (i == j)
		Ci   M Cj	   = VARYING	if (i != j)
   */

static void
ccp_lattice_meet (prop_value_t *val1, prop_value_t *val2)
{
  if (val1->lattice_val == UNDEFINED)
    {
      /* UNDEFINED M any = any   */
      *val1 = *val2;
    }
  else if (val2->lattice_val == UNDEFINED)
    {
      /* any M UNDEFINED = any
         Nothing to do.  VAL1 already contains the value we want.  */
      ;
    }
  else if (val1->lattice_val == VARYING
	   || val2->lattice_val == VARYING)
    {
      /* any M VARYING = VARYING.  */
      val1->lattice_val = VARYING;
      val1->mask = double_int_minus_one;
      val1->value = NULL_TREE;
    }
  else if (val1->lattice_val == CONSTANT
	   && val2->lattice_val == CONSTANT
	   && TREE_CODE (val1->value) == INTEGER_CST
	   && TREE_CODE (val2->value) == INTEGER_CST)
    {
      /* Ci M Cj = Ci		if (i == j)
	 Ci M Cj = VARYING	if (i != j)

         For INTEGER_CSTs mask unequal bits.  If no equal bits remain,
	 drop to varying.  */
      val1->mask
	  = double_int_ior (double_int_ior (val1->mask,
					    val2->mask),
			    double_int_xor (tree_to_double_int (val1->value),
					    tree_to_double_int (val2->value)));
      if (double_int_minus_one_p (val1->mask))
	{
	  val1->lattice_val = VARYING;
	  val1->value = NULL_TREE;
	}
    }
  else if (val1->lattice_val == CONSTANT
	   && val2->lattice_val == CONSTANT
	   && simple_cst_equal (val1->value, val2->value) == 1)
    {
      /* Ci M Cj = Ci		if (i == j)
	 Ci M Cj = VARYING	if (i != j)

	 VAL1 already contains the value we want for equivalent values.  */
    }
  else if (val1->lattice_val == CONSTANT
	   && val2->lattice_val == CONSTANT
	   && (TREE_CODE (val1->value) == ADDR_EXPR
	       || TREE_CODE (val2->value) == ADDR_EXPR))
    {
      /* When not equal addresses are involved try meeting for
	 alignment.  */
      prop_value_t tem = *val2;
      if (TREE_CODE (val1->value) == ADDR_EXPR)
	*val1 = get_value_for_expr (val1->value, true);
      if (TREE_CODE (val2->value) == ADDR_EXPR)
	tem = get_value_for_expr (val2->value, true);
      ccp_lattice_meet (val1, &tem);
    }
  else
    {
      /* Any other combination is VARYING.  */
      val1->lattice_val = VARYING;
      val1->mask = double_int_minus_one;
      val1->value = NULL_TREE;
    }
}
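
/* A worked meet example (added, illustrative only): meeting CONSTANT 4
   (mask 0) with CONSTANT 6 (mask 0) ORs the XOR of the two values into
   the mask, yielding value 4 with mask 2.  That encodes "bit 1 is
   unknown, all other bits are those of 4", which covers exactly 4 and
   6.  Only when no known bits remain does the result degrade to
   VARYING.  */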
/* Loop through the PHI_NODE's parameters for BLOCK and compare their
   lattice values to determine PHI_NODE's lattice value.  The value of a
   PHI node is determined by calling ccp_lattice_meet with all the arguments
   of the PHI node that are incoming via executable edges.  */

static enum ssa_prop_result
ccp_visit_phi_node (gimple phi)
{
  unsigned i;
  prop_value_t *old_val, new_val;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nVisiting PHI node: ");
      print_gimple_stmt (dump_file, phi, 0, dump_flags);
    }

  old_val = get_value (gimple_phi_result (phi));
  switch (old_val->lattice_val)
    {
    case VARYING:
      return SSA_PROP_VARYING;

    case CONSTANT:
      break;

    case UNDEFINED:
      break;

    default:
      gcc_unreachable ();
    }

  new_val.lattice_val = UNDEFINED;
  new_val.value = NULL_TREE;

  for (i = 0; i < gimple_phi_num_args (phi); i++)
    {
      /* Compute the meet operator over all the PHI arguments flowing
	 through executable edges.  */
      edge e = gimple_phi_arg_edge (phi, i);

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file,
	      "\n    Argument #%d (%d -> %d %sexecutable)\n",
	      i, e->src->index, e->dest->index,
	      (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
	}

      /* If the incoming edge is executable, compute the meet operator for
	 the existing value of the PHI node and the current PHI argument.  */
      if (e->flags & EDGE_EXECUTABLE)
	{
	  tree arg = gimple_phi_arg (phi, i)->def;
	  prop_value_t arg_val = get_value_for_expr (arg, false);

	  ccp_lattice_meet (&new_val, &arg_val);

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "\t");
	      print_generic_expr (dump_file, arg, dump_flags);
	      dump_lattice_value (dump_file, "\tValue: ", arg_val);
	      fprintf (dump_file, "\n");
	    }

	  if (new_val.lattice_val == VARYING)
	    break;
	}
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      dump_lattice_value (dump_file, "\n    PHI node value: ", new_val);
      fprintf (dump_file, "\n\n");
    }

  /* Make the transition to the new value.  */
  if (set_lattice_value (gimple_phi_result (phi), new_val))
    {
      if (new_val.lattice_val == VARYING)
	return SSA_PROP_VARYING;
      else
	return SSA_PROP_INTERESTING;
    }
  else
    return SSA_PROP_NOT_INTERESTING;
}
/* Return the constant value for OP or OP otherwise.  */

static tree
valueize_op (tree op)
{
  if (TREE_CODE (op) == SSA_NAME)
    {
      tree tem = get_constant_value (op);
      if (tem)
	return tem;
    }
  return op;
}
/* CCP specific front-end to the non-destructive constant folding
   routines.

   Attempt to simplify the RHS of STMT knowing that one or more
   operands are constants.

   If simplification is possible, return the simplified RHS,
   otherwise return the original RHS or NULL_TREE.  */

static tree
ccp_fold (gimple stmt)
{
  location_t loc = gimple_location (stmt);
  switch (gimple_code (stmt))
    {
    case GIMPLE_COND:
      {
	/* Handle comparison operators that can appear in GIMPLE form.  */
	tree op0 = valueize_op (gimple_cond_lhs (stmt));
	tree op1 = valueize_op (gimple_cond_rhs (stmt));
	enum tree_code code = gimple_cond_code (stmt);
	return fold_binary_loc (loc, code, boolean_type_node, op0, op1);
      }

    case GIMPLE_SWITCH:
      {
	/* Return the constant switch index.  */
	return valueize_op (gimple_switch_index (stmt));
      }

    case GIMPLE_ASSIGN:
    case GIMPLE_CALL:
      return gimple_fold_stmt_to_constant_1 (stmt, valueize_op);

    default:
      gcc_unreachable ();
    }
}
/* Apply the operation CODE in type TYPE to the value, mask pair
   RVAL and RMASK representing a value of type RTYPE and set
   the value, mask pair *VAL and *MASK to the result.  */

static void
bit_value_unop_1 (enum tree_code code, tree type,
		  double_int *val, double_int *mask,
		  tree rtype, double_int rval, double_int rmask)
{
  switch (code)
    {
    case BIT_NOT_EXPR:
      *mask = rmask;
      *val = double_int_not (rval);
      break;

    case NEGATE_EXPR:
      {
	double_int temv, temm;
	/* Return ~rval + 1.  */
	bit_value_unop_1 (BIT_NOT_EXPR, type, &temv, &temm, type, rval, rmask);
	bit_value_binop_1 (PLUS_EXPR, type, val, mask,
			   type, temv, temm,
			   type, double_int_one, double_int_zero);
	break;
      }

    CASE_CONVERT:
      {
	bool uns;

	/* First extend mask and value according to the original type.  */
	uns = (TREE_CODE (rtype) == INTEGER_TYPE && TYPE_IS_SIZETYPE (rtype)
	       ? 0 : TYPE_UNSIGNED (rtype));
	*mask = double_int_ext (rmask, TYPE_PRECISION (rtype), uns);
	*val = double_int_ext (rval, TYPE_PRECISION (rtype), uns);

	/* Then extend mask and value according to the target type.  */
	uns = (TREE_CODE (type) == INTEGER_TYPE && TYPE_IS_SIZETYPE (type)
	       ? 0 : TYPE_UNSIGNED (type));
	*mask = double_int_ext (*mask, TYPE_PRECISION (type), uns);
	*val = double_int_ext (*val, TYPE_PRECISION (type), uns);
	break;
      }

    default:
      *mask = double_int_minus_one;
      break;
    }
}
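
/* An added example for the unary cases above: BIT_NOT_EXPR keeps the
   mask and complements the known bits, so value 4 with mask 3 (one of
   4..7) becomes value ~4 with mask 3 (one of ~7..~4).  NEGATE_EXPR is
   then derived from it as ~x + 1 using the binary PLUS_EXPR logic.  */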
/* Apply the operation CODE in type TYPE to the value, mask pairs
   R1VAL, R1MASK and R2VAL, R2MASK representing a values of type R1TYPE
   and R2TYPE and set the value, mask pair *VAL and *MASK to the result.  */

static void
bit_value_binop_1 (enum tree_code code, tree type,
		   double_int *val, double_int *mask,
		   tree r1type, double_int r1val, double_int r1mask,
		   tree r2type, double_int r2val, double_int r2mask)
{
  bool uns = (TREE_CODE (type) == INTEGER_TYPE
	      && TYPE_IS_SIZETYPE (type) ? 0 : TYPE_UNSIGNED (type));
  /* Assume we'll get a constant result.  Use an initial varying value,
     we fall back to varying in the end if necessary.  */
  *mask = double_int_minus_one;

  switch (code)
    {
    case BIT_AND_EXPR:
      /* The mask is constant where there is a known not
	 set bit, (m1 | m2) & ((v1 | m1) & (v2 | m2))  */
      *mask = double_int_and (double_int_ior (r1mask, r2mask),
			      double_int_and (double_int_ior (r1val, r1mask),
					      double_int_ior (r2val, r2mask)));
      *val = double_int_and (r1val, r2val);
      break;

    case BIT_IOR_EXPR:
      /* The mask is constant where there is a known
	 set bit, (m1 | m2) & ~((v1 & ~m1) | (v2 & ~m2)).  */
      *mask = double_int_and_not
		(double_int_ior (r1mask, r2mask),
		 double_int_ior (double_int_and_not (r1val, r1mask),
				 double_int_and_not (r2val, r2mask)));
      *val = double_int_ior (r1val, r2val);
      break;

    case BIT_XOR_EXPR:
      /* m1 | m2  */
      *mask = double_int_ior (r1mask, r2mask);
      *val = double_int_xor (r1val, r2val);
      break;

    case LROTATE_EXPR:
    case RROTATE_EXPR:
      if (double_int_zero_p (r2mask))
	{
	  HOST_WIDE_INT shift = r2val.low;
	  if (code == RROTATE_EXPR)
	    shift = -shift;
	  *mask = double_int_lrotate (r1mask, shift, TYPE_PRECISION (type));
	  *val = double_int_lrotate (r1val, shift, TYPE_PRECISION (type));
	}
      break;

    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
      /* ??? We can handle partially known shift counts if we know
	 its sign.  That way we can tell that (x << (y | 8)) & 255
	 is zero.  */
      if (double_int_zero_p (r2mask))
	{
	  HOST_WIDE_INT shift = r2val.low;
	  if (code == RSHIFT_EXPR)
	    shift = -shift;
	  /* We need to know if we are doing a left or a right shift
	     to properly shift in zeros for left shift and unsigned
	     right shifts and the sign bit for signed right shifts.
	     For signed right shifts we shift in varying in case
	     the sign bit was varying.  */
	  if (shift > 0)
	    {
	      *mask = double_int_lshift (r1mask, shift,
					 TYPE_PRECISION (type), false);
	      *val = double_int_lshift (r1val, shift,
					TYPE_PRECISION (type), false);
	    }
	  else if (shift < 0)
	    {
	      /* ??? We can have sizetype related inconsistencies in
		 the IL.  */
	      if ((TREE_CODE (r1type) == INTEGER_TYPE
		   && (TYPE_IS_SIZETYPE (r1type)
		       ? 0 : TYPE_UNSIGNED (r1type))) != uns)
		break;

	      shift = -shift;
	      *mask = double_int_rshift (r1mask, shift,
					 TYPE_PRECISION (type), !uns);
	      *val = double_int_rshift (r1val, shift,
					TYPE_PRECISION (type), !uns);
	    }
	  else
	    {
	      *mask = r1mask;
	      *val = r1val;
	    }
	}
      break;

    case PLUS_EXPR:
    case POINTER_PLUS_EXPR:
      {
	double_int lo, hi;
	/* Do the addition with unknown bits set to zero, to give carry-ins of
	   zero wherever possible.  */
	lo = double_int_add (double_int_and_not (r1val, r1mask),
			     double_int_and_not (r2val, r2mask));
	lo = double_int_ext (lo, TYPE_PRECISION (type), uns);
	/* Do the addition with unknown bits set to one, to give carry-ins of
	   one wherever possible.  */
	hi = double_int_add (double_int_ior (r1val, r1mask),
			     double_int_ior (r2val, r2mask));
	hi = double_int_ext (hi, TYPE_PRECISION (type), uns);
	/* Each bit in the result is known if (a) the corresponding bits in
	   both inputs are known, and (b) the carry-in to that bit position
	   is known.  We can check condition (b) by seeing if we got the same
	   result with minimised carries as with maximised carries.  */
	*mask = double_int_ior (double_int_ior (r1mask, r2mask),
				double_int_xor (lo, hi));
	*mask = double_int_ext (*mask, TYPE_PRECISION (type), uns);
	/* It shouldn't matter whether we choose lo or hi here.  */
	*val = lo;
	break;
      }

    case MINUS_EXPR:
      {
	double_int temv, temm;
	bit_value_unop_1 (NEGATE_EXPR, r2type, &temv, &temm,
			  r2type, r2val, r2mask);
	bit_value_binop_1 (PLUS_EXPR, type, val, mask,
			   r1type, r1val, r1mask,
			   r2type, temv, temm);
	break;
      }

    case MULT_EXPR:
      {
	/* Just track trailing zeros in both operands and transfer
	   them to the other.  */
	int r1tz = double_int_ctz (double_int_ior (r1val, r1mask));
	int r2tz = double_int_ctz (double_int_ior (r2val, r2mask));
	if (r1tz + r2tz >= HOST_BITS_PER_DOUBLE_INT)
	  {
	    *mask = double_int_zero;
	    *val = double_int_zero;
	  }
	else if (r1tz + r2tz > 0)
	  {
	    *mask = double_int_not (double_int_mask (r1tz + r2tz));
	    *mask = double_int_ext (*mask, TYPE_PRECISION (type), uns);
	    *val = double_int_zero;
	  }
	break;
      }

    case EQ_EXPR:
    case NE_EXPR:
      {
	double_int m = double_int_ior (r1mask, r2mask);
	if (!double_int_equal_p (double_int_and_not (r1val, m),
				 double_int_and_not (r2val, m)))
	  {
	    *mask = double_int_zero;
	    *val = ((code == EQ_EXPR) ? double_int_zero : double_int_one);
	  }
	else
	  {
	    /* We know the result of a comparison is always one or zero.  */
	    *mask = double_int_one;
	    *val = double_int_zero;
	  }
	break;
      }

    case GE_EXPR:
    case GT_EXPR:
      {
	double_int tem = r1val;
	r1val = r2val;
	r2val = tem;
	tem = r1mask;
	r1mask = r2mask;
	r2mask = tem;
	code = swap_tree_comparison (code);
      }
      /* Fallthru.  */
    case LT_EXPR:
    case LE_EXPR:
      {
	int minmax, maxmin;
	/* If the most significant bits are not known we know nothing.  */
	if (double_int_negative_p (r1mask) || double_int_negative_p (r2mask))
	  break;

	/* For comparisons the signedness is in the comparison operands.  */
	uns = (TREE_CODE (r1type) == INTEGER_TYPE
	       && TYPE_IS_SIZETYPE (r1type) ? 0 : TYPE_UNSIGNED (r1type));
	/* ??? We can have sizetype related inconsistencies in the IL.  */
	if ((TREE_CODE (r2type) == INTEGER_TYPE
	     && TYPE_IS_SIZETYPE (r2type) ? 0 : TYPE_UNSIGNED (r2type)) != uns)
	  break;

	/* If we know the most significant bits we know the values
	   value ranges by means of treating varying bits as zero
	   or one.  Do a cross comparison of the max/min pairs.  */
	maxmin = double_int_cmp (double_int_ior (r1val, r1mask),
				 double_int_and_not (r2val, r2mask), uns);
	minmax = double_int_cmp (double_int_and_not (r1val, r1mask),
				 double_int_ior (r2val, r2mask), uns);
	if (maxmin < 0)  /* r1 is less than r2.  */
	  {
	    *mask = double_int_zero;
	    *val = double_int_one;
	  }
	else if (minmax > 0)  /* r1 is not less or equal to r2.  */
	  {
	    *mask = double_int_zero;
	    *val = double_int_zero;
	  }
	else if (maxmin == minmax)  /* r1 and r2 are equal.  */
	  {
	    /* This probably should never happen as we'd have
	       folded the thing during fully constant value folding.  */
	    *mask = double_int_zero;
	    *val = (code == LE_EXPR ? double_int_one : double_int_zero);
	  }
	else
	  {
	    /* We know the result of a comparison is always one or zero.  */
	    *mask = double_int_one;
	    *val = double_int_zero;
	  }
	break;
      }

    default:;
    }
}
/* Return the propagation value when applying the operation CODE to
   the value RHS yielding type TYPE.  */

static prop_value_t
bit_value_unop (enum tree_code code, tree type, tree rhs)
{
  prop_value_t rval = get_value_for_expr (rhs, true);
  double_int value, mask;
  prop_value_t val;

  if (rval.lattice_val == UNDEFINED)
    return rval;

  gcc_assert ((rval.lattice_val == CONSTANT
	       && TREE_CODE (rval.value) == INTEGER_CST)
	      || double_int_minus_one_p (rval.mask));
  bit_value_unop_1 (code, type, &value, &mask,
		    TREE_TYPE (rhs), value_to_double_int (rval), rval.mask);
  if (!double_int_minus_one_p (mask))
    {
      val.lattice_val = CONSTANT;
      val.mask = mask;
      /* ??? Delay building trees here.  */
      val.value = double_int_to_tree (type, value);
    }
  else
    {
      val.lattice_val = VARYING;
      val.value = NULL_TREE;
      val.mask = double_int_minus_one;
    }
  return val;
}
/* Return the propagation value when applying the operation CODE to
   the values RHS1 and RHS2 yielding type TYPE.  */

static prop_value_t
bit_value_binop (enum tree_code code, tree type, tree rhs1, tree rhs2)
{
  prop_value_t r1val = get_value_for_expr (rhs1, true);
  prop_value_t r2val = get_value_for_expr (rhs2, true);
  double_int value, mask;
  prop_value_t val;

  if (r1val.lattice_val == UNDEFINED
      || r2val.lattice_val == UNDEFINED)
    {
      val.lattice_val = VARYING;
      val.value = NULL_TREE;
      val.mask = double_int_minus_one;
      return val;
    }

  gcc_assert ((r1val.lattice_val == CONSTANT
	       && TREE_CODE (r1val.value) == INTEGER_CST)
	      || double_int_minus_one_p (r1val.mask));
  gcc_assert ((r2val.lattice_val == CONSTANT
	       && TREE_CODE (r2val.value) == INTEGER_CST)
	      || double_int_minus_one_p (r2val.mask));
  bit_value_binop_1 (code, type, &value, &mask,
		     TREE_TYPE (rhs1), value_to_double_int (r1val), r1val.mask,
		     TREE_TYPE (rhs2), value_to_double_int (r2val), r2val.mask);
  if (!double_int_minus_one_p (mask))
    {
      val.lattice_val = CONSTANT;
      val.mask = mask;
      /* ??? Delay building trees here.  */
      val.value = double_int_to_tree (type, value);
    }
  else
    {
      val.lattice_val = VARYING;
      val.value = NULL_TREE;
      val.mask = double_int_minus_one;
    }

  return val;
}
/* Return the propagation value when applying __builtin_assume_aligned to
   its arguments.  */

static prop_value_t
bit_value_assume_aligned (gimple stmt)
{
  tree ptr = gimple_call_arg (stmt, 0), align, misalign = NULL_TREE;
  tree type = TREE_TYPE (ptr);
  unsigned HOST_WIDE_INT aligni, misaligni = 0;
  prop_value_t ptrval = get_value_for_expr (ptr, true);
  prop_value_t alignval;
  double_int value, mask;
  prop_value_t val;

  if (ptrval.lattice_val == UNDEFINED)
    return ptrval;
  gcc_assert ((ptrval.lattice_val == CONSTANT
	       && TREE_CODE (ptrval.value) == INTEGER_CST)
	      || double_int_minus_one_p (ptrval.mask));
  align = gimple_call_arg (stmt, 1);
  if (!host_integerp (align, 1))
    return ptrval;
  aligni = tree_low_cst (align, 1);
  if (aligni <= 1
      || (aligni & (aligni - 1)) != 0)
    return ptrval;
  if (gimple_call_num_args (stmt) > 2)
    {
      misalign = gimple_call_arg (stmt, 2);
      if (!host_integerp (misalign, 1))
	return ptrval;
      misaligni = tree_low_cst (misalign, 1);
      if (misaligni >= aligni)
	return ptrval;
    }
  align = build_int_cst_type (type, -aligni);
  alignval = get_value_for_expr (align, true);
  bit_value_binop_1 (BIT_AND_EXPR, type, &value, &mask,
		     type, value_to_double_int (ptrval), ptrval.mask,
		     type, value_to_double_int (alignval), alignval.mask);
  if (!double_int_minus_one_p (mask))
    {
      val.lattice_val = CONSTANT;
      val.mask = mask;
      gcc_assert ((mask.low & (aligni - 1)) == 0);
      gcc_assert ((value.low & (aligni - 1)) == 0);
      value.low |= misaligni;
      /* ??? Delay building trees here.  */
      val.value = double_int_to_tree (type, value);
    }
  else
    {
      val.lattice_val = VARYING;
      val.value = NULL_TREE;
      val.mask = double_int_minus_one;
    }
  return val;
}
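
/* An illustrative use of the above (the source line is assumed, not
   taken from GCC's own sources):

     q_2 = __builtin_assume_aligned (p_1, 16, 4);

   makes ALIGN the constant -16; ANDing p_1's lattice value with it
   forces the low four bits of both value and mask to zero, and the
   misalignment 4 is then ORed back into the value.  The result says
   q_2 is equal to 4 modulo 16.  */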
/* Evaluate statement STMT.
   Valid only for assignments, calls, conditionals, and switches. */

static prop_value_t
evaluate_stmt (gimple stmt)
{
  prop_value_t val;
  tree simplified = NULL_TREE;
  ccp_lattice_t likelyvalue = likely_value (stmt);
  bool is_constant = false;
  unsigned int align;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "which is likely ");
      switch (likelyvalue)
	{
	case CONSTANT:
	  fprintf (dump_file, "CONSTANT");
	  break;
	case UNDEFINED:
	  fprintf (dump_file, "UNDEFINED");
	  break;
	case VARYING:
	  fprintf (dump_file, "VARYING");
	  break;
	default:;
	}
      fprintf (dump_file, "\n");
    }

  /* If the statement is likely to have a CONSTANT result, then try
     to fold the statement to determine the constant value.  */
  /* FIXME.  This is the only place that we call ccp_fold.
     Since likely_value never returns CONSTANT for calls, we will
     not attempt to fold them, including builtins that may profit.  */
  if (likelyvalue == CONSTANT)
    {
      fold_defer_overflow_warnings ();
      simplified = ccp_fold (stmt);
      is_constant = simplified && is_gimple_min_invariant (simplified);
      fold_undefer_overflow_warnings (is_constant, stmt, 0);
      if (is_constant)
	{
	  /* The statement produced a constant value.  */
	  val.lattice_val = CONSTANT;
	  val.value = simplified;
	  val.mask = double_int_zero;
	}
    }
  /* If the statement is likely to have a VARYING result, then do not
     bother folding the statement.  */
  else if (likelyvalue == VARYING)
    {
      enum gimple_code code = gimple_code (stmt);
      if (code == GIMPLE_ASSIGN)
	{
	  enum tree_code subcode = gimple_assign_rhs_code (stmt);

	  /* Other cases cannot satisfy is_gimple_min_invariant
	     without folding.  */
	  if (get_gimple_rhs_class (subcode) == GIMPLE_SINGLE_RHS)
	    simplified = gimple_assign_rhs1 (stmt);
	}
      else if (code == GIMPLE_SWITCH)
	simplified = gimple_switch_index (stmt);
      else
	/* These cannot satisfy is_gimple_min_invariant without folding.  */
	gcc_assert (code == GIMPLE_CALL || code == GIMPLE_COND);
      is_constant = simplified && is_gimple_min_invariant (simplified);
      if (is_constant)
	{
	  /* The statement produced a constant value.  */
	  val.lattice_val = CONSTANT;
	  val.value = simplified;
	  val.mask = double_int_zero;
	}
    }

  /* Resort to simplification for bitwise tracking.  */
  if (flag_tree_bit_ccp
      && (likelyvalue == CONSTANT || is_gimple_call (stmt))
      && !is_constant)
    {
      enum gimple_code code = gimple_code (stmt);
      tree fndecl;
      val.lattice_val = VARYING;
      val.value = NULL_TREE;
      val.mask = double_int_minus_one;
      if (code == GIMPLE_ASSIGN)
	{
	  enum tree_code subcode = gimple_assign_rhs_code (stmt);
	  tree rhs1 = gimple_assign_rhs1 (stmt);
	  switch (get_gimple_rhs_class (subcode))
	    {
	    case GIMPLE_SINGLE_RHS:
	      if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
		  || POINTER_TYPE_P (TREE_TYPE (rhs1)))
		val = get_value_for_expr (rhs1, true);
	      break;

	    case GIMPLE_UNARY_RHS:
	      if ((INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
		   || POINTER_TYPE_P (TREE_TYPE (rhs1)))
		  && (INTEGRAL_TYPE_P (gimple_expr_type (stmt))
		      || POINTER_TYPE_P (gimple_expr_type (stmt))))
		val = bit_value_unop (subcode, gimple_expr_type (stmt), rhs1);
	      break;

	    case GIMPLE_BINARY_RHS:
	      if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
		  || POINTER_TYPE_P (TREE_TYPE (rhs1)))
		{
		  tree lhs = gimple_assign_lhs (stmt);
		  tree rhs2 = gimple_assign_rhs2 (stmt);
		  val = bit_value_binop (subcode,
					 TREE_TYPE (lhs), rhs1, rhs2);
		}
	      break;

	    default:;
	    }
	}
      else if (code == GIMPLE_COND)
	{
	  enum tree_code code = gimple_cond_code (stmt);
	  tree rhs1 = gimple_cond_lhs (stmt);
	  tree rhs2 = gimple_cond_rhs (stmt);
	  if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
	      || POINTER_TYPE_P (TREE_TYPE (rhs1)))
	    val = bit_value_binop (code, TREE_TYPE (rhs1), rhs1, rhs2);
	}
      else if (code == GIMPLE_CALL
	       && (fndecl = gimple_call_fndecl (stmt))
	       && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
	{
	  switch (DECL_FUNCTION_CODE (fndecl))
	    {
	    case BUILT_IN_MALLOC:
	    case BUILT_IN_REALLOC:
	    case BUILT_IN_CALLOC:
	    case BUILT_IN_STRDUP:
	    case BUILT_IN_STRNDUP:
	      val.lattice_val = CONSTANT;
	      val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0);
	      val.mask = shwi_to_double_int
			   (~(((HOST_WIDE_INT) MALLOC_ABI_ALIGNMENT)
			      / BITS_PER_UNIT - 1));
	      break;

	    case BUILT_IN_ALLOCA:
	    case BUILT_IN_ALLOCA_WITH_ALIGN:
	      align = (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_ALLOCA_WITH_ALIGN
		       ? TREE_INT_CST_LOW (gimple_call_arg (stmt, 1))
		       : BIGGEST_ALIGNMENT);
	      val.lattice_val = CONSTANT;
	      val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0);
	      val.mask = shwi_to_double_int
			   (~(((HOST_WIDE_INT) align)
			      / BITS_PER_UNIT - 1));
	      break;

	    /* These builtins return their first argument, unmodified.  */
	    case BUILT_IN_MEMCPY:
	    case BUILT_IN_MEMMOVE:
	    case BUILT_IN_MEMSET:
	    case BUILT_IN_STRCPY:
	    case BUILT_IN_STRNCPY:
	    case BUILT_IN_MEMCPY_CHK:
	    case BUILT_IN_MEMMOVE_CHK:
	    case BUILT_IN_MEMSET_CHK:
	    case BUILT_IN_STRCPY_CHK:
	    case BUILT_IN_STRNCPY_CHK:
	      val = get_value_for_expr (gimple_call_arg (stmt, 0), true);
	      break;

	    case BUILT_IN_ASSUME_ALIGNED:
	      val = bit_value_assume_aligned (stmt);
	      break;

	    default:;
	    }
	}
      is_constant = (val.lattice_val == CONSTANT);
    }

  if (!is_constant)
    {
      /* The statement produced a nonconstant value.  If the statement
	 had UNDEFINED operands, then the result of the statement
	 should be UNDEFINED.  Otherwise, the statement is VARYING.  */
      if (likelyvalue == UNDEFINED)
	{
	  val.lattice_val = likelyvalue;
	  val.mask = double_int_zero;
	}
      else
	{
	  val.lattice_val = VARYING;
	  val.mask = double_int_minus_one;
	}

      val.value = NULL_TREE;
    }

  return val;
}
/* Given a BUILT_IN_STACK_SAVE value SAVED_VAL, insert a clobber of VAR before
   each matching BUILT_IN_STACK_RESTORE.  Mark visited phis in VISITED.  */

static void
insert_clobber_before_stack_restore (tree saved_val, tree var, htab_t *visited)
{
  gimple stmt, clobber_stmt;
  tree clobber;
  imm_use_iterator iter;
  gimple_stmt_iterator i;
  gimple *slot;

  FOR_EACH_IMM_USE_STMT (stmt, iter, saved_val)
    if (gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
      {
	clobber = build_constructor (TREE_TYPE (var), NULL);
	TREE_THIS_VOLATILE (clobber) = 1;
	clobber_stmt = gimple_build_assign (var, clobber);

	i = gsi_for_stmt (stmt);
	gsi_insert_before (&i, clobber_stmt, GSI_SAME_STMT);
      }
    else if (gimple_code (stmt) == GIMPLE_PHI)
      {
	if (*visited == NULL)
	  *visited = htab_create (10, htab_hash_pointer, htab_eq_pointer, NULL);

	slot = (gimple *)htab_find_slot (*visited, stmt, INSERT);
	if (*slot != NULL)
	  continue;

	*slot = stmt;
	insert_clobber_before_stack_restore (gimple_phi_result (stmt), var,
					     visited);
      }
    else
      gcc_assert (is_gimple_debug (stmt));
}
/* Advance the iterator to the previous non-debug gimple statement in the same
   or dominating basic block.  */

static inline void
gsi_prev_dom_bb_nondebug (gimple_stmt_iterator *i)
{
  basic_block dom;

  gsi_prev_nondebug (i);
  while (gsi_end_p (*i))
    {
      dom = get_immediate_dominator (CDI_DOMINATORS, i->bb);
      if (dom == NULL || dom == ENTRY_BLOCK_PTR)
	return;

      *i = gsi_last_bb (dom);
    }
}
/* Find a BUILT_IN_STACK_SAVE dominating gsi_stmt (I), and insert
   a clobber of VAR before each matching BUILT_IN_STACK_RESTORE.  */

static void
insert_clobbers_for_var (gimple_stmt_iterator i, tree var)
{
  bool save_found;
  gimple stmt;
  tree saved_val;
  htab_t visited = NULL;

  for (save_found = false; !gsi_end_p (i); gsi_prev_dom_bb_nondebug (&i))
    {
      stmt = gsi_stmt (i);

      if (!gimple_call_builtin_p (stmt, BUILT_IN_STACK_SAVE))
	continue;
      save_found = true;

      saved_val = gimple_call_lhs (stmt);
      if (saved_val == NULL_TREE)
	continue;

      insert_clobber_before_stack_restore (saved_val, var, &visited);
      break;
    }

  if (visited != NULL)
    htab_delete (visited);
  gcc_assert (save_found);
}
/* Detects a __builtin_alloca_with_align with constant size argument.  Declares
   fixed-size array and returns the address, if found, otherwise returns
   NULL_TREE.  */

static tree
fold_builtin_alloca_with_align (gimple stmt)
{
  unsigned HOST_WIDE_INT size, threshold, n_elem;
  tree lhs, arg, block, var, elem_type, array_type;

  /* Get lhs.  */
  lhs = gimple_call_lhs (stmt);
  if (lhs == NULL_TREE)
    return NULL_TREE;

  /* Detect constant argument.  */
  arg = get_constant_value (gimple_call_arg (stmt, 0));
  if (arg == NULL_TREE
      || TREE_CODE (arg) != INTEGER_CST
      || !host_integerp (arg, 1))
    return NULL_TREE;

  size = TREE_INT_CST_LOW (arg);

  /* Heuristic: don't fold large allocas.  */
  threshold = (unsigned HOST_WIDE_INT)PARAM_VALUE (PARAM_LARGE_STACK_FRAME);
  /* In case the alloca is located at function entry, it has the same lifetime
     as a declared array, so we allow a larger size.  */
  block = gimple_block (stmt);
  if (!(cfun->after_inlining
	&& TREE_CODE (BLOCK_SUPERCONTEXT (block)) == FUNCTION_DECL))
    threshold /= 10;
  if (size > threshold)
    return NULL_TREE;

  /* Declare array.  */
  elem_type = build_nonstandard_integer_type (BITS_PER_UNIT, 1);
  n_elem = size * 8 / BITS_PER_UNIT;
  array_type = build_array_type_nelts (elem_type, n_elem);
  var = create_tmp_var (array_type, NULL);
  DECL_ALIGN (var) = TREE_INT_CST_LOW (gimple_call_arg (stmt, 1));
  {
    struct ptr_info_def *pi = SSA_NAME_PTR_INFO (lhs);
    if (pi != NULL && !pi->pt.anything)
      {
	bool singleton_p;
	unsigned uid;
	singleton_p = pt_solution_singleton_p (&pi->pt, &uid);
	gcc_assert (singleton_p);
	SET_DECL_PT_UID (var, uid);
      }
  }

  /* Fold alloca to the address of the array.  */
  return fold_convert (TREE_TYPE (lhs), build_fold_addr_expr (var));
}
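
/* An added sketch of the transformation (the names are hypothetical):

     p_1 = __builtin_alloca_with_align (16, 64);

   with a constant size below the threshold becomes the address of a
   fresh local array, roughly

     char D.1234[16];   -- DECL_ALIGN set from the second argument
     p_1 = (void *) &D.1234;

   after which insert_clobbers_for_var inserts clobbers of D.1234
   before each matching __builtin_stack_restore.  */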
/* Fold the stmt at *GSI with CCP specific information that propagating
   and regular folding does not catch.  */

static bool
ccp_fold_stmt (gimple_stmt_iterator *gsi)
{
  gimple stmt = gsi_stmt (*gsi);

  switch (gimple_code (stmt))
    {
    case GIMPLE_COND:
      {
	prop_value_t val;
	/* Statement evaluation will handle type mismatches in constants
	   more gracefully than the final propagation.  This allows us to
	   fold more conditionals here.  */
	val = evaluate_stmt (stmt);
	if (val.lattice_val != CONSTANT
	    || !double_int_zero_p (val.mask))
	  return false;

	if (dump_file)
	  {
	    fprintf (dump_file, "Folding predicate ");
	    print_gimple_expr (dump_file, stmt, 0, 0);
	    fprintf (dump_file, " to ");
	    print_generic_expr (dump_file, val.value, 0);
	    fprintf (dump_file, "\n");
	  }

	if (integer_zerop (val.value))
	  gimple_cond_make_false (stmt);
	else
	  gimple_cond_make_true (stmt);

	return true;
      }

    case GIMPLE_CALL:
      {
	tree lhs = gimple_call_lhs (stmt);
	int flags = gimple_call_flags (stmt);
	tree val;
	tree argt;
	bool changed = false;
	unsigned i;

	/* If the call was folded into a constant make sure it goes
	   away even if we cannot propagate into all uses because of
	   type issues.  */
	if (lhs
	    && TREE_CODE (lhs) == SSA_NAME
	    && (val = get_constant_value (lhs))
	    /* Don't optimize away calls that have side-effects.  */
	    && (flags & (ECF_CONST|ECF_PURE)) != 0
	    && (flags & ECF_LOOPING_CONST_OR_PURE) == 0)
	  {
	    tree new_rhs = unshare_expr (val);
	    bool res;
	    if (!useless_type_conversion_p (TREE_TYPE (lhs),
					    TREE_TYPE (new_rhs)))
	      new_rhs = fold_convert (TREE_TYPE (lhs), new_rhs);
	    res = update_call_from_tree (gsi, new_rhs);
	    gcc_assert (res);
	    return true;
	  }

	/* Internal calls provide no argument types, so the extra laxity
	   for normal calls does not apply.  */
	if (gimple_call_internal_p (stmt))
	  return false;

	/* The heuristic of fold_builtin_alloca_with_align differs before and
	   after inlining, so we don't require the arg to be changed into a
	   constant for folding, but just to be constant.  */
	if (gimple_call_builtin_p (stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
	  {
	    tree new_rhs = fold_builtin_alloca_with_align (stmt);
	    if (new_rhs)
	      {
		bool res = update_call_from_tree (gsi, new_rhs);
		tree var = TREE_OPERAND (TREE_OPERAND (new_rhs, 0),0);
		gcc_assert (res);
		insert_clobbers_for_var (*gsi, var);
		return true;
	      }
	  }

	/* Propagate into the call arguments.  Compared to replace_uses_in
	   this can use the argument slot types for type verification
	   instead of the current argument type.  We also can safely
	   drop qualifiers here as we are dealing with constants anyway.  */
	argt = TYPE_ARG_TYPES (gimple_call_fntype (stmt));
	for (i = 0; i < gimple_call_num_args (stmt) && argt;
	     ++i, argt = TREE_CHAIN (argt))
	  {
	    tree arg = gimple_call_arg (stmt, i);
	    if (TREE_CODE (arg) == SSA_NAME
		&& (val = get_constant_value (arg))
		&& useless_type_conversion_p
		     (TYPE_MAIN_VARIANT (TREE_VALUE (argt)),
		      TYPE_MAIN_VARIANT (TREE_TYPE (val))))
	      {
		gimple_call_set_arg (stmt, i, unshare_expr (val));
		changed = true;
	      }
	  }

	return changed;
      }

    case GIMPLE_ASSIGN:
      {
	tree lhs = gimple_assign_lhs (stmt);
	tree val;

	/* If we have a load that turned out to be constant replace it
	   as we cannot propagate into all uses in all cases.  */
	if (gimple_assign_single_p (stmt)
	    && TREE_CODE (lhs) == SSA_NAME
	    && (val = get_constant_value (lhs)))
	  {
	    tree rhs = unshare_expr (val);
	    if (!useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (rhs)))
	      rhs = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (lhs), rhs);
	    gimple_assign_set_rhs_from_tree (gsi, rhs);
	    return true;
	  }

	return false;
      }

    default:
      return false;
    }
}
/* Visit the assignment statement STMT.  Set the value of its LHS to the
   value computed by the RHS and store LHS in *OUTPUT_P.  If STMT
   creates virtual definitions, set the value of each new name to that
   of the RHS (if we can derive a constant out of the RHS).
   Value-returning call statements also perform an assignment, and
   are handled here.  */

static enum ssa_prop_result
visit_assignment (gimple stmt, tree *output_p)
{
  prop_value_t val;
  enum ssa_prop_result retval;

  tree lhs = gimple_get_lhs (stmt);

  gcc_assert (gimple_code (stmt) != GIMPLE_CALL
	      || gimple_call_lhs (stmt) != NULL_TREE);

  if (gimple_assign_single_p (stmt)
      && gimple_assign_rhs_code (stmt) == SSA_NAME)
    /* For a simple copy operation, we copy the lattice values.  */
    val = *get_value (gimple_assign_rhs1 (stmt));
  else
    /* Evaluate the statement, which could be
       either a GIMPLE_ASSIGN or a GIMPLE_CALL.  */
    val = evaluate_stmt (stmt);

  retval = SSA_PROP_NOT_INTERESTING;

  /* Set the lattice value of the statement's output.  */
  if (TREE_CODE (lhs) == SSA_NAME)
    {
      /* If STMT is an assignment to an SSA_NAME, we only have one
	 value to set.  */
      if (set_lattice_value (lhs, val))
	{
	  *output_p = lhs;
	  if (val.lattice_val == VARYING)
	    retval = SSA_PROP_VARYING;
	  else
	    retval = SSA_PROP_INTERESTING;
	}
    }

  return retval;
}
/* Visit the conditional statement STMT.  Return SSA_PROP_INTERESTING
   if it can determine which edge will be taken.  Otherwise, return
   SSA_PROP_VARYING.  */

static enum ssa_prop_result
visit_cond_stmt (gimple stmt, edge *taken_edge_p)
{
  prop_value_t val;
  basic_block block;

  block = gimple_bb (stmt);
  val = evaluate_stmt (stmt);
  if (val.lattice_val != CONSTANT
      || !double_int_zero_p (val.mask))
    return SSA_PROP_VARYING;

  /* Find which edge out of the conditional block will be taken and add it
     to the worklist.  If no single edge can be determined statically,
     return SSA_PROP_VARYING to feed all the outgoing edges to the
     propagation engine.  */
  *taken_edge_p = find_taken_edge (block, val.value);
  if (*taken_edge_p)
    return SSA_PROP_INTERESTING;
  else
    return SSA_PROP_VARYING;
}
/* Evaluate statement STMT.  If the statement produces an output value and
   its evaluation changes the lattice value of its output, return
   SSA_PROP_INTERESTING and set *OUTPUT_P to the SSA_NAME holding the
   output value.

   If STMT is a conditional branch and we can determine its truth
   value, set *TAKEN_EDGE_P accordingly.  If STMT produces a varying
   value, return SSA_PROP_VARYING.  */

static enum ssa_prop_result
ccp_visit_stmt (gimple stmt, edge *taken_edge_p, tree *output_p)
{
  tree def;
  ssa_op_iter iter;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nVisiting statement:\n");
      print_gimple_stmt (dump_file, stmt, 0, dump_flags);
    }

  switch (gimple_code (stmt))
    {
      case GIMPLE_ASSIGN:
	/* If the statement is an assignment that produces a single
	   output value, evaluate its RHS to see if the lattice value of
	   its output has changed.  */
	return visit_assignment (stmt, output_p);

      case GIMPLE_CALL:
	/* A value-returning call also performs an assignment.  */
	if (gimple_call_lhs (stmt) != NULL_TREE)
	  return visit_assignment (stmt, output_p);
	break;

      case GIMPLE_COND:
      case GIMPLE_SWITCH:
	/* If STMT is a conditional branch, see if we can determine
	   which branch will be taken.  */
	/* FIXME.  It appears that we should be able to optimize
	   computed GOTOs here as well.  */
	return visit_cond_stmt (stmt, taken_edge_p);

      default:
	break;
    }

  /* Any other kind of statement is not interesting for constant
     propagation and, therefore, not worth simulating.  */
  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "No interesting values produced.  Marked VARYING.\n");

  /* Definitions made by statements other than assignments to
     SSA_NAMEs represent unknown modifications to their outputs.
     Mark them VARYING.  */
  FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
    {
      prop_value_t v = { VARYING, NULL_TREE, { -1, (HOST_WIDE_INT) -1 } };
      set_lattice_value (def, v);
    }

  return SSA_PROP_VARYING;
}
2138 unsigned int todo
= 0;
2139 calculate_dominance_info (CDI_DOMINATORS
);
2141 ssa_propagate (ccp_visit_stmt
, ccp_visit_phi_node
);
2142 if (ccp_finalize ())
2143 todo
= (TODO_cleanup_cfg
| TODO_update_ssa
| TODO_remove_unused_locals
);
2144 free_dominance_info (CDI_DOMINATORS
);
2152 return flag_tree_ccp
!= 0;
struct gimple_opt_pass pass_ccp =
{
 {
  GIMPLE_PASS,
  "ccp",				/* name */
  gate_ccp,				/* gate */
  do_ssa_ccp,				/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_TREE_CCP,				/* tv_id */
  PROP_cfg | PROP_ssa,			/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  TODO_verify_ssa
  | TODO_verify_stmts | TODO_ggc_collect/* todo_flags_finish */
 }
};
/* Try to optimize out __builtin_stack_restore.  Optimize it out
   if there is another __builtin_stack_restore in the same basic
   block and no calls or ASM_EXPRs are in between, or if this block's
   only outgoing edge is to EXIT_BLOCK and there are no calls or
   ASM_EXPRs after this __builtin_stack_restore.  */

static tree
optimize_stack_restore (gimple_stmt_iterator i)
{
  tree callee;
  gimple stmt;

  basic_block bb = gsi_bb (i);
  gimple call = gsi_stmt (i);

  if (gimple_code (call) != GIMPLE_CALL
      || gimple_call_num_args (call) != 1
      || TREE_CODE (gimple_call_arg (call, 0)) != SSA_NAME
      || !POINTER_TYPE_P (TREE_TYPE (gimple_call_arg (call, 0))))
    return NULL_TREE;

  for (gsi_next (&i); !gsi_end_p (i); gsi_next (&i))
    {
      stmt = gsi_stmt (i);
      if (gimple_code (stmt) == GIMPLE_ASM)
	return NULL_TREE;
      if (gimple_code (stmt) != GIMPLE_CALL)
	continue;

      callee = gimple_call_fndecl (stmt);
      if (!callee
	  || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL
	  /* All regular builtins are ok, just obviously not alloca.  */
	  || DECL_FUNCTION_CODE (callee) == BUILT_IN_ALLOCA
	  || DECL_FUNCTION_CODE (callee) == BUILT_IN_ALLOCA_WITH_ALIGN)
	return NULL_TREE;

      if (DECL_FUNCTION_CODE (callee) == BUILT_IN_STACK_RESTORE)
	goto second_stack_restore;
    }

  if (!gsi_end_p (i))
    return NULL_TREE;

  /* Allow one successor of the exit block, or zero successors.  */
  switch (EDGE_COUNT (bb->succs))
    {
    case 0:
      break;
    case 1:
      if (single_succ_edge (bb)->dest != EXIT_BLOCK_PTR)
	return NULL_TREE;
      break;
    default:
      return NULL_TREE;
    }

 second_stack_restore:

  /* If there's exactly one use, then zap the call to __builtin_stack_save.
     If there are multiple uses, then the last one should remove the call.
     In any case, whether the call to __builtin_stack_save can be removed
     or not is irrelevant to removing the call to __builtin_stack_restore.  */
  if (has_single_use (gimple_call_arg (call, 0)))
    {
      gimple stack_save = SSA_NAME_DEF_STMT (gimple_call_arg (call, 0));
      if (is_gimple_call (stack_save))
	{
	  callee = gimple_call_fndecl (stack_save);
	  if (callee
	      && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
	      && DECL_FUNCTION_CODE (callee) == BUILT_IN_STACK_SAVE)
	    {
	      gimple_stmt_iterator stack_save_gsi;
	      tree rhs;

	      stack_save_gsi = gsi_for_stmt (stack_save);
	      rhs = build_int_cst (TREE_TYPE (gimple_call_arg (call, 0)), 0);
	      update_call_from_tree (&stack_save_gsi, rhs);
	    }
	}
    }

  /* No effect, so the statement will be deleted.  */
  return integer_zero_node;
}
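
/* For example, with two variable-length arrays in nested scopes:

     save_1 = __builtin_stack_save ();
     ...				<- no calls or asms in between
     __builtin_stack_restore (save_1);
     __builtin_stack_restore (save_0);

   the first restore is subsumed by the second, so it is deleted; if
   save_1 had a single use, the matching __builtin_stack_save is
   replaced by a null pointer constant and becomes dead as well.  The
   SSA names save_0/save_1 are purely illustrative.  */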
/* If va_list type is a simple pointer and nothing special is needed,
   optimize __builtin_va_start (&ap, 0) into ap = __builtin_next_arg (0),
   __builtin_va_end (&ap) out as NOP and __builtin_va_copy into a simple
   pointer assignment.  */

static tree
optimize_stdarg_builtin (gimple call)
{
  tree callee, lhs, rhs, cfun_va_list;
  bool va_list_simple_ptr;
  location_t loc = gimple_location (call);

  if (gimple_code (call) != GIMPLE_CALL)
    return NULL_TREE;

  callee = gimple_call_fndecl (call);

  cfun_va_list = targetm.fn_abi_va_list (callee);
  va_list_simple_ptr = POINTER_TYPE_P (cfun_va_list)
		       && (TREE_TYPE (cfun_va_list) == void_type_node
			   || TREE_TYPE (cfun_va_list) == char_type_node);

  switch (DECL_FUNCTION_CODE (callee))
    {
    case BUILT_IN_VA_START:
      if (!va_list_simple_ptr
	  || targetm.expand_builtin_va_start != NULL
	  || !builtin_decl_explicit_p (BUILT_IN_NEXT_ARG))
	return NULL_TREE;

      if (gimple_call_num_args (call) != 2)
	return NULL_TREE;

      lhs = gimple_call_arg (call, 0);
      if (!POINTER_TYPE_P (TREE_TYPE (lhs))
	  || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs)))
	     != TYPE_MAIN_VARIANT (cfun_va_list))
	return NULL_TREE;

      lhs = build_fold_indirect_ref_loc (loc, lhs);
      rhs = build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_NEXT_ARG),
				 1, integer_zero_node);
      rhs = fold_convert_loc (loc, TREE_TYPE (lhs), rhs);
      return build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, rhs);

    case BUILT_IN_VA_COPY:
      if (!va_list_simple_ptr)
	return NULL_TREE;

      if (gimple_call_num_args (call) != 2)
	return NULL_TREE;

      lhs = gimple_call_arg (call, 0);
      if (!POINTER_TYPE_P (TREE_TYPE (lhs))
	  || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs)))
	     != TYPE_MAIN_VARIANT (cfun_va_list))
	return NULL_TREE;

      lhs = build_fold_indirect_ref_loc (loc, lhs);
      rhs = gimple_call_arg (call, 1);
      if (TYPE_MAIN_VARIANT (TREE_TYPE (rhs))
	  != TYPE_MAIN_VARIANT (cfun_va_list))
	return NULL_TREE;

      rhs = fold_convert_loc (loc, TREE_TYPE (lhs), rhs);
      return build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, rhs);

    case BUILT_IN_VA_END:
      /* No effect, so the statement will be deleted.  */
      return integer_zero_node;

    default:
      gcc_unreachable ();
    }
}
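
/* Schematically, on a target whose va_list is a plain character or void
   pointer:

     va_start (ap, n);    becomes   ap = __builtin_next_arg (0);
     va_copy (dst, src);  becomes   dst = src;
     va_end (ap);         is deleted.

   This is a source-level sketch of the MODIFY_EXPRs built above.  */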
/* A simple pass that attempts to fold all builtin functions.  This pass
   is run after we've propagated as many constants as we can.  */

static unsigned int
execute_fold_all_builtins (void)
{
  bool cfg_changed = false;
  basic_block bb;
  unsigned int todoflags = 0;

  FOR_EACH_BB (bb)
    {
      gimple_stmt_iterator i;
      for (i = gsi_start_bb (bb); !gsi_end_p (i); )
	{
	  gimple stmt, old_stmt;
	  tree callee, result;
	  enum built_in_function fcode;

	  stmt = gsi_stmt (i);

	  if (gimple_code (stmt) != GIMPLE_CALL)
	    {
	      gsi_next (&i);
	      continue;
	    }
	  callee = gimple_call_fndecl (stmt);
	  if (!callee || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL)
	    {
	      gsi_next (&i);
	      continue;
	    }
	  fcode = DECL_FUNCTION_CODE (callee);

	  result = gimple_fold_builtin (stmt);

	  if (result)
	    gimple_remove_stmt_histograms (cfun, stmt);

	  if (!result)
	    switch (DECL_FUNCTION_CODE (callee))
	      {
	      case BUILT_IN_CONSTANT_P:
		/* Resolve __builtin_constant_p.  If it hasn't been
		   folded to integer_one_node by now, it's fairly
		   certain that the value simply isn't constant.  */
		result = integer_zero_node;
		break;

	      case BUILT_IN_ASSUME_ALIGNED:
		/* Remove __builtin_assume_aligned.  */
		result = gimple_call_arg (stmt, 0);
		break;

	      case BUILT_IN_STACK_RESTORE:
		result = optimize_stack_restore (i);
		if (result)
		  break;
		gsi_next (&i);
		continue;

	      case BUILT_IN_VA_START:
	      case BUILT_IN_VA_END:
	      case BUILT_IN_VA_COPY:
		/* These shouldn't be folded before pass_stdarg.  */
		result = optimize_stdarg_builtin (stmt);
		if (result)
		  break;
		/* FALLTHRU */

	      default:
		gsi_next (&i);
		continue;
	      }

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Simplified\n  ");
	      print_gimple_stmt (dump_file, stmt, 0, dump_flags);
	    }

	  old_stmt = stmt;
	  if (!update_call_from_tree (&i, result))
	    {
	      gimplify_and_update_call_from_tree (&i, result);
	      todoflags |= TODO_update_address_taken;
	    }

	  stmt = gsi_stmt (i);
	  update_stmt (stmt);

	  if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt)
	      && gimple_purge_dead_eh_edges (bb))
	    cfg_changed = true;

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "to\n  ");
	      print_gimple_stmt (dump_file, stmt, 0, dump_flags);
	      fprintf (dump_file, "\n");
	    }

	  /* Retry the same statement if it changed into another
	     builtin, there might be new opportunities now.  */
	  if (gimple_code (stmt) != GIMPLE_CALL)
	    {
	      gsi_next (&i);
	      continue;
	    }
	  callee = gimple_call_fndecl (stmt);
	  if (!callee
	      || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL
	      || DECL_FUNCTION_CODE (callee) == fcode)
	    gsi_next (&i);
	}
    }

  /* Delete unreachable blocks.  */
  if (cfg_changed)
    todoflags |= TODO_cleanup_cfg;

  return todoflags;
}
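
/* As a sketch of the retry logic above: if gimple_fold_builtin rewrites
   a call to one builtin into a call to a different builtin (say,
   __builtin_stpcpy into __builtin_strcpy when the result is unused),
   the iterator is not advanced because the new function code differs
   from FCODE, so the replacement call is immediately reconsidered for
   further folding.  The stpcpy/strcpy pair is only an illustration.  */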
struct gimple_opt_pass pass_fold_builtins =
{
 {
  GIMPLE_PASS,
  "fab",				/* name */
  NULL,					/* gate */
  execute_fold_all_builtins,		/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_NONE,				/* tv_id */
  PROP_cfg | PROP_ssa,			/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  TODO_verify_ssa
    | TODO_update_ssa			/* todo_flags_finish */
 }
};