/* Conditional constant propagation pass for the GNU compiler.
   Copyright (C) 2000-2017 Free Software Foundation, Inc.
   Adapted from original RTL SSA-CCP by Daniel Berlin <dberlin@dberlin.org>
   Adapted to GIMPLE trees by Diego Novillo <dnovillo@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* Conditional constant propagation (CCP) is based on the SSA
   propagation engine (tree-ssa-propagate.c).  Constant assignments of
   the form VAR = CST are propagated from the assignments into uses of
   VAR, which in turn may generate new constants.  The simulation uses
   a four level lattice to keep track of constant values associated
   with SSA names.  Given an SSA name V_i, it may take one of the
   following values:

	UNINITIALIZED   ->  the initial state of the value.  This value
			    is replaced with a correct initial value
			    the first time the value is used, so the
			    rest of the pass does not need to care about
			    it.  Using this value simplifies initialization
			    of the pass, and prevents us from needlessly
			    scanning statements that are never reached.

	UNDEFINED	->  V_i is a local variable whose definition
			    has not been processed yet.  Therefore we
			    don't yet know if its value is a constant
			    or not.

	CONSTANT	->  V_i has been found to hold a constant
			    value C.

	VARYING		->  V_i cannot take a constant value, or if it
			    does, it is not possible to determine it
			    at compile time.

   The core of SSA-CCP is in ccp_visit_stmt and ccp_visit_phi_node:

   1- In ccp_visit_stmt, we are interested in assignments whose RHS
      evaluates into a constant and conditional jumps whose predicate
      evaluates into a boolean true or false.  When an assignment of
      the form V_i = CONST is found, V_i's lattice value is set to
      CONSTANT and CONST is associated with it.  This causes the
      propagation engine to add all the SSA edges coming out the
      assignment into the worklists, so that statements that use V_i
      can be visited too.

      If the statement is a conditional with a constant predicate, we
      mark the outgoing edges as executable or not executable
      depending on the predicate's value.  This is then used when
      visiting PHI nodes to know when a PHI argument can be ignored.

   2- In ccp_visit_phi_node, if all the PHI arguments evaluate to the
      same constant C, then the LHS of the PHI is set to C.  This
      evaluation is known as the "meet operation".  Since one of the
      goals of this evaluation is to optimistically return constant
      values as often as possible, it uses two main short cuts:

      - If an argument is flowing in through a non-executable edge, it
	is ignored.  This is useful in cases like this:

			if (PRED)
			  a_9 = 3;
			else
			  a_10 = 100;
			a_11 = PHI (a_9, a_10)

	If PRED is known to always evaluate to false, then we can
	assume that a_11 will always take its value from a_10, meaning
	that instead of considering it VARYING (a_9 and a_10 have
	different values), we can consider it CONSTANT 100.

      - If an argument has an UNDEFINED value, then it does not affect
	the outcome of the meet operation.  If a variable V_i has an
	UNDEFINED value, it means that either its defining statement
	hasn't been visited yet or V_i has no defining statement, in
	which case the original symbol 'V' is being used
	uninitialized.  Since 'V' is a local variable, the compiler
	may assume any initial value for it.

   After propagation, every variable V_i that ends up with a lattice
   value of CONSTANT will have the associated constant value in the
   array CONST_VAL[i].VALUE.  That is fed into substitute_and_fold for
   final substitution and folding.

   This algorithm uses wide-ints at the max precision of the target.
   This means that, with one uninteresting exception, variables with
   UNSIGNED types never go to VARYING because the bits above the
   precision of the type of the variable are always zero.  The
   uninteresting case is a variable of UNSIGNED type that has the
   maximum precision of the target.  Such variables can go to VARYING,
   but this causes no loss of information since these variables will
   never be extended.

   References:

     Constant propagation with conditional branches,
     Wegman and Zadeck, ACM TOPLAS 13(2):181-210.

     Building an Optimizing Compiler,
     Robert Morgan, Butterworth-Heinemann, 1998, Section 8.9.

     Advanced Compiler Design and Implementation,
     Steven Muchnick, Morgan Kaufmann, 1997, Section 12.6  */
#include "coretypes.h"
#include "tree-pass.h"
#include "gimple-pretty-print.h"
#include "fold-const.h"
#include "gimple-fold.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "tree-ssa-propagate.h"
#include "builtins.h"
#include "tree-chkp.h"
#include "stor-layout.h"
#include "optabs-query.h"
#include "tree-ssa-ccp.h"
#include "tree-dfa.h"
#include "diagnostic-core.h"
#include "stringpool.h"
/* Possible lattice values.  */
typedef enum
{
  UNINITIALIZED,
  UNDEFINED,
  CONSTANT,
  VARYING
} ccp_lattice_t;

struct ccp_prop_value_t
{
  /* Lattice value.  */
  ccp_lattice_t lattice_val;

  /* Propagated value.  */
  tree value;

  /* Mask that applies to the propagated value during CCP.  For X
     with a CONSTANT lattice value X & ~mask == value & ~mask.  The
     zero bits in the mask cover constant values.  The ones mean no
     information.  */
  widest_int mask;
};
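
/* Illustrative example (not part of the original sources): for an
   8-bit unsigned X, a CONSTANT lattice entry with value == 0x44 and
   mask == 0x03 says "bits 2..7 are known to be 010001, bits 0 and 1
   are unknown", i.e. X is one of 0x44..0x47.  A mask of zero is a
   fully known constant, while an all-ones mask carries no bit
   information at all.  */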
/* Array of propagated constant values.  After propagation,
   CONST_VAL[I].VALUE holds the constant value for SSA_NAME(I).  If
   the constant is held in an SSA name representing a memory store
   (i.e., a VDEF), CONST_VAL[I].MEM_REF will contain the actual
   memory reference used to store (i.e., the LHS of the assignment
   doing the store).  */
static ccp_prop_value_t *const_val;
static unsigned n_const_val;
static void canonicalize_value (ccp_prop_value_t *);
static bool ccp_fold_stmt (gimple_stmt_iterator *);
static void ccp_lattice_meet (ccp_prop_value_t *, ccp_prop_value_t *);
/* Dump constant propagation value VAL to file OUTF prefixed by PREFIX.  */

static void
dump_lattice_value (FILE *outf, const char *prefix, ccp_prop_value_t val)
{
  switch (val.lattice_val)
    {
    case UNINITIALIZED:
      fprintf (outf, "%sUNINITIALIZED", prefix);
      break;
    case UNDEFINED:
      fprintf (outf, "%sUNDEFINED", prefix);
      break;
    case VARYING:
      fprintf (outf, "%sVARYING", prefix);
      break;
    case CONSTANT:
      if (TREE_CODE (val.value) != INTEGER_CST
	  || val.mask == 0)
	{
	  fprintf (outf, "%sCONSTANT ", prefix);
	  print_generic_expr (outf, val.value, dump_flags);
	}
      else
	{
	  widest_int cval = wi::bit_and_not (wi::to_widest (val.value),
					     val.mask);
	  fprintf (outf, "%sCONSTANT ", prefix);
	  print_hex (cval, outf);
	  fprintf (outf, " (");
	  print_hex (val.mask, outf);
	  fprintf (outf, ")");
	}
      break;
    default:
      gcc_unreachable ();
    }
}
/* Print lattice value VAL to stderr.  */

void debug_lattice_value (ccp_prop_value_t val);

DEBUG_FUNCTION void
debug_lattice_value (ccp_prop_value_t val)
{
  dump_lattice_value (stderr, "", val);
  fprintf (stderr, "\n");
}
/* Extend NONZERO_BITS to a full mask, based on SGN.  */

static widest_int
extend_mask (const wide_int &nonzero_bits, signop sgn)
{
  return widest_int::from (nonzero_bits, sgn);
}
/* Compute a default value for variable VAR and store it in the
   CONST_VAL array.  The following rules are used to get default
   values:

   1- Global and static variables that are declared constant are
      considered CONSTANT.

   2- Any other value is considered UNDEFINED.  This is useful when
      considering PHI nodes.  PHI arguments that are undefined do not
      change the constant value of the PHI node, which allows for more
      constants to be propagated.

   3- Variables defined by statements other than assignments and PHI
      nodes are considered VARYING.

   4- Initial values of variables that are not GIMPLE registers are
      considered VARYING.  */
static ccp_prop_value_t
get_default_value (tree var)
{
  ccp_prop_value_t val = { UNINITIALIZED, NULL_TREE, 0 };
  gimple *stmt;

  stmt = SSA_NAME_DEF_STMT (var);

  if (gimple_nop_p (stmt))
    {
      /* Variables defined by an empty statement are those used
	 before being initialized.  If VAR is a local variable, we
	 can assume initially that it is UNDEFINED, otherwise we must
	 consider it VARYING.  */
      if (!virtual_operand_p (var)
	  && SSA_NAME_VAR (var)
	  && TREE_CODE (SSA_NAME_VAR (var)) == VAR_DECL)
	val.lattice_val = UNDEFINED;
      else
	{
	  val.lattice_val = VARYING;
	  val.mask = -1;
	  if (flag_tree_bit_ccp)
	    {
	      wide_int nonzero_bits = get_nonzero_bits (var);
	      if (nonzero_bits != -1)
		{
		  val.lattice_val = CONSTANT;
		  val.value = build_zero_cst (TREE_TYPE (var));
		  val.mask = extend_mask (nonzero_bits,
					  TYPE_SIGN (TREE_TYPE (var)));
		}
	    }
	}
    }
  else if (is_gimple_assign (stmt))
    {
      tree cst;
      if (gimple_assign_single_p (stmt)
	  && DECL_P (gimple_assign_rhs1 (stmt))
	  && (cst = get_symbol_constant_value (gimple_assign_rhs1 (stmt))))
	{
	  val.lattice_val = CONSTANT;
	  val.value = cst;
	}
      else
	{
	  /* Any other variable defined by an assignment is considered
	     UNDEFINED.  */
	  val.lattice_val = UNDEFINED;
	}
    }
  else if ((is_gimple_call (stmt)
	    && gimple_call_lhs (stmt) != NULL_TREE)
	   || gimple_code (stmt) == GIMPLE_PHI)
    {
      /* A variable defined by a call or a PHI node is considered
	 UNDEFINED.  */
      val.lattice_val = UNDEFINED;
    }
  else
    {
      /* Otherwise, VAR will never take on a constant value.  */
      val.lattice_val = VARYING;
      val.mask = -1;
    }

  return val;
}
/* Get the constant value associated with variable VAR.  */

static inline ccp_prop_value_t *
get_value (tree var)
{
  ccp_prop_value_t *val;

  if (const_val == NULL
      || SSA_NAME_VERSION (var) >= n_const_val)
    return NULL;

  val = &const_val[SSA_NAME_VERSION (var)];
  if (val->lattice_val == UNINITIALIZED)
    *val = get_default_value (var);

  canonicalize_value (val);

  return val;
}
/* Return the constant tree value associated with VAR.  */

static inline tree
get_constant_value (tree var)
{
  ccp_prop_value_t *val;
  if (TREE_CODE (var) != SSA_NAME)
    {
      if (is_gimple_min_invariant (var))
	return var;
      return NULL_TREE;
    }
  val = get_value (var);
  if (val
      && val->lattice_val == CONSTANT
      && (TREE_CODE (val->value) != INTEGER_CST
	  || val->mask == 0))
    return val->value;
  return NULL_TREE;
}
/* Sets the value associated with VAR to VARYING.  */

static inline void
set_value_varying (tree var)
{
  ccp_prop_value_t *val = &const_val[SSA_NAME_VERSION (var)];

  val->lattice_val = VARYING;
  val->value = NULL_TREE;
  val->mask = -1;
}
/* For integer constants, make sure to drop TREE_OVERFLOW.  */

static void
canonicalize_value (ccp_prop_value_t *val)
{
  if (val->lattice_val != CONSTANT)
    return;

  if (TREE_OVERFLOW_P (val->value))
    val->value = drop_tree_overflow (val->value);
}
/* Return whether the lattice transition is valid.  */

static bool
valid_lattice_transition (ccp_prop_value_t old_val, ccp_prop_value_t new_val)
{
  /* Lattice transitions must always be monotonically increasing in
     value.  */
  if (old_val.lattice_val < new_val.lattice_val)
    return true;

  if (old_val.lattice_val != new_val.lattice_val)
    return false;

  if (!old_val.value && !new_val.value)
    return true;

  /* Now both lattice values are CONSTANT.  */

  /* Allow arbitrary copy changes as we might look through PHI <a_1, ...>
     when only a single copy edge is executable.  */
  if (TREE_CODE (old_val.value) == SSA_NAME
      && TREE_CODE (new_val.value) == SSA_NAME)
    return true;

  /* Allow transitioning from a constant to a copy.  */
  if (is_gimple_min_invariant (old_val.value)
      && TREE_CODE (new_val.value) == SSA_NAME)
    return true;

  /* Allow transitioning from PHI <&x, not executable> == &x
     to PHI <&x, &y> == common alignment.  */
  if (TREE_CODE (old_val.value) != INTEGER_CST
      && TREE_CODE (new_val.value) == INTEGER_CST)
    return true;

  /* Bit-lattices have to agree in the still valid bits.  */
  if (TREE_CODE (old_val.value) == INTEGER_CST
      && TREE_CODE (new_val.value) == INTEGER_CST)
    return (wi::bit_and_not (wi::to_widest (old_val.value), new_val.mask)
	    == wi::bit_and_not (wi::to_widest (new_val.value), new_val.mask));

  /* Otherwise constant values have to agree.  */
  if (operand_equal_p (old_val.value, new_val.value, 0))
    return true;

  /* At least the kinds and types should agree now.  */
  if (TREE_CODE (old_val.value) != TREE_CODE (new_val.value)
      || !types_compatible_p (TREE_TYPE (old_val.value),
			      TREE_TYPE (new_val.value)))
    return false;

  /* For floats and !HONOR_NANS allow transitions from (partial) NaN
     to non-NaN.  */
  tree type = TREE_TYPE (new_val.value);
  if (SCALAR_FLOAT_TYPE_P (type)
      && !HONOR_NANS (type))
    {
      if (REAL_VALUE_ISNAN (TREE_REAL_CST (old_val.value)))
	return true;
    }
  else if (VECTOR_FLOAT_TYPE_P (type)
	   && !HONOR_NANS (type))
    {
      for (unsigned i = 0; i < VECTOR_CST_NELTS (old_val.value); ++i)
	if (!REAL_VALUE_ISNAN
	       (TREE_REAL_CST (VECTOR_CST_ELT (old_val.value, i)))
	    && !operand_equal_p (VECTOR_CST_ELT (old_val.value, i),
				 VECTOR_CST_ELT (new_val.value, i), 0))
	  return false;
      return true;
    }
  else if (COMPLEX_FLOAT_TYPE_P (type)
	   && !HONOR_NANS (type))
    {
      if (!REAL_VALUE_ISNAN (TREE_REAL_CST (TREE_REALPART (old_val.value)))
	  && !operand_equal_p (TREE_REALPART (old_val.value),
			       TREE_REALPART (new_val.value), 0))
	return false;
      if (!REAL_VALUE_ISNAN (TREE_REAL_CST (TREE_IMAGPART (old_val.value)))
	  && !operand_equal_p (TREE_IMAGPART (old_val.value),
			       TREE_IMAGPART (new_val.value), 0))
	return false;
      return true;
    }
  return false;
}
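
/* Illustrative example (not from the original sources): the
   transitions UNDEFINED -> CONSTANT 3 and CONSTANT 3 -> VARYING are
   valid because the lattice value only increases, while
   CONSTANT 3 -> CONSTANT 4 (with a zero mask) or
   VARYING -> CONSTANT 3 would be rejected by the checking assert in
   set_lattice_value below.  */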
/* Set the value for variable VAR to NEW_VAL.  Return true if the new
   value is different from VAR's previous value.  */

static bool
set_lattice_value (tree var, ccp_prop_value_t *new_val)
{
  /* We can deal with old UNINITIALIZED values just fine here.  */
  ccp_prop_value_t *old_val = &const_val[SSA_NAME_VERSION (var)];

  canonicalize_value (new_val);

  /* We have to be careful to not go up the bitwise lattice
     represented by the mask.  Instead of dropping to VARYING
     use the meet operator to retain a conservative value.
     Missed optimizations like PR65851 make this necessary.
     It also ensures we converge to a stable lattice solution.  */
  if (old_val->lattice_val != UNINITIALIZED)
    ccp_lattice_meet (new_val, old_val);

  gcc_checking_assert (valid_lattice_transition (*old_val, *new_val));

  /* If *OLD_VAL and NEW_VAL are the same, return false to inform the
     caller that this was a non-transition.  */
  if (old_val->lattice_val != new_val->lattice_val
      || (new_val->lattice_val == CONSTANT
	  && (TREE_CODE (new_val->value) != TREE_CODE (old_val->value)
	      || (TREE_CODE (new_val->value) == INTEGER_CST
		  && (new_val->mask != old_val->mask
		      || (wi::bit_and_not (wi::to_widest (old_val->value),
					   new_val->mask)
			  != wi::bit_and_not (wi::to_widest (new_val->value),
					      new_val->mask))))
	      || (TREE_CODE (new_val->value) != INTEGER_CST
		  && !operand_equal_p (new_val->value, old_val->value, 0)))))
    {
      /* ??? We would like to delay creation of INTEGER_CSTs from
	 partially constants here.  */

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  dump_lattice_value (dump_file, "Lattice value changed to ", *new_val);
	  fprintf (dump_file, ".  Adding SSA edges to worklist.\n");
	}

      *old_val = *new_val;

      gcc_assert (new_val->lattice_val != UNINITIALIZED);
      return true;
    }

  return false;
}
static ccp_prop_value_t get_value_for_expr (tree, bool);
static ccp_prop_value_t bit_value_binop (enum tree_code, tree, tree, tree);
void bit_value_binop (enum tree_code, signop, int, widest_int *, widest_int *,
		      signop, int, const widest_int &, const widest_int &,
		      signop, int, const widest_int &, const widest_int &);
/* Return a widest_int that can be used for bitwise simplifications
   from VAL.  */

static widest_int
value_to_wide_int (ccp_prop_value_t val)
{
  if (val.value
      && TREE_CODE (val.value) == INTEGER_CST)
    return wi::to_widest (val.value);

  return 0;
}
/* Return the value for the address expression EXPR based on alignment
   information.  */

static ccp_prop_value_t
get_value_from_alignment (tree expr)
{
  tree type = TREE_TYPE (expr);
  ccp_prop_value_t val;
  unsigned HOST_WIDE_INT bitpos;
  unsigned int align;

  gcc_assert (TREE_CODE (expr) == ADDR_EXPR);

  get_pointer_alignment_1 (expr, &align, &bitpos);
  val.mask = wi::bit_and_not
    (POINTER_TYPE_P (type) || TYPE_UNSIGNED (type)
     ? wi::mask <widest_int> (TYPE_PRECISION (type), false)
     : -1,
     align / BITS_PER_UNIT - 1);
  val.lattice_val
    = wi::sext (val.mask, TYPE_PRECISION (type)) == -1 ? VARYING : CONSTANT;
  if (val.lattice_val == CONSTANT)
    val.value = build_int_cstu (type, bitpos / BITS_PER_UNIT);
  else
    val.value = NULL_TREE;

  return val;
}
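
/* Worked example (illustrative, not from the original sources): for
   &a where 'a' is known to be 16-byte aligned and the address is 4
   bytes past that alignment boundary, get_pointer_alignment_1 reports
   align == 128 bits and bitpos == 32 bits, so the result is CONSTANT
   with value == 4 and a mask that has every bit of the pointer's
   precision set except the low four, i.e. only the low four bits of
   the address are known.  */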
/* Return the value for the tree operand EXPR.  If FOR_BITS_P is true
   return constant bits extracted from alignment information for
   invariant addresses.  */

static ccp_prop_value_t
get_value_for_expr (tree expr, bool for_bits_p)
{
  ccp_prop_value_t val;

  if (TREE_CODE (expr) == SSA_NAME)
    {
      ccp_prop_value_t *val_ = get_value (expr);
      if (val_)
	val = *val_;
      else
	{
	  val.lattice_val = VARYING;
	  val.value = NULL_TREE;
	  val.mask = -1;
	}
      if (for_bits_p
	  && val.lattice_val == CONSTANT
	  && TREE_CODE (val.value) == ADDR_EXPR)
	val = get_value_from_alignment (val.value);
      /* Fall back to a copy value.  */
      if (!for_bits_p
	  && val.lattice_val == VARYING
	  && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (expr))
	{
	  val.lattice_val = CONSTANT;
	  val.value = expr;
	  val.mask = -1;
	}
    }
  else if (is_gimple_min_invariant (expr)
	   && (!for_bits_p || TREE_CODE (expr) == INTEGER_CST))
    {
      val.lattice_val = CONSTANT;
      val.value = expr;
      val.mask = 0;
      canonicalize_value (&val);
    }
  else if (TREE_CODE (expr) == ADDR_EXPR)
    val = get_value_from_alignment (expr);
  else
    {
      val.lattice_val = VARYING;
      val.mask = -1;
      val.value = NULL_TREE;
    }

  if (val.lattice_val == VARYING
      && TYPE_UNSIGNED (TREE_TYPE (expr)))
    val.mask = wi::zext (val.mask, TYPE_PRECISION (TREE_TYPE (expr)));

  return val;
}
/* Return the likely CCP lattice value for STMT.

   If STMT has no operands, then return CONSTANT.

   Else if undefinedness of operands of STMT cause its value to be
   undefined, then return UNDEFINED.

   Else if any operands of STMT are constants, then return CONSTANT.

   Else return VARYING.  */

static ccp_lattice_t
likely_value (gimple *stmt)
{
  bool has_constant_operand, has_undefined_operand, all_undefined_operands;
  bool has_nsa_operand;
  tree use;
  ssa_op_iter iter;
  unsigned i;

  enum gimple_code code = gimple_code (stmt);

  /* This function appears to be called only for assignments, calls,
     conditionals, and switches, due to the logic in visit_stmt.  */
  gcc_assert (code == GIMPLE_ASSIGN
	      || code == GIMPLE_CALL
	      || code == GIMPLE_COND
	      || code == GIMPLE_SWITCH);

  /* If the statement has volatile operands, it won't fold to a
     constant value.  */
  if (gimple_has_volatile_ops (stmt))
    return VARYING;

  /* Arrive here for more complex cases.  */
  has_constant_operand = false;
  has_undefined_operand = false;
  all_undefined_operands = true;
  has_nsa_operand = false;
  FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
    {
      ccp_prop_value_t *val = get_value (use);

      if (val && val->lattice_val == UNDEFINED)
	has_undefined_operand = true;
      else
	all_undefined_operands = false;

      if (val && val->lattice_val == CONSTANT)
	has_constant_operand = true;

      if (SSA_NAME_IS_DEFAULT_DEF (use)
	  || !prop_simulate_again_p (SSA_NAME_DEF_STMT (use)))
	has_nsa_operand = true;
    }

  /* There may be constants in regular rhs operands.  For calls we
     have to ignore lhs, fndecl and static chain, otherwise only
     the lhs.  */
  for (i = (is_gimple_call (stmt) ? 2 : 0) + gimple_has_lhs (stmt);
       i < gimple_num_ops (stmt); ++i)
    {
      tree op = gimple_op (stmt, i);
      if (!op || TREE_CODE (op) == SSA_NAME)
	continue;
      if (is_gimple_min_invariant (op))
	has_constant_operand = true;
    }

  if (has_constant_operand)
    all_undefined_operands = false;

  if (has_undefined_operand
      && code == GIMPLE_CALL
      && gimple_call_internal_p (stmt))
    switch (gimple_call_internal_fn (stmt))
      {
	/* These 3 builtins use the first argument just as a magic
	   way how to find out a decl uid.  */
      case IFN_GOMP_SIMD_LANE:
      case IFN_GOMP_SIMD_VF:
      case IFN_GOMP_SIMD_LAST_LANE:
	has_undefined_operand = false;
	break;
      default:
	break;
      }

  /* If the operation combines operands like COMPLEX_EXPR make sure to
     not mark the result UNDEFINED if only one part of the result is
     undefined.  */
  if (has_undefined_operand && all_undefined_operands)
    return UNDEFINED;
  else if (code == GIMPLE_ASSIGN && has_undefined_operand)
    {
      switch (gimple_assign_rhs_code (stmt))
	{
	/* Unary operators are handled with all_undefined_operands.  */
	case PLUS_EXPR:
	case MINUS_EXPR:
	case POINTER_PLUS_EXPR:
	case BIT_XOR_EXPR:
	  /* Not MIN_EXPR, MAX_EXPR.  One VARYING operand may be selected.
	     Not bitwise operators, one VARYING operand may specify the
	     result completely.
	     Not logical operators for the same reason, apart from XOR.
	     Not COMPLEX_EXPR as one VARYING operand makes the result partly
	     not UNDEFINED.  Not *DIV_EXPR, comparisons and shifts because
	     the undefined operand may be promoted.  */
	  return UNDEFINED;

	case ADDR_EXPR:
	  /* If any part of an address is UNDEFINED, like the index
	     of an ARRAY_EXPR, then treat the result as UNDEFINED.  */
	  return UNDEFINED;

	default:
	  ;
	}
    }
  /* If there was an UNDEFINED operand but the result may be not UNDEFINED
     fall back to CONSTANT.  During iteration UNDEFINED may still drop
     to CONSTANT.  */
  if (has_undefined_operand)
    return CONSTANT;

  /* We do not consider virtual operands here -- load from read-only
     memory may have only VARYING virtual operands, but still be
     constant.  Also we can combine the stmt with definitions from
     operands whose definitions are not simulated again.  */
  if (has_constant_operand
      || has_nsa_operand
      || gimple_references_memory_p (stmt))
    return CONSTANT;

  return VARYING;
}
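
/* Illustrative example (not from the original sources): for
   z_3 = x_1 + y_2 where x_1 is currently CONSTANT and y_2 is VARYING,
   likely_value returns CONSTANT, so evaluate_stmt still folds the
   statement and tracks known bits; if both operands were UNDEFINED it
   would return UNDEFINED instead.  */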
/* Returns true if STMT cannot be constant.  */

static bool
surely_varying_stmt_p (gimple *stmt)
{
  /* If the statement has operands that we cannot handle, it cannot be
     constant.  */
  if (gimple_has_volatile_ops (stmt))
    return true;

  /* If it is a call and does not return a value or is not a
     builtin and not an indirect call or a call to function with
     assume_aligned/alloc_align attribute, it is varying.  */
  if (is_gimple_call (stmt))
    {
      tree fndecl, fntype = gimple_call_fntype (stmt);
      if (!gimple_call_lhs (stmt)
	  || ((fndecl = gimple_call_fndecl (stmt)) != NULL_TREE
	      && !DECL_BUILT_IN (fndecl)
	      && !lookup_attribute ("assume_aligned",
				    TYPE_ATTRIBUTES (fntype))
	      && !lookup_attribute ("alloc_align",
				    TYPE_ATTRIBUTES (fntype))))
	return true;
    }

  /* Any other store operation is not interesting.  */
  else if (gimple_vdef (stmt))
    return true;

  /* Anything other than assignments and conditional jumps are not
     interesting for CCP.  */
  if (gimple_code (stmt) != GIMPLE_ASSIGN
      && gimple_code (stmt) != GIMPLE_COND
      && gimple_code (stmt) != GIMPLE_SWITCH
      && gimple_code (stmt) != GIMPLE_CALL)
    return true;

  return false;
}
/* Initialize local data structures for CCP.  */

static void
ccp_initialize (void)
{
  basic_block bb;

  n_const_val = num_ssa_names;
  const_val = XCNEWVEC (ccp_prop_value_t, n_const_val);

  /* Initialize simulation flags for PHI nodes and statements.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      gimple_stmt_iterator i;

      for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i))
	{
	  gimple *stmt = gsi_stmt (i);
	  bool is_varying;

	  /* If the statement is a control insn, then we do not
	     want to avoid simulating the statement once.  Failure
	     to do so means that those edges will never get added.  */
	  if (stmt_ends_bb_p (stmt))
	    is_varying = false;
	  else
	    is_varying = surely_varying_stmt_p (stmt);

	  if (is_varying)
	    {
	      tree def;
	      ssa_op_iter iter;

	      /* If the statement will not produce a constant, mark
		 all its outputs VARYING.  */
	      FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
		set_value_varying (def);
	    }
	  prop_set_simulate_again (stmt, !is_varying);
	}
    }

  /* Now process PHI nodes.  We never clear the simulate_again flag on
     phi nodes, since we do not know which edges are executable yet,
     except for phi nodes for virtual operands when we do not do store ccp.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      gphi_iterator i;

      for (i = gsi_start_phis (bb); !gsi_end_p (i); gsi_next (&i))
	{
	  gphi *phi = i.phi ();

	  if (virtual_operand_p (gimple_phi_result (phi)))
	    prop_set_simulate_again (phi, false);
	  else
	    prop_set_simulate_again (phi, true);
	}
    }
}
/* Debug count support.  Reset the values of ssa names
   VARYING when the total number of ssa names analyzed is
   beyond the debug count specified.  */

static void
do_dbg_cnt (void)
{
  unsigned i;
  for (i = 0; i < num_ssa_names; i++)
    {
      if (!dbg_cnt (ccp))
	{
	  const_val[i].lattice_val = VARYING;
	  const_val[i].mask = -1;
	  const_val[i].value = NULL_TREE;
	}
    }
}
/* Do final substitution of propagated values, cleanup the flowgraph and
   free allocated storage.  If NONZERO_P, record nonzero bits.

   Return TRUE when something was optimized.  */

static bool
ccp_finalize (bool nonzero_p)
{
  bool something_changed;
  unsigned i;
  tree name;

  do_dbg_cnt ();

  /* Derive alignment and misalignment information from partially
     constant pointers in the lattice or nonzero bits from partially
     constant integers.  */
  FOR_EACH_SSA_NAME (i, name, cfun)
    {
      ccp_prop_value_t *val;
      unsigned int tem, align;

      if (!POINTER_TYPE_P (TREE_TYPE (name))
	  && (!INTEGRAL_TYPE_P (TREE_TYPE (name))
	      /* Don't record nonzero bits before IPA to avoid
		 using too much memory.  */
	      || !nonzero_p))
	continue;

      val = get_value (name);
      if (val->lattice_val != CONSTANT
	  || TREE_CODE (val->value) != INTEGER_CST
	  || val->mask == 0)
	continue;

      if (POINTER_TYPE_P (TREE_TYPE (name)))
	{
	  /* Trailing mask bits specify the alignment, trailing value
	     bits the misalignment.  */
	  tem = val->mask.to_uhwi ();
	  align = least_bit_hwi (tem);
	  if (align > 1)
	    set_ptr_info_alignment (get_ptr_info (name), align,
				    (TREE_INT_CST_LOW (val->value)
				     & (align - 1)));
	}
      else
	{
	  unsigned int precision = TYPE_PRECISION (TREE_TYPE (val->value));
	  wide_int nonzero_bits = wide_int::from (val->mask, precision,
						  UNSIGNED) | val->value;
	  nonzero_bits &= get_nonzero_bits (name);
	  set_nonzero_bits (name, nonzero_bits);
	}
    }

  /* Perform substitutions based on the known constant values.  */
  something_changed = substitute_and_fold (get_constant_value, ccp_fold_stmt);

  free (const_val);
  const_val = NULL;
  return something_changed;
}
/* Compute the meet operator between *VAL1 and *VAL2.  Store the result
   in VAL1.

		any  M UNDEFINED   = any
		any  M VARYING     = VARYING
		Ci   M Cj	   = Ci		if (i == j)
		Ci   M Cj	   = VARYING	if (i != j)
   */

static void
ccp_lattice_meet (ccp_prop_value_t *val1, ccp_prop_value_t *val2)
{
  if (val1->lattice_val == UNDEFINED
      /* For UNDEFINED M SSA we can't always SSA because its definition
	 may not dominate the PHI node.  Doing optimistic copy propagation
	 also causes a lot of gcc.dg/uninit-pred*.c FAILs.  */
      && (val2->lattice_val != CONSTANT
	  || TREE_CODE (val2->value) != SSA_NAME))
    {
      /* UNDEFINED M any = any   */
      *val1 = *val2;
    }
  else if (val2->lattice_val == UNDEFINED
	   /* See above.  */
	   && (val1->lattice_val != CONSTANT
	       || TREE_CODE (val1->value) != SSA_NAME))
    {
      /* any M UNDEFINED = any
	 Nothing to do.  VAL1 already contains the value we want.  */
      ;
    }
  else if (val1->lattice_val == VARYING
	   || val2->lattice_val == VARYING)
    {
      /* any M VARYING = VARYING.  */
      val1->lattice_val = VARYING;
      val1->mask = -1;
      val1->value = NULL_TREE;
    }
  else if (val1->lattice_val == CONSTANT
	   && val2->lattice_val == CONSTANT
	   && TREE_CODE (val1->value) == INTEGER_CST
	   && TREE_CODE (val2->value) == INTEGER_CST)
    {
      /* Ci M Cj = Ci		if (i == j)
	 Ci M Cj = VARYING	if (i != j)

	 For INTEGER_CSTs mask unequal bits.  If no equal bits remain,
	 drop to varying.  */
      val1->mask = (val1->mask | val2->mask
		    | (wi::to_widest (val1->value)
		       ^ wi::to_widest (val2->value)));
      if (wi::sext (val1->mask, TYPE_PRECISION (TREE_TYPE (val1->value))) == -1)
	{
	  val1->lattice_val = VARYING;
	  val1->value = NULL_TREE;
	}
    }
  else if (val1->lattice_val == CONSTANT
	   && val2->lattice_val == CONSTANT
	   && operand_equal_p (val1->value, val2->value, 0))
    {
      /* Ci M Cj = Ci		if (i == j)
	 Ci M Cj = VARYING	if (i != j)

	 VAL1 already contains the value we want for equivalent values.  */
    }
  else if (val1->lattice_val == CONSTANT
	   && val2->lattice_val == CONSTANT
	   && (TREE_CODE (val1->value) == ADDR_EXPR
	       || TREE_CODE (val2->value) == ADDR_EXPR))
    {
      /* When not equal addresses are involved try meeting for
	 alignment.  */
      ccp_prop_value_t tem = *val2;
      if (TREE_CODE (val1->value) == ADDR_EXPR)
	*val1 = get_value_for_expr (val1->value, true);
      if (TREE_CODE (val2->value) == ADDR_EXPR)
	tem = get_value_for_expr (val2->value, true);
      ccp_lattice_meet (val1, &tem);
    }
  else
    {
      /* Any other combination is VARYING.  */
      val1->lattice_val = VARYING;
      val1->mask = -1;
      val1->value = NULL_TREE;
    }
}
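
/* Worked example (illustrative, not from the original sources):
   meeting the INTEGER_CSTs 4 (0b100) and 6 (0b110), both with zero
   masks, yields mask = 4 ^ 6 = 0b010 while keeping value 4, i.e.
   "bit 1 is unknown, the remaining bits are 1..0" -- the meet stays
   CONSTANT instead of dropping to VARYING for as long as some bits
   still agree.  */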
/* Loop through the PHI_NODE's parameters for BLOCK and compare their
   lattice values to determine PHI_NODE's lattice value.  The value of a
   PHI node is determined calling ccp_lattice_meet with all the arguments
   of the PHI node that are incoming via executable edges.  */

static enum ssa_prop_result
ccp_visit_phi_node (gphi *phi)
{
  unsigned i;
  ccp_prop_value_t new_val;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nVisiting PHI node: ");
      print_gimple_stmt (dump_file, phi, 0, dump_flags);
    }

  new_val.lattice_val = UNDEFINED;
  new_val.value = NULL_TREE;
  new_val.mask = 0;

  bool first = true;
  bool non_exec_edge = false;
  for (i = 0; i < gimple_phi_num_args (phi); i++)
    {
      /* Compute the meet operator over all the PHI arguments flowing
	 through executable edges.  */
      edge e = gimple_phi_arg_edge (phi, i);

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file,
		   "\n    Argument #%d (%d -> %d %sexecutable)\n",
		   i, e->src->index, e->dest->index,
		   (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
	}

      /* If the incoming edge is executable, compute the meet operator for
	 the existing value of the PHI node and the current PHI argument.  */
      if (e->flags & EDGE_EXECUTABLE)
	{
	  tree arg = gimple_phi_arg (phi, i)->def;
	  ccp_prop_value_t arg_val = get_value_for_expr (arg, false);

	  if (first)
	    {
	      new_val = arg_val;
	      first = false;
	    }
	  else
	    ccp_lattice_meet (&new_val, &arg_val);

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "\t");
	      print_generic_expr (dump_file, arg, dump_flags);
	      dump_lattice_value (dump_file, "\tValue: ", arg_val);
	      fprintf (dump_file, "\n");
	    }

	  if (new_val.lattice_val == VARYING)
	    break;
	}
      else
	non_exec_edge = true;
    }

  /* In case there were non-executable edges and the value is a copy
     make sure its definition dominates the PHI node.  */
  if (non_exec_edge
      && new_val.lattice_val == CONSTANT
      && TREE_CODE (new_val.value) == SSA_NAME
      && ! SSA_NAME_IS_DEFAULT_DEF (new_val.value)
      && ! dominated_by_p (CDI_DOMINATORS, gimple_bb (phi),
			   gimple_bb (SSA_NAME_DEF_STMT (new_val.value))))
    {
      new_val.lattice_val = VARYING;
      new_val.value = NULL_TREE;
      new_val.mask = -1;
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      dump_lattice_value (dump_file, "\n    PHI node value: ", new_val);
      fprintf (dump_file, "\n\n");
    }

  /* Make the transition to the new value.  */
  if (set_lattice_value (gimple_phi_result (phi), &new_val))
    {
      if (new_val.lattice_val == VARYING)
	return SSA_PROP_VARYING;
      else
	return SSA_PROP_INTERESTING;
    }
  else
    return SSA_PROP_NOT_INTERESTING;
}
/* Return the constant value for OP or OP otherwise.  */

static tree
valueize_op (tree op)
{
  if (TREE_CODE (op) == SSA_NAME)
    {
      tree tem = get_constant_value (op);
      if (tem)
	return tem;
    }
  return op;
}
/* Return the constant value for OP, but signal to not follow SSA
   edges if the definition may be simulated again.  */

static tree
valueize_op_1 (tree op)
{
  if (TREE_CODE (op) == SSA_NAME)
    {
      /* If the definition may be simulated again we cannot follow
	 this SSA edge as the SSA propagator does not necessarily
	 re-visit the use.  */
      gimple *def_stmt = SSA_NAME_DEF_STMT (op);
      if (!gimple_nop_p (def_stmt)
	  && prop_simulate_again_p (def_stmt))
	return NULL_TREE;
      tree tem = get_constant_value (op);
      if (tem)
	return tem;
    }
  return op;
}
/* CCP specific front-end to the non-destructive constant folding
   routines.

   Attempt to simplify the RHS of STMT knowing that one or more
   operands are constants.

   If simplification is possible, return the simplified RHS,
   otherwise return the original RHS or NULL_TREE.  */

static tree
ccp_fold (gimple *stmt)
{
  location_t loc = gimple_location (stmt);
  switch (gimple_code (stmt))
    {
    case GIMPLE_COND:
      {
	/* Handle comparison operators that can appear in GIMPLE form.  */
	tree op0 = valueize_op (gimple_cond_lhs (stmt));
	tree op1 = valueize_op (gimple_cond_rhs (stmt));
	enum tree_code code = gimple_cond_code (stmt);
	return fold_binary_loc (loc, code, boolean_type_node, op0, op1);
      }

    case GIMPLE_SWITCH:
      {
	/* Return the constant switch index.  */
	return valueize_op (gimple_switch_index (as_a <gswitch *> (stmt)));
      }

    case GIMPLE_ASSIGN:
    case GIMPLE_CALL:
      return gimple_fold_stmt_to_constant_1 (stmt,
					     valueize_op, valueize_op_1);

    default:
      gcc_unreachable ();
    }
}
/* Apply the operation CODE in type TYPE to the value, mask pair
   RVAL and RMASK representing a value of type RTYPE and set
   the value, mask pair *VAL and *MASK to the result.  */

void
bit_value_unop (enum tree_code code, signop type_sgn, int type_precision,
		widest_int *val, widest_int *mask,
		signop rtype_sgn, int rtype_precision,
		const widest_int &rval, const widest_int &rmask)
{
  switch (code)
    {
    case BIT_NOT_EXPR:
      *mask = rmask;
      *val = ~rval;
      break;

    case NEGATE_EXPR:
      {
	widest_int temv, temm;
	/* Return ~rval + 1.  */
	bit_value_unop (BIT_NOT_EXPR, type_sgn, type_precision, &temv, &temm,
			type_sgn, type_precision, rval, rmask);
	bit_value_binop (PLUS_EXPR, type_sgn, type_precision, val, mask,
			 type_sgn, type_precision, temv, temm,
			 type_sgn, type_precision, 1, 0);
	break;
      }

    CASE_CONVERT:
      {
	/* First extend mask and value according to the original type.  */
	*mask = wi::ext (rmask, rtype_precision, rtype_sgn);
	*val = wi::ext (rval, rtype_precision, rtype_sgn);

	/* Then extend mask and value according to the target type.  */
	*mask = wi::ext (*mask, type_precision, type_sgn);
	*val = wi::ext (*val, type_precision, type_sgn);
	break;
      }

    default:
      *mask = -1;
      break;
    }
}
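
/* Worked example (illustrative, not from the original sources):
   negating a value known to be 0b01x0 (rval = 4, rmask = 2) first
   forms ~rval with the same mask and then adds 1 through
   bit_value_binop, so the unknown bit and any carry positions it can
   influence become unknown in the result while the lower bits stay
   known.  */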
/* Apply the operation CODE in type TYPE to the value, mask pairs
   R1VAL, R1MASK and R2VAL, R2MASK representing values of type R1TYPE
   and R2TYPE and set the value, mask pair *VAL and *MASK to the result.  */

void
bit_value_binop (enum tree_code code, signop sgn, int width,
		 widest_int *val, widest_int *mask,
		 signop r1type_sgn, int r1type_precision,
		 const widest_int &r1val, const widest_int &r1mask,
		 signop r2type_sgn, int r2type_precision,
		 const widest_int &r2val, const widest_int &r2mask)
{
  bool swap_p = false;

  /* Assume we'll get a constant result.  Use an initial non varying
     value, we fall back to varying in the end if necessary.  */
  *mask = -1;

  switch (code)
    {
    case BIT_AND_EXPR:
      /* The mask is constant where there is a known not
	 set bit, (m1 | m2) & ((v1 | m1) & (v2 | m2)) */
      *mask = (r1mask | r2mask) & (r1val | r1mask) & (r2val | r2mask);
      *val = r1val & r2val;
      break;

    case BIT_IOR_EXPR:
      /* The mask is constant where there is a known
	 set bit, (m1 | m2) & ~((v1 & ~m1) | (v2 & ~m2)).  */
      *mask = wi::bit_and_not (r1mask | r2mask,
			       wi::bit_and_not (r1val, r1mask)
			       | wi::bit_and_not (r2val, r2mask));
      *val = r1val | r2val;
      break;

    case BIT_XOR_EXPR:
      /* m1 | m2  */
      *mask = r1mask | r2mask;
      *val = r1val ^ r2val;
      break;

    case LROTATE_EXPR:
    case RROTATE_EXPR:
      if (r2mask == 0)
	{
	  widest_int shift = r2val;
	  if (shift == 0)
	    {
	      *mask = r1mask;
	      *val = r1val;
	    }
	  else
	    {
	      if (wi::neg_p (shift))
		{
		  shift = -shift;
		  if (code == RROTATE_EXPR)
		    code = LROTATE_EXPR;
		  else
		    code = RROTATE_EXPR;
		}
	      if (code == RROTATE_EXPR)
		{
		  *mask = wi::rrotate (r1mask, shift, width);
		  *val = wi::rrotate (r1val, shift, width);
		}
	      else
		{
		  *mask = wi::lrotate (r1mask, shift, width);
		  *val = wi::lrotate (r1val, shift, width);
		}
	    }
	}
      break;

    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
      /* ??? We can handle partially known shift counts if we know
	 its sign.  That way we can tell that (x << (y | 8)) & 255
	 is zero.  */
      if (r2mask == 0)
	{
	  widest_int shift = r2val;
	  if (shift == 0)
	    {
	      *mask = r1mask;
	      *val = r1val;
	    }
	  else
	    {
	      if (wi::neg_p (shift))
		break;
	      if (code == RSHIFT_EXPR)
		{
		  *mask = wi::rshift (wi::ext (r1mask, width, sgn), shift, sgn);
		  *val = wi::rshift (wi::ext (r1val, width, sgn), shift, sgn);
		}
	      else
		{
		  *mask = wi::ext (r1mask << shift, width, sgn);
		  *val = wi::ext (r1val << shift, width, sgn);
		}
	    }
	}
      break;

    case PLUS_EXPR:
    case POINTER_PLUS_EXPR:
      {
	/* Do the addition with unknown bits set to zero, to give carry-ins of
	   zero wherever possible.  */
	widest_int lo = (wi::bit_and_not (r1val, r1mask)
			 + wi::bit_and_not (r2val, r2mask));
	lo = wi::ext (lo, width, sgn);
	/* Do the addition with unknown bits set to one, to give carry-ins of
	   one wherever possible.  */
	widest_int hi = (r1val | r1mask) + (r2val | r2mask);
	hi = wi::ext (hi, width, sgn);
	/* Each bit in the result is known if (a) the corresponding bits in
	   both inputs are known, and (b) the carry-in to that bit position
	   is known.  We can check condition (b) by seeing if we got the same
	   result with minimised carries as with maximised carries.  */
	*mask = r1mask | r2mask | (lo ^ hi);
	*mask = wi::ext (*mask, width, sgn);
	/* It shouldn't matter whether we choose lo or hi here.  */
	*val = lo;
	break;
      }
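
      /* Worked example (illustrative, not from the original sources)
	 for the PLUS_EXPR case above: adding 0b01x0 (val 4, mask 2)
	 and the constant 1 gives lo = 4 + 1 = 5 and hi = 6 + 1 = 7,
	 so lo ^ hi = 2 marks only bit 1 as unknown; no carry escapes
	 past the unknown bit and the result is 0b01x1 (val 5, mask 2).  */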
    case MINUS_EXPR:
      {
	widest_int temv, temm;
	bit_value_unop (NEGATE_EXPR, r2type_sgn, r2type_precision, &temv, &temm,
			r2type_sgn, r2type_precision, r2val, r2mask);
	bit_value_binop (PLUS_EXPR, sgn, width, val, mask,
			 r1type_sgn, r1type_precision, r1val, r1mask,
			 r2type_sgn, r2type_precision, temv, temm);
	break;
      }

    case MULT_EXPR:
      {
	/* Just track trailing zeros in both operands and transfer
	   them to the other.  */
	int r1tz = wi::ctz (r1val | r1mask);
	int r2tz = wi::ctz (r2val | r2mask);
	if (r1tz + r2tz >= width)
	  {
	    *mask = 0;
	    *val = 0;
	  }
	else if (r1tz + r2tz > 0)
	  {
	    *mask = wi::ext (wi::mask <widest_int> (r1tz + r2tz, true),
			     width, sgn);
	    *val = 0;
	  }
	break;
      }

    case EQ_EXPR:
    case NE_EXPR:
      {
	widest_int m = r1mask | r2mask;
	if (wi::bit_and_not (r1val, m) != wi::bit_and_not (r2val, m))
	  {
	    *mask = 0;
	    *val = ((code == EQ_EXPR) ? 0 : 1);
	  }
	else
	  {
	    /* We know the result of a comparison is always one or zero.  */
	    *mask = 1;
	    *val = 0;
	  }
	break;
      }

    case GE_EXPR:
    case GT_EXPR:
      swap_p = true;
      code = swap_tree_comparison (code);
      /* Fall through.  */
    case LT_EXPR:
    case LE_EXPR:
      {
	int minmax, maxmin;

	const widest_int &o1val = swap_p ? r2val : r1val;
	const widest_int &o1mask = swap_p ? r2mask : r1mask;
	const widest_int &o2val = swap_p ? r1val : r2val;
	const widest_int &o2mask = swap_p ? r1mask : r2mask;

	/* If the most significant bits are not known we know nothing.  */
	if (wi::neg_p (o1mask) || wi::neg_p (o2mask))
	  break;

	/* For comparisons the signedness is in the comparison operands.  */
	sgn = r1type_sgn;

	/* If we know the most significant bits we know the value
	   ranges by means of treating varying bits as zero
	   or one.  Do a cross comparison of the max/min pairs.  */
	maxmin = wi::cmp (o1val | o1mask,
			  wi::bit_and_not (o2val, o2mask), sgn);
	minmax = wi::cmp (wi::bit_and_not (o1val, o1mask),
			  o2val | o2mask, sgn);
	if (maxmin < 0)  /* o1 is less than o2.  */
	  {
	    *mask = 0;
	    *val = 1;
	  }
	else if (minmax > 0)  /* o1 is not less or equal to o2.  */
	  {
	    *mask = 0;
	    *val = 0;
	  }
	else if (maxmin == minmax)  /* o1 and o2 are equal.  */
	  {
	    /* This probably should never happen as we'd have
	       folded the thing during fully constant value folding.  */
	    *mask = 0;
	    *val = (code == LE_EXPR ? 1 : 0);
	  }
	else
	  {
	    /* We know the result of a comparison is always one or zero.  */
	    *mask = 1;
	    *val = 0;
	  }
	break;
      }

    default:;
    }
}
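
/* Worked example (illustrative, not from the original sources) for the
   ordered-comparison handling above: comparing o1 = 0b00xx (some value
   in 0..3) with the constant o2 = 8 using LT_EXPR gives
   maxmin = cmp (3, 8) < 0, so the result is the known constant 1; if
   the two ranges overlapped, only "the result is 0 or 1" (mask 1)
   would be recorded.  */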
/* Return the propagation value when applying the operation CODE to
   the value RHS yielding type TYPE.  */

static ccp_prop_value_t
bit_value_unop (enum tree_code code, tree type, tree rhs)
{
  ccp_prop_value_t rval = get_value_for_expr (rhs, true);
  widest_int value, mask;
  ccp_prop_value_t val;

  if (rval.lattice_val == UNDEFINED)
    return rval;

  gcc_assert ((rval.lattice_val == CONSTANT
	       && TREE_CODE (rval.value) == INTEGER_CST)
	      || wi::sext (rval.mask, TYPE_PRECISION (TREE_TYPE (rhs))) == -1);
  bit_value_unop (code, TYPE_SIGN (type), TYPE_PRECISION (type), &value, &mask,
		  TYPE_SIGN (TREE_TYPE (rhs)), TYPE_PRECISION (TREE_TYPE (rhs)),
		  value_to_wide_int (rval), rval.mask);
  if (wi::sext (mask, TYPE_PRECISION (type)) != -1)
    {
      val.lattice_val = CONSTANT;
      val.mask = mask;
      /* ??? Delay building trees here.  */
      val.value = wide_int_to_tree (type, value);
    }
  else
    {
      val.lattice_val = VARYING;
      val.value = NULL_TREE;
      val.mask = -1;
    }
  return val;
}
/* Return the propagation value when applying the operation CODE to
   the values RHS1 and RHS2 yielding type TYPE.  */

static ccp_prop_value_t
bit_value_binop (enum tree_code code, tree type, tree rhs1, tree rhs2)
{
  ccp_prop_value_t r1val = get_value_for_expr (rhs1, true);
  ccp_prop_value_t r2val = get_value_for_expr (rhs2, true);
  widest_int value, mask;
  ccp_prop_value_t val;

  if (r1val.lattice_val == UNDEFINED
      || r2val.lattice_val == UNDEFINED)
    {
      val.lattice_val = VARYING;
      val.value = NULL_TREE;
      val.mask = -1;
      return val;
    }

  gcc_assert ((r1val.lattice_val == CONSTANT
	       && TREE_CODE (r1val.value) == INTEGER_CST)
	      || wi::sext (r1val.mask,
			   TYPE_PRECISION (TREE_TYPE (rhs1))) == -1);
  gcc_assert ((r2val.lattice_val == CONSTANT
	       && TREE_CODE (r2val.value) == INTEGER_CST)
	      || wi::sext (r2val.mask,
			   TYPE_PRECISION (TREE_TYPE (rhs2))) == -1);
  bit_value_binop (code, TYPE_SIGN (type), TYPE_PRECISION (type), &value, &mask,
		   TYPE_SIGN (TREE_TYPE (rhs1)), TYPE_PRECISION (TREE_TYPE (rhs1)),
		   value_to_wide_int (r1val), r1val.mask,
		   TYPE_SIGN (TREE_TYPE (rhs2)), TYPE_PRECISION (TREE_TYPE (rhs2)),
		   value_to_wide_int (r2val), r2val.mask);

  if (wi::sext (mask, TYPE_PRECISION (type)) != -1)
    {
      val.lattice_val = CONSTANT;
      val.mask = mask;
      /* ??? Delay building trees here.  */
      val.value = wide_int_to_tree (type, value);
    }
  else
    {
      val.lattice_val = VARYING;
      val.value = NULL_TREE;
      val.mask = -1;
    }

  return val;
}
/* Return the propagation value for __builtin_assume_aligned
   and functions with assume_aligned or alloc_aligned attribute.
   For __builtin_assume_aligned, ATTR is NULL_TREE,
   for assume_aligned attribute ATTR is non-NULL and ALLOC_ALIGNED
   is false, for alloc_aligned attribute ATTR is non-NULL and
   ALLOC_ALIGNED is true.  */

static ccp_prop_value_t
bit_value_assume_aligned (gimple *stmt, tree attr, ccp_prop_value_t ptrval,
			  bool alloc_aligned)
{
  tree align, misalign = NULL_TREE, type;
  unsigned HOST_WIDE_INT aligni, misaligni = 0;
  ccp_prop_value_t alignval;
  widest_int value, mask;
  ccp_prop_value_t val;

  if (attr == NULL_TREE)
    {
      tree ptr = gimple_call_arg (stmt, 0);
      type = TREE_TYPE (ptr);
      ptrval = get_value_for_expr (ptr, true);
    }
  else
    {
      tree lhs = gimple_call_lhs (stmt);
      type = TREE_TYPE (lhs);
    }

  if (ptrval.lattice_val == UNDEFINED)
    return ptrval;
  gcc_assert ((ptrval.lattice_val == CONSTANT
	       && TREE_CODE (ptrval.value) == INTEGER_CST)
	      || wi::sext (ptrval.mask, TYPE_PRECISION (type)) == -1);
  if (attr == NULL_TREE)
    {
      /* Get aligni and misaligni from __builtin_assume_aligned.  */
      align = gimple_call_arg (stmt, 1);
      if (!tree_fits_uhwi_p (align))
	return ptrval;
      aligni = tree_to_uhwi (align);
      if (gimple_call_num_args (stmt) > 2)
	{
	  misalign = gimple_call_arg (stmt, 2);
	  if (!tree_fits_uhwi_p (misalign))
	    return ptrval;
	  misaligni = tree_to_uhwi (misalign);
	}
    }
  else
    {
      /* Get aligni and misaligni from assume_aligned or
	 alloc_align attributes.  */
      if (TREE_VALUE (attr) == NULL_TREE)
	return ptrval;
      attr = TREE_VALUE (attr);
      align = TREE_VALUE (attr);
      if (!tree_fits_uhwi_p (align))
	return ptrval;
      aligni = tree_to_uhwi (align);
      if (alloc_aligned)
	{
	  if (aligni == 0 || aligni > gimple_call_num_args (stmt))
	    return ptrval;
	  align = gimple_call_arg (stmt, aligni - 1);
	  if (!tree_fits_uhwi_p (align))
	    return ptrval;
	  aligni = tree_to_uhwi (align);
	}
      else if (TREE_CHAIN (attr) && TREE_VALUE (TREE_CHAIN (attr)))
	{
	  misalign = TREE_VALUE (TREE_CHAIN (attr));
	  if (!tree_fits_uhwi_p (misalign))
	    return ptrval;
	  misaligni = tree_to_uhwi (misalign);
	}
    }
  if (aligni <= 1 || (aligni & (aligni - 1)) != 0 || misaligni >= aligni)
    return ptrval;

  align = build_int_cst_type (type, -aligni);
  alignval = get_value_for_expr (align, true);
  bit_value_binop (BIT_AND_EXPR, TYPE_SIGN (type), TYPE_PRECISION (type),
		   &value, &mask,
		   TYPE_SIGN (type), TYPE_PRECISION (type),
		   value_to_wide_int (ptrval), ptrval.mask,
		   TYPE_SIGN (type), TYPE_PRECISION (type),
		   value_to_wide_int (alignval), alignval.mask);

  if (wi::sext (mask, TYPE_PRECISION (type)) != -1)
    {
      val.lattice_val = CONSTANT;
      val.mask = mask;
      gcc_assert ((mask.to_uhwi () & (aligni - 1)) == 0);
      gcc_assert ((value.to_uhwi () & (aligni - 1)) == 0);
      value |= misaligni;
      /* ??? Delay building trees here.  */
      val.value = wide_int_to_tree (type, value);
    }
  else
    {
      val.lattice_val = VARYING;
      val.value = NULL_TREE;
      val.mask = -1;
    }
  return val;
}
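
/* Worked example (illustrative, not from the original sources): for
     p_2 = __builtin_assume_aligned (p_1, 16, 4);
   aligni is 16 and misaligni is 4, so the pointer value is ANDed with
   -16 at the bit-lattice level and the result records "16-byte
   aligned, misaligned by 4": the low four mask bits become known and
   the low value bits become 4.  */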
/* Evaluate statement STMT.
   Valid only for assignments, calls, conditionals, and switches.  */

static ccp_prop_value_t
evaluate_stmt (gimple *stmt)
{
  ccp_prop_value_t val;
  tree simplified = NULL_TREE;
  ccp_lattice_t likelyvalue = likely_value (stmt);
  bool is_constant = false;
  unsigned int align;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "which is likely ");
      switch (likelyvalue)
	{
	case CONSTANT:
	  fprintf (dump_file, "CONSTANT");
	  break;
	case UNDEFINED:
	  fprintf (dump_file, "UNDEFINED");
	  break;
	case VARYING:
	  fprintf (dump_file, "VARYING");
	  break;
	default:;
	}
      fprintf (dump_file, "\n");
    }

  /* If the statement is likely to have a CONSTANT result, then try
     to fold the statement to determine the constant value.  */
  /* FIXME.  This is the only place that we call ccp_fold.
     Since likely_value never returns CONSTANT for calls, we will
     not attempt to fold them, including builtins that may profit.  */
  if (likelyvalue == CONSTANT)
    {
      fold_defer_overflow_warnings ();
      simplified = ccp_fold (stmt);
      if (simplified
	  && TREE_CODE (simplified) == SSA_NAME)
	{
	  /* We may not use values of something that may be simulated again,
	     see valueize_op_1.  */
	  if (SSA_NAME_IS_DEFAULT_DEF (simplified)
	      || ! prop_simulate_again_p (SSA_NAME_DEF_STMT (simplified)))
	    {
	      ccp_prop_value_t *val = get_value (simplified);
	      if (val && val->lattice_val != VARYING)
		{
		  fold_undefer_overflow_warnings (true, stmt, 0);
		  return *val;
		}
	    }
	  else
	    /* We may also not place a non-valueized copy in the lattice
	       as that might become stale if we never re-visit this stmt.  */
	    simplified = NULL_TREE;
	}
      is_constant = simplified && is_gimple_min_invariant (simplified);
      fold_undefer_overflow_warnings (is_constant, stmt, 0);
      if (is_constant)
	{
	  /* The statement produced a constant value.  */
	  val.lattice_val = CONSTANT;
	  val.value = simplified;
	  val.mask = 0;
	}
    }
  /* If the statement is likely to have a VARYING result, then do not
     bother folding the statement.  */
  else if (likelyvalue == VARYING)
    {
      enum gimple_code code = gimple_code (stmt);
      if (code == GIMPLE_ASSIGN)
	{
	  enum tree_code subcode = gimple_assign_rhs_code (stmt);

	  /* Other cases cannot satisfy is_gimple_min_invariant
	     without folding.  */
	  if (get_gimple_rhs_class (subcode) == GIMPLE_SINGLE_RHS)
	    simplified = gimple_assign_rhs1 (stmt);
	}
      else if (code == GIMPLE_SWITCH)
	simplified = gimple_switch_index (as_a <gswitch *> (stmt));
      else
	/* These cannot satisfy is_gimple_min_invariant without folding.  */
	gcc_assert (code == GIMPLE_CALL || code == GIMPLE_COND);
      is_constant = simplified && is_gimple_min_invariant (simplified);
      if (is_constant)
	{
	  /* The statement produced a constant value.  */
	  val.lattice_val = CONSTANT;
	  val.value = simplified;
	  val.mask = 0;
	}
    }
  /* If the statement result is likely UNDEFINED, make it so.  */
  else if (likelyvalue == UNDEFINED)
    {
      val.lattice_val = UNDEFINED;
      val.value = NULL_TREE;
      val.mask = 0;
      return val;
    }

  /* Resort to simplification for bitwise tracking.  */
  if (flag_tree_bit_ccp
      && (likelyvalue == CONSTANT || is_gimple_call (stmt)
	  || (gimple_assign_single_p (stmt)
	      && gimple_assign_rhs_code (stmt) == ADDR_EXPR))
      && !is_constant)
    {
      enum gimple_code code = gimple_code (stmt);
      val.lattice_val = VARYING;
      val.value = NULL_TREE;
      val.mask = -1;
      if (code == GIMPLE_ASSIGN)
	{
	  enum tree_code subcode = gimple_assign_rhs_code (stmt);
	  tree rhs1 = gimple_assign_rhs1 (stmt);
	  tree lhs = gimple_assign_lhs (stmt);
	  if ((INTEGRAL_TYPE_P (TREE_TYPE (lhs))
	       || POINTER_TYPE_P (TREE_TYPE (lhs)))
	      && (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
		  || POINTER_TYPE_P (TREE_TYPE (rhs1))))
	    switch (get_gimple_rhs_class (subcode))
	      {
	      case GIMPLE_SINGLE_RHS:
		val = get_value_for_expr (rhs1, true);
		break;

	      case GIMPLE_UNARY_RHS:
		val = bit_value_unop (subcode, TREE_TYPE (lhs), rhs1);
		break;

	      case GIMPLE_BINARY_RHS:
		val = bit_value_binop (subcode, TREE_TYPE (lhs), rhs1,
				       gimple_assign_rhs2 (stmt));
		break;

	      default:;
	      }
	}
      else if (code == GIMPLE_COND)
	{
	  enum tree_code code = gimple_cond_code (stmt);
	  tree rhs1 = gimple_cond_lhs (stmt);
	  tree rhs2 = gimple_cond_rhs (stmt);
	  if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
	      || POINTER_TYPE_P (TREE_TYPE (rhs1)))
	    val = bit_value_binop (code, TREE_TYPE (rhs1), rhs1, rhs2);
	}
      else if (gimple_call_builtin_p (stmt, BUILT_IN_NORMAL))
	{
	  tree fndecl = gimple_call_fndecl (stmt);
	  switch (DECL_FUNCTION_CODE (fndecl))
	    {
	    case BUILT_IN_MALLOC:
	    case BUILT_IN_REALLOC:
	    case BUILT_IN_CALLOC:
	    case BUILT_IN_STRDUP:
	    case BUILT_IN_STRNDUP:
	      val.lattice_val = CONSTANT;
	      val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0);
	      val.mask = ~((HOST_WIDE_INT) MALLOC_ABI_ALIGNMENT
			   / BITS_PER_UNIT - 1);
	      break;

	    case BUILT_IN_ALLOCA:
	    case BUILT_IN_ALLOCA_WITH_ALIGN:
	      align = (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_ALLOCA_WITH_ALIGN
		       ? TREE_INT_CST_LOW (gimple_call_arg (stmt, 1))
		       : BIGGEST_ALIGNMENT);
	      val.lattice_val = CONSTANT;
	      val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0);
	      val.mask = ~((HOST_WIDE_INT) align / BITS_PER_UNIT - 1);
	      break;

	    /* These builtins return their first argument, unmodified.  */
	    case BUILT_IN_MEMCPY:
	    case BUILT_IN_MEMMOVE:
	    case BUILT_IN_MEMSET:
	    case BUILT_IN_STRCPY:
	    case BUILT_IN_STRNCPY:
	    case BUILT_IN_MEMCPY_CHK:
	    case BUILT_IN_MEMMOVE_CHK:
	    case BUILT_IN_MEMSET_CHK:
	    case BUILT_IN_STRCPY_CHK:
	    case BUILT_IN_STRNCPY_CHK:
	      val = get_value_for_expr (gimple_call_arg (stmt, 0), true);
	      break;

	    case BUILT_IN_ASSUME_ALIGNED:
	      val = bit_value_assume_aligned (stmt, NULL_TREE, val, false);
	      break;

	    case BUILT_IN_ALIGNED_ALLOC:
	      {
		tree align = get_constant_value (gimple_call_arg (stmt, 0));
		if (align
		    && tree_fits_uhwi_p (align))
		  {
		    unsigned HOST_WIDE_INT aligni = tree_to_uhwi (align);
		    if (aligni > 1
			/* align must be power-of-two */
			&& (aligni & (aligni - 1)) == 0)
		      {
			val.lattice_val = CONSTANT;
			val.value = build_int_cst (ptr_type_node, 0);
			val.mask = -aligni;
		      }
		  }
		break;
	      }

	    default:;
	    }
	}
      if (is_gimple_call (stmt) && gimple_call_lhs (stmt))
	{
	  tree fntype = gimple_call_fntype (stmt);
	  if (fntype)
	    {
	      tree attrs = lookup_attribute ("assume_aligned",
					     TYPE_ATTRIBUTES (fntype));
	      if (attrs)
		val = bit_value_assume_aligned (stmt, attrs, val, false);
	      attrs = lookup_attribute ("alloc_align",
					TYPE_ATTRIBUTES (fntype));
	      if (attrs)
		val = bit_value_assume_aligned (stmt, attrs, val, true);
	    }
	}
      is_constant = (val.lattice_val == CONSTANT);
    }

  if (flag_tree_bit_ccp
      && ((is_constant && TREE_CODE (val.value) == INTEGER_CST)
	  || !is_constant)
      && gimple_get_lhs (stmt)
      && TREE_CODE (gimple_get_lhs (stmt)) == SSA_NAME)
    {
      tree lhs = gimple_get_lhs (stmt);
      wide_int nonzero_bits = get_nonzero_bits (lhs);
      if (nonzero_bits != -1)
	{
	  if (!is_constant)
	    {
	      val.lattice_val = CONSTANT;
	      val.value = build_zero_cst (TREE_TYPE (lhs));
	      val.mask = extend_mask (nonzero_bits, TYPE_SIGN (TREE_TYPE (lhs)));
	      is_constant = true;
	    }
	  else
	    {
	      if (wi::bit_and_not (val.value, nonzero_bits) != 0)
		val.value = wide_int_to_tree (TREE_TYPE (lhs),
					      nonzero_bits & val.value);
	      if (nonzero_bits == 0)
		val.mask = 0;
	      else
		val.mask = val.mask & extend_mask (nonzero_bits,
						   TYPE_SIGN (TREE_TYPE (lhs)));
	    }
	}
    }

  /* The statement produced a nonconstant value.  */
  if (!is_constant)
    {
      /* The statement produced a copy.  */
      if (simplified && TREE_CODE (simplified) == SSA_NAME
	  && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (simplified))
	{
	  val.lattice_val = CONSTANT;
	  val.value = simplified;
	  val.mask = -1;
	}
      /* The statement is VARYING.  */
      else
	{
	  val.lattice_val = VARYING;
	  val.value = NULL_TREE;
	  val.mask = -1;
	}
    }

  return val;
}
typedef hash_table<nofree_ptr_hash<gimple> > gimple_htab;
/* Given a BUILT_IN_STACK_SAVE value SAVED_VAL, insert a clobber of VAR before
   each matching BUILT_IN_STACK_RESTORE.  Mark visited phis in VISITED.  */

static void
insert_clobber_before_stack_restore (tree saved_val, tree var,
				     gimple_htab **visited)
{
  gimple *stmt;
  gassign *clobber_stmt;
  tree clobber;
  imm_use_iterator iter;
  gimple_stmt_iterator i;
  gimple **slot;

  FOR_EACH_IMM_USE_STMT (stmt, iter, saved_val)
    if (gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
      {
	clobber = build_constructor (TREE_TYPE (var),
				     NULL);
	TREE_THIS_VOLATILE (clobber) = 1;
	clobber_stmt = gimple_build_assign (var, clobber);

	i = gsi_for_stmt (stmt);
	gsi_insert_before (&i, clobber_stmt, GSI_SAME_STMT);
      }
    else if (gimple_code (stmt) == GIMPLE_PHI)
      {
	if (!*visited)
	  *visited = new gimple_htab (10);

	slot = (*visited)->find_slot (stmt, INSERT);
	if (*slot != NULL)
	  continue;

	*slot = stmt;
	insert_clobber_before_stack_restore (gimple_phi_result (stmt), var,
					     visited);
      }
    else if (gimple_assign_ssa_name_copy_p (stmt))
      insert_clobber_before_stack_restore (gimple_assign_lhs (stmt), var,
					   visited);
    else if (chkp_gimple_call_builtin_p (stmt, BUILT_IN_CHKP_BNDRET))
      continue;
    else
      gcc_assert (is_gimple_debug (stmt));
}
/* Advance the iterator to the previous non-debug gimple statement in the same
   or dominating basic block.  */

static inline void
gsi_prev_dom_bb_nondebug (gimple_stmt_iterator *i)
{
  basic_block dom;

  gsi_prev_nondebug (i);
  while (gsi_end_p (*i))
    {
      dom = get_immediate_dominator (CDI_DOMINATORS, i->bb);
      if (dom == NULL || dom == ENTRY_BLOCK_PTR_FOR_FN (cfun))
	return;

      *i = gsi_last_bb (dom);
    }
}
/* Find a BUILT_IN_STACK_SAVE dominating gsi_stmt (I), and insert
   a clobber of VAR before each matching BUILT_IN_STACK_RESTORE.

   It is possible that BUILT_IN_STACK_SAVE cannot be found in a dominator when
   a previous pass (such as DOM) duplicated it along multiple paths to a BB.
   In that case the function gives up without inserting the clobbers.  */

static void
insert_clobbers_for_var (gimple_stmt_iterator i, tree var)
{
  gimple *stmt;
  tree saved_val;
  gimple_htab *visited = NULL;

  for (; !gsi_end_p (i); gsi_prev_dom_bb_nondebug (&i))
    {
      stmt = gsi_stmt (i);

      if (!gimple_call_builtin_p (stmt, BUILT_IN_STACK_SAVE))
	continue;

      saved_val = gimple_call_lhs (stmt);
      if (saved_val == NULL_TREE)
	continue;

      insert_clobber_before_stack_restore (saved_val, var, &visited);
      break;
    }

  delete visited;
}
/* Detects a __builtin_alloca_with_align with constant size argument.  Declares
   fixed-size array and returns the address, if found, otherwise returns
   NULL_TREE.  */

static tree
fold_builtin_alloca_with_align (gimple *stmt)
{
  unsigned HOST_WIDE_INT size, threshold, n_elem;
  tree lhs, arg, block, var, elem_type, array_type;

  /* Get lhs.  */
  lhs = gimple_call_lhs (stmt);
  if (lhs == NULL_TREE)
    return NULL_TREE;

  /* Detect constant argument.  */
  arg = get_constant_value (gimple_call_arg (stmt, 0));
  if (arg == NULL_TREE
      || TREE_CODE (arg) != INTEGER_CST
      || !tree_fits_uhwi_p (arg))
    return NULL_TREE;

  size = tree_to_uhwi (arg);

  /* Heuristic: don't fold large allocas.  */
  threshold = (unsigned HOST_WIDE_INT)PARAM_VALUE (PARAM_LARGE_STACK_FRAME);
  /* In case the alloca is located at function entry, it has the same lifetime
     as a declared array, so we allow a larger size.  */
  block = gimple_block (stmt);
  if (!(cfun->after_inlining
	&& block
	&& TREE_CODE (BLOCK_SUPERCONTEXT (block)) == FUNCTION_DECL))
    threshold /= 10;
  if (size > threshold)
    return NULL_TREE;

  /* Declare array.  */
  elem_type = build_nonstandard_integer_type (BITS_PER_UNIT, 1);
  n_elem = size * 8 / BITS_PER_UNIT;
  array_type = build_array_type_nelts (elem_type, n_elem);
  var = create_tmp_var (array_type);
  SET_DECL_ALIGN (var, TREE_INT_CST_LOW (gimple_call_arg (stmt, 1)));
  {
    struct ptr_info_def *pi = SSA_NAME_PTR_INFO (lhs);
    if (pi != NULL && !pi->pt.anything)
      {
	bool singleton_p;
	unsigned uid;
	singleton_p = pt_solution_singleton_or_null_p (&pi->pt, &uid);
	gcc_assert (singleton_p);
	SET_DECL_PT_UID (var, uid);
      }
  }

  /* Fold alloca to the address of the array.  */
  return fold_convert (TREE_TYPE (lhs), build_fold_addr_expr (var));
}
/* Fold the stmt at *GSI with CCP specific information that propagating
   and regular folding does not catch.  */

static bool
ccp_fold_stmt (gimple_stmt_iterator *gsi)
{
  gimple *stmt = gsi_stmt (*gsi);

  switch (gimple_code (stmt))
    {
    case GIMPLE_COND:
      {
	gcond *cond_stmt = as_a <gcond *> (stmt);
	ccp_prop_value_t val;
	/* Statement evaluation will handle type mismatches in constants
	   more gracefully than the final propagation.  This allows us to
	   fold more conditionals here.  */
	val = evaluate_stmt (stmt);
	if (val.lattice_val != CONSTANT
	    || TREE_CODE (val.value) != INTEGER_CST)
	  return false;

	if (dump_file)
	  {
	    fprintf (dump_file, "Folding predicate ");
	    print_gimple_expr (dump_file, stmt, 0);
	    fprintf (dump_file, " to ");
	    print_generic_expr (dump_file, val.value);
	    fprintf (dump_file, "\n");
	  }

	if (integer_zerop (val.value))
	  gimple_cond_make_false (cond_stmt);
	else
	  gimple_cond_make_true (cond_stmt);

	return true;
      }

    case GIMPLE_CALL:
      {
	tree lhs = gimple_call_lhs (stmt);
	int flags = gimple_call_flags (stmt);
	tree val;
	tree argt;
	bool changed = false;
	unsigned i;

	/* If the call was folded into a constant make sure it goes
	   away even if we cannot propagate into all uses because of
	   type issues.  */
	if (lhs
	    && TREE_CODE (lhs) == SSA_NAME
	    && (val = get_constant_value (lhs))
	    /* Don't optimize away calls that have side-effects.  */
	    && (flags & (ECF_CONST|ECF_PURE)) != 0
	    && (flags & ECF_LOOPING_CONST_OR_PURE) == 0)
	  {
	    tree new_rhs = unshare_expr (val);
	    bool res;
	    if (!useless_type_conversion_p (TREE_TYPE (lhs),
					    TREE_TYPE (new_rhs)))
	      new_rhs = fold_convert (TREE_TYPE (lhs), new_rhs);
	    res = update_call_from_tree (gsi, new_rhs);
	    gcc_assert (res);
	    return true;
	  }

	/* Internal calls provide no argument types, so the extra laxity
	   for normal calls does not apply.  */
	if (gimple_call_internal_p (stmt))
	  return false;

	/* The heuristic of fold_builtin_alloca_with_align differs before and
	   after inlining, so we don't require the arg to be changed into a
	   constant for folding, but just to be constant.  */
	if (gimple_call_builtin_p (stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
	  {
	    tree new_rhs = fold_builtin_alloca_with_align (stmt);
	    if (new_rhs)
	      {
		bool res = update_call_from_tree (gsi, new_rhs);
		tree var = TREE_OPERAND (TREE_OPERAND (new_rhs, 0), 0);
		gcc_assert (res);
		insert_clobbers_for_var (*gsi, var);
		return true;
	      }
	  }

	/* Propagate into the call arguments.  Compared to replace_uses_in
	   this can use the argument slot types for type verification
	   instead of the current argument type.  We also can safely
	   drop qualifiers here as we are dealing with constants anyway.  */
	argt = TYPE_ARG_TYPES (gimple_call_fntype (stmt));
	for (i = 0; i < gimple_call_num_args (stmt) && argt;
	     ++i, argt = TREE_CHAIN (argt))
	  {
	    tree arg = gimple_call_arg (stmt, i);
	    if (TREE_CODE (arg) == SSA_NAME
		&& (val = get_constant_value (arg))
		&& useless_type_conversion_p
		     (TYPE_MAIN_VARIANT (TREE_VALUE (argt)),
		      TYPE_MAIN_VARIANT (TREE_TYPE (val))))
	      {
		gimple_call_set_arg (stmt, i, unshare_expr (val));
		changed = true;
	      }
	  }

	return changed;
      }

    case GIMPLE_ASSIGN:
      {
	tree lhs = gimple_assign_lhs (stmt);
	tree val;

	/* If we have a load that turned out to be constant replace it
	   as we cannot propagate into all uses in all cases.  */
	if (gimple_assign_single_p (stmt)
	    && TREE_CODE (lhs) == SSA_NAME
	    && (val = get_constant_value (lhs)))
	  {
	    tree rhs = unshare_expr (val);
	    if (!useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (rhs)))
	      rhs = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (lhs), rhs);
	    gimple_assign_set_rhs_from_tree (gsi, rhs);
	    return true;
	  }

	return false;
      }

    default:
      return false;
    }
}
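/* A small sketch of the GIMPLE_COND case above (x_1 is an example SSA
   name): if CCP has determined that x_1 is the constant 0, a conditional
   such as

       if (x_1 != 0)
         goto <bb 3>;
       else
         goto <bb 4>;

   has its predicate replaced with a constant-false condition via
   gimple_cond_make_false; the subsequent CFG cleanup removes the dead
   edge and, if nothing else reaches it, the then-block as well.  */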
/* Visit the assignment statement STMT.  Set the value of its LHS to the
   value computed by the RHS and store LHS in *OUTPUT_P.  If STMT
   creates virtual definitions, set the value of each new name to that
   of the RHS (if we can derive a constant out of the RHS).
   Value-returning call statements also perform an assignment, and
   are handled here.  */

static enum ssa_prop_result
visit_assignment (gimple *stmt, tree *output_p)
{
  ccp_prop_value_t val;
  enum ssa_prop_result retval = SSA_PROP_NOT_INTERESTING;

  tree lhs = gimple_get_lhs (stmt);
  if (TREE_CODE (lhs) == SSA_NAME)
    {
      /* Evaluate the statement, which could be
	 either a GIMPLE_ASSIGN or a GIMPLE_CALL.  */
      val = evaluate_stmt (stmt);

      /* If STMT is an assignment to an SSA_NAME, we only have one
	 value to set.  */
      if (set_lattice_value (lhs, &val))
	{
	  *output_p = lhs;
	  if (val.lattice_val == VARYING)
	    retval = SSA_PROP_VARYING;
	  else
	    retval = SSA_PROP_INTERESTING;
	}
    }

  return retval;
}
/* Visit the conditional statement STMT.  Return SSA_PROP_INTERESTING
   if it can determine which edge will be taken.  Otherwise, return
   SSA_PROP_VARYING.  */

static enum ssa_prop_result
visit_cond_stmt (gimple *stmt, edge *taken_edge_p)
{
  ccp_prop_value_t val;
  basic_block block;

  block = gimple_bb (stmt);
  val = evaluate_stmt (stmt);
  if (val.lattice_val != CONSTANT
      || TREE_CODE (val.value) != INTEGER_CST)
    return SSA_PROP_VARYING;

  /* Find which edge out of the conditional block will be taken and add it
     to the worklist.  If no single edge can be determined statically,
     return SSA_PROP_VARYING to feed all the outgoing edges to the
     propagation engine.  */
  *taken_edge_p = find_taken_edge (block, val.value);
  if (*taken_edge_p)
    return SSA_PROP_INTERESTING;
  else
    return SSA_PROP_VARYING;
}
/* Evaluate statement STMT.  If the statement produces an output value and
   its evaluation changes the lattice value of its output, return
   SSA_PROP_INTERESTING and set *OUTPUT_P to the SSA_NAME holding the
   output value.

   If STMT is a conditional branch and we can determine its truth
   value, set *TAKEN_EDGE_P accordingly.  If STMT produces a varying
   value, return SSA_PROP_VARYING.  */

static enum ssa_prop_result
ccp_visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
{
  tree def;
  ssa_op_iter iter;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nVisiting statement:\n");
      print_gimple_stmt (dump_file, stmt, 0, dump_flags);
    }

  switch (gimple_code (stmt))
    {
      case GIMPLE_ASSIGN:
	/* If the statement is an assignment that produces a single
	   output value, evaluate its RHS to see if the lattice value of
	   its output has changed.  */
	return visit_assignment (stmt, output_p);

      case GIMPLE_CALL:
	/* A value-returning call also performs an assignment.  */
	if (gimple_call_lhs (stmt) != NULL_TREE)
	  return visit_assignment (stmt, output_p);
	break;

      case GIMPLE_COND:
      case GIMPLE_SWITCH:
	/* If STMT is a conditional branch, see if we can determine
	   which branch will be taken.  */
	/* FIXME.  It appears that we should be able to optimize
	   computed GOTOs here as well.  */
	return visit_cond_stmt (stmt, taken_edge_p);

      default:
	break;
    }

  /* Any other kind of statement is not interesting for constant
     propagation and, therefore, not worth simulating.  */
  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "No interesting values produced.  Marked VARYING.\n");

  /* Definitions made by statements other than assignments to
     SSA_NAMEs represent unknown modifications to their outputs.
     Mark them VARYING.  */
  FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
    set_value_varying (def);

  return SSA_PROP_VARYING;
}
/* Main entry point for SSA Conditional Constant Propagation.  If NONZERO_P,
   record nonzero bits.  */

static unsigned int
do_ssa_ccp (bool nonzero_p)
{
  unsigned int todo = 0;
  calculate_dominance_info (CDI_DOMINATORS);

  ccp_initialize ();
  ssa_propagate (ccp_visit_stmt, ccp_visit_phi_node);
  if (ccp_finalize (nonzero_p || flag_ipa_bit_cp))
    {
      todo = (TODO_cleanup_cfg | TODO_update_ssa);

      /* ccp_finalize does not preserve loop-closed ssa.  */
      loops_state_clear (LOOP_CLOSED_SSA);
    }

  free_dominance_info (CDI_DOMINATORS);
  return todo;
}
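/* A minimal example of what this entry point achieves (sketch only,
   the function below is illustrative):

       int f (void)
       {
         int x = 4;
         int y;
         if (x > 3)
           y = x + 1;
         else
           y = 0;
         return y;
       }

   CCP propagates the constant 4 into the comparison, marks the else edge
   as not executable, folds the condition and the addition, and after the
   requested TODO_cleanup_cfg the function simply returns the constant 5.  */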
const pass_data pass_data_ccp =
{
  GIMPLE_PASS, /* type */
  "ccp", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_CCP, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_address_taken, /* todo_flags_finish */
};

class pass_ccp : public gimple_opt_pass
{
public:
  pass_ccp (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_ccp, ctxt), nonzero_p (false)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_ccp (m_ctxt); }
  void set_pass_param (unsigned int n, bool param)
    {
      gcc_assert (n == 0);
      nonzero_p = param;
    }
  virtual bool gate (function *) { return flag_tree_ccp != 0; }
  virtual unsigned int execute (function *) { return do_ssa_ccp (nonzero_p); }

private:
  /* Determines whether the pass instance records nonzero bits.  */
  bool nonzero_p;
}; // class pass_ccp

gimple_opt_pass *
make_pass_ccp (gcc::context *ctxt)
{
  return new pass_ccp (ctxt);
}
/* Try to optimize out __builtin_stack_restore.  Optimize it out
   if there is another __builtin_stack_restore in the same basic
   block and no calls or ASM_EXPRs are in between, or if this block's
   only outgoing edge is to EXIT_BLOCK and there are no calls or
   ASM_EXPRs after this __builtin_stack_restore.  */

static tree
optimize_stack_restore (gimple_stmt_iterator i)
{
  tree callee;
  gimple *stmt;

  basic_block bb = gsi_bb (i);
  gimple *call = gsi_stmt (i);

  if (gimple_code (call) != GIMPLE_CALL
      || gimple_call_num_args (call) != 1
      || TREE_CODE (gimple_call_arg (call, 0)) != SSA_NAME
      || !POINTER_TYPE_P (TREE_TYPE (gimple_call_arg (call, 0))))
    return NULL_TREE;

  for (gsi_next (&i); !gsi_end_p (i); gsi_next (&i))
    {
      stmt = gsi_stmt (i);
      if (gimple_code (stmt) == GIMPLE_ASM)
	return NULL_TREE;
      if (gimple_code (stmt) != GIMPLE_CALL)
	continue;

      callee = gimple_call_fndecl (stmt);
      if (!callee
	  || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL
	  /* All regular builtins are ok, just obviously not alloca.  */
	  || DECL_FUNCTION_CODE (callee) == BUILT_IN_ALLOCA
	  || DECL_FUNCTION_CODE (callee) == BUILT_IN_ALLOCA_WITH_ALIGN)
	return NULL_TREE;

      if (DECL_FUNCTION_CODE (callee) == BUILT_IN_STACK_RESTORE)
	goto second_stack_restore;
    }

  if (!gsi_end_p (i))
    return NULL_TREE;

  /* Allow one successor of the exit block, or zero successors.  */
  switch (EDGE_COUNT (bb->succs))
    {
    case 0:
      break;
    case 1:
      if (single_succ_edge (bb)->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
	return NULL_TREE;
      break;
    default:
      return NULL_TREE;
    }
 second_stack_restore:

  /* If there's exactly one use, then zap the call to __builtin_stack_save.
     If there are multiple uses, then the last one should remove the call.
     In any case, whether the call to __builtin_stack_save can be removed
     or not is irrelevant to removing the call to __builtin_stack_restore.  */
  if (has_single_use (gimple_call_arg (call, 0)))
    {
      gimple *stack_save = SSA_NAME_DEF_STMT (gimple_call_arg (call, 0));
      if (is_gimple_call (stack_save))
	{
	  callee = gimple_call_fndecl (stack_save);
	  if (callee
	      && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
	      && DECL_FUNCTION_CODE (callee) == BUILT_IN_STACK_SAVE)
	    {
	      gimple_stmt_iterator stack_save_gsi;
	      tree rhs;

	      stack_save_gsi = gsi_for_stmt (stack_save);
	      rhs = build_int_cst (TREE_TYPE (gimple_call_arg (call, 0)), 0);
	      update_call_from_tree (&stack_save_gsi, rhs);
	    }
	}
    }

  /* No effect, so the statement will be deleted.  */
  return integer_zero_node;
}
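/* For illustration (a sketch, the SSA names are made up): a variable-length
   array in an inner block typically expands to

       saved_1 = __builtin_stack_save ();
       ... uses of the VLA ...
       __builtin_stack_restore (saved_1);
       ... no further calls or asms ...
       return;

   When the block falls straight through to the exit block with no
   intervening calls or GIMPLE_ASM statements, the restore has no
   observable effect; integer_zero_node is returned so the caller deletes
   the call, and if the saved value has a single use the result of
   __builtin_stack_save is replaced by a constant so that call can be
   removed as well.  */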
/* If va_list type is a simple pointer and nothing special is needed,
   optimize __builtin_va_start (&ap, 0) into ap = __builtin_next_arg (0),
   __builtin_va_end (&ap) out as NOP and __builtin_va_copy into a simple
   pointer assignment.  */

static tree
optimize_stdarg_builtin (gimple *call)
{
  tree callee, lhs, rhs, cfun_va_list;
  bool va_list_simple_ptr;
  location_t loc = gimple_location (call);

  if (gimple_code (call) != GIMPLE_CALL)
    return NULL_TREE;

  callee = gimple_call_fndecl (call);

  cfun_va_list = targetm.fn_abi_va_list (callee);
  va_list_simple_ptr = POINTER_TYPE_P (cfun_va_list)
		       && (TREE_TYPE (cfun_va_list) == void_type_node
			   || TREE_TYPE (cfun_va_list) == char_type_node);

  switch (DECL_FUNCTION_CODE (callee))
    {
    case BUILT_IN_VA_START:
      if (!va_list_simple_ptr
	  || targetm.expand_builtin_va_start != NULL
	  || !builtin_decl_explicit_p (BUILT_IN_NEXT_ARG))
	return NULL_TREE;

      if (gimple_call_num_args (call) != 2)
	return NULL_TREE;

      lhs = gimple_call_arg (call, 0);
      if (!POINTER_TYPE_P (TREE_TYPE (lhs))
	  || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs)))
	     != TYPE_MAIN_VARIANT (cfun_va_list))
	return NULL_TREE;

      lhs = build_fold_indirect_ref_loc (loc, lhs);
      rhs = build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_NEXT_ARG),
				 1, integer_zero_node);
      rhs = fold_convert_loc (loc, TREE_TYPE (lhs), rhs);
      return build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, rhs);

    case BUILT_IN_VA_COPY:
      if (!va_list_simple_ptr)
	return NULL_TREE;

      if (gimple_call_num_args (call) != 2)
	return NULL_TREE;

      lhs = gimple_call_arg (call, 0);
      if (!POINTER_TYPE_P (TREE_TYPE (lhs))
	  || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs)))
	     != TYPE_MAIN_VARIANT (cfun_va_list))
	return NULL_TREE;

      lhs = build_fold_indirect_ref_loc (loc, lhs);
      rhs = gimple_call_arg (call, 1);
      if (TYPE_MAIN_VARIANT (TREE_TYPE (rhs))
	  != TYPE_MAIN_VARIANT (cfun_va_list))
	return NULL_TREE;

      rhs = fold_convert_loc (loc, TREE_TYPE (lhs), rhs);
      return build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, rhs);

    case BUILT_IN_VA_END:
      /* No effect, so the statement will be deleted.  */
      return integer_zero_node;

    default:
      gcc_unreachable ();
    }
}
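/* A sketch of the BUILT_IN_VA_START case on a target whose va_list is a
   plain pointer (example code only):

       void f (int n, ...)
       {
         va_list ap;
         va_start (ap, n);    ->  ap = __builtin_next_arg (0);
         ...
         va_end (ap);         ->  deleted
       }

   va_copy (dst, src) is likewise reduced to the pointer assignment
   dst = src when the same conditions hold.  */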
/* Attempt to make the block of __builtin_unreachable I unreachable by changing
   the incoming jumps.  Return true if at least one jump was changed.  */

static bool
optimize_unreachable (gimple_stmt_iterator i)
{
  basic_block bb = gsi_bb (i);
  gimple_stmt_iterator gsi;
  gimple *stmt;
  edge_iterator ei;
  edge e;
  bool ret;

  if (flag_sanitize & SANITIZE_UNREACHABLE)
    return false;

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      stmt = gsi_stmt (gsi);

      if (is_gimple_debug (stmt))
	continue;

      if (glabel *label_stmt = dyn_cast <glabel *> (stmt))
	{
	  /* Verify we do not need to preserve the label.  */
	  if (FORCED_LABEL (gimple_label_label (label_stmt)))
	    return false;

	  continue;
	}

      /* Only handle the case that __builtin_unreachable is the first statement
	 in the block.  We rely on DCE to remove stmts without side-effects
	 before __builtin_unreachable.  */
      if (gsi_stmt (gsi) != gsi_stmt (i))
	return false;
    }

  ret = false;
  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      gsi = gsi_last_bb (e->src);
      if (gsi_end_p (gsi))
	continue;

      stmt = gsi_stmt (gsi);
      if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
	{
	  if (e->flags & EDGE_TRUE_VALUE)
	    gimple_cond_make_false (cond_stmt);
	  else if (e->flags & EDGE_FALSE_VALUE)
	    gimple_cond_make_true (cond_stmt);
	  else
	    continue;
	  update_stmt (cond_stmt);
	}
      else
	{
	  /* Todo: handle other cases.  Note that unreachable switch case
	     statements have already been removed.  */
	  continue;
	}

      ret = true;
    }

  return ret;
}
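/* For illustration (sketch, the SSA name and block numbers are made up):
   given

       if (x_1 > 10)
         goto <bb 5>;
       else
         goto <bb 6>;
       ...
     <bb 5>:
       __builtin_unreachable ();

   the condition feeding the incoming true edge is rewritten with
   gimple_cond_make_false, so the branch into the unreachable block can
   never be taken and CFG cleanup later removes the block.  */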
/* Optimize
     mask_2 = 1 << cnt_1;
     _4 = __atomic_fetch_or_* (ptr_6, mask_2, _3);
     _5 = _4 & mask_2;
   to
     _4 = ATOMIC_BIT_TEST_AND_SET (ptr_6, cnt_1, 0, _3);
     _5 = _4;
   If _5 is only used in _5 != 0 or _5 == 0 comparisons, 1
   is passed instead of 0, and the builtin just returns a zero
   or 1 value instead of the actual bit.
   Similarly for __sync_fetch_and_or_* (without the ", _3" part
   in there), and/or if mask_2 is a power of 2 constant.
   Similarly for xor instead of or, use ATOMIC_BIT_TEST_AND_COMPLEMENT
   in that case.  And similarly for and instead of or, except that
   the second argument to the builtin needs to be one's complement
   of the mask instead of mask.  */
static void
optimize_atomic_bit_test_and (gimple_stmt_iterator *gsip,
			      enum internal_fn fn, bool has_model_arg,
			      bool after)
{
  gimple *call = gsi_stmt (*gsip);
  tree lhs = gimple_call_lhs (call);
  use_operand_p use_p;
  gimple *use_stmt;
  tree mask, bit;
  optab optab;

  if (!flag_inline_atomics
      || optimize_debug
      || !gimple_call_builtin_p (call, BUILT_IN_NORMAL)
      || !lhs
      || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs)
      || !single_imm_use (lhs, &use_p, &use_stmt)
      || !is_gimple_assign (use_stmt)
      || gimple_assign_rhs_code (use_stmt) != BIT_AND_EXPR
      || !gimple_vdef (call))
    return;

  switch (fn)
    {
    case IFN_ATOMIC_BIT_TEST_AND_SET:
      optab = atomic_bit_test_and_set_optab;
      break;
    case IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT:
      optab = atomic_bit_test_and_complement_optab;
      break;
    case IFN_ATOMIC_BIT_TEST_AND_RESET:
      optab = atomic_bit_test_and_reset_optab;
      break;
    default:
      return;
    }

  if (optab_handler (optab, TYPE_MODE (TREE_TYPE (lhs))) == CODE_FOR_nothing)
    return;

  mask = gimple_call_arg (call, 1);
  tree use_lhs = gimple_assign_lhs (use_stmt);
  if (!use_lhs)
    return;

  if (TREE_CODE (mask) == INTEGER_CST)
    {
      if (fn == IFN_ATOMIC_BIT_TEST_AND_RESET)
	mask = const_unop (BIT_NOT_EXPR, TREE_TYPE (mask), mask);
      mask = fold_convert (TREE_TYPE (lhs), mask);
      int ibit = tree_log2 (mask);
      if (ibit < 0)
	return;
      bit = build_int_cst (TREE_TYPE (lhs), ibit);
    }
  else if (TREE_CODE (mask) == SSA_NAME)
    {
      gimple *g = SSA_NAME_DEF_STMT (mask);
      if (fn == IFN_ATOMIC_BIT_TEST_AND_RESET)
	{
	  if (!is_gimple_assign (g)
	      || gimple_assign_rhs_code (g) != BIT_NOT_EXPR)
	    return;
	  mask = gimple_assign_rhs1 (g);
	  if (TREE_CODE (mask) != SSA_NAME)
	    return;
	  g = SSA_NAME_DEF_STMT (mask);
	}
      if (!is_gimple_assign (g)
	  || gimple_assign_rhs_code (g) != LSHIFT_EXPR
	  || !integer_onep (gimple_assign_rhs1 (g)))
	return;
      bit = gimple_assign_rhs2 (g);
    }
  else
    return;

  if (gimple_assign_rhs1 (use_stmt) == lhs)
    {
      if (!operand_equal_p (gimple_assign_rhs2 (use_stmt), mask, 0))
	return;
    }
  else if (gimple_assign_rhs2 (use_stmt) != lhs
	   || !operand_equal_p (gimple_assign_rhs1 (use_stmt), mask, 0))
    return;

  bool use_bool = true;
  bool has_debug_uses = false;
  imm_use_iterator iter;
  gimple *g;

  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use_lhs))
    use_bool = false;
  FOR_EACH_IMM_USE_STMT (g, iter, use_lhs)
    {
      enum tree_code code = ERROR_MARK;
      tree op0 = NULL_TREE, op1 = NULL_TREE;
      if (is_gimple_debug (g))
	{
	  has_debug_uses = true;
	  continue;
	}
      else if (is_gimple_assign (g))
	switch (gimple_assign_rhs_code (g))
	  {
	  case COND_EXPR:
	    op1 = gimple_assign_rhs1 (g);
	    code = TREE_CODE (op1);
	    op0 = TREE_OPERAND (op1, 0);
	    op1 = TREE_OPERAND (op1, 1);
	    break;
	  case EQ_EXPR:
	  case NE_EXPR:
	    code = gimple_assign_rhs_code (g);
	    op0 = gimple_assign_rhs1 (g);
	    op1 = gimple_assign_rhs2 (g);
	    break;
	  default:
	    break;
	  }
      else if (gimple_code (g) == GIMPLE_COND)
	{
	  code = gimple_cond_code (g);
	  op0 = gimple_cond_lhs (g);
	  op1 = gimple_cond_rhs (g);
	}

      if ((code == EQ_EXPR || code == NE_EXPR)
	  && op0 == use_lhs
	  && integer_zerop (op1))
	{
	  use_operand_p use_p;
	  int n = 0;
	  FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
	    n++;
	  if (n == 1)
	    continue;
	}

      use_bool = false;
      BREAK_FROM_IMM_USE_STMT (iter);
    }

  tree new_lhs = make_ssa_name (TREE_TYPE (lhs));
  tree flag = build_int_cst (TREE_TYPE (lhs), use_bool);
  if (has_model_arg)
    g = gimple_build_call_internal (fn, 4, gimple_call_arg (call, 0),
				    bit, flag, gimple_call_arg (call, 2));
  else
    g = gimple_build_call_internal (fn, 3, gimple_call_arg (call, 0),
				    bit, flag);
  gimple_call_set_lhs (g, new_lhs);
  gimple_set_location (g, gimple_location (call));
  gimple_set_vuse (g, gimple_vuse (call));
  gimple_set_vdef (g, gimple_vdef (call));
  bool throws = stmt_can_throw_internal (call);
  gimple_call_set_nothrow (as_a <gcall *> (g),
			   gimple_call_nothrow_p (as_a <gcall *> (call)));
  SSA_NAME_DEF_STMT (gimple_vdef (call)) = g;
  gimple_stmt_iterator gsi = *gsip;
  gsi_insert_after (&gsi, g, GSI_NEW_STMT);
  edge e = NULL;
  if (throws)
    {
      maybe_clean_or_replace_eh_stmt (call, g);
      if (after || (use_bool && has_debug_uses))
	e = find_fallthru_edge (gsi_bb (gsi)->succs);
    }
  if (after)
    {
      /* The internal function returns the value of the specified bit
	 before the atomic operation.  If we are interested in the value
	 of the specified bit after the atomic operation (only makes sense
	 for xor, otherwise the bit content is compile time known),
	 we need to invert the bit.  */
      g = gimple_build_assign (make_ssa_name (TREE_TYPE (lhs)),
			       BIT_XOR_EXPR, new_lhs,
			       use_bool ? build_int_cst (TREE_TYPE (lhs), 1)
					: mask);
      new_lhs = gimple_assign_lhs (g);
      if (throws)
	{
	  gsi_insert_on_edge_immediate (e, g);
	  gsi = gsi_for_stmt (g);
	}
      else
	gsi_insert_after (&gsi, g, GSI_NEW_STMT);
    }
  if (use_bool && has_debug_uses)
    {
      tree temp = NULL_TREE;
      if (!throws || after || single_pred_p (e->dest))
	{
	  temp = make_node (DEBUG_EXPR_DECL);
	  DECL_ARTIFICIAL (temp) = 1;
	  TREE_TYPE (temp) = TREE_TYPE (lhs);
	  SET_DECL_MODE (temp, TYPE_MODE (TREE_TYPE (lhs)));
	  tree t = build2 (LSHIFT_EXPR, TREE_TYPE (lhs), new_lhs, bit);
	  g = gimple_build_debug_bind (temp, t, g);
	  if (throws && !after)
	    {
	      gsi = gsi_after_labels (e->dest);
	      gsi_insert_before (&gsi, g, GSI_SAME_STMT);
	    }
	  else
	    gsi_insert_after (&gsi, g, GSI_NEW_STMT);
	}
      FOR_EACH_IMM_USE_STMT (g, iter, use_lhs)
	if (is_gimple_debug (g))
	  {
	    use_operand_p use_p;
	    if (temp == NULL_TREE)
	      gimple_debug_bind_reset_value (g);
	    else
	      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
		SET_USE (use_p, temp);
	    update_stmt (g);
	  }
    }
  SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_lhs)
    = SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use_lhs);
  replace_uses_by (use_lhs, new_lhs);
  gsi = gsi_for_stmt (use_stmt);
  gsi_remove (&gsi, true);
  release_defs (use_stmt);
  gsi_remove (gsip, true);
  release_ssa_name (lhs);
}
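/* A source-level sketch of the transformation (example code, not taken
   from a testcase):

       _Bool f (unsigned *p)
       {
         return (__atomic_fetch_or (p, 4, __ATOMIC_SEQ_CST) & 4) != 0;
       }

   Because the mask is a single bit and the result is only compared
   against zero, the call is replaced by the internal function
   ATOMIC_BIT_TEST_AND_SET with bit number 2 and the boolean flag set,
   which targets with a suitable optab can expand to an instruction such
   as x86's lock bts.  */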
/* Optimize
   a = {};
   b = a;
   into
   a = {};
   b = {};
   Similarly for memset (&a, ..., sizeof (a)); instead of a = {};
   and/or memcpy (&b, &a, sizeof (a)); instead of b = a;  */

static void
optimize_memcpy (gimple_stmt_iterator *gsip, tree dest, tree src, tree len)
{
  gimple *stmt = gsi_stmt (*gsip);
  if (gimple_has_volatile_ops (stmt))
    return;

  tree vuse = gimple_vuse (stmt);
  if (vuse == NULL)
    return;

  gimple *defstmt = SSA_NAME_DEF_STMT (vuse);
  tree src2 = NULL_TREE, len2 = NULL_TREE;
  HOST_WIDE_INT offset, offset2;
  tree val = integer_zero_node;
  if (gimple_store_p (defstmt)
      && gimple_assign_single_p (defstmt)
      && TREE_CODE (gimple_assign_rhs1 (defstmt)) == CONSTRUCTOR
      && !gimple_clobber_p (defstmt))
    src2 = gimple_assign_lhs (defstmt);
  else if (gimple_call_builtin_p (defstmt, BUILT_IN_MEMSET)
	   && TREE_CODE (gimple_call_arg (defstmt, 0)) == ADDR_EXPR
	   && TREE_CODE (gimple_call_arg (defstmt, 1)) == INTEGER_CST)
    {
      src2 = TREE_OPERAND (gimple_call_arg (defstmt, 0), 0);
      len2 = gimple_call_arg (defstmt, 2);
      val = gimple_call_arg (defstmt, 1);
      /* For non-0 val, we'd have to transform stmt from assignment
	 into memset (only if dest is addressable).  */
      if (!integer_zerop (val) && is_gimple_assign (stmt))
	src2 = NULL_TREE;
    }

  if (src2 == NULL_TREE)
    return;

  if (len == NULL_TREE)
    len = (TREE_CODE (src) == COMPONENT_REF
	   ? DECL_SIZE_UNIT (TREE_OPERAND (src, 1))
	   : TYPE_SIZE_UNIT (TREE_TYPE (src)));
  if (len2 == NULL_TREE)
    len2 = (TREE_CODE (src2) == COMPONENT_REF
	    ? DECL_SIZE_UNIT (TREE_OPERAND (src2, 1))
	    : TYPE_SIZE_UNIT (TREE_TYPE (src2)));
  if (len == NULL_TREE
      || TREE_CODE (len) != INTEGER_CST
      || len2 == NULL_TREE
      || TREE_CODE (len2) != INTEGER_CST)
    return;

  src = get_addr_base_and_unit_offset (src, &offset);
  src2 = get_addr_base_and_unit_offset (src2, &offset2);
  if (src == NULL_TREE
      || src2 == NULL_TREE
      || offset < offset2)
    return;

  if (!operand_equal_p (src, src2, 0))
    return;

  /* [ src + offset2, src + offset2 + len2 - 1 ] is set to val.
     Make sure that
     [ src + offset, src + offset + len - 1 ] is a subset of that.  */
  if (wi::to_offset (len) + (offset - offset2) > wi::to_offset (len2))
    return;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Simplified\n  ");
      print_gimple_stmt (dump_file, stmt, 0, dump_flags);
      fprintf (dump_file, "after previous\n  ");
      print_gimple_stmt (dump_file, defstmt, 0, dump_flags);
    }

  /* For simplicity, don't change the kind of the stmt,
     turn dest = src; into dest = {}; and memcpy (&dest, &src, len);
     into memset (&dest, val, len);
     In theory we could change dest = src into memset if dest
     is addressable (maybe beneficial if val is not 0), or
     memcpy (&dest, &src, len) into dest = {} if len is the size
     of dest, dest isn't volatile.  */
  if (is_gimple_assign (stmt))
    {
      tree ctor = build_constructor (TREE_TYPE (dest), NULL);
      gimple_assign_set_rhs_from_tree (gsip, ctor);
      update_stmt (stmt);
    }
  else /* If stmt is memcpy, transform it into memset.  */
    {
      gcall *call = as_a <gcall *> (stmt);
      tree fndecl = builtin_decl_implicit (BUILT_IN_MEMSET);
      gimple_call_set_fndecl (call, fndecl);
      gimple_call_set_fntype (call, TREE_TYPE (fndecl));
      gimple_call_set_arg (call, 1, val);
      update_stmt (stmt);
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "into\n  ");
      print_gimple_stmt (dump_file, stmt, 0, dump_flags);
    }
}
/* A simple pass that attempts to fold all builtin functions.  This pass
   is run after we've propagated as many constants as we can.  */

const pass_data pass_data_fold_builtins =
{
  GIMPLE_PASS, /* type */
  "fab", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_fold_builtins : public gimple_opt_pass
{
public:
  pass_fold_builtins (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_fold_builtins, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_fold_builtins (m_ctxt); }
  virtual unsigned int execute (function *);

}; // class pass_fold_builtins
unsigned int
pass_fold_builtins::execute (function *fun)
{
  bool cfg_changed = false;
  basic_block bb;
  unsigned int todoflags = 0;

  FOR_EACH_BB_FN (bb, fun)
    {
      gimple_stmt_iterator i;
      for (i = gsi_start_bb (bb); !gsi_end_p (i); )
	{
	  gimple *stmt, *old_stmt;
	  tree callee;
	  enum built_in_function fcode;

	  stmt = gsi_stmt (i);

	  if (gimple_code (stmt) != GIMPLE_CALL)
	    {
	      /* Remove all *ssaname_N ={v} {CLOBBER}; stmts,
		 after the last GIMPLE DSE they aren't needed and might
		 unnecessarily keep the SSA_NAMEs live.  */
	      if (gimple_clobber_p (stmt))
		{
		  tree lhs = gimple_assign_lhs (stmt);
		  if (TREE_CODE (lhs) == MEM_REF
		      && TREE_CODE (TREE_OPERAND (lhs, 0)) == SSA_NAME)
		    {
		      unlink_stmt_vdef (stmt);
		      gsi_remove (&i, true);
		      release_defs (stmt);
		      continue;
		    }
		}
	      else if (gimple_assign_load_p (stmt) && gimple_store_p (stmt))
		optimize_memcpy (&i, gimple_assign_lhs (stmt),
				 gimple_assign_rhs1 (stmt), NULL_TREE);
	      gsi_next (&i);
	      continue;
	    }

	  callee = gimple_call_fndecl (stmt);
	  if (!callee || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL)
	    {
	      gsi_next (&i);
	      continue;
	    }

	  fcode = DECL_FUNCTION_CODE (callee);
	  if (fold_stmt (&i))
	    ;
	  else
	    {
	      tree result = NULL_TREE;
	      switch (DECL_FUNCTION_CODE (callee))
		{
		case BUILT_IN_CONSTANT_P:
		  /* Resolve __builtin_constant_p.  If it hasn't been
		     folded to integer_one_node by now, it's fairly
		     certain that the value simply isn't constant.  */
		  result = integer_zero_node;
		  break;

		case BUILT_IN_ASSUME_ALIGNED:
		  /* Remove __builtin_assume_aligned.  */
		  result = gimple_call_arg (stmt, 0);
		  break;

		case BUILT_IN_STACK_RESTORE:
		  result = optimize_stack_restore (i);
		  if (result)
		    break;
		  gsi_next (&i);
		  continue;

		case BUILT_IN_UNREACHABLE:
		  if (optimize_unreachable (i))
		    cfg_changed = true;
		  break;

		case BUILT_IN_ATOMIC_FETCH_OR_1:
		case BUILT_IN_ATOMIC_FETCH_OR_2:
		case BUILT_IN_ATOMIC_FETCH_OR_4:
		case BUILT_IN_ATOMIC_FETCH_OR_8:
		case BUILT_IN_ATOMIC_FETCH_OR_16:
		  optimize_atomic_bit_test_and (&i,
						IFN_ATOMIC_BIT_TEST_AND_SET,
						true, false);
		  break;
		case BUILT_IN_SYNC_FETCH_AND_OR_1:
		case BUILT_IN_SYNC_FETCH_AND_OR_2:
		case BUILT_IN_SYNC_FETCH_AND_OR_4:
		case BUILT_IN_SYNC_FETCH_AND_OR_8:
		case BUILT_IN_SYNC_FETCH_AND_OR_16:
		  optimize_atomic_bit_test_and (&i,
						IFN_ATOMIC_BIT_TEST_AND_SET,
						false, false);
		  break;

		case BUILT_IN_ATOMIC_FETCH_XOR_1:
		case BUILT_IN_ATOMIC_FETCH_XOR_2:
		case BUILT_IN_ATOMIC_FETCH_XOR_4:
		case BUILT_IN_ATOMIC_FETCH_XOR_8:
		case BUILT_IN_ATOMIC_FETCH_XOR_16:
		  optimize_atomic_bit_test_and
			(&i, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, true, false);
		  break;
		case BUILT_IN_SYNC_FETCH_AND_XOR_1:
		case BUILT_IN_SYNC_FETCH_AND_XOR_2:
		case BUILT_IN_SYNC_FETCH_AND_XOR_4:
		case BUILT_IN_SYNC_FETCH_AND_XOR_8:
		case BUILT_IN_SYNC_FETCH_AND_XOR_16:
		  optimize_atomic_bit_test_and
			(&i, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, false, false);
		  break;

		case BUILT_IN_ATOMIC_XOR_FETCH_1:
		case BUILT_IN_ATOMIC_XOR_FETCH_2:
		case BUILT_IN_ATOMIC_XOR_FETCH_4:
		case BUILT_IN_ATOMIC_XOR_FETCH_8:
		case BUILT_IN_ATOMIC_XOR_FETCH_16:
		  optimize_atomic_bit_test_and
			(&i, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, true, true);
		  break;
		case BUILT_IN_SYNC_XOR_AND_FETCH_1:
		case BUILT_IN_SYNC_XOR_AND_FETCH_2:
		case BUILT_IN_SYNC_XOR_AND_FETCH_4:
		case BUILT_IN_SYNC_XOR_AND_FETCH_8:
		case BUILT_IN_SYNC_XOR_AND_FETCH_16:
		  optimize_atomic_bit_test_and
			(&i, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, false, true);
		  break;

		case BUILT_IN_ATOMIC_FETCH_AND_1:
		case BUILT_IN_ATOMIC_FETCH_AND_2:
		case BUILT_IN_ATOMIC_FETCH_AND_4:
		case BUILT_IN_ATOMIC_FETCH_AND_8:
		case BUILT_IN_ATOMIC_FETCH_AND_16:
		  optimize_atomic_bit_test_and (&i,
						IFN_ATOMIC_BIT_TEST_AND_RESET,
						true, false);
		  break;
		case BUILT_IN_SYNC_FETCH_AND_AND_1:
		case BUILT_IN_SYNC_FETCH_AND_AND_2:
		case BUILT_IN_SYNC_FETCH_AND_AND_4:
		case BUILT_IN_SYNC_FETCH_AND_AND_8:
		case BUILT_IN_SYNC_FETCH_AND_AND_16:
		  optimize_atomic_bit_test_and (&i,
						IFN_ATOMIC_BIT_TEST_AND_RESET,
						false, false);
		  break;

		case BUILT_IN_MEMCPY:
		  if (gimple_call_builtin_p (stmt, BUILT_IN_NORMAL)
		      && TREE_CODE (gimple_call_arg (stmt, 0)) == ADDR_EXPR
		      && TREE_CODE (gimple_call_arg (stmt, 1)) == ADDR_EXPR
		      && TREE_CODE (gimple_call_arg (stmt, 2)) == INTEGER_CST)
		    {
		      tree dest = TREE_OPERAND (gimple_call_arg (stmt, 0), 0);
		      tree src = TREE_OPERAND (gimple_call_arg (stmt, 1), 0);
		      tree len = gimple_call_arg (stmt, 2);
		      optimize_memcpy (&i, dest, src, len);
		    }
		  break;

		case BUILT_IN_VA_START:
		case BUILT_IN_VA_END:
		case BUILT_IN_VA_COPY:
		  /* These shouldn't be folded before pass_stdarg.  */
		  result = optimize_stdarg_builtin (stmt);
		  break;

		default:;
		}

	      if (result == NULL_TREE)
		{
		  gsi_next (&i);
		  continue;
		}

	      if (!update_call_from_tree (&i, result))
		gimplify_and_update_call_from_tree (&i, result);
	    }

	  todoflags |= TODO_update_address_taken;

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Simplified\n  ");
	      print_gimple_stmt (dump_file, stmt, 0, dump_flags);
	    }

	  old_stmt = stmt;
	  stmt = gsi_stmt (i);
	  update_stmt (stmt);

	  if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt)
	      && gimple_purge_dead_eh_edges (bb))
	    cfg_changed = true;

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "to\n  ");
	      print_gimple_stmt (dump_file, stmt, 0, dump_flags);
	      fprintf (dump_file, "\n");
	    }

	  /* Retry the same statement if it changed into another
	     builtin, there might be new opportunities now.  */
	  if (gimple_code (stmt) != GIMPLE_CALL)
	    {
	      gsi_next (&i);
	      continue;
	    }
	  callee = gimple_call_fndecl (stmt);
	  if (!callee
	      || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL
	      || DECL_FUNCTION_CODE (callee) == fcode)
	    gsi_next (&i);
	}
    }

  /* Delete unreachable blocks.  */
  if (cfg_changed)
    todoflags |= TODO_cleanup_cfg;

  return todoflags;
}

gimple_opt_pass *
make_pass_fold_builtins (gcc::context *ctxt)
{
  return new pass_fold_builtins (ctxt);
}
/* A simple pass that emits some warnings post IPA.  */

const pass_data pass_data_post_ipa_warn =
{
  GIMPLE_PASS, /* type */
  "post_ipa_warn", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_post_ipa_warn : public gimple_opt_pass
{
public:
  pass_post_ipa_warn (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_post_ipa_warn, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_post_ipa_warn (m_ctxt); }
  virtual bool gate (function *) { return warn_nonnull != 0; }
  virtual unsigned int execute (function *);

}; // class pass_post_ipa_warn
unsigned int
pass_post_ipa_warn::execute (function *fun)
{
  basic_block bb;

  FOR_EACH_BB_FN (bb, fun)
    {
      gimple_stmt_iterator gsi;
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);
	  if (!is_gimple_call (stmt) || gimple_no_warning_p (stmt))
	    continue;

	  if (warn_nonnull)
	    {
	      bitmap nonnullargs
		= get_nonnull_args (gimple_call_fntype (stmt));
	      if (nonnullargs)
		{
		  for (unsigned i = 0; i < gimple_call_num_args (stmt); i++)
		    {
		      tree arg = gimple_call_arg (stmt, i);
		      if (TREE_CODE (TREE_TYPE (arg)) != POINTER_TYPE)
			continue;
		      if (!integer_zerop (arg))
			continue;
		      if (!bitmap_empty_p (nonnullargs)
			  && !bitmap_bit_p (nonnullargs, i))
			continue;

		      location_t loc = gimple_location (stmt);
		      if (warning_at (loc, OPT_Wnonnull,
				      "argument %u null where non-null "
				      "expected", i + 1))
			{
			  tree fndecl = gimple_call_fndecl (stmt);
			  if (fndecl && DECL_IS_BUILTIN (fndecl))
			    inform (loc, "in a call to built-in function %qD",
				    fndecl);
			  else if (fndecl)
			    inform (DECL_SOURCE_LOCATION (fndecl),
				    "in a call to function %qD declared here",
				    fndecl);
			}
		    }
		  BITMAP_FREE (nonnullargs);
		}
	    }
	}
    }
  return 0;
}
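/* A minimal example of the diagnostic this pass emits (sketch, the
   function names are made up):

       extern void g (int *) __attribute__ ((nonnull));

       void f (void)
       {
         g ((int *) 0);
       }

   After IPA, the literal null argument in the call to g matches a
   parameter listed by get_nonnull_args, so -Wnonnull reports that
   argument 1 is null where non-null is expected and a note points at
   the declaration of g.  */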
gimple_opt_pass *
make_pass_post_ipa_warn (gcc::context *ctxt)
{
  return new pass_post_ipa_warn (ctxt);
}