/* Conditional constant propagation pass for the GNU compiler.
   Copyright (C) 2000-2019 Free Software Foundation, Inc.
   Adapted from original RTL SSA-CCP by Daniel Berlin <dberlin@dberlin.org>
   Adapted to GIMPLE trees by Diego Novillo <dnovillo@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* Conditional constant propagation (CCP) is based on the SSA
   propagation engine (tree-ssa-propagate.c).  Constant assignments of
   the form VAR = CST are propagated from the assignments into uses of
   VAR, which in turn may generate new constants.  The simulation uses
   a four level lattice to keep track of constant values associated
   with SSA names.  Given an SSA name V_i, it may take one of the
   following values:

	UNINITIALIZED   ->  the initial state of the value.  This value
			    is replaced with a correct initial value
			    the first time the value is used, so the
			    rest of the pass does not need to care about
			    it.  Using this value simplifies initialization
			    of the pass, and prevents us from needlessly
			    scanning statements that are never reached.

	UNDEFINED	->  V_i is a local variable whose definition
			    has not been processed yet.  Therefore we
			    don't yet know if its value is a constant
			    or not.

	CONSTANT	->  V_i has been found to hold a constant
			    value C.

	VARYING		->  V_i cannot take a constant value, or if it
			    does, it is not possible to determine it
			    at compile time.

   The core of SSA-CCP is in ccp_visit_stmt and ccp_visit_phi_node:

   1- In ccp_visit_stmt, we are interested in assignments whose RHS
      evaluates into a constant and conditional jumps whose predicate
      evaluates into a boolean true or false.  When an assignment of
      the form V_i = CONST is found, V_i's lattice value is set to
      CONSTANT and CONST is associated with it.  This causes the
      propagation engine to add all the SSA edges coming out the
      assignment into the worklists, so that statements that use V_i
      can be visited.

      If the statement is a conditional with a constant predicate, we
      mark the outgoing edges as executable or not executable
      depending on the predicate's value.  This is then used when
      visiting PHI nodes to know when a PHI argument can be ignored.

   2- In ccp_visit_phi_node, if all the PHI arguments evaluate to the
      same constant C, then the LHS of the PHI is set to C.  This
      evaluation is known as the "meet operation".  Since one of the
      goals of this evaluation is to optimistically return constant
      values as often as possible, it uses two main short cuts:

      - If an argument is flowing in through a non-executable edge, it
	is ignored.  This is useful in cases like this:

			if (PRED)
			  a_9 = 3;
			else
			  a_10 = 100;
			a_11 = PHI (a_9, a_10)

	If PRED is known to always evaluate to false, then we can
	assume that a_11 will always take its value from a_10, meaning
	that instead of considering it VARYING (a_9 and a_10 have
	different values), we can consider it CONSTANT 100.

      - If an argument has an UNDEFINED value, then it does not affect
	the outcome of the meet operation.  If a variable V_i has an
	UNDEFINED value, it means that either its defining statement
	hasn't been visited yet or V_i has no defining statement, in
	which case the original symbol 'V' is being used
	uninitialized.  Since 'V' is a local variable, the compiler
	may assume any initial value for it.

   After propagation, every variable V_i that ends up with a lattice
   value of CONSTANT will have the associated constant value in the
   array CONST_VAL[i].VALUE.  That is fed into substitute_and_fold for
   final substitution and folding.

   This algorithm uses wide-ints at the max precision of the target.
   This means that, with one uninteresting exception, variables with
   UNSIGNED types never go to VARYING because the bits above the
   precision of the type of the variable are always zero.  The
   uninteresting case is a variable of UNSIGNED type that has the
   maximum precision of the target.  Such variables can go to VARYING,
   but this causes no loss of information since these variables will
   never be extended.

   References:

     Constant propagation with conditional branches,
     Wegman and Zadeck, ACM TOPLAS 13(2):181-210.

     Building an Optimizing Compiler,
     Robert Morgan, Butterworth-Heinemann, 1998, Section 8.9.

     Advanced Compiler Design and Implementation,
     Steven Muchnick, Morgan Kaufmann, 1997, Section 12.6  */
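
/* As an illustration (this example is not part of the original comment;
   the SSA names are made up for exposition), consider:

	x_1 = 0;
	if (x_1 != 0)
	  y_2 = a_5 + 1;
	else
	  y_3 = 7;
	y_4 = PHI <y_2, y_3>

   Visiting the conditional with the known CONSTANT x_1 = 0 marks only the
   else edge executable, so the meet over y_4's arguments ignores y_2 and
   y_4 becomes CONSTANT 7 even though y_2 is VARYING.  */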
#include "coretypes.h"
#include "tree-pass.h"
#include "gimple-pretty-print.h"
#include "fold-const.h"
#include "gimple-fold.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "tree-ssa-propagate.h"
#include "builtins.h"
#include "stor-layout.h"
#include "optabs-query.h"
#include "tree-ssa-ccp.h"
#include "tree-dfa.h"
#include "diagnostic-core.h"
#include "stringpool.h"
#include "tree-vector-builder.h"
/* Possible lattice values.  */
typedef enum
{
  UNINITIALIZED,
  UNDEFINED,
  CONSTANT,
  VARYING
} ccp_lattice_t;

class ccp_prop_value_t {
public:
    /* Lattice value.  */
    ccp_lattice_t lattice_val;

    /* Propagated value.  */
    tree value;

    /* Mask that applies to the propagated value during CCP.  For X
       with a CONSTANT lattice value X & ~mask == value & ~mask.  The
       zero bits in the mask cover constant values.  The ones mean no
       information.  */
    widest_int mask;
};
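
/* Illustrative example (not from the original sources): value == 0x8 with
   mask == 0x7 describes every X with X & ~0x7 == 0x8, i.e. any value in
   [0x8, 0xf]: bit 3 and all higher bits are known, bits 0-2 are unknown.
   A mask of zero is a fully known constant; a mask of all ones carries no
   bit information at all.  */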
class ccp_propagate : public ssa_propagation_engine
{
 public:
  enum ssa_prop_result visit_stmt (gimple *, edge *, tree *) FINAL OVERRIDE;
  enum ssa_prop_result visit_phi (gphi *) FINAL OVERRIDE;
};
/* Array of propagated constant values.  After propagation,
   CONST_VAL[I].VALUE holds the constant value for SSA_NAME(I).  If
   the constant is held in an SSA name representing a memory store
   (i.e., a VDEF), CONST_VAL[I].MEM_REF will contain the actual
   memory reference used to store (i.e., the LHS of the assignment
   doing the store).  */
static ccp_prop_value_t *const_val;
static unsigned n_const_val;

static void canonicalize_value (ccp_prop_value_t *);
static void ccp_lattice_meet (ccp_prop_value_t *, ccp_prop_value_t *);
/* Dump constant propagation value VAL to file OUTF prefixed by PREFIX.  */

static void
dump_lattice_value (FILE *outf, const char *prefix, ccp_prop_value_t val)
{
  switch (val.lattice_val)
    {
    case UNINITIALIZED:
      fprintf (outf, "%sUNINITIALIZED", prefix);
      break;
    case UNDEFINED:
      fprintf (outf, "%sUNDEFINED", prefix);
      break;
    case VARYING:
      fprintf (outf, "%sVARYING", prefix);
      break;
    case CONSTANT:
      if (TREE_CODE (val.value) != INTEGER_CST
	  || val.mask == 0)
	{
	  fprintf (outf, "%sCONSTANT ", prefix);
	  print_generic_expr (outf, val.value, dump_flags);
	}
      else
	{
	  widest_int cval = wi::bit_and_not (wi::to_widest (val.value),
					     val.mask);
	  fprintf (outf, "%sCONSTANT ", prefix);
	  print_hex (cval, outf);
	  fprintf (outf, " (");
	  print_hex (val.mask, outf);
	  fprintf (outf, ")");
	}
      break;
    default:
      gcc_unreachable ();
    }
}
/* Print lattice value VAL to stderr.  */

void debug_lattice_value (ccp_prop_value_t val);

DEBUG_FUNCTION void
debug_lattice_value (ccp_prop_value_t val)
{
  dump_lattice_value (stderr, "", val);
  fprintf (stderr, "\n");
}
/* Extend NONZERO_BITS to a full mask, based on sgn.  */

static widest_int
extend_mask (const wide_int &nonzero_bits, signop sgn)
{
  return widest_int::from (nonzero_bits, sgn);
}
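
/* For instance (illustrative values), nonzero_bits == 0xff extended with an
   UNSIGNED sign gives the widest_int 0xff: only the low eight bits may be
   nonzero, so together with a zero VALUE this encodes "some constant in
   [0, 255]" in the value/mask scheme described above.  */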
/* Compute a default value for variable VAR and store it in the
   CONST_VAL array.  The following rules are used to get default
   values:

   1- Global and static variables that are declared constant are
      considered CONSTANT.

   2- Any other value is considered UNDEFINED.  This is useful when
      considering PHI nodes.  PHI arguments that are undefined do not
      change the constant value of the PHI node, which allows for more
      constants to be propagated.

   3- Variables defined by statements other than assignments and PHI
      nodes are considered VARYING.

   4- Initial values of variables that are not GIMPLE registers are
      considered VARYING.  */

static ccp_prop_value_t
get_default_value (tree var)
{
  ccp_prop_value_t val = { UNINITIALIZED, NULL_TREE, 0 };
  gimple *stmt;

  stmt = SSA_NAME_DEF_STMT (var);

  if (gimple_nop_p (stmt))
    {
      /* Variables defined by an empty statement are those used
	 before being initialized.  If VAR is a local variable, we
	 can assume initially that it is UNDEFINED, otherwise we must
	 consider it VARYING.  */
      if (!virtual_operand_p (var)
	  && SSA_NAME_VAR (var)
	  && TREE_CODE (SSA_NAME_VAR (var)) == VAR_DECL)
	val.lattice_val = UNDEFINED;
      else
	{
	  val.lattice_val = VARYING;
	  val.mask = -1;
	  if (flag_tree_bit_ccp)
	    {
	      wide_int nonzero_bits = get_nonzero_bits (var);
	      if (nonzero_bits != -1)
		{
		  val.lattice_val = CONSTANT;
		  val.value = build_zero_cst (TREE_TYPE (var));
		  val.mask = extend_mask (nonzero_bits,
					  TYPE_SIGN (TREE_TYPE (var)));
		}
	    }
	}
    }
  else if (is_gimple_assign (stmt))
    {
      tree cst;
      if (gimple_assign_single_p (stmt)
	  && DECL_P (gimple_assign_rhs1 (stmt))
	  && (cst = get_symbol_constant_value (gimple_assign_rhs1 (stmt))))
	{
	  val.lattice_val = CONSTANT;
	  val.value = cst;
	}
      else
	{
	  /* Any other variable defined by an assignment is considered
	     UNDEFINED.  */
	  val.lattice_val = UNDEFINED;
	}
    }
  else if ((is_gimple_call (stmt)
	    && gimple_call_lhs (stmt) != NULL_TREE)
	   || gimple_code (stmt) == GIMPLE_PHI)
    {
      /* A variable defined by a call or a PHI node is considered
	 UNDEFINED.  */
      val.lattice_val = UNDEFINED;
    }
  else
    {
      /* Otherwise, VAR will never take on a constant value.  */
      val.lattice_val = VARYING;
      val.mask = -1;
    }

  return val;
}
/* Get the constant value associated with variable VAR.  */

static inline ccp_prop_value_t *
get_value (tree var)
{
  ccp_prop_value_t *val;

  if (const_val == NULL
      || SSA_NAME_VERSION (var) >= n_const_val)
    return NULL;

  val = &const_val[SSA_NAME_VERSION (var)];
  if (val->lattice_val == UNINITIALIZED)
    *val = get_default_value (var);

  canonicalize_value (val);

  return val;
}
/* Return the constant tree value associated with VAR.  */

static inline tree
get_constant_value (tree var)
{
  ccp_prop_value_t *val;
  if (TREE_CODE (var) != SSA_NAME)
    {
      if (is_gimple_min_invariant (var))
	return var;
      return NULL_TREE;
    }
  val = get_value (var);
  if (val
      && val->lattice_val == CONSTANT
      && (TREE_CODE (val->value) != INTEGER_CST
	  || val->mask == 0))
    return val->value;
  return NULL_TREE;
}
/* Sets the value associated with VAR to VARYING.  */

static inline void
set_value_varying (tree var)
{
  ccp_prop_value_t *val = &const_val[SSA_NAME_VERSION (var)];

  val->lattice_val = VARYING;
  val->value = NULL_TREE;
  val->mask = -1;
}
/* For integer constants, make sure to drop TREE_OVERFLOW.  */

static void
canonicalize_value (ccp_prop_value_t *val)
{
  if (val->lattice_val != CONSTANT)
    return;

  if (TREE_OVERFLOW_P (val->value))
    val->value = drop_tree_overflow (val->value);
}
/* Return whether the lattice transition is valid.  */

static bool
valid_lattice_transition (ccp_prop_value_t old_val, ccp_prop_value_t new_val)
{
  /* Lattice transitions must always be monotonically increasing in
     value.  */
  if (old_val.lattice_val < new_val.lattice_val)
    return true;

  if (old_val.lattice_val != new_val.lattice_val)
    return false;

  if (!old_val.value && !new_val.value)
    return true;

  /* Now both lattice values are CONSTANT.  */

  /* Allow arbitrary copy changes as we might look through PHI <a_1, ...>
     when only a single copy edge is executable.  */
  if (TREE_CODE (old_val.value) == SSA_NAME
      && TREE_CODE (new_val.value) == SSA_NAME)
    return true;

  /* Allow transitioning from a constant to a copy.  */
  if (is_gimple_min_invariant (old_val.value)
      && TREE_CODE (new_val.value) == SSA_NAME)
    return true;

  /* Allow transitioning from PHI <&x, not executable> == &x
     to PHI <&x, &y> == common alignment.  */
  if (TREE_CODE (old_val.value) != INTEGER_CST
      && TREE_CODE (new_val.value) == INTEGER_CST)
    return true;

  /* Bit-lattices have to agree in the still valid bits.  */
  if (TREE_CODE (old_val.value) == INTEGER_CST
      && TREE_CODE (new_val.value) == INTEGER_CST)
    return (wi::bit_and_not (wi::to_widest (old_val.value), new_val.mask)
	    == wi::bit_and_not (wi::to_widest (new_val.value), new_val.mask));

  /* Otherwise constant values have to agree.  */
  if (operand_equal_p (old_val.value, new_val.value, 0))
    return true;

  /* At least the kinds and types should agree now.  */
  if (TREE_CODE (old_val.value) != TREE_CODE (new_val.value)
      || !types_compatible_p (TREE_TYPE (old_val.value),
			      TREE_TYPE (new_val.value)))
    return false;

  /* For floats and !HONOR_NANS allow transitions from (partial) NaN
     to non-NaN.  */
  tree type = TREE_TYPE (new_val.value);
  if (SCALAR_FLOAT_TYPE_P (type)
      && !HONOR_NANS (type))
    {
      if (REAL_VALUE_ISNAN (TREE_REAL_CST (old_val.value)))
	return true;
    }
  else if (VECTOR_FLOAT_TYPE_P (type)
	   && !HONOR_NANS (type))
    {
      unsigned int count
	= tree_vector_builder::binary_encoded_nelts (old_val.value,
						     new_val.value);
      for (unsigned int i = 0; i < count; ++i)
	if (!REAL_VALUE_ISNAN
	       (TREE_REAL_CST (VECTOR_CST_ENCODED_ELT (old_val.value, i)))
	    && !operand_equal_p (VECTOR_CST_ENCODED_ELT (old_val.value, i),
				 VECTOR_CST_ENCODED_ELT (new_val.value, i), 0))
	  return false;
      return true;
    }
  else if (COMPLEX_FLOAT_TYPE_P (type)
	   && !HONOR_NANS (type))
    {
      if (!REAL_VALUE_ISNAN (TREE_REAL_CST (TREE_REALPART (old_val.value)))
	  && !operand_equal_p (TREE_REALPART (old_val.value),
			       TREE_REALPART (new_val.value), 0))
	return false;
      if (!REAL_VALUE_ISNAN (TREE_REAL_CST (TREE_IMAGPART (old_val.value)))
	  && !operand_equal_p (TREE_IMAGPART (old_val.value),
			       TREE_IMAGPART (new_val.value), 0))
	return false;
      return true;
    }
  return false;
}
/* Set the value for variable VAR to NEW_VAL.  Return true if the new
   value is different from VAR's previous value.  */

static bool
set_lattice_value (tree var, ccp_prop_value_t *new_val)
{
  /* We can deal with old UNINITIALIZED values just fine here.  */
  ccp_prop_value_t *old_val = &const_val[SSA_NAME_VERSION (var)];

  canonicalize_value (new_val);

  /* We have to be careful to not go up the bitwise lattice
     represented by the mask.  Instead of dropping to VARYING
     use the meet operator to retain a conservative value.
     Missed optimizations like PR65851 makes this necessary.
     It also ensures we converge to a stable lattice solution.  */
  if (old_val->lattice_val != UNINITIALIZED)
    ccp_lattice_meet (new_val, old_val);

  gcc_checking_assert (valid_lattice_transition (*old_val, *new_val));

  /* If *OLD_VAL and NEW_VAL are the same, return false to inform the
     caller that this was a non-transition.  */
  if (old_val->lattice_val != new_val->lattice_val
      || (new_val->lattice_val == CONSTANT
	  && (TREE_CODE (new_val->value) != TREE_CODE (old_val->value)
	      || (TREE_CODE (new_val->value) == INTEGER_CST
		  && (new_val->mask != old_val->mask
		      || (wi::bit_and_not (wi::to_widest (old_val->value),
					   new_val->mask)
			  != wi::bit_and_not (wi::to_widest (new_val->value),
					      new_val->mask))))
	      || (TREE_CODE (new_val->value) != INTEGER_CST
		  && !operand_equal_p (new_val->value, old_val->value, 0)))))
    {
      /* ??? We would like to delay creation of INTEGER_CSTs from
	 partially constants here.  */

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  dump_lattice_value (dump_file, "Lattice value changed to ", *new_val);
	  fprintf (dump_file, ".  Adding SSA edges to worklist.\n");
	}

      *old_val = *new_val;

      gcc_assert (new_val->lattice_val != UNINITIALIZED);
      return true;
    }

  return false;
}
static ccp_prop_value_t get_value_for_expr (tree, bool);
static ccp_prop_value_t bit_value_binop (enum tree_code, tree, tree, tree);
void bit_value_binop (enum tree_code, signop, int, widest_int *, widest_int *,
		      signop, int, const widest_int &, const widest_int &,
		      signop, int, const widest_int &, const widest_int &);

/* Return a widest_int that can be used for bitwise simplifications
   from VAL.  */

static widest_int
value_to_wide_int (ccp_prop_value_t val)
{
  if (val.value
      && TREE_CODE (val.value) == INTEGER_CST)
    return wi::to_widest (val.value);

  return 0;
}
/* Return the value for the address expression EXPR based on alignment
   information.  */

static ccp_prop_value_t
get_value_from_alignment (tree expr)
{
  tree type = TREE_TYPE (expr);
  ccp_prop_value_t val;
  unsigned HOST_WIDE_INT bitpos;
  unsigned int align;

  gcc_assert (TREE_CODE (expr) == ADDR_EXPR);

  get_pointer_alignment_1 (expr, &align, &bitpos);
  val.mask = wi::bit_and_not
    (POINTER_TYPE_P (type) || TYPE_UNSIGNED (type)
     ? wi::mask <widest_int> (TYPE_PRECISION (type), false)
     : -1,
     align / BITS_PER_UNIT - 1);
  val.lattice_val
    = wi::sext (val.mask, TYPE_PRECISION (type)) == -1 ? VARYING : CONSTANT;
  if (val.lattice_val == CONSTANT)
    val.value = build_int_cstu (type, bitpos / BITS_PER_UNIT);
  else
    val.value = NULL_TREE;

  return val;
}
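
/* Example (illustrative, not part of the original sources): for an
   ADDR_EXPR known to be 16-byte aligned with a misalignment of 4 bytes,
   get_pointer_alignment_1 reports align == 128 bits and bitpos == 32 bits,
   so val.mask gets its low four bits cleared (those bits are known) and
   val.value becomes 4: the pointer's low nibble is exactly 0b0100 while
   all higher bits stay unknown.  */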
/* Return the value for the tree operand EXPR.  If FOR_BITS_P is true
   return constant bits extracted from alignment information for
   invariant addresses.  */

static ccp_prop_value_t
get_value_for_expr (tree expr, bool for_bits_p)
{
  ccp_prop_value_t val;

  if (TREE_CODE (expr) == SSA_NAME)
    {
      ccp_prop_value_t *val_ = get_value (expr);
      if (val_)
	val = *val_;
      else
	{
	  val.lattice_val = VARYING;
	  val.value = NULL_TREE;
	  val.mask = -1;
	}
      if (for_bits_p
	  && val.lattice_val == CONSTANT)
	{
	  if (TREE_CODE (val.value) == ADDR_EXPR)
	    val = get_value_from_alignment (val.value);
	  else if (TREE_CODE (val.value) != INTEGER_CST)
	    {
	      val.lattice_val = VARYING;
	      val.value = NULL_TREE;
	      val.mask = -1;
	    }
	}
      /* Fall back to a copy value.  */
      if (!for_bits_p
	  && val.lattice_val == VARYING
	  && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (expr))
	{
	  val.lattice_val = CONSTANT;
	  val.value = expr;
	  val.mask = -1;
	}
    }
  else if (is_gimple_min_invariant (expr)
	   && (!for_bits_p || TREE_CODE (expr) == INTEGER_CST))
    {
      val.lattice_val = CONSTANT;
      val.value = expr;
      val.mask = 0;
      canonicalize_value (&val);
    }
  else if (TREE_CODE (expr) == ADDR_EXPR)
    val = get_value_from_alignment (expr);
  else
    {
      val.lattice_val = VARYING;
      val.mask = -1;
      val.value = NULL_TREE;
    }

  if (val.lattice_val == VARYING
      && TYPE_UNSIGNED (TREE_TYPE (expr)))
    val.mask = wi::zext (val.mask, TYPE_PRECISION (TREE_TYPE (expr)));

  return val;
}
/* Return the likely CCP lattice value for STMT.

   If STMT has no operands, then return CONSTANT.

   Else if undefinedness of operands of STMT cause its value to be
   undefined, then return UNDEFINED.

   Else if any operands of STMT are constants, then return CONSTANT.

   Else return VARYING.  */

static ccp_lattice_t
likely_value (gimple *stmt)
{
  bool has_constant_operand, has_undefined_operand, all_undefined_operands;
  bool has_nsa_operand;
  tree use;
  ssa_op_iter iter;
  unsigned i;

  enum gimple_code code = gimple_code (stmt);

  /* This function appears to be called only for assignments, calls,
     conditionals, and switches, due to the logic in visit_stmt.  */
  gcc_assert (code == GIMPLE_ASSIGN
	      || code == GIMPLE_CALL
	      || code == GIMPLE_COND
	      || code == GIMPLE_SWITCH);

  /* If the statement has volatile operands, it won't fold to a
     constant value.  */
  if (gimple_has_volatile_ops (stmt))
    return VARYING;

  /* Arrive here for more complex cases.  */
  has_constant_operand = false;
  has_undefined_operand = false;
  all_undefined_operands = true;
  has_nsa_operand = false;
  FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
    {
      ccp_prop_value_t *val = get_value (use);

      if (val && val->lattice_val == UNDEFINED)
	has_undefined_operand = true;
      else
	all_undefined_operands = false;

      if (val && val->lattice_val == CONSTANT)
	has_constant_operand = true;

      if (SSA_NAME_IS_DEFAULT_DEF (use)
	  || !prop_simulate_again_p (SSA_NAME_DEF_STMT (use)))
	has_nsa_operand = true;
    }

  /* There may be constants in regular rhs operands.  For calls we
     have to ignore lhs, fndecl and static chain, otherwise only
     the lhs.  */
  for (i = (is_gimple_call (stmt) ? 2 : 0) + gimple_has_lhs (stmt);
       i < gimple_num_ops (stmt); ++i)
    {
      tree op = gimple_op (stmt, i);
      if (!op || TREE_CODE (op) == SSA_NAME)
	continue;
      if (is_gimple_min_invariant (op))
	has_constant_operand = true;
    }

  if (has_constant_operand)
    all_undefined_operands = false;

  if (has_undefined_operand
      && code == GIMPLE_CALL
      && gimple_call_internal_p (stmt))
    switch (gimple_call_internal_fn (stmt))
      {
	/* These 3 builtins use the first argument just as a magic
	   way how to find out a decl uid.  */
      case IFN_GOMP_SIMD_LANE:
      case IFN_GOMP_SIMD_VF:
      case IFN_GOMP_SIMD_LAST_LANE:
	has_undefined_operand = false;
	break;
      default:
	break;
      }

  /* If the operation combines operands like COMPLEX_EXPR make sure to
     not mark the result UNDEFINED if only one part of the result is
     undefined.  */
  if (has_undefined_operand && all_undefined_operands)
    return UNDEFINED;
  else if (code == GIMPLE_ASSIGN && has_undefined_operand)
    {
      switch (gimple_assign_rhs_code (stmt))
	{
	/* Unary operators are handled with all_undefined_operands.  */
	case PLUS_EXPR:
	case MINUS_EXPR:
	case POINTER_PLUS_EXPR:
	case BIT_XOR_EXPR:
	  /* Not MIN_EXPR, MAX_EXPR.  One VARYING operand may be selected.
	     Not bitwise operators, one VARYING operand may specify the
	     result completely.
	     Not logical operators for the same reason, apart from XOR.
	     Not COMPLEX_EXPR as one VARYING operand makes the result partly
	     not UNDEFINED.  Not *DIV_EXPR, comparisons and shifts because
	     the undefined operand may be promoted.  */
	  return UNDEFINED;

	case ADDR_EXPR:
	  /* If any part of an address is UNDEFINED, like the index
	     of an ARRAY_EXPR, then treat the result as UNDEFINED.  */
	  return UNDEFINED;

	default:
	  ;
	}
    }
  /* If there was an UNDEFINED operand but the result may be not UNDEFINED
     fall back to CONSTANT.  During iteration UNDEFINED may still drop
     to CONSTANT.  */
  if (has_undefined_operand)
    return CONSTANT;

  /* We do not consider virtual operands here -- load from read-only
     memory may have only VARYING virtual operands, but still be
     constant.  Also we can combine the stmt with definitions from
     operands whose definitions are not simulated again.  */
  if (has_constant_operand
      || has_nsa_operand
      || gimple_references_memory_p (stmt))
    return CONSTANT;

  return VARYING;
}
/* Returns true if STMT cannot be constant.  */

static bool
surely_varying_stmt_p (gimple *stmt)
{
  /* If the statement has operands that we cannot handle, it cannot be
     constant.  */
  if (gimple_has_volatile_ops (stmt))
    return true;

  /* If it is a call and does not return a value or is not a
     builtin and not an indirect call or a call to function with
     assume_aligned/alloc_align attribute, it is varying.  */
  if (is_gimple_call (stmt))
    {
      tree fndecl, fntype = gimple_call_fntype (stmt);
      if (!gimple_call_lhs (stmt)
	  || ((fndecl = gimple_call_fndecl (stmt)) != NULL_TREE
	      && !fndecl_built_in_p (fndecl)
	      && !lookup_attribute ("assume_aligned",
				    TYPE_ATTRIBUTES (fntype))
	      && !lookup_attribute ("alloc_align",
				    TYPE_ATTRIBUTES (fntype))))
	return true;
    }

  /* Any other store operation is not interesting.  */
  else if (gimple_vdef (stmt))
    return true;

  /* Anything other than assignments and conditional jumps are not
     interesting for CCP.  */
  if (gimple_code (stmt) != GIMPLE_ASSIGN
      && gimple_code (stmt) != GIMPLE_COND
      && gimple_code (stmt) != GIMPLE_SWITCH
      && gimple_code (stmt) != GIMPLE_CALL)
    return true;

  return false;
}
/* Initialize local data structures for CCP.  */

static void
ccp_initialize (void)
{
  basic_block bb;

  n_const_val = num_ssa_names;
  const_val = XCNEWVEC (ccp_prop_value_t, n_const_val);

  /* Initialize simulation flags for PHI nodes and statements.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      gimple_stmt_iterator i;

      for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i))
	{
	  gimple *stmt = gsi_stmt (i);
	  bool is_varying;

	  /* If the statement is a control insn, then we do not
	     want to avoid simulating the statement once.  Failure
	     to do so means that those edges will never get added.  */
	  if (stmt_ends_bb_p (stmt))
	    is_varying = false;
	  else
	    is_varying = surely_varying_stmt_p (stmt);

	  if (is_varying)
	    {
	      tree def;
	      ssa_op_iter iter;

	      /* If the statement will not produce a constant, mark
		 all its outputs VARYING.  */
	      FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
		set_value_varying (def);
	    }
	  prop_set_simulate_again (stmt, !is_varying);
	}
    }

  /* Now process PHI nodes.  We never clear the simulate_again flag on
     phi nodes, since we do not know which edges are executable yet,
     except for phi nodes for virtual operands when we do not do store ccp.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      gphi_iterator i;

      for (i = gsi_start_phis (bb); !gsi_end_p (i); gsi_next (&i))
	{
	  gphi *phi = i.phi ();

	  if (virtual_operand_p (gimple_phi_result (phi)))
	    prop_set_simulate_again (phi, false);
	  else
	    prop_set_simulate_again (phi, true);
	}
    }
}
/* Debug count support.  Reset the values of ssa names
   VARYING when the total number ssa names analyzed is
   beyond the debug count specified.  */

static void
do_dbg_cnt (void)
{
  unsigned i;
  for (i = 0; i < num_ssa_names; i++)
    {
      if (!dbg_cnt (ccp))
	{
	  const_val[i].lattice_val = VARYING;
	  const_val[i].mask = -1;
	  const_val[i].value = NULL_TREE;
	}
    }
}
/* We want to provide our own GET_VALUE and FOLD_STMT virtual methods.  */
class ccp_folder : public substitute_and_fold_engine
{
 public:
  tree get_value (tree) FINAL OVERRIDE;
  bool fold_stmt (gimple_stmt_iterator *) FINAL OVERRIDE;
};

/* This method just wraps GET_CONSTANT_VALUE for now.  Over time
   naked calls to GET_CONSTANT_VALUE should be eliminated in favor
   of calling member functions.  */

tree
ccp_folder::get_value (tree op)
{
  return get_constant_value (op);
}
/* Do final substitution of propagated values, cleanup the flowgraph and
   free allocated storage.  If NONZERO_P, record nonzero bits.

   Return TRUE when something was optimized.  */

static bool
ccp_finalize (bool nonzero_p)
{
  bool something_changed;
  unsigned i;
  tree name;

  do_dbg_cnt ();

  /* Derive alignment and misalignment information from partially
     constant pointers in the lattice or nonzero bits from partially
     constant integers.  */
  FOR_EACH_SSA_NAME (i, name, cfun)
    {
      ccp_prop_value_t *val;
      unsigned int tem, align;

      if (!POINTER_TYPE_P (TREE_TYPE (name))
	  && (!INTEGRAL_TYPE_P (TREE_TYPE (name))
	      /* Don't record nonzero bits before IPA to avoid
		 using too much memory.  */
	      || !nonzero_p))
	continue;

      val = get_value (name);
      if (val->lattice_val != CONSTANT
	  || TREE_CODE (val->value) != INTEGER_CST
	  || val->mask == 0)
	continue;

      if (POINTER_TYPE_P (TREE_TYPE (name)))
	{
	  /* Trailing mask bits specify the alignment, trailing value
	     bits the misalignment.  */
	  tem = val->mask.to_uhwi ();
	  align = least_bit_hwi (tem);
	  if (align > 1)
	    set_ptr_info_alignment (get_ptr_info (name), align,
				    (TREE_INT_CST_LOW (val->value)
				     & (align - 1)));
	}
      else
	{
	  unsigned int precision = TYPE_PRECISION (TREE_TYPE (val->value));
	  wide_int nonzero_bits
	    = (wide_int::from (val->mask, precision, UNSIGNED)
	       | wi::to_wide (val->value));
	  nonzero_bits &= get_nonzero_bits (name);
	  set_nonzero_bits (name, nonzero_bits);
	}
    }

  /* Perform substitutions based on the known constant values.  */
  class ccp_folder ccp_folder;
  something_changed = ccp_folder.substitute_and_fold ();

  free (const_val);
  const_val = NULL;
  return something_changed;
}
/* Compute the meet operator between *VAL1 and *VAL2.  Store the result
   in VAL1.

		any  M UNDEFINED   = any
		any  M VARYING     = VARYING
		Ci   M Cj	   = Ci		if (i == j)
		Ci   M Cj	   = VARYING	if (i != j)
   */
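
/* As an illustration (values not from the original sources): meeting two
   fully known INTEGER_CSTs 12 (0b1100) and 4 (0b0100) below ORs their masks
   with the XOR of their values, giving mask 0b1000: bit 3 becomes unknown
   while the bits on which both constants agree stay known.  */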
static void
ccp_lattice_meet (ccp_prop_value_t *val1, ccp_prop_value_t *val2)
{
  if (val1->lattice_val == UNDEFINED
      /* For UNDEFINED M SSA we can't always SSA because its definition
	 may not dominate the PHI node.  Doing optimistic copy propagation
	 also causes a lot of gcc.dg/uninit-pred*.c FAILs.  */
      && (val2->lattice_val != CONSTANT
	  || TREE_CODE (val2->value) != SSA_NAME))
    {
      /* UNDEFINED M any = any   */
      *val1 = *val2;
    }
  else if (val2->lattice_val == UNDEFINED
	   /* See above.  */
	   && (val1->lattice_val != CONSTANT
	       || TREE_CODE (val1->value) != SSA_NAME))
    {
      /* any M UNDEFINED = any
	 Nothing to do.  VAL1 already contains the value we want.  */
      ;
    }
  else if (val1->lattice_val == VARYING
	   || val2->lattice_val == VARYING)
    {
      /* any M VARYING = VARYING.  */
      val1->lattice_val = VARYING;
      val1->mask = -1;
      val1->value = NULL_TREE;
    }
  else if (val1->lattice_val == CONSTANT
	   && val2->lattice_val == CONSTANT
	   && TREE_CODE (val1->value) == INTEGER_CST
	   && TREE_CODE (val2->value) == INTEGER_CST)
    {
      /* Ci M Cj = Ci	if (i == j)
	 Ci M Cj = VARYING	if (i != j)

	 For INTEGER_CSTs mask unequal bits.  If no equal bits remain,
	 drop to varying.  */
      val1->mask = (val1->mask | val2->mask
		    | (wi::to_widest (val1->value)
		       ^ wi::to_widest (val2->value)));
      if (wi::sext (val1->mask, TYPE_PRECISION (TREE_TYPE (val1->value))) == -1)
	{
	  val1->lattice_val = VARYING;
	  val1->value = NULL_TREE;
	}
    }
  else if (val1->lattice_val == CONSTANT
	   && val2->lattice_val == CONSTANT
	   && operand_equal_p (val1->value, val2->value, 0))
    {
      /* Ci M Cj = Ci	if (i == j)
	 Ci M Cj = VARYING	if (i != j)

	 VAL1 already contains the value we want for equivalent values.  */
    }
  else if (val1->lattice_val == CONSTANT
	   && val2->lattice_val == CONSTANT
	   && (TREE_CODE (val1->value) == ADDR_EXPR
	       || TREE_CODE (val2->value) == ADDR_EXPR))
    {
      /* When not equal addresses are involved try meeting for
	 alignment.  */
      ccp_prop_value_t tem = *val2;
      if (TREE_CODE (val1->value) == ADDR_EXPR)
	*val1 = get_value_for_expr (val1->value, true);
      if (TREE_CODE (val2->value) == ADDR_EXPR)
	tem = get_value_for_expr (val2->value, true);
      ccp_lattice_meet (val1, &tem);
    }
  else
    {
      /* Any other combination is VARYING.  */
      val1->lattice_val = VARYING;
      val1->mask = -1;
      val1->value = NULL_TREE;
    }
}
/* Loop through the PHI_NODE's parameters for BLOCK and compare their
   lattice values to determine PHI_NODE's lattice value.  The value of a
   PHI node is determined by calling ccp_lattice_meet with all the arguments
   of the PHI node that are incoming via executable edges.  */

enum ssa_prop_result
ccp_propagate::visit_phi (gphi *phi)
{
  unsigned i;
  ccp_prop_value_t new_val;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nVisiting PHI node: ");
      print_gimple_stmt (dump_file, phi, 0, dump_flags);
    }

  new_val.lattice_val = UNDEFINED;
  new_val.value = NULL_TREE;
  new_val.mask = 0;

  bool first = true;
  bool non_exec_edge = false;
  for (i = 0; i < gimple_phi_num_args (phi); i++)
    {
      /* Compute the meet operator over all the PHI arguments flowing
	 through executable edges.  */
      edge e = gimple_phi_arg_edge (phi, i);

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file,
		   "\tArgument #%d (%d -> %d %sexecutable)\n",
		   i, e->src->index, e->dest->index,
		   (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
	}

      /* If the incoming edge is executable, Compute the meet operator for
	 the existing value of the PHI node and the current PHI argument.  */
      if (e->flags & EDGE_EXECUTABLE)
	{
	  tree arg = gimple_phi_arg (phi, i)->def;
	  ccp_prop_value_t arg_val = get_value_for_expr (arg, false);

	  if (first)
	    {
	      new_val = arg_val;
	      first = false;
	    }
	  else
	    ccp_lattice_meet (&new_val, &arg_val);

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "\t");
	      print_generic_expr (dump_file, arg, dump_flags);
	      dump_lattice_value (dump_file, "\tValue: ", arg_val);
	      fprintf (dump_file, "\n");
	    }

	  if (new_val.lattice_val == VARYING)
	    break;
	}
      else
	non_exec_edge = true;
    }

  /* In case there were non-executable edges and the value is a copy
     make sure its definition dominates the PHI node.  */
  if (non_exec_edge
      && new_val.lattice_val == CONSTANT
      && TREE_CODE (new_val.value) == SSA_NAME
      && ! SSA_NAME_IS_DEFAULT_DEF (new_val.value)
      && ! dominated_by_p (CDI_DOMINATORS, gimple_bb (phi),
			   gimple_bb (SSA_NAME_DEF_STMT (new_val.value))))
    {
      new_val.lattice_val = VARYING;
      new_val.value = NULL_TREE;
      new_val.mask = -1;
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      dump_lattice_value (dump_file, "\n    PHI node value: ", new_val);
      fprintf (dump_file, "\n\n");
    }

  /* Make the transition to the new value.  */
  if (set_lattice_value (gimple_phi_result (phi), &new_val))
    {
      if (new_val.lattice_val == VARYING)
	return SSA_PROP_VARYING;
      else
	return SSA_PROP_INTERESTING;
    }
  else
    return SSA_PROP_NOT_INTERESTING;
}
/* Return the constant value for OP or OP otherwise.  */

static tree
valueize_op (tree op)
{
  if (TREE_CODE (op) == SSA_NAME)
    {
      tree tem = get_constant_value (op);
      if (tem)
	return tem;
    }
  return op;
}

/* Return the constant value for OP, but signal to not follow SSA
   edges if the definition may be simulated again.  */

static tree
valueize_op_1 (tree op)
{
  if (TREE_CODE (op) == SSA_NAME)
    {
      /* If the definition may be simulated again we cannot follow
	 this SSA edge as the SSA propagator does not necessarily
	 re-visit the use.  */
      gimple *def_stmt = SSA_NAME_DEF_STMT (op);
      if (!gimple_nop_p (def_stmt)
	  && prop_simulate_again_p (def_stmt))
	return NULL_TREE;
      tree tem = get_constant_value (op);
      if (tem)
	return tem;
    }
  return op;
}
/* CCP specific front-end to the non-destructive constant folding
   routines.

   Attempt to simplify the RHS of STMT knowing that one or more
   operands are constants.

   If simplification is possible, return the simplified RHS,
   otherwise return the original RHS or NULL_TREE.  */

static tree
ccp_fold (gimple *stmt)
{
  location_t loc = gimple_location (stmt);
  switch (gimple_code (stmt))
    {
    case GIMPLE_COND:
      {
	/* Handle comparison operators that can appear in GIMPLE form.  */
	tree op0 = valueize_op (gimple_cond_lhs (stmt));
	tree op1 = valueize_op (gimple_cond_rhs (stmt));
	enum tree_code code = gimple_cond_code (stmt);
	return fold_binary_loc (loc, code, boolean_type_node, op0, op1);
      }

    case GIMPLE_SWITCH:
      {
	/* Return the constant switch index.  */
	return valueize_op (gimple_switch_index (as_a <gswitch *> (stmt)));
      }

    case GIMPLE_ASSIGN:
    case GIMPLE_CALL:
      return gimple_fold_stmt_to_constant_1 (stmt,
					     valueize_op, valueize_op_1);

    default:
      gcc_unreachable ();
    }
}
/* Apply the operation CODE in type TYPE to the value, mask pair
   RVAL and RMASK representing a value of type RTYPE and set
   the value, mask pair *VAL and *MASK to the result.  */

void
bit_value_unop (enum tree_code code, signop type_sgn, int type_precision,
		widest_int *val, widest_int *mask,
		signop rtype_sgn, int rtype_precision,
		const widest_int &rval, const widest_int &rmask)
{
  switch (code)
    {
    case BIT_NOT_EXPR:
      *mask = rmask;
      *val = ~rval;
      break;

    case NEGATE_EXPR:
      {
	widest_int temv, temm;
	/* Return ~rval + 1.  */
	bit_value_unop (BIT_NOT_EXPR, type_sgn, type_precision, &temv, &temm,
			type_sgn, type_precision, rval, rmask);
	bit_value_binop (PLUS_EXPR, type_sgn, type_precision, val, mask,
			 type_sgn, type_precision, temv, temm,
			 type_sgn, type_precision, 1, 0);
	break;
      }

    CASE_CONVERT:
      {
	/* First extend mask and value according to the original type.  */
	*mask = wi::ext (rmask, rtype_precision, rtype_sgn);
	*val = wi::ext (rval, rtype_precision, rtype_sgn);

	/* Then extend mask and value according to the target type.  */
	*mask = wi::ext (*mask, type_precision, type_sgn);
	*val = wi::ext (*val, type_precision, type_sgn);
	break;
      }

    default:
      *mask = -1;
      break;
    }
}
/* Apply the operation CODE in type TYPE to the value, mask pairs
   R1VAL, R1MASK and R2VAL, R2MASK representing a values of type R1TYPE
   and R2TYPE and set the value, mask pair *VAL and *MASK to the result.  */

void
bit_value_binop (enum tree_code code, signop sgn, int width,
		 widest_int *val, widest_int *mask,
		 signop r1type_sgn, int r1type_precision,
		 const widest_int &r1val, const widest_int &r1mask,
		 signop r2type_sgn, int r2type_precision,
		 const widest_int &r2val, const widest_int &r2mask)
{
  bool swap_p = false;

  /* Assume we'll get a constant result.  Use an initial non varying
     value, we fall back to varying in the end if necessary.  */
  *mask = -1;

  switch (code)
    {
    case BIT_AND_EXPR:
      /* The mask is constant where there is a known not
	 set bit, (m1 | m2) & ((v1 | m1) & (v2 | m2)) */
      *mask = (r1mask | r2mask) & (r1val | r1mask) & (r2val | r2mask);
      *val = r1val & r2val;
      break;

    case BIT_IOR_EXPR:
      /* The mask is constant where there is a known
	 set bit, (m1 | m2) & ~((v1 & ~m1) | (v2 & ~m2)).  */
      *mask = wi::bit_and_not (r1mask | r2mask,
			       wi::bit_and_not (r1val, r1mask)
			       | wi::bit_and_not (r2val, r2mask));
      *val = r1val | r2val;
      break;

    case BIT_XOR_EXPR:
      /* m1 | m2  */
      *mask = r1mask | r2mask;
      *val = r1val ^ r2val;
      break;

    case LROTATE_EXPR:
    case RROTATE_EXPR:
      if (r2mask == 0)
	{
	  widest_int shift = r2val;
	  if (shift == 0)
	    {
	      *mask = r1mask;
	      *val = r1val;
	    }
	  else
	    {
	      if (wi::neg_p (shift))
		{
		  shift = -shift;
		  if (code == RROTATE_EXPR)
		    code = LROTATE_EXPR;
		  else
		    code = RROTATE_EXPR;
		}
	      if (code == RROTATE_EXPR)
		{
		  *mask = wi::rrotate (r1mask, shift, width);
		  *val = wi::rrotate (r1val, shift, width);
		}
	      else
		{
		  *mask = wi::lrotate (r1mask, shift, width);
		  *val = wi::lrotate (r1val, shift, width);
		}
	    }
	}
      break;

    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
      /* ??? We can handle partially known shift counts if we know
	 its sign.  That way we can tell that (x << (y | 8)) & 255
	 is zero.  */
      if (r2mask == 0)
	{
	  widest_int shift = r2val;
	  if (shift == 0)
	    {
	      *mask = r1mask;
	      *val = r1val;
	    }
	  else
	    {
	      if (wi::neg_p (shift))
		break;
	      if (code == RSHIFT_EXPR)
		{
		  *mask = wi::rshift (wi::ext (r1mask, width, sgn), shift, sgn);
		  *val = wi::rshift (wi::ext (r1val, width, sgn), shift, sgn);
		}
	      else
		{
		  *mask = wi::ext (r1mask << shift, width, sgn);
		  *val = wi::ext (r1val << shift, width, sgn);
		}
	    }
	}
      break;

    case PLUS_EXPR:
    case POINTER_PLUS_EXPR:
      {
	/* Do the addition with unknown bits set to zero, to give carry-ins of
	   zero wherever possible.  */
	widest_int lo = (wi::bit_and_not (r1val, r1mask)
			 + wi::bit_and_not (r2val, r2mask));
	lo = wi::ext (lo, width, sgn);
	/* Do the addition with unknown bits set to one, to give carry-ins of
	   one wherever possible.  */
	widest_int hi = (r1val | r1mask) + (r2val | r2mask);
	hi = wi::ext (hi, width, sgn);
	/* Each bit in the result is known if (a) the corresponding bits in
	   both inputs are known, and (b) the carry-in to that bit position
	   is known.  We can check condition (b) by seeing if we got the same
	   result with minimised carries as with maximised carries.  */
	*mask = r1mask | r2mask | (lo ^ hi);
	*mask = wi::ext (*mask, width, sgn);
	/* It shouldn't matter whether we choose lo or hi here.  */
	*val = lo;
	break;
      }
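
      /* Illustrative example (numbers not taken from the original sources):
	 adding {value 0b0100, mask 0b0011} to the constant 0b0100 gives
	 lo = 0b1000 and hi = 0b1011, so lo ^ hi = 0b0011 introduces no new
	 unknown bits and *mask stays 0b0011: bit 3 of the sum is known to be
	 1 and bit 2 known to be 0, because lo and hi agree above the unknown
	 low bits.  */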
    case MINUS_EXPR:
      {
	widest_int temv, temm;
	bit_value_unop (NEGATE_EXPR, r2type_sgn, r2type_precision, &temv, &temm,
			r2type_sgn, r2type_precision, r2val, r2mask);
	bit_value_binop (PLUS_EXPR, sgn, width, val, mask,
			 r1type_sgn, r1type_precision, r1val, r1mask,
			 r2type_sgn, r2type_precision, temv, temm);
	break;
      }

    case MULT_EXPR:
      {
	/* Just track trailing zeros in both operands and transfer
	   them to the other.  */
	int r1tz = wi::ctz (r1val | r1mask);
	int r2tz = wi::ctz (r2val | r2mask);
	if (r1tz + r2tz >= width)
	  {
	    *mask = 0;
	    *val = 0;
	  }
	else if (r1tz + r2tz > 0)
	  {
	    *mask = wi::ext (wi::mask <widest_int> (r1tz + r2tz, true),
			     width, sgn);
	    *val = 0;
	  }
	break;
      }

    case EQ_EXPR:
    case NE_EXPR:
      {
	widest_int m = r1mask | r2mask;
	if (wi::bit_and_not (r1val, m) != wi::bit_and_not (r2val, m))
	  {
	    *mask = 0;
	    *val = ((code == EQ_EXPR) ? 0 : 1);
	  }
	else
	  {
	    /* We know the result of a comparison is always one or zero.  */
	    *mask = 1;
	    *val = 0;
	  }
	break;
      }

    case GE_EXPR:
    case GT_EXPR:
      swap_p = true;
      code = swap_tree_comparison (code);
      /* Fall through.  */
    case LT_EXPR:
    case LE_EXPR:
      {
	int minmax, maxmin;

	const widest_int &o1val = swap_p ? r2val : r1val;
	const widest_int &o1mask = swap_p ? r2mask : r1mask;
	const widest_int &o2val = swap_p ? r1val : r2val;
	const widest_int &o2mask = swap_p ? r1mask : r2mask;

	/* If the most significant bits are not known we know nothing.  */
	if (wi::neg_p (o1mask) || wi::neg_p (o2mask))
	  break;

	/* For comparisons the signedness is in the comparison operands.  */
	sgn = r1type_sgn;

	/* If we know the most significant bits we know the values
	   value ranges by means of treating varying bits as zero
	   or one.  Do a cross comparison of the max/min pairs.  */
	maxmin = wi::cmp (o1val | o1mask,
			  wi::bit_and_not (o2val, o2mask), sgn);
	minmax = wi::cmp (wi::bit_and_not (o1val, o1mask),
			  o2val | o2mask, sgn);
	if (maxmin < 0)  /* o1 is less than o2.  */
	  {
	    *mask = 0;
	    *val = 1;
	  }
	else if (minmax > 0)  /* o1 is not less or equal to o2.  */
	  {
	    *mask = 0;
	    *val = 0;
	  }
	else if (maxmin == minmax)  /* o1 and o2 are equal.  */
	  {
	    /* This probably should never happen as we'd have
	       folded the thing during fully constant value folding.  */
	    *mask = 0;
	    *val = (code == LE_EXPR ? 1 : 0);
	  }
	else
	  {
	    /* We know the result of a comparison is always one or zero.  */
	    *mask = 1;
	    *val = 0;
	  }
	break;
      }

    default:;
    }
}
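
/* Worked example for the comparison case above (values are illustrative):
   for LT_EXPR with o1 = {value 0, mask 0x3} (some value in [0, 3]) and
   o2 = {value 8, mask 0}, the cross comparison gives maxmin = cmp (3, 8) < 0,
   so the result is the known constant 1: every possible o1 is below o2.  */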
/* Return the propagation value when applying the operation CODE to
   the value RHS yielding type TYPE.  */

static ccp_prop_value_t
bit_value_unop (enum tree_code code, tree type, tree rhs)
{
  ccp_prop_value_t rval = get_value_for_expr (rhs, true);
  widest_int value, mask;
  ccp_prop_value_t val;

  if (rval.lattice_val == UNDEFINED)
    return rval;

  gcc_assert ((rval.lattice_val == CONSTANT
	       && TREE_CODE (rval.value) == INTEGER_CST)
	      || wi::sext (rval.mask, TYPE_PRECISION (TREE_TYPE (rhs))) == -1);
  bit_value_unop (code, TYPE_SIGN (type), TYPE_PRECISION (type), &value, &mask,
		  TYPE_SIGN (TREE_TYPE (rhs)), TYPE_PRECISION (TREE_TYPE (rhs)),
		  value_to_wide_int (rval), rval.mask);
  if (wi::sext (mask, TYPE_PRECISION (type)) != -1)
    {
      val.lattice_val = CONSTANT;
      val.mask = mask;
      /* ??? Delay building trees here.  */
      val.value = wide_int_to_tree (type, value);
    }
  else
    {
      val.lattice_val = VARYING;
      val.value = NULL_TREE;
      val.mask = -1;
    }
  return val;
}
/* Return the propagation value when applying the operation CODE to
   the values RHS1 and RHS2 yielding type TYPE.  */

static ccp_prop_value_t
bit_value_binop (enum tree_code code, tree type, tree rhs1, tree rhs2)
{
  ccp_prop_value_t r1val = get_value_for_expr (rhs1, true);
  ccp_prop_value_t r2val = get_value_for_expr (rhs2, true);
  widest_int value, mask;
  ccp_prop_value_t val;

  if (r1val.lattice_val == UNDEFINED
      || r2val.lattice_val == UNDEFINED)
    {
      val.lattice_val = VARYING;
      val.value = NULL_TREE;
      val.mask = -1;
      return val;
    }

  gcc_assert ((r1val.lattice_val == CONSTANT
	       && TREE_CODE (r1val.value) == INTEGER_CST)
	      || wi::sext (r1val.mask,
			   TYPE_PRECISION (TREE_TYPE (rhs1))) == -1);
  gcc_assert ((r2val.lattice_val == CONSTANT
	       && TREE_CODE (r2val.value) == INTEGER_CST)
	      || wi::sext (r2val.mask,
			   TYPE_PRECISION (TREE_TYPE (rhs2))) == -1);
  bit_value_binop (code, TYPE_SIGN (type), TYPE_PRECISION (type), &value, &mask,
		   TYPE_SIGN (TREE_TYPE (rhs1)), TYPE_PRECISION (TREE_TYPE (rhs1)),
		   value_to_wide_int (r1val), r1val.mask,
		   TYPE_SIGN (TREE_TYPE (rhs2)), TYPE_PRECISION (TREE_TYPE (rhs2)),
		   value_to_wide_int (r2val), r2val.mask);

  if (wi::sext (mask, TYPE_PRECISION (type)) != -1)
    {
      val.lattice_val = CONSTANT;
      val.mask = mask;
      /* ??? Delay building trees here.  */
      val.value = wide_int_to_tree (type, value);
    }
  else
    {
      val.lattice_val = VARYING;
      val.value = NULL_TREE;
      val.mask = -1;
    }

  return val;
}
/* Return the propagation value for __builtin_assume_aligned
   and functions with assume_aligned or alloc_aligned attribute.
   For __builtin_assume_aligned, ATTR is NULL_TREE,
   for assume_aligned attribute ATTR is non-NULL and ALLOC_ALIGNED
   is false, for alloc_aligned attribute ATTR is non-NULL and
   ALLOC_ALIGNED is true.  */

static ccp_prop_value_t
bit_value_assume_aligned (gimple *stmt, tree attr, ccp_prop_value_t ptrval,
			  bool alloc_aligned)
{
  tree align, misalign = NULL_TREE, type;
  unsigned HOST_WIDE_INT aligni, misaligni = 0;
  ccp_prop_value_t alignval;
  widest_int value, mask;
  ccp_prop_value_t val;

  if (attr == NULL_TREE)
    {
      tree ptr = gimple_call_arg (stmt, 0);
      type = TREE_TYPE (ptr);
      ptrval = get_value_for_expr (ptr, true);
    }
  else
    {
      tree lhs = gimple_call_lhs (stmt);
      type = TREE_TYPE (lhs);
    }

  if (ptrval.lattice_val == UNDEFINED)
    return ptrval;
  gcc_assert ((ptrval.lattice_val == CONSTANT
	       && TREE_CODE (ptrval.value) == INTEGER_CST)
	      || wi::sext (ptrval.mask, TYPE_PRECISION (type)) == -1);
  if (attr == NULL_TREE)
    {
      /* Get aligni and misaligni from __builtin_assume_aligned.  */
      align = gimple_call_arg (stmt, 1);
      if (!tree_fits_uhwi_p (align))
	return ptrval;
      aligni = tree_to_uhwi (align);
      if (gimple_call_num_args (stmt) > 2)
	{
	  misalign = gimple_call_arg (stmt, 2);
	  if (!tree_fits_uhwi_p (misalign))
	    return ptrval;
	  misaligni = tree_to_uhwi (misalign);
	}
    }
  else
    {
      /* Get aligni and misaligni from assume_aligned or
	 alloc_align attributes.  */
      if (TREE_VALUE (attr) == NULL_TREE)
	return ptrval;
      attr = TREE_VALUE (attr);
      align = TREE_VALUE (attr);
      if (!tree_fits_uhwi_p (align))
	return ptrval;
      aligni = tree_to_uhwi (align);
      if (alloc_aligned)
	{
	  if (aligni == 0 || aligni > gimple_call_num_args (stmt))
	    return ptrval;
	  align = gimple_call_arg (stmt, aligni - 1);
	  if (!tree_fits_uhwi_p (align))
	    return ptrval;
	  aligni = tree_to_uhwi (align);
	}
      else if (TREE_CHAIN (attr) && TREE_VALUE (TREE_CHAIN (attr)))
	{
	  misalign = TREE_VALUE (TREE_CHAIN (attr));
	  if (!tree_fits_uhwi_p (misalign))
	    return ptrval;
	  misaligni = tree_to_uhwi (misalign);
	}
    }
  if (aligni <= 1 || (aligni & (aligni - 1)) != 0 || misaligni >= aligni)
    return ptrval;

  align = build_int_cst_type (type, -aligni);
  alignval = get_value_for_expr (align, true);
  bit_value_binop (BIT_AND_EXPR, TYPE_SIGN (type), TYPE_PRECISION (type),
		   &value, &mask,
		   TYPE_SIGN (type), TYPE_PRECISION (type),
		   value_to_wide_int (ptrval), ptrval.mask,
		   TYPE_SIGN (type), TYPE_PRECISION (type),
		   value_to_wide_int (alignval), alignval.mask);

  if (wi::sext (mask, TYPE_PRECISION (type)) != -1)
    {
      val.lattice_val = CONSTANT;
      val.mask = mask;
      gcc_assert ((mask.to_uhwi () & (aligni - 1)) == 0);
      gcc_assert ((value.to_uhwi () & (aligni - 1)) == 0);
      value |= misaligni;
      /* ??? Delay building trees here.  */
      val.value = wide_int_to_tree (type, value);
    }
  else
    {
      val.lattice_val = VARYING;
      val.value = NULL_TREE;
      val.mask = -1;
    }
  return val;
}
/* Evaluate statement STMT.
   Valid only for assignments, calls, conditionals, and switches.  */

static ccp_prop_value_t
evaluate_stmt (gimple *stmt)
{
  ccp_prop_value_t val;
  tree simplified = NULL_TREE;
  ccp_lattice_t likelyvalue = likely_value (stmt);
  bool is_constant = false;
  unsigned int align;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "which is likely ");
      switch (likelyvalue)
	{
	case CONSTANT:
	  fprintf (dump_file, "CONSTANT");
	  break;
	case UNDEFINED:
	  fprintf (dump_file, "UNDEFINED");
	  break;
	case VARYING:
	  fprintf (dump_file, "VARYING");
	  break;
	default:;
	}
      fprintf (dump_file, "\n");
    }

  /* If the statement is likely to have a CONSTANT result, then try
     to fold the statement to determine the constant value.  */
  /* FIXME.  This is the only place that we call ccp_fold.
     Since likely_value never returns CONSTANT for calls, we will
     not attempt to fold them, including builtins that may profit.  */
  if (likelyvalue == CONSTANT)
    {
      fold_defer_overflow_warnings ();
      simplified = ccp_fold (stmt);
      if (simplified
	  && TREE_CODE (simplified) == SSA_NAME)
	{
	  /* We may not use values of something that may be simulated again,
	     see valueize_op_1.  */
	  if (SSA_NAME_IS_DEFAULT_DEF (simplified)
	      || ! prop_simulate_again_p (SSA_NAME_DEF_STMT (simplified)))
	    {
	      ccp_prop_value_t *val = get_value (simplified);
	      if (val && val->lattice_val != VARYING)
		{
		  fold_undefer_overflow_warnings (true, stmt, 0);
		  return *val;
		}
	    }
	  else
	    /* We may also not place a non-valueized copy in the lattice
	       as that might become stale if we never re-visit this stmt.  */
	    simplified = NULL_TREE;
	}
      is_constant = simplified && is_gimple_min_invariant (simplified);
      fold_undefer_overflow_warnings (is_constant, stmt, 0);
      if (is_constant)
	{
	  /* The statement produced a constant value.  */
	  val.lattice_val = CONSTANT;
	  val.value = simplified;
	  val.mask = 0;
	}
    }
  /* If the statement is likely to have a VARYING result, then do not
     bother folding the statement.  */
  else if (likelyvalue == VARYING)
    {
      enum gimple_code code = gimple_code (stmt);
      if (code == GIMPLE_ASSIGN)
	{
	  enum tree_code subcode = gimple_assign_rhs_code (stmt);

	  /* Other cases cannot satisfy is_gimple_min_invariant
	     without folding.  */
	  if (get_gimple_rhs_class (subcode) == GIMPLE_SINGLE_RHS)
	    simplified = gimple_assign_rhs1 (stmt);
	}
      else if (code == GIMPLE_SWITCH)
	simplified = gimple_switch_index (as_a <gswitch *> (stmt));
      else
	/* These cannot satisfy is_gimple_min_invariant without folding.  */
	gcc_assert (code == GIMPLE_CALL || code == GIMPLE_COND);
      is_constant = simplified && is_gimple_min_invariant (simplified);
      if (is_constant)
	{
	  /* The statement produced a constant value.  */
	  val.lattice_val = CONSTANT;
	  val.value = simplified;
	  val.mask = 0;
	}
    }
  /* If the statement result is likely UNDEFINED, make it so.  */
  else if (likelyvalue == UNDEFINED)
    {
      val.lattice_val = UNDEFINED;
      val.value = NULL_TREE;
      val.mask = 0;
      return val;
    }

  /* Resort to simplification for bitwise tracking.  */
  if (flag_tree_bit_ccp
      && (likelyvalue == CONSTANT || is_gimple_call (stmt)
	  || (gimple_assign_single_p (stmt)
	      && gimple_assign_rhs_code (stmt) == ADDR_EXPR))
      && !is_constant)
    {
      enum gimple_code code = gimple_code (stmt);
      val.lattice_val = VARYING;
      val.value = NULL_TREE;
      val.mask = -1;
      if (code == GIMPLE_ASSIGN)
	{
	  enum tree_code subcode = gimple_assign_rhs_code (stmt);
	  tree rhs1 = gimple_assign_rhs1 (stmt);
	  tree lhs = gimple_assign_lhs (stmt);
	  if ((INTEGRAL_TYPE_P (TREE_TYPE (lhs))
	       || POINTER_TYPE_P (TREE_TYPE (lhs)))
	      && (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
		  || POINTER_TYPE_P (TREE_TYPE (rhs1))))
	    switch (get_gimple_rhs_class (subcode))
	      {
	      case GIMPLE_SINGLE_RHS:
		val = get_value_for_expr (rhs1, true);
		break;

	      case GIMPLE_UNARY_RHS:
		val = bit_value_unop (subcode, TREE_TYPE (lhs), rhs1);
		break;

	      case GIMPLE_BINARY_RHS:
		val = bit_value_binop (subcode, TREE_TYPE (lhs), rhs1,
				       gimple_assign_rhs2 (stmt));
		break;

	      default:;
	      }
	}
      else if (code == GIMPLE_COND)
	{
	  enum tree_code code = gimple_cond_code (stmt);
	  tree rhs1 = gimple_cond_lhs (stmt);
	  tree rhs2 = gimple_cond_rhs (stmt);
	  if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
	      || POINTER_TYPE_P (TREE_TYPE (rhs1)))
	    val = bit_value_binop (code, TREE_TYPE (rhs1), rhs1, rhs2);
	}
      else if (gimple_call_builtin_p (stmt, BUILT_IN_NORMAL))
	{
	  tree fndecl = gimple_call_fndecl (stmt);
	  switch (DECL_FUNCTION_CODE (fndecl))
	    {
	    case BUILT_IN_MALLOC:
	    case BUILT_IN_REALLOC:
	    case BUILT_IN_CALLOC:
	    case BUILT_IN_STRDUP:
	    case BUILT_IN_STRNDUP:
	      val.lattice_val = CONSTANT;
	      val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0);
	      val.mask = ~((HOST_WIDE_INT) MALLOC_ABI_ALIGNMENT
			   / BITS_PER_UNIT - 1);
	      break;

	    CASE_BUILT_IN_ALLOCA:
	      align = (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_ALLOCA
		       ? BIGGEST_ALIGNMENT
		       : TREE_INT_CST_LOW (gimple_call_arg (stmt, 1)));
	      val.lattice_val = CONSTANT;
	      val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0);
	      val.mask = ~((HOST_WIDE_INT) align / BITS_PER_UNIT - 1);
	      break;

	    /* These builtins return their first argument, unmodified.  */
	    case BUILT_IN_MEMCPY:
	    case BUILT_IN_MEMMOVE:
	    case BUILT_IN_MEMSET:
	    case BUILT_IN_STRCPY:
	    case BUILT_IN_STRNCPY:
	    case BUILT_IN_MEMCPY_CHK:
	    case BUILT_IN_MEMMOVE_CHK:
	    case BUILT_IN_MEMSET_CHK:
	    case BUILT_IN_STRCPY_CHK:
	    case BUILT_IN_STRNCPY_CHK:
	      val = get_value_for_expr (gimple_call_arg (stmt, 0), true);
	      break;

	    case BUILT_IN_ASSUME_ALIGNED:
	      val = bit_value_assume_aligned (stmt, NULL_TREE, val, false);
	      break;

	    case BUILT_IN_ALIGNED_ALLOC:
	      {
		tree align = get_constant_value (gimple_call_arg (stmt, 0));
		if (align
		    && tree_fits_uhwi_p (align))
		  {
		    unsigned HOST_WIDE_INT aligni = tree_to_uhwi (align);
		    if (aligni > 1
			/* align must be power-of-two */
			&& (aligni & (aligni - 1)) == 0)
		      {
			val.lattice_val = CONSTANT;
			val.value = build_int_cst (ptr_type_node, 0);
			val.mask = -aligni;
		      }
		  }
		break;
	      }

	    case BUILT_IN_BSWAP16:
	    case BUILT_IN_BSWAP32:
	    case BUILT_IN_BSWAP64:
	      val = get_value_for_expr (gimple_call_arg (stmt, 0), true);
	      if (val.lattice_val == UNDEFINED)
		break;
	      else if (val.lattice_val == CONSTANT
		       && val.value
		       && TREE_CODE (val.value) == INTEGER_CST)
		{
		  tree type = TREE_TYPE (gimple_call_lhs (stmt));
		  int prec = TYPE_PRECISION (type);
		  wide_int wval = wi::to_wide (val.value);
		  val.value
		    = wide_int_to_tree (type,
					wide_int::from (wval, prec,
							UNSIGNED).bswap ());
		  val.mask
		    = widest_int::from (wide_int::from (val.mask, prec,
							UNSIGNED).bswap (),
					UNSIGNED);
		  if (wi::sext (val.mask, prec) != -1)
		    break;
		}
	      val.lattice_val = VARYING;
	      val.value = NULL_TREE;
	      val.mask = -1;
	      break;

	    default:;
	    }
	}
      if (is_gimple_call (stmt) && gimple_call_lhs (stmt))
	{
	  tree fntype = gimple_call_fntype (stmt);
	  if (fntype)
	    {
	      tree attrs = lookup_attribute ("assume_aligned",
					     TYPE_ATTRIBUTES (fntype));
	      if (attrs)
		val = bit_value_assume_aligned (stmt, attrs, val, false);
	      attrs = lookup_attribute ("alloc_align",
					TYPE_ATTRIBUTES (fntype));
	      if (attrs)
		val = bit_value_assume_aligned (stmt, attrs, val, true);
	    }
	}
      is_constant = (val.lattice_val == CONSTANT);
    }

  if (flag_tree_bit_ccp
      && ((is_constant && TREE_CODE (val.value) == INTEGER_CST)
	  || !is_constant)
      && gimple_get_lhs (stmt)
      && TREE_CODE (gimple_get_lhs (stmt)) == SSA_NAME)
    {
      tree lhs = gimple_get_lhs (stmt);
      wide_int nonzero_bits = get_nonzero_bits (lhs);
      if (nonzero_bits != -1)
	{
	  if (!is_constant)
	    {
	      val.lattice_val = CONSTANT;
	      val.value = build_zero_cst (TREE_TYPE (lhs));
	      val.mask = extend_mask (nonzero_bits, TYPE_SIGN (TREE_TYPE (lhs)));
	      is_constant = true;
	    }
	  else
	    {
	      if (wi::bit_and_not (wi::to_wide (val.value), nonzero_bits) != 0)
		val.value = wide_int_to_tree (TREE_TYPE (lhs),
					      nonzero_bits
					      & wi::to_wide (val.value));
	      if (nonzero_bits == 0)
		val.mask = 0;
	      else
		val.mask = val.mask & extend_mask (nonzero_bits,
						   TYPE_SIGN (TREE_TYPE (lhs)));
	    }
	}
    }

  /* The statement produced a nonconstant value.  */
  if (!is_constant)
    {
      /* The statement produced a copy.  */
      if (simplified && TREE_CODE (simplified) == SSA_NAME
	  && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (simplified))
	{
	  val.lattice_val = CONSTANT;
	  val.value = simplified;
	  val.mask = -1;
	}
      /* The statement is VARYING.  */
      else
	{
	  val.lattice_val = VARYING;
	  val.value = NULL_TREE;
	  val.mask = -1;
	}
    }

  return val;
}
typedef hash_table<nofree_ptr_hash<gimple> > gimple_htab;

/* Given a BUILT_IN_STACK_SAVE value SAVED_VAL, insert a clobber of VAR before
   each matching BUILT_IN_STACK_RESTORE.  Mark visited phis in VISITED.  */

static void
insert_clobber_before_stack_restore (tree saved_val, tree var,
				     gimple_htab **visited)
{
  gimple *stmt;
  gassign *clobber_stmt;
  tree clobber;
  imm_use_iterator iter;
  gimple_stmt_iterator i;
  gimple **slot;

  FOR_EACH_IMM_USE_STMT (stmt, iter, saved_val)
    if (gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
      {
	clobber = build_clobber (TREE_TYPE (var));
	clobber_stmt = gimple_build_assign (var, clobber);

	i = gsi_for_stmt (stmt);
	gsi_insert_before (&i, clobber_stmt, GSI_SAME_STMT);
      }
    else if (gimple_code (stmt) == GIMPLE_PHI)
      {
	if (!*visited)
	  *visited = new gimple_htab (10);

	slot = (*visited)->find_slot (stmt, INSERT);
	if (*slot != NULL)
	  continue;

	*slot = stmt;
	insert_clobber_before_stack_restore (gimple_phi_result (stmt), var,
					     visited);
      }
    else if (gimple_assign_ssa_name_copy_p (stmt))
      insert_clobber_before_stack_restore (gimple_assign_lhs (stmt), var,
					   visited);
    else
      gcc_assert (is_gimple_debug (stmt));
}
/* Advance the iterator to the previous non-debug gimple statement in the same
   or dominating basic block.  */

static inline void
gsi_prev_dom_bb_nondebug (gimple_stmt_iterator *i)
{
  basic_block dom;

  gsi_prev_nondebug (i);
  while (gsi_end_p (*i))
    {
      dom = get_immediate_dominator (CDI_DOMINATORS, i->bb);
      if (dom == NULL || dom == ENTRY_BLOCK_PTR_FOR_FN (cfun))
	return;

      *i = gsi_last_bb (dom);
    }
}
/* Find a BUILT_IN_STACK_SAVE dominating gsi_stmt (I), and insert
   a clobber of VAR before each matching BUILT_IN_STACK_RESTORE.

   It is possible that BUILT_IN_STACK_SAVE cannot be found in a dominator when
   a previous pass (such as DOM) duplicated it along multiple paths to a BB.
   In that case the function gives up without inserting the clobbers.  */

static void
insert_clobbers_for_var (gimple_stmt_iterator i, tree var)
{
  gimple *stmt;
  tree saved_val;
  gimple_htab *visited = NULL;

  for (; !gsi_end_p (i); gsi_prev_dom_bb_nondebug (&i))
    {
      stmt = gsi_stmt (i);

      if (!gimple_call_builtin_p (stmt, BUILT_IN_STACK_SAVE))
	continue;

      saved_val = gimple_call_lhs (stmt);
      if (saved_val == NULL_TREE)
	continue;

      insert_clobber_before_stack_restore (saved_val, var, &visited);
      break;
    }

  delete visited;
}
/* Detects a __builtin_alloca_with_align with constant size argument.  Declares
   fixed-size array and returns the address, if found, otherwise returns
   NULL_TREE.  */

static tree
fold_builtin_alloca_with_align (gimple *stmt)
{
  unsigned HOST_WIDE_INT size, threshold, n_elem;
  tree lhs, arg, block, var, elem_type, array_type;

  /* Get lhs.  */
  lhs = gimple_call_lhs (stmt);
  if (lhs == NULL_TREE)
    return NULL_TREE;

  /* Detect constant argument.  */
  arg = get_constant_value (gimple_call_arg (stmt, 0));
  if (arg == NULL_TREE
      || TREE_CODE (arg) != INTEGER_CST
      || !tree_fits_uhwi_p (arg))
    return NULL_TREE;

  size = tree_to_uhwi (arg);

  /* Heuristic: don't fold large allocas.  */
  threshold = (unsigned HOST_WIDE_INT)param_large_stack_frame;
  /* In case the alloca is located at function entry, it has the same lifetime
     as a declared array, so we allow a larger size.  */
  block = gimple_block (stmt);
  if (!(cfun->after_inlining
	&& block
	&& TREE_CODE (BLOCK_SUPERCONTEXT (block)) == FUNCTION_DECL))
    threshold /= 10;
  if (size > threshold)
    return NULL_TREE;

  /* We have to be able to move points-to info.  We used to assert
     that we can but IPA PTA might end up with two UIDs here
     as it might need to handle more than one instance being
     live at the same time.  Instead of trying to detect this case
     (using the first UID would be OK) just give up for now.  */
  struct ptr_info_def *pi = SSA_NAME_PTR_INFO (lhs);
  unsigned uid = 0;
  if (pi != NULL
      && !pi->pt.anything
      && !pt_solution_singleton_or_null_p (&pi->pt, &uid))
    return NULL_TREE;

  /* Declare array.  */
  elem_type = build_nonstandard_integer_type (BITS_PER_UNIT, 1);
  n_elem = size * 8 / BITS_PER_UNIT;
  array_type = build_array_type_nelts (elem_type, n_elem);

  if (tree ssa_name = SSA_NAME_IDENTIFIER (lhs))
    {
      /* Give the temporary a name derived from the name of the VLA
	 declaration so it can be referenced in diagnostics.  */
      const char *name = IDENTIFIER_POINTER (ssa_name);
      var = create_tmp_var (array_type, name);
    }
  else
    var = create_tmp_var (array_type);

  if (gimple *lhsdef = SSA_NAME_DEF_STMT (lhs))
    {
      /* Set the temporary's location to that of the VLA declaration
	 so it can be pointed to in diagnostics.  */
      location_t loc = gimple_location (lhsdef);
      DECL_SOURCE_LOCATION (var) = loc;
    }
  SET_DECL_ALIGN (var, TREE_INT_CST_LOW (gimple_call_arg (stmt, 1)));
  if (uid != 0)
    SET_DECL_PT_UID (var, uid);

  /* Fold alloca to the address of the array.  */
  return fold_convert (TREE_TYPE (lhs), build_fold_addr_expr (var));
}
2251 /* Fold the stmt at *GSI with CCP specific information that propagating
2252 and regular folding does not catch. */
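/* For instance (hypothetical GIMPLE, only meant as a sketch): if the lattice
   says x_3 is the constant 0, a predicate such as

     if (x_3 != 0) goto <bb 4>; else goto <bb 5>;

   is evaluated here and turned into an always-false condition with
   gimple_cond_make_false, even when x_3 itself could not be replaced in
   every use.  */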
2255 ccp_folder::fold_stmt (gimple_stmt_iterator *gsi)
2257 gimple *stmt = gsi_stmt (*gsi);
2259 switch (gimple_code (stmt))
2263 gcond *cond_stmt = as_a <gcond *> (stmt);
2264 ccp_prop_value_t val;
2265 /* Statement evaluation will handle type mismatches in constants
2266 more gracefully than the final propagation. This allows us to
2267 fold more conditionals here. */
2268 val = evaluate_stmt (stmt);
2269 if (val.lattice_val != CONSTANT
2275 fprintf (dump_file, "Folding predicate ");
2276 print_gimple_expr (dump_file, stmt, 0);
2277 fprintf (dump_file, " to ");
2278 print_generic_expr (dump_file, val.value);
2279 fprintf (dump_file, "\n");
2282 if (integer_zerop (val.value))
2283 gimple_cond_make_false (cond_stmt);
2285 gimple_cond_make_true (cond_stmt);
2292 tree lhs = gimple_call_lhs (stmt);
2293 int flags = gimple_call_flags (stmt);
2296 bool changed = false;
2299 /* If the call was folded into a constant make sure it goes
2300 away even if we cannot propagate into all uses because of
2303 && TREE_CODE (lhs) == SSA_NAME
2304 && (val = get_constant_value (lhs))
2305 /* Don't optimize away calls that have side-effects. */
2306 && (flags & (ECF_CONST|ECF_PURE)) != 0
2307 && (flags & ECF_LOOPING_CONST_OR_PURE) == 0)
2309 tree new_rhs = unshare_expr (val);
2311 if (!useless_type_conversion_p (TREE_TYPE (lhs),
2312 TREE_TYPE (new_rhs)))
2313 new_rhs = fold_convert (TREE_TYPE (lhs), new_rhs);
2314 res = update_call_from_tree (gsi, new_rhs);
2319 /* Internal calls provide no argument types, so the extra laxity
2320 for normal calls does not apply. */
2321 if (gimple_call_internal_p (stmt))
2324 /* The heuristic of fold_builtin_alloca_with_align differs before and
2325 after inlining, so we don't require the arg to be changed into a
2326 constant for folding, but just to be constant. */
2327 if (gimple_call_builtin_p (stmt, BUILT_IN_ALLOCA_WITH_ALIGN)
2328 || gimple_call_builtin_p (stmt, BUILT_IN_ALLOCA_WITH_ALIGN_AND_MAX))
2330 tree new_rhs = fold_builtin_alloca_with_align (stmt);
2333 bool res = update_call_from_tree (gsi, new_rhs);
2334 tree var = TREE_OPERAND (TREE_OPERAND (new_rhs, 0), 0);
2336 insert_clobbers_for_var (*gsi, var);
2341 /* If there's no extra info from an assume_aligned call,
2342 drop it so it doesn't act as otherwise useless dataflow
2344 if (gimple_call_builtin_p (stmt, BUILT_IN_ASSUME_ALIGNED))
2346 tree ptr = gimple_call_arg (stmt, 0);
2347 ccp_prop_value_t ptrval = get_value_for_expr (ptr, true);
2348 if (ptrval.lattice_val == CONSTANT
2349 && TREE_CODE (ptrval.value) == INTEGER_CST
2350 && ptrval.mask != 0)
2352 ccp_prop_value_t val
2353 = bit_value_assume_aligned (stmt, NULL_TREE, ptrval, false);
2354 unsigned int ptralign = least_bit_hwi (ptrval.mask.to_uhwi ());
2355 unsigned int align = least_bit_hwi (val.mask.to_uhwi ());
2356 if (ptralign == align
2357 && ((TREE_INT_CST_LOW (ptrval.value) & (align - 1))
2358 == (TREE_INT_CST_LOW (val.value) & (align - 1))))
2360 bool res = update_call_from_tree (gsi, ptr);
2367 /* Propagate into the call arguments. Compared to replace_uses_in
2368 this can use the argument slot types for type verification
2369 instead of the current argument type. We also can safely
2370 drop qualifiers here as we are dealing with constants anyway. */
2371 argt = TYPE_ARG_TYPES (gimple_call_fntype (stmt));
2372 for (i = 0; i < gimple_call_num_args (stmt) && argt;
2373 ++i, argt = TREE_CHAIN (argt))
2375 tree arg = gimple_call_arg (stmt, i);
2376 if (TREE_CODE (arg) == SSA_NAME
2377 && (val = get_constant_value (arg))
2378 && useless_type_conversion_p
2379 (TYPE_MAIN_VARIANT (TREE_VALUE (argt)),
2380 TYPE_MAIN_VARIANT (TREE_TYPE (val))))
2382 gimple_call_set_arg (stmt, i, unshare_expr (val));
2392 tree lhs = gimple_assign_lhs (stmt);
2395 /* If we have a load that turned out to be constant replace it
2396 as we cannot propagate into all uses in all cases. */
2397 if (gimple_assign_single_p (stmt)
2398 && TREE_CODE (lhs) == SSA_NAME
2399 && (val = get_constant_value (lhs)))
2401 tree rhs = unshare_expr (val);
2402 if (!useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (rhs)))
2403 rhs = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (lhs), rhs);
2404 gimple_assign_set_rhs_from_tree (gsi, rhs);
2416 /* Visit the assignment statement STMT. Set the value of its LHS to the
2417 value computed by the RHS and store LHS in *OUTPUT_P. If STMT
2418 creates virtual definitions, set the value of each new name to that
2419 of the RHS (if we can derive a constant out of the RHS).
2420 Value-returning call statements also perform an assignment, and
2421 are handled here. */
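/* Small invented example: for y_2 = x_1 + 3 with x_1 known to be CONSTANT 4,
   evaluate_stmt yields CONSTANT 7; if that changes y_2's lattice value, y_2
   is stored in *OUTPUT_P so the statements using it get simulated again.  */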
2423 static enum ssa_prop_result
2424 visit_assignment (gimple *stmt, tree *output_p)
2426 ccp_prop_value_t val;
2427 enum ssa_prop_result retval = SSA_PROP_NOT_INTERESTING;
2429 tree lhs = gimple_get_lhs (stmt);
2430 if (TREE_CODE (lhs) == SSA_NAME)
2432 /* Evaluate the statement, which could be
2433 either a GIMPLE_ASSIGN or a GIMPLE_CALL. */
2434 val = evaluate_stmt (stmt);
2436 /* If STMT is an assignment to an SSA_NAME, we only have one
2438 if (set_lattice_value (lhs, &val))
2441 if (val.lattice_val == VARYING)
2442 retval = SSA_PROP_VARYING;
2444 retval = SSA_PROP_INTERESTING;
2452 /* Visit the conditional statement STMT. Return SSA_PROP_INTERESTING
2453 if it can determine which edge will be taken. Otherwise, return
2454 SSA_PROP_VARYING. */
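/* Sketch (hypothetical values): with x_1 known to be CONSTANT 0, a statement
   "if (x_1 > 10)" evaluates to false, find_taken_edge returns the false edge
   and that edge is reported through *TAKEN_EDGE_P.  */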
2456 static enum ssa_prop_result
2457 visit_cond_stmt (gimple *stmt, edge *taken_edge_p)
2459 ccp_prop_value_t val;
2462 block = gimple_bb (stmt);
2463 val = evaluate_stmt (stmt);
2464 if (val.lattice_val != CONSTANT
2466 return SSA_PROP_VARYING;
2468 /* Find which edge out of the conditional block will be taken and add it
2469 to the worklist. If no single edge can be determined statically,
2470 return SSA_PROP_VARYING to feed all the outgoing edges to the
2471 propagation engine. */
2472 *taken_edge_p = find_taken_edge (block, val.value);
2474 return SSA_PROP_INTERESTING;
2476 return SSA_PROP_VARYING;
2480 /* Evaluate statement STMT. If the statement produces an output value and
2481 its evaluation changes the lattice value of its output, return
2482 SSA_PROP_INTERESTING and set *OUTPUT_P to the SSA_NAME holding the
2485 If STMT is a conditional branch and we can determine its truth
2486 value, set *TAKEN_EDGE_P accordingly. If STMT produces a varying
2487 value, return SSA_PROP_VARYING. */
2489 enum ssa_prop_result
2490 ccp_propagate::visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
2495 if (dump_file && (dump_flags & TDF_DETAILS))
2497 fprintf (dump_file, "\nVisiting statement:\n");
2498 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
2501 switch (gimple_code (stmt))
2504 /* If the statement is an assignment that produces a single
2505 output value, evaluate its RHS to see if the lattice value of
2506 its output has changed. */
2507 return visit_assignment (stmt, output_p);
2510 /* A value-returning call also performs an assignment. */
2511 if (gimple_call_lhs (stmt) != NULL_TREE)
2512 return visit_assignment (stmt, output_p);
2517 /* If STMT is a conditional branch, see if we can determine
2518 which branch will be taken. */
2519 /* FIXME. It appears that we should be able to optimize
2520 computed GOTOs here as well. */
2521 return visit_cond_stmt (stmt, taken_edge_p);
2527 /* Any other kind of statement is not interesting for constant
2528 propagation and, therefore, not worth simulating. */
2529 if (dump_file && (dump_flags & TDF_DETAILS))
2530 fprintf (dump_file, "No interesting values produced. Marked VARYING.\n");
2532 /* Definitions made by statements other than assignments to
2533 SSA_NAMEs represent unknown modifications to their outputs.
2534 Mark them VARYING. */
2535 FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
2536 set_value_varying (def);
2538 return SSA_PROP_VARYING;
2542 /* Main entry point for SSA Conditional Constant Propagation. If NONZERO_P,
2543 record nonzero bits. */
2546 do_ssa_ccp (bool nonzero_p)
2548 unsigned int todo = 0;
2549 calculate_dominance_info (CDI_DOMINATORS);
2552 class ccp_propagate ccp_propagate;
2553 ccp_propagate.ssa_propagate ();
2554 if (ccp_finalize (nonzero_p || flag_ipa_bit_cp))
2556 todo = (TODO_cleanup_cfg | TODO_update_ssa);
2558 /* ccp_finalize does not preserve loop-closed ssa. */
2559 loops_state_clear (LOOP_CLOSED_SSA);
2562 free_dominance_info (CDI_DOMINATORS);
2569 const pass_data pass_data_ccp =
2571 GIMPLE_PASS, /* type */
2573 OPTGROUP_NONE, /* optinfo_flags */
2574 TV_TREE_CCP, /* tv_id */
2575 ( PROP_cfg | PROP_ssa ), /* properties_required */
2576 0, /* properties_provided */
2577 0, /* properties_destroyed */
2578 0, /* todo_flags_start */
2579 TODO_update_address_taken, /* todo_flags_finish */
2582 class pass_ccp : public gimple_opt_pass
2585 pass_ccp (gcc::context *ctxt)
2586 : gimple_opt_pass (pass_data_ccp, ctxt), nonzero_p (false)
2589 /* opt_pass methods: */
2590 opt_pass * clone () { return new pass_ccp (m_ctxt); }
2591 void set_pass_param (unsigned int n, bool param)
2593 gcc_assert (n == 0);
2596 virtual bool gate (function *) { return flag_tree_ccp != 0; }
2597 virtual unsigned int execute (function *) { return do_ssa_ccp (nonzero_p); }
2600 /* Determines whether the pass instance records nonzero bits. */
2602 }; // class pass_ccp
2607 make_pass_ccp (gcc::context *ctxt)
2609 return new pass_ccp (ctxt);
2614 /* Try to optimize out __builtin_stack_restore. Optimize it out
2615 if there is another __builtin_stack_restore in the same basic
2616 block and no calls or ASM_EXPRs are in between, or if this block's
2617 only outgoing edge is to EXIT_BLOCK and there are no calls or
2618 ASM_EXPRs after this __builtin_stack_restore. */
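/* Rough example of the first case (invented GIMPLE):

     saved_1 = __builtin_stack_save ();
     __builtin_stack_restore (saved_1);
     x_2 = y_3 + 1;
     __builtin_stack_restore (saved_1);

   the first restore is redundant because a later restore follows in the same
   block with no call or asm in between, so it is turned into a no-op here.  */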
2621 optimize_stack_restore (gimple_stmt_iterator i)
2626 basic_block bb = gsi_bb (i);
2627 gimple *call = gsi_stmt (i);
2629 if (gimple_code (call) != GIMPLE_CALL
2630 || gimple_call_num_args (call) != 1
2631 || TREE_CODE (gimple_call_arg (call, 0)) != SSA_NAME
2632 || !POINTER_TYPE_P (TREE_TYPE (gimple_call_arg (call, 0))))
2635 for (gsi_next (&i); !gsi_end_p (i); gsi_next (&i))
2637 stmt = gsi_stmt (i);
2638 if (gimple_code (stmt) == GIMPLE_ASM)
2640 if (gimple_code (stmt) != GIMPLE_CALL)
2643 callee = gimple_call_fndecl (stmt);
2645 || !fndecl_built_in_p (callee, BUILT_IN_NORMAL)
2646 /* All regular builtins are ok, just obviously not alloca. */
2647 || ALLOCA_FUNCTION_CODE_P (DECL_FUNCTION_CODE (callee)))
2650 if (fndecl_built_in_p (callee, BUILT_IN_STACK_RESTORE))
2651 goto second_stack_restore;
2657 /* Allow one successor of the exit block, or zero successors. */
2658 switch (EDGE_COUNT (bb->succs))
2663 if (single_succ_edge (bb)->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
2669 second_stack_restore:
2671 /* If there's exactly one use, then zap the call to __builtin_stack_save.
2672 If there are multiple uses, then the last one should remove the call.
2673 In any case, whether the call to __builtin_stack_save can be removed
2674 or not is irrelevant to removing the call to __builtin_stack_restore. */
2675 if (has_single_use (gimple_call_arg (call, 0)))
2677 gimple *stack_save = SSA_NAME_DEF_STMT (gimple_call_arg (call, 0));
2678 if (is_gimple_call (stack_save))
2680 callee = gimple_call_fndecl (stack_save);
2681 if (callee && fndecl_built_in_p (callee, BUILT_IN_STACK_SAVE))
2683 gimple_stmt_iterator stack_save_gsi;
2686 stack_save_gsi = gsi_for_stmt (stack_save);
2687 rhs = build_int_cst (TREE_TYPE (gimple_call_arg (call, 0)), 0);
2688 update_call_from_tree (&stack_save_gsi, rhs);
2693 /* No effect, so the statement will be deleted. */
2694 return integer_zero_node;
2697 /* If va_list type is a simple pointer and nothing special is needed,
2698 optimize __builtin_va_start (&ap, 0) into ap = __builtin_next_arg (0),
2699 __builtin_va_end (&ap) out as NOP and __builtin_va_copy into a simple
2700 pointer assignment. */
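/* Illustrative only, assuming the target's va_list really is a plain char or
   void pointer: __builtin_va_start (&ap, 0) becomes ap = __builtin_next_arg (0),
   __builtin_va_copy (&dst, src) becomes dst = src, and __builtin_va_end (&ap)
   is deleted outright.  */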
2703 optimize_stdarg_builtin (gimple *call)
2705 tree callee, lhs, rhs, cfun_va_list;
2706 bool va_list_simple_ptr;
2707 location_t loc = gimple_location (call);
2709 callee = gimple_call_fndecl (call);
2711 cfun_va_list = targetm.fn_abi_va_list (callee);
2712 va_list_simple_ptr = POINTER_TYPE_P (cfun_va_list)
2713 && (TREE_TYPE (cfun_va_list) == void_type_node
2714 || TREE_TYPE (cfun_va_list) == char_type_node);
2716 switch (DECL_FUNCTION_CODE (callee))
2718 case BUILT_IN_VA_START:
2719 if (!va_list_simple_ptr
2720 || targetm.expand_builtin_va_start != NULL
2721 || !builtin_decl_explicit_p (BUILT_IN_NEXT_ARG))
2724 if (gimple_call_num_args (call) != 2)
2727 lhs = gimple_call_arg (call, 0);
2728 if (!POINTER_TYPE_P (TREE_TYPE (lhs))
2729 || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs)))
2730 != TYPE_MAIN_VARIANT (cfun_va_list))
2733 lhs = build_fold_indirect_ref_loc (loc, lhs);
2734 rhs = build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_NEXT_ARG),
2735 1, integer_zero_node);
2736 rhs = fold_convert_loc (loc, TREE_TYPE (lhs), rhs);
2737 return build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, rhs);
2739 case BUILT_IN_VA_COPY:
2740 if (!va_list_simple_ptr)
2743 if (gimple_call_num_args (call) != 2)
2746 lhs = gimple_call_arg (call, 0);
2747 if (!POINTER_TYPE_P (TREE_TYPE (lhs))
2748 || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs)))
2749 != TYPE_MAIN_VARIANT (cfun_va_list))
2752 lhs = build_fold_indirect_ref_loc (loc, lhs);
2753 rhs = gimple_call_arg (call, 1);
2754 if (TYPE_MAIN_VARIANT (TREE_TYPE (rhs))
2755 != TYPE_MAIN_VARIANT (cfun_va_list))
2758 rhs = fold_convert_loc (loc, TREE_TYPE (lhs), rhs);
2759 return build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, rhs);
2761 case BUILT_IN_VA_END:
2762 /* No effect, so the statement will be deleted. */
2763 return integer_zero_node;
2770 /* Attempt to make the block of __builtin_unreachable I unreachable by changing
2771 the incoming jumps. Return true if at least one jump was changed. */
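/* Sketch of the intent (hypothetical CFG): if <bb 5> starts with
   __builtin_unreachable () and a predecessor ends in
   "if (x_1 != 0) goto <bb 5>; else goto <bb 6>;", the true edge can never be
   taken, so the condition is rewritten with gimple_cond_make_false and the
   jump into the unreachable block goes away.  */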
2774 optimize_unreachable (gimple_stmt_iterator i)
2776 basic_block bb = gsi_bb (i);
2777 gimple_stmt_iterator gsi;
2783 if (flag_sanitize & SANITIZE_UNREACHABLE)
2786 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2788 stmt = gsi_stmt (gsi);
2790 if (is_gimple_debug (stmt))
2793 if (glabel *label_stmt = dyn_cast <glabel *> (stmt))
2795 /* Verify we do not need to preserve the label. */
2796 if (FORCED_LABEL (gimple_label_label (label_stmt)))
2802 /* Only handle the case that __builtin_unreachable is the first statement
2803 in the block. We rely on DCE to remove stmts without side-effects
2804 before __builtin_unreachable. */
2805 if (gsi_stmt (gsi) != gsi_stmt (i))
2810 FOR_EACH_EDGE (e, ei, bb->preds)
2812 gsi = gsi_last_bb (e->src);
2813 if (gsi_end_p (gsi))
2816 stmt = gsi_stmt (gsi);
2817 if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
2819 if (e->flags & EDGE_TRUE_VALUE)
2820 gimple_cond_make_false (cond_stmt);
2821 else if (e->flags & EDGE_FALSE_VALUE)
2822 gimple_cond_make_true (cond_stmt);
2825 update_stmt (cond_stmt);
2829 /* Todo: handle other cases. Note that unreachable switch case
2830 statements have already been removed. */
2841 mask_2 = 1 << cnt_1;
2842 _4 = __atomic_fetch_or_* (ptr_6, mask_2, _3);
2845 _4 = ATOMIC_BIT_TEST_AND_SET (ptr_6, cnt_1, 0, _3);
2847 If _5 is only used in _5 != 0 or _5 == 0 comparisons, 1
2848 is passed instead of 0, and the builtin just returns a zero
2849 or 1 value instead of the actual bit.
2850 Similarly for __sync_fetch_and_or_* (without the ", _3" part
2851 in there), and/or if mask_2 is a power of 2 constant.
2852 Similarly for xor instead of or, use ATOMIC_BIT_TEST_AND_COMPLEMENT
2853 in that case. And similarly for and instead of or, except that
2854 the second argument to the builtin needs to be one's complement
2855 of the mask instead of mask. */
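/* For the "and" flavour mentioned above, an illustrative (not verbatim)
   input is

     mask_2 = 1 << cnt_1;
     _3 = ~mask_2;
     _4 = __atomic_fetch_and_4 (ptr_6, _3, _5);
     _7 = _4 & mask_2;

   which this function rewrites to ATOMIC_BIT_TEST_AND_RESET on bit cnt_1,
   again returning either the old bit or a 0/1 flag depending on how _7 is
   used.  */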
2858 optimize_atomic_bit_test_and (gimple_stmt_iterator *gsip,
2859 enum internal_fn fn, bool has_model_arg,
2862 gimple *call = gsi_stmt (*gsip);
2863 tree lhs = gimple_call_lhs (call);
2864 use_operand_p use_p;
2869 if (!flag_inline_atomics
2871 || !gimple_call_builtin_p (call, BUILT_IN_NORMAL)
2873 || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs)
2874 || !single_imm_use (lhs, &use_p, &use_stmt)
2875 || !is_gimple_assign (use_stmt)
2876 || gimple_assign_rhs_code (use_stmt) != BIT_AND_EXPR
2877 || !gimple_vdef (call))
2882 case IFN_ATOMIC_BIT_TEST_AND_SET:
2883 optab = atomic_bit_test_and_set_optab;
2885 case IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT:
2886 optab = atomic_bit_test_and_complement_optab;
2888 case IFN_ATOMIC_BIT_TEST_AND_RESET:
2889 optab = atomic_bit_test_and_reset_optab;
2895 if (optab_handler (optab, TYPE_MODE (TREE_TYPE (lhs))) == CODE_FOR_nothing)
2898 mask = gimple_call_arg (call, 1);
2899 tree use_lhs = gimple_assign_lhs (use_stmt);
2903 if (TREE_CODE (mask) == INTEGER_CST)
2905 if (fn == IFN_ATOMIC_BIT_TEST_AND_RESET)
2906 mask = const_unop (BIT_NOT_EXPR, TREE_TYPE (mask), mask);
2907 mask = fold_convert (TREE_TYPE (lhs), mask);
2908 int ibit = tree_log2 (mask);
2911 bit = build_int_cst (TREE_TYPE (lhs), ibit);
2913 else if (TREE_CODE (mask) == SSA_NAME)
2915 gimple *g = SSA_NAME_DEF_STMT (mask);
2916 if (fn == IFN_ATOMIC_BIT_TEST_AND_RESET)
2918 if (!is_gimple_assign (g)
2919 || gimple_assign_rhs_code (g) != BIT_NOT_EXPR)
2921 mask = gimple_assign_rhs1 (g);
2922 if (TREE_CODE (mask) != SSA_NAME)
2924 g = SSA_NAME_DEF_STMT (mask);
2926 if (!is_gimple_assign (g)
2927 || gimple_assign_rhs_code (g) != LSHIFT_EXPR
2928 || !integer_onep (gimple_assign_rhs1 (g)))
2930 bit = gimple_assign_rhs2 (g);
2935 if (gimple_assign_rhs1 (use_stmt) == lhs)
2937 if (!operand_equal_p (gimple_assign_rhs2 (use_stmt), mask, 0))
2940 else if (gimple_assign_rhs2 (use_stmt) != lhs
2941 || !operand_equal_p (gimple_assign_rhs1 (use_stmt), mask, 0))
2944 bool use_bool = true;
2945 bool has_debug_uses = false;
2946 imm_use_iterator iter;
2949 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use_lhs))
2951 FOR_EACH_IMM_USE_STMT (g, iter, use_lhs)
2953 enum tree_code code = ERROR_MARK;
2954 tree op0 = NULL_TREE, op1 = NULL_TREE;
2955 if (is_gimple_debug (g))
2957 has_debug_uses = true;
2960 else if (is_gimple_assign (g))
2961 switch (gimple_assign_rhs_code (g))
2964 op1 = gimple_assign_rhs1 (g);
2965 code = TREE_CODE (op1);
2966 op0 = TREE_OPERAND (op1, 0);
2967 op1 = TREE_OPERAND (op1, 1);
2971 code = gimple_assign_rhs_code (g);
2972 op0 = gimple_assign_rhs1 (g);
2973 op1 = gimple_assign_rhs2 (g);
2978 else if (gimple_code (g) == GIMPLE_COND)
2980 code = gimple_cond_code (g);
2981 op0 = gimple_cond_lhs (g);
2982 op1 = gimple_cond_rhs (g);
2985 if ((code == EQ_EXPR || code == NE_EXPR)
2987 && integer_zerop (op1))
2989 use_operand_p use_p;
2991 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2998 BREAK_FROM_IMM_USE_STMT (iter);
3001 tree new_lhs = make_ssa_name (TREE_TYPE (lhs));
3002 tree flag = build_int_cst (TREE_TYPE (lhs), use_bool);
3004 g = gimple_build_call_internal (fn, 4, gimple_call_arg (call, 0),
3005 bit, flag, gimple_call_arg (call, 2));
3007 g = gimple_build_call_internal (fn, 3, gimple_call_arg (call, 0),
3009 gimple_call_set_lhs (g, new_lhs);
3010 gimple_set_location (g, gimple_location (call));
3011 gimple_move_vops (g, call);
3012 bool throws = stmt_can_throw_internal (cfun, call);
3013 gimple_call_set_nothrow (as_a <gcall *> (g),
3014 gimple_call_nothrow_p (as_a <gcall *> (call)));
3015 gimple_stmt_iterator gsi = *gsip;
3016 gsi_insert_after (&gsi, g, GSI_NEW_STMT);
3020 maybe_clean_or_replace_eh_stmt (call, g);
3021 if (after || (use_bool && has_debug_uses))
3022 e = find_fallthru_edge (gsi_bb (gsi)->succs);
3026 /* The internal function returns the value of the specified bit
3027 before the atomic operation. If we are interested in the value
3028 of the specified bit after the atomic operation (makes only sense
3029 for xor, otherwise the bit content is compile time known),
3030 we need to invert the bit. */
3031 g = gimple_build_assign (make_ssa_name (TREE_TYPE (lhs)),
3032 BIT_XOR_EXPR, new_lhs,
3033 use_bool ? build_int_cst (TREE_TYPE (lhs), 1)
3035 new_lhs = gimple_assign_lhs (g);
3038 gsi_insert_on_edge_immediate (e, g);
3039 gsi = gsi_for_stmt (g);
3042 gsi_insert_after (&gsi, g, GSI_NEW_STMT);
3044 if (use_bool && has_debug_uses)
3046 tree temp = NULL_TREE;
3047 if (!throws || after || single_pred_p (e->dest))
3049 temp = make_node (DEBUG_EXPR_DECL);
3050 DECL_ARTIFICIAL (temp) = 1;
3051 TREE_TYPE (temp) = TREE_TYPE (lhs);
3052 SET_DECL_MODE (temp, TYPE_MODE (TREE_TYPE (lhs)));
3053 tree t = build2 (LSHIFT_EXPR, TREE_TYPE (lhs), new_lhs, bit);
3054 g = gimple_build_debug_bind (temp, t, g);
3055 if (throws && !after)
3057 gsi = gsi_after_labels (e->dest);
3058 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
3061 gsi_insert_after (&gsi, g, GSI_NEW_STMT);
3063 FOR_EACH_IMM_USE_STMT (g, iter, use_lhs)
3064 if (is_gimple_debug (g))
3066 use_operand_p use_p;
3067 if (temp == NULL_TREE)
3068 gimple_debug_bind_reset_value (g);
3070 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
3071 SET_USE (use_p, temp);
3075 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_lhs)
3076 = SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use_lhs);
3077 replace_uses_by (use_lhs, new_lhs);
3078 gsi = gsi_for_stmt (use_stmt);
3079 gsi_remove (&gsi, true);
3080 release_defs (use_stmt);
3081 gsi_remove (gsip, true);
3082 release_ssa_name (lhs);
3091 Similarly for memset (&a, ..., sizeof (a)); instead of a = {};
3092 and/or memcpy (&b, &a, sizeof (a)); instead of b = a; */
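/* A hedged example of the memset case (identifiers invented): given

     memset (&a, 0, sizeof (a));
     b = a;

   with no intervening store to `a', the copy reads only bytes known to be
   zero, so it can be rewritten as "b = {};" below.  */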
3095 optimize_memcpy (gimple_stmt_iterator *gsip, tree dest, tree src, tree len)
3097 gimple *stmt = gsi_stmt (*gsip);
3098 if (gimple_has_volatile_ops (stmt))
3101 tree vuse = gimple_vuse (stmt);
3105 gimple *defstmt = SSA_NAME_DEF_STMT (vuse);
3106 tree src2 = NULL_TREE, len2 = NULL_TREE;
3107 poly_int64 offset, offset2;
3108 tree val = integer_zero_node;
3109 if (gimple_store_p (defstmt)
3110 && gimple_assign_single_p (defstmt)
3111 && TREE_CODE (gimple_assign_rhs1 (defstmt)) == CONSTRUCTOR
3112 && !gimple_clobber_p (defstmt))
3113 src2 = gimple_assign_lhs (defstmt);
3114 else if (gimple_call_builtin_p (defstmt, BUILT_IN_MEMSET)
3115 && TREE_CODE (gimple_call_arg (defstmt, 0)) == ADDR_EXPR
3116 && TREE_CODE (gimple_call_arg (defstmt, 1)) == INTEGER_CST)
3118 src2 = TREE_OPERAND (gimple_call_arg (defstmt, 0), 0);
3119 len2 = gimple_call_arg (defstmt, 2);
3120 val = gimple_call_arg (defstmt, 1);
3121 /* For non-0 val, we'd have to transform stmt from assignment
3122 into memset (only if dest is addressable). */
3123 if (!integer_zerop (val) && is_gimple_assign (stmt))
3127 if (src2 == NULL_TREE)
3130 if (len == NULL_TREE)
3131 len = (TREE_CODE (src) == COMPONENT_REF
3132 ? DECL_SIZE_UNIT (TREE_OPERAND (src, 1))
3133 : TYPE_SIZE_UNIT (TREE_TYPE (src)));
3134 if (len2 == NULL_TREE)
3135 len2 = (TREE_CODE (src2) == COMPONENT_REF
3136 ? DECL_SIZE_UNIT (TREE_OPERAND (src2, 1))
3137 : TYPE_SIZE_UNIT (TREE_TYPE (src2)));
3138 if (len == NULL_TREE
3139 || !poly_int_tree_p (len)
3140 || len2 == NULL_TREE
3141 || !poly_int_tree_p (len2))
3144 src = get_addr_base_and_unit_offset (src, &offset);
3145 src2 = get_addr_base_and_unit_offset (src2, &offset2);
3146 if (src == NULL_TREE
3147 || src2 == NULL_TREE
3148 || maybe_lt (offset, offset2))
3151 if (!operand_equal_p (src, src2, 0))
3154 /* [ src + offset2, src + offset2 + len2 - 1 ] is set to val.
3156 [ src + offset, src + offset + len - 1 ] is a subset of that. */
3157 if (maybe_gt (wi::to_poly_offset (len) + (offset - offset2),
3158 wi::to_poly_offset (len2)))
3161 if (dump_file && (dump_flags & TDF_DETAILS))
3163 fprintf (dump_file, "Simplified\n ");
3164 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
3165 fprintf (dump_file, "after previous\n ");
3166 print_gimple_stmt (dump_file, defstmt, 0, dump_flags);
3169 /* For simplicity, don't change the kind of the stmt,
3170 turn dest = src; into dest = {}; and memcpy (&dest, &src, len);
3171 into memset (&dest, val, len);
3172 In theory we could change dest = src into memset if dest
3173 is addressable (maybe beneficial if val is not 0), or
3174 memcpy (&dest, &src, len) into dest = {} if len is the size
3175 of dest, dest isn't volatile. */
3176 if (is_gimple_assign (stmt))
3178 tree ctor = build_constructor (TREE_TYPE (dest), NULL);
3179 gimple_assign_set_rhs_from_tree (gsip, ctor);
3182 else /* If stmt is memcpy, transform it into memset. */
3184 gcall *call = as_a <gcall *> (stmt);
3185 tree fndecl = builtin_decl_implicit (BUILT_IN_MEMSET);
3186 gimple_call_set_fndecl (call, fndecl);
3187 gimple_call_set_fntype (call, TREE_TYPE (fndecl));
3188 gimple_call_set_arg (call, 1, val);
3192 if (dump_file && (dump_flags & TDF_DETAILS))
3194 fprintf (dump_file, "into\n ");
3195 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
3199 /* A simple pass that attempts to fold all builtin functions. This pass
3200 is run after we've propagated as many constants as we can. */
3204 const pass_data pass_data_fold_builtins =
3206 GIMPLE_PASS, /* type */
3208 OPTGROUP_NONE, /* optinfo_flags */
3209 TV_NONE, /* tv_id */
3210 ( PROP_cfg | PROP_ssa ), /* properties_required */
3211 0, /* properties_provided */
3212 0, /* properties_destroyed */
3213 0, /* todo_flags_start */
3214 TODO_update_ssa, /* todo_flags_finish */
3217 class pass_fold_builtins : public gimple_opt_pass
3220 pass_fold_builtins (gcc::context *ctxt)
3221 : gimple_opt_pass (pass_data_fold_builtins, ctxt)
3224 /* opt_pass methods: */
3225 opt_pass * clone () { return new pass_fold_builtins (m_ctxt); }
3226 virtual unsigned int execute (function *);
3228 }; // class pass_fold_builtins
3231 pass_fold_builtins::execute (function *fun)
3233 bool cfg_changed = false;
3235 unsigned int todoflags = 0;
3237 FOR_EACH_BB_FN (bb, fun)
3239 gimple_stmt_iterator i;
3240 for (i = gsi_start_bb (bb); !gsi_end_p (i); )
3242 gimple *stmt, *old_stmt;
3244 enum built_in_function fcode;
3246 stmt = gsi_stmt (i);
3248 if (gimple_code (stmt) != GIMPLE_CALL)
3250 /* Remove all *ssaname_N ={v} {CLOBBER}; stmts,
3251 after the last GIMPLE DSE they aren't needed and might
3252 unnecessarily keep the SSA_NAMEs live. */
3253 if (gimple_clobber_p (stmt))
3255 tree lhs = gimple_assign_lhs (stmt);
3256 if (TREE_CODE (lhs) == MEM_REF
3257 && TREE_CODE (TREE_OPERAND (lhs, 0)) == SSA_NAME)
3259 unlink_stmt_vdef (stmt);
3260 gsi_remove (&i, true);
3261 release_defs (stmt);
3265 else if (gimple_assign_load_p (stmt) && gimple_store_p (stmt))
3266 optimize_memcpy (&i, gimple_assign_lhs (stmt),
3267 gimple_assign_rhs1 (stmt), NULL_TREE);
3272 callee = gimple_call_fndecl (stmt);
3273 if (!callee || !fndecl_built_in_p (callee, BUILT_IN_NORMAL))
3279 fcode = DECL_FUNCTION_CODE (callee);
3284 tree result = NULL_TREE;
3285 switch (DECL_FUNCTION_CODE (callee))
3287 case BUILT_IN_CONSTANT_P:
3288 /* Resolve __builtin_constant_p. If it hasn't been
3289 folded to integer_one_node by now, it's fairly
3290 certain that the value simply isn't constant. */
3291 result = integer_zero_node;
3294 case BUILT_IN_ASSUME_ALIGNED:
3295 /* Remove __builtin_assume_aligned. */
3296 result = gimple_call_arg (stmt, 0);
3299 case BUILT_IN_STACK_RESTORE:
3300 result = optimize_stack_restore (i);
3306 case BUILT_IN_UNREACHABLE:
3307 if (optimize_unreachable (i))
3311 case BUILT_IN_ATOMIC_FETCH_OR_1:
3312 case BUILT_IN_ATOMIC_FETCH_OR_2:
3313 case BUILT_IN_ATOMIC_FETCH_OR_4:
3314 case BUILT_IN_ATOMIC_FETCH_OR_8:
3315 case BUILT_IN_ATOMIC_FETCH_OR_16:
3316 optimize_atomic_bit_test_and (&i,
3317 IFN_ATOMIC_BIT_TEST_AND_SET,
3320 case BUILT_IN_SYNC_FETCH_AND_OR_1:
3321 case BUILT_IN_SYNC_FETCH_AND_OR_2:
3322 case BUILT_IN_SYNC_FETCH_AND_OR_4:
3323 case BUILT_IN_SYNC_FETCH_AND_OR_8:
3324 case BUILT_IN_SYNC_FETCH_AND_OR_16:
3325 optimize_atomic_bit_test_and (&i,
3326 IFN_ATOMIC_BIT_TEST_AND_SET,
3330 case BUILT_IN_ATOMIC_FETCH_XOR_1:
3331 case BUILT_IN_ATOMIC_FETCH_XOR_2:
3332 case BUILT_IN_ATOMIC_FETCH_XOR_4:
3333 case BUILT_IN_ATOMIC_FETCH_XOR_8:
3334 case BUILT_IN_ATOMIC_FETCH_XOR_16:
3335 optimize_atomic_bit_test_and
3336 (&i, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, true, false);
3338 case BUILT_IN_SYNC_FETCH_AND_XOR_1:
3339 case BUILT_IN_SYNC_FETCH_AND_XOR_2:
3340 case BUILT_IN_SYNC_FETCH_AND_XOR_4:
3341 case BUILT_IN_SYNC_FETCH_AND_XOR_8:
3342 case BUILT_IN_SYNC_FETCH_AND_XOR_16:
3343 optimize_atomic_bit_test_and
3344 (&i, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, false, false);
3347 case BUILT_IN_ATOMIC_XOR_FETCH_1:
3348 case BUILT_IN_ATOMIC_XOR_FETCH_2:
3349 case BUILT_IN_ATOMIC_XOR_FETCH_4:
3350 case BUILT_IN_ATOMIC_XOR_FETCH_8:
3351 case BUILT_IN_ATOMIC_XOR_FETCH_16:
3352 optimize_atomic_bit_test_and
3353 (&i, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, true, true);
3355 case BUILT_IN_SYNC_XOR_AND_FETCH_1:
3356 case BUILT_IN_SYNC_XOR_AND_FETCH_2:
3357 case BUILT_IN_SYNC_XOR_AND_FETCH_4:
3358 case BUILT_IN_SYNC_XOR_AND_FETCH_8:
3359 case BUILT_IN_SYNC_XOR_AND_FETCH_16:
3360 optimize_atomic_bit_test_and
3361 (&i, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, false, true);
3364 case BUILT_IN_ATOMIC_FETCH_AND_1:
3365 case BUILT_IN_ATOMIC_FETCH_AND_2:
3366 case BUILT_IN_ATOMIC_FETCH_AND_4:
3367 case BUILT_IN_ATOMIC_FETCH_AND_8:
3368 case BUILT_IN_ATOMIC_FETCH_AND_16:
3369 optimize_atomic_bit_test_and (&i,
3370 IFN_ATOMIC_BIT_TEST_AND_RESET,
3373 case BUILT_IN_SYNC_FETCH_AND_AND_1:
3374 case BUILT_IN_SYNC_FETCH_AND_AND_2:
3375 case BUILT_IN_SYNC_FETCH_AND_AND_4:
3376 case BUILT_IN_SYNC_FETCH_AND_AND_8:
3377 case BUILT_IN_SYNC_FETCH_AND_AND_16:
3378 optimize_atomic_bit_test_and (&i,
3379 IFN_ATOMIC_BIT_TEST_AND_RESET,
3383 case BUILT_IN_MEMCPY:
3384 if (gimple_call_builtin_p (stmt, BUILT_IN_NORMAL)
3385 && TREE_CODE (gimple_call_arg (stmt, 0)) == ADDR_EXPR
3386 && TREE_CODE (gimple_call_arg (stmt, 1)) == ADDR_EXPR
3387 && TREE_CODE (gimple_call_arg (stmt, 2)) == INTEGER_CST)
3389 tree dest = TREE_OPERAND (gimple_call_arg (stmt, 0), 0);
3390 tree src = TREE_OPERAND (gimple_call_arg (stmt, 1), 0);
3391 tree len = gimple_call_arg (stmt, 2);
3392 optimize_memcpy (&i, dest, src, len);
3396 case BUILT_IN_VA_START:
3397 case BUILT_IN_VA_END:
3398 case BUILT_IN_VA_COPY:
3399 /* These shouldn't be folded before pass_stdarg. */
3400 result = optimize_stdarg_builtin (stmt);
3412 if (!update_call_from_tree (&i, result))
3413 gimplify_and_update_call_from_tree (&i, result);
3416 todoflags |= TODO_update_address_taken;
3418 if (dump_file && (dump_flags & TDF_DETAILS))
3420 fprintf (dump_file, "Simplified\n ");
3421 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
3425 stmt = gsi_stmt (i);
3428 if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt)
3429 && gimple_purge_dead_eh_edges (bb))
3432 if (dump_file && (dump_flags & TDF_DETAILS))
3434 fprintf (dump_file, "to\n ");
3435 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
3436 fprintf (dump_file, "\n");
3439 /* Retry the same statement if it changed into another
3440 builtin, there might be new opportunities now. */
3441 if (gimple_code (stmt) != GIMPLE_CALL)
3446 callee = gimple_call_fndecl (stmt);
3448 || !fndecl_built_in_p (callee, fcode))
3453 /* Delete unreachable blocks. */
3455 todoflags |= TODO_cleanup_cfg;
3463 make_pass_fold_builtins (gcc::context *ctxt)
3465 return new pass_fold_builtins (ctxt);
3468 /* A simple pass that emits some warnings post IPA. */
3472 const pass_data pass_data_post_ipa_warn =
3474 GIMPLE_PASS, /* type */
3475 "post_ipa_warn", /* name */
3476 OPTGROUP_NONE, /* optinfo_flags */
3477 TV_NONE, /* tv_id */
3478 ( PROP_cfg | PROP_ssa ), /* properties_required */
3479 0, /* properties_provided */
3480 0, /* properties_destroyed */
3481 0, /* todo_flags_start */
3482 0, /* todo_flags_finish */
3485 class pass_post_ipa_warn : public gimple_opt_pass
3488 pass_post_ipa_warn (gcc::context *ctxt)
3489 : gimple_opt_pass (pass_data_post_ipa_warn, ctxt)
3492 /* opt_pass methods: */
3493 opt_pass * clone () { return new pass_post_ipa_warn (m_ctxt); }
3494 virtual bool gate (function *) { return warn_nonnull != 0; }
3495 virtual unsigned int execute (function *);
3497 }; // class pass_post_ipa_warn
3500 pass_post_ipa_warn::execute (function *fun)
3504 FOR_EACH_BB_FN (bb, fun)
3506 gimple_stmt_iterator gsi;
3507 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3509 gimple *stmt = gsi_stmt (gsi);
3510 if (!is_gimple_call (stmt) || gimple_no_warning_p (stmt))
3516 = get_nonnull_args (gimple_call_fntype (stmt));
3519 for (unsigned i = 0; i < gimple_call_num_args (stmt); i++)
3521 tree arg = gimple_call_arg (stmt, i);
3522 if (TREE_CODE (TREE_TYPE (arg)) != POINTER_TYPE)
3524 if (!integer_zerop (arg))
3526 if (!bitmap_empty_p (nonnullargs)
3527 && !bitmap_bit_p (nonnullargs, i))
3530 location_t loc = gimple_location (stmt);
3531 auto_diagnostic_group d;
3532 if (warning_at (loc, OPT_Wnonnull,
3533 "%Gargument %u null where non-null "
3534 "expected", stmt, i + 1))
3536 tree fndecl = gimple_call_fndecl (stmt);
3537 if (fndecl && DECL_IS_BUILTIN (fndecl))
3538 inform (loc, "in a call to built-in function %qD",
3541 inform (DECL_SOURCE_LOCATION (fndecl),
3542 "in a call to function %qD declared here",
3547 BITMAP_FREE (nonnullargs);
3558 make_pass_post_ipa_warn (gcc::context *ctxt)
3560 return new pass_post_ipa_warn (ctxt);