gcc/tree-ssa-ccp.c
1 /* Conditional constant propagation pass for the GNU compiler.
2 Copyright (C) 2000-2015 Free Software Foundation, Inc.
3 Adapted from original RTL SSA-CCP by Daniel Berlin <dberlin@dberlin.org>
4 Adapted to GIMPLE trees by Diego Novillo <dnovillo@redhat.com>
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it
9 under the terms of the GNU General Public License as published by the
10 Free Software Foundation; either version 3, or (at your option) any
11 later version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT
14 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 /* Conditional constant propagation (CCP) is based on the SSA
23 propagation engine (tree-ssa-propagate.c). Constant assignments of
24 the form VAR = CST are propagated from the assignments into uses of
25 VAR, which in turn may generate new constants. The simulation uses
26 a four level lattice to keep track of constant values associated
27 with SSA names. Given an SSA name V_i, it may take one of the
28 following values:
29
30 UNINITIALIZED -> the initial state of the value. This value
31 is replaced with a correct initial value
32 the first time the value is used, so the
33 rest of the pass does not need to care about
34 it. Using this value simplifies initialization
35 of the pass, and prevents us from needlessly
36 scanning statements that are never reached.
37
38 UNDEFINED -> V_i is a local variable whose definition
39 has not been processed yet. Therefore we
40 don't yet know if its value is a constant
41 or not.
42
43 CONSTANT -> V_i has been found to hold a constant
44 value C.
45
46 VARYING -> V_i cannot take a constant value, or if it
47 does, it is not possible to determine it
48 at compile time.
49
50 The core of SSA-CCP is in ccp_visit_stmt and ccp_visit_phi_node:
51
52 1- In ccp_visit_stmt, we are interested in assignments whose RHS
53 evaluates into a constant and conditional jumps whose predicate
54 evaluates into a boolean true or false. When an assignment of
55 the form V_i = CONST is found, V_i's lattice value is set to
56 CONSTANT and CONST is associated with it. This causes the
57       propagation engine to add all the SSA edges coming out of the
58 assignment into the worklists, so that statements that use V_i
59 can be visited.
60
61 If the statement is a conditional with a constant predicate, we
62 mark the outgoing edges as executable or not executable
63 depending on the predicate's value. This is then used when
64 visiting PHI nodes to know when a PHI argument can be ignored.
65
66
67 2- In ccp_visit_phi_node, if all the PHI arguments evaluate to the
68 same constant C, then the LHS of the PHI is set to C. This
69 evaluation is known as the "meet operation". Since one of the
70 goals of this evaluation is to optimistically return constant
71    values as often as possible, it uses two main shortcuts:
72
73 - If an argument is flowing in through a non-executable edge, it
74 is ignored. This is useful in cases like this:
75
76 if (PRED)
77 a_9 = 3;
78 else
79 a_10 = 100;
80 a_11 = PHI (a_9, a_10)
81
82 If PRED is known to always evaluate to false, then we can
83 assume that a_11 will always take its value from a_10, meaning
84       that instead of considering it VARYING (a_9 and a_10 have
85 different values), we can consider it CONSTANT 100.
86
87 - If an argument has an UNDEFINED value, then it does not affect
88 the outcome of the meet operation. If a variable V_i has an
89 UNDEFINED value, it means that either its defining statement
90 hasn't been visited yet or V_i has no defining statement, in
91 which case the original symbol 'V' is being used
92 uninitialized. Since 'V' is a local variable, the compiler
93 may assume any initial value for it.
94
95
96 After propagation, every variable V_i that ends up with a lattice
97 value of CONSTANT will have the associated constant value in the
98 array CONST_VAL[i].VALUE. That is fed into substitute_and_fold for
99 final substitution and folding.
100
101 This algorithm uses wide-ints at the max precision of the target.
102 This means that, with one uninteresting exception, variables with
103 UNSIGNED types never go to VARYING because the bits above the
104 precision of the type of the variable are always zero. The
105 uninteresting case is a variable of UNSIGNED type that has the
106 maximum precision of the target. Such variables can go to VARYING,
107    but this causes no loss of information since these variables will
108 never be extended.
109
110 References:
111
112 Constant propagation with conditional branches,
113 Wegman and Zadeck, ACM TOPLAS 13(2):181-210.
114
115 Building an Optimizing Compiler,
116 Robert Morgan, Butterworth-Heinemann, 1998, Section 8.9.
117
118 Advanced Compiler Design and Implementation,
119 Steven Muchnick, Morgan Kaufmann, 1997, Section 12.6 */
120
121 #include "config.h"
122 #include "system.h"
123 #include "coretypes.h"
124 #include "backend.h"
125 #include "target.h"
126 #include "tree.h"
127 #include "gimple.h"
128 #include "tree-pass.h"
129 #include "tm_p.h"
130 #include "ssa.h"
131 #include "gimple-pretty-print.h"
132 #include "diagnostic-core.h"
133 #include "alias.h"
134 #include "fold-const.h"
135 #include "stor-layout.h"
136 #include "flags.h"
137 #include "internal-fn.h"
138 #include "gimple-fold.h"
139 #include "tree-eh.h"
140 #include "gimplify.h"
141 #include "gimple-iterator.h"
142 #include "tree-cfg.h"
143 #include "tree-ssa-propagate.h"
144 #include "value-prof.h"
145 #include "langhooks.h"
146 #include "dbgcnt.h"
147 #include "params.h"
148 #include "builtins.h"
149 #include "tree-chkp.h"
150
151
152 /* Possible lattice values. */
153 typedef enum
154 {
155 UNINITIALIZED,
156 UNDEFINED,
157 CONSTANT,
158 VARYING
159 } ccp_lattice_t;
160
161 struct ccp_prop_value_t {
162 /* Lattice value. */
163 ccp_lattice_t lattice_val;
164
165 /* Propagated value. */
166 tree value;
167
168 /* Mask that applies to the propagated value during CCP. For X
169 with a CONSTANT lattice value X & ~mask == value & ~mask. The
170 zero bits in the mask cover constant values. The ones mean no
171 information. */
172 widest_int mask;
173 };
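/* Illustrative example of the value/mask encoding used by this struct:
   if an SSA name of type unsigned char is known to be either 0x10 or
   0x14, its lattice entry is CONSTANT with value == 0x10 and
   mask == 0x04.  Bit 2 is set in the mask and therefore carries no
   information; every other bit is known from VALUE, and
   X & ~mask == value & ~mask holds for both possible values.  */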
174
175 /* Array of propagated constant values. After propagation,
176 CONST_VAL[I].VALUE holds the constant value for SSA_NAME(I). If
177 the constant is held in an SSA name representing a memory store
178 (i.e., a VDEF), CONST_VAL[I].MEM_REF will contain the actual
179 memory reference used to store (i.e., the LHS of the assignment
180 doing the store). */
181 static ccp_prop_value_t *const_val;
182 static unsigned n_const_val;
183
184 static void canonicalize_value (ccp_prop_value_t *);
185 static bool ccp_fold_stmt (gimple_stmt_iterator *);
186 static void ccp_lattice_meet (ccp_prop_value_t *, ccp_prop_value_t *);
187
188 /* Dump constant propagation value VAL to file OUTF prefixed by PREFIX. */
189
190 static void
191 dump_lattice_value (FILE *outf, const char *prefix, ccp_prop_value_t val)
192 {
193 switch (val.lattice_val)
194 {
195 case UNINITIALIZED:
196 fprintf (outf, "%sUNINITIALIZED", prefix);
197 break;
198 case UNDEFINED:
199 fprintf (outf, "%sUNDEFINED", prefix);
200 break;
201 case VARYING:
202 fprintf (outf, "%sVARYING", prefix);
203 break;
204 case CONSTANT:
205 if (TREE_CODE (val.value) != INTEGER_CST
206 || val.mask == 0)
207 {
208 fprintf (outf, "%sCONSTANT ", prefix);
209 print_generic_expr (outf, val.value, dump_flags);
210 }
211 else
212 {
213 widest_int cval = wi::bit_and_not (wi::to_widest (val.value),
214 val.mask);
215 fprintf (outf, "%sCONSTANT ", prefix);
216 print_hex (cval, outf);
217 fprintf (outf, " (");
218 print_hex (val.mask, outf);
219 fprintf (outf, ")");
220 }
221 break;
222 default:
223 gcc_unreachable ();
224 }
225 }
226
227
228 /* Print lattice value VAL to stderr. */
229
230 void debug_lattice_value (ccp_prop_value_t val);
231
232 DEBUG_FUNCTION void
233 debug_lattice_value (ccp_prop_value_t val)
234 {
235 dump_lattice_value (stderr, "", val);
236 fprintf (stderr, "\n");
237 }
238
239 /* Extend NONZERO_BITS to a full mask, with the upper bits being set. */
240
241 static widest_int
242 extend_mask (const wide_int &nonzero_bits)
243 {
244 return (wi::mask <widest_int> (wi::get_precision (nonzero_bits), true)
245 | widest_int::from (nonzero_bits, UNSIGNED));
246 }
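/* For instance, with an 8-bit NONZERO_BITS of 0x0f the result is
   ...ffffff0f: bits 0-3 may be nonzero and so are left unknown,
   bits 4-7 are known zero, and everything above the precision is
   treated as unknown.  Paired with a zero VALUE (as in
   get_default_value below) this encodes "the low four bits are
   unknown, the rest of the type is zero".  */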
247
248 /* Compute a default value for variable VAR and store it in the
249 CONST_VAL array. The following rules are used to get default
250 values:
251
252 1- Global and static variables that are declared constant are
253 considered CONSTANT.
254
255 2- Any other value is considered UNDEFINED. This is useful when
256 considering PHI nodes. PHI arguments that are undefined do not
257 change the constant value of the PHI node, which allows for more
258 constants to be propagated.
259
260 3- Variables defined by statements other than assignments and PHI
261 nodes are considered VARYING.
262
263 4- Initial values of variables that are not GIMPLE registers are
264 considered VARYING. */
265
266 static ccp_prop_value_t
267 get_default_value (tree var)
268 {
269 ccp_prop_value_t val = { UNINITIALIZED, NULL_TREE, 0 };
270 gimple *stmt;
271
272 stmt = SSA_NAME_DEF_STMT (var);
273
274 if (gimple_nop_p (stmt))
275 {
276 /* Variables defined by an empty statement are those used
277 before being initialized. If VAR is a local variable, we
278 can assume initially that it is UNDEFINED, otherwise we must
279 consider it VARYING. */
280 if (!virtual_operand_p (var)
281 && TREE_CODE (SSA_NAME_VAR (var)) == VAR_DECL)
282 val.lattice_val = UNDEFINED;
283 else
284 {
285 val.lattice_val = VARYING;
286 val.mask = -1;
287 if (flag_tree_bit_ccp)
288 {
289 wide_int nonzero_bits = get_nonzero_bits (var);
290 if (nonzero_bits != -1)
291 {
292 val.lattice_val = CONSTANT;
293 val.value = build_zero_cst (TREE_TYPE (var));
294 val.mask = extend_mask (nonzero_bits);
295 }
296 }
297 }
298 }
299 else if (is_gimple_assign (stmt))
300 {
301 tree cst;
302 if (gimple_assign_single_p (stmt)
303 && DECL_P (gimple_assign_rhs1 (stmt))
304 && (cst = get_symbol_constant_value (gimple_assign_rhs1 (stmt))))
305 {
306 val.lattice_val = CONSTANT;
307 val.value = cst;
308 }
309 else
310 {
311 /* Any other variable defined by an assignment is considered
312 UNDEFINED. */
313 val.lattice_val = UNDEFINED;
314 }
315 }
316 else if ((is_gimple_call (stmt)
317 && gimple_call_lhs (stmt) != NULL_TREE)
318 || gimple_code (stmt) == GIMPLE_PHI)
319 {
320 /* A variable defined by a call or a PHI node is considered
321 UNDEFINED. */
322 val.lattice_val = UNDEFINED;
323 }
324 else
325 {
326 /* Otherwise, VAR will never take on a constant value. */
327 val.lattice_val = VARYING;
328 val.mask = -1;
329 }
330
331 return val;
332 }
333
334
335 /* Get the constant value associated with variable VAR. */
336
337 static inline ccp_prop_value_t *
338 get_value (tree var)
339 {
340 ccp_prop_value_t *val;
341
342 if (const_val == NULL
343 || SSA_NAME_VERSION (var) >= n_const_val)
344 return NULL;
345
346 val = &const_val[SSA_NAME_VERSION (var)];
347 if (val->lattice_val == UNINITIALIZED)
348 *val = get_default_value (var);
349
350 canonicalize_value (val);
351
352 return val;
353 }
354
355 /* Return the constant tree value associated with VAR. */
356
357 static inline tree
358 get_constant_value (tree var)
359 {
360 ccp_prop_value_t *val;
361 if (TREE_CODE (var) != SSA_NAME)
362 {
363 if (is_gimple_min_invariant (var))
364 return var;
365 return NULL_TREE;
366 }
367 val = get_value (var);
368 if (val
369 && val->lattice_val == CONSTANT
370 && (TREE_CODE (val->value) != INTEGER_CST
371 || val->mask == 0))
372 return val->value;
373 return NULL_TREE;
374 }
375
376 /* Sets the value associated with VAR to VARYING. */
377
378 static inline void
379 set_value_varying (tree var)
380 {
381 ccp_prop_value_t *val = &const_val[SSA_NAME_VERSION (var)];
382
383 val->lattice_val = VARYING;
384 val->value = NULL_TREE;
385 val->mask = -1;
386 }
387
388 /* For integer constants, make sure to drop TREE_OVERFLOW. */
389
390 static void
391 canonicalize_value (ccp_prop_value_t *val)
392 {
393 if (val->lattice_val != CONSTANT)
394 return;
395
396 if (TREE_OVERFLOW_P (val->value))
397 val->value = drop_tree_overflow (val->value);
398 }
399
400 /* Return whether the lattice transition is valid. */
401
402 static bool
403 valid_lattice_transition (ccp_prop_value_t old_val, ccp_prop_value_t new_val)
404 {
405 /* Lattice transitions must always be monotonically increasing in
406 value. */
407 if (old_val.lattice_val < new_val.lattice_val)
408 return true;
409
410 if (old_val.lattice_val != new_val.lattice_val)
411 return false;
412
413 if (!old_val.value && !new_val.value)
414 return true;
415
416 /* Now both lattice values are CONSTANT. */
417
418 /* Allow arbitrary copy changes as we might look through PHI <a_1, ...>
419 when only a single copy edge is executable. */
420 if (TREE_CODE (old_val.value) == SSA_NAME
421 && TREE_CODE (new_val.value) == SSA_NAME)
422 return true;
423
424 /* Allow transitioning from a constant to a copy. */
425 if (is_gimple_min_invariant (old_val.value)
426 && TREE_CODE (new_val.value) == SSA_NAME)
427 return true;
428
429 /* Allow transitioning from PHI <&x, not executable> == &x
430 to PHI <&x, &y> == common alignment. */
431 if (TREE_CODE (old_val.value) != INTEGER_CST
432 && TREE_CODE (new_val.value) == INTEGER_CST)
433 return true;
434
435 /* Bit-lattices have to agree in the still valid bits. */
436 if (TREE_CODE (old_val.value) == INTEGER_CST
437 && TREE_CODE (new_val.value) == INTEGER_CST)
438 return (wi::bit_and_not (wi::to_widest (old_val.value), new_val.mask)
439 == wi::bit_and_not (wi::to_widest (new_val.value), new_val.mask));
440
441 /* Otherwise constant values have to agree. */
442 if (operand_equal_p (old_val.value, new_val.value, 0))
443 return true;
444
445 /* At least the kinds and types should agree now. */
446 if (TREE_CODE (old_val.value) != TREE_CODE (new_val.value)
447 || !types_compatible_p (TREE_TYPE (old_val.value),
448 TREE_TYPE (new_val.value)))
449 return false;
450
451 /* For floats and !HONOR_NANS allow transitions from (partial) NaN
452 to non-NaN. */
453 tree type = TREE_TYPE (new_val.value);
454 if (SCALAR_FLOAT_TYPE_P (type)
455 && !HONOR_NANS (type))
456 {
457 if (REAL_VALUE_ISNAN (TREE_REAL_CST (old_val.value)))
458 return true;
459 }
460 else if (VECTOR_FLOAT_TYPE_P (type)
461 && !HONOR_NANS (type))
462 {
463 for (unsigned i = 0; i < VECTOR_CST_NELTS (old_val.value); ++i)
464 if (!REAL_VALUE_ISNAN
465 (TREE_REAL_CST (VECTOR_CST_ELT (old_val.value, i)))
466 && !operand_equal_p (VECTOR_CST_ELT (old_val.value, i),
467 VECTOR_CST_ELT (new_val.value, i), 0))
468 return false;
469 return true;
470 }
471 else if (COMPLEX_FLOAT_TYPE_P (type)
472 && !HONOR_NANS (type))
473 {
474 if (!REAL_VALUE_ISNAN (TREE_REAL_CST (TREE_REALPART (old_val.value)))
475 && !operand_equal_p (TREE_REALPART (old_val.value),
476 TREE_REALPART (new_val.value), 0))
477 return false;
478 if (!REAL_VALUE_ISNAN (TREE_REAL_CST (TREE_IMAGPART (old_val.value)))
479 && !operand_equal_p (TREE_IMAGPART (old_val.value),
480 TREE_IMAGPART (new_val.value), 0))
481 return false;
482 return true;
483 }
484 return false;
485 }
486
487 /* Set the value for variable VAR to NEW_VAL. Return true if the new
488 value is different from VAR's previous value. */
489
490 static bool
491 set_lattice_value (tree var, ccp_prop_value_t *new_val)
492 {
493 /* We can deal with old UNINITIALIZED values just fine here. */
494 ccp_prop_value_t *old_val = &const_val[SSA_NAME_VERSION (var)];
495
496 canonicalize_value (new_val);
497
498 /* We have to be careful to not go up the bitwise lattice
499 represented by the mask. Instead of dropping to VARYING
500 use the meet operator to retain a conservative value.
501      Missed optimizations like PR65851 make this necessary.
502 It also ensures we converge to a stable lattice solution. */
503 if (new_val->lattice_val == CONSTANT
504 && old_val->lattice_val == CONSTANT
505 && TREE_CODE (new_val->value) != SSA_NAME)
506 ccp_lattice_meet (new_val, old_val);
507
508 gcc_checking_assert (valid_lattice_transition (*old_val, *new_val));
509
510 /* If *OLD_VAL and NEW_VAL are the same, return false to inform the
511 caller that this was a non-transition. */
512 if (old_val->lattice_val != new_val->lattice_val
513 || (new_val->lattice_val == CONSTANT
514 && (TREE_CODE (new_val->value) != TREE_CODE (old_val->value)
515 || (TREE_CODE (new_val->value) == INTEGER_CST
516 && (new_val->mask != old_val->mask
517 || (wi::bit_and_not (wi::to_widest (old_val->value),
518 new_val->mask)
519 != wi::bit_and_not (wi::to_widest (new_val->value),
520 new_val->mask))))
521 || (TREE_CODE (new_val->value) != INTEGER_CST
522 && !operand_equal_p (new_val->value, old_val->value, 0)))))
523 {
524 /* ??? We would like to delay creation of INTEGER_CSTs from
525 partially constants here. */
526
527 if (dump_file && (dump_flags & TDF_DETAILS))
528 {
529 dump_lattice_value (dump_file, "Lattice value changed to ", *new_val);
530 fprintf (dump_file, ". Adding SSA edges to worklist.\n");
531 }
532
533 *old_val = *new_val;
534
535 gcc_assert (new_val->lattice_val != UNINITIALIZED);
536 return true;
537 }
538
539 return false;
540 }
541
542 static ccp_prop_value_t get_value_for_expr (tree, bool);
543 static ccp_prop_value_t bit_value_binop (enum tree_code, tree, tree, tree);
544 static void bit_value_binop_1 (enum tree_code, tree, widest_int *, widest_int *,
545 tree, const widest_int &, const widest_int &,
546 tree, const widest_int &, const widest_int &);
547
548 /* Return a widest_int that can be used for bitwise simplifications
549 from VAL. */
550
551 static widest_int
552 value_to_wide_int (ccp_prop_value_t val)
553 {
554 if (val.value
555 && TREE_CODE (val.value) == INTEGER_CST)
556 return wi::to_widest (val.value);
557
558 return 0;
559 }
560
561 /* Return the value for the address expression EXPR based on alignment
562 information. */
563
564 static ccp_prop_value_t
565 get_value_from_alignment (tree expr)
566 {
567 tree type = TREE_TYPE (expr);
568 ccp_prop_value_t val;
569 unsigned HOST_WIDE_INT bitpos;
570 unsigned int align;
571
572 gcc_assert (TREE_CODE (expr) == ADDR_EXPR);
573
574 get_pointer_alignment_1 (expr, &align, &bitpos);
575 val.mask = (POINTER_TYPE_P (type) || TYPE_UNSIGNED (type)
576 ? wi::mask <widest_int> (TYPE_PRECISION (type), false)
577 : -1).and_not (align / BITS_PER_UNIT - 1);
578 val.lattice_val
579 = wi::sext (val.mask, TYPE_PRECISION (type)) == -1 ? VARYING : CONSTANT;
580 if (val.lattice_val == CONSTANT)
581 val.value = build_int_cstu (type, bitpos / BITS_PER_UNIT);
582 else
583 val.value = NULL_TREE;
584
585 return val;
586 }
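/* Illustrative example: for an ADDR_EXPR known to be 16-byte aligned
   with a misalignment of 4 bytes, get_pointer_alignment_1 reports
   align == 128 and bitpos == 32, so the result is CONSTANT with
   value == 4 and a mask whose low four bits are clear: the low bits
   of the address are known, everything above them is not.  */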
587
588 /* Return the value for the tree operand EXPR. If FOR_BITS_P is true
589 return constant bits extracted from alignment information for
590 invariant addresses. */
591
592 static ccp_prop_value_t
593 get_value_for_expr (tree expr, bool for_bits_p)
594 {
595 ccp_prop_value_t val;
596
597 if (TREE_CODE (expr) == SSA_NAME)
598 {
599 val = *get_value (expr);
600 if (for_bits_p
601 && val.lattice_val == CONSTANT
602 && TREE_CODE (val.value) == ADDR_EXPR)
603 val = get_value_from_alignment (val.value);
604 /* Fall back to a copy value. */
605 if (!for_bits_p
606 && val.lattice_val == VARYING
607 && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (expr))
608 {
609 val.lattice_val = CONSTANT;
610 val.value = expr;
611 val.mask = -1;
612 }
613 }
614 else if (is_gimple_min_invariant (expr)
615 && (!for_bits_p || TREE_CODE (expr) != ADDR_EXPR))
616 {
617 val.lattice_val = CONSTANT;
618 val.value = expr;
619 val.mask = 0;
620 canonicalize_value (&val);
621 }
622 else if (TREE_CODE (expr) == ADDR_EXPR)
623 val = get_value_from_alignment (expr);
624 else
625 {
626 val.lattice_val = VARYING;
627 val.mask = -1;
628 val.value = NULL_TREE;
629 }
630
631 if (val.lattice_val == VARYING
632 && TYPE_UNSIGNED (TREE_TYPE (expr)))
633 val.mask = wi::zext (val.mask, TYPE_PRECISION (TREE_TYPE (expr)));
634
635 return val;
636 }
637
638 /* Return the likely CCP lattice value for STMT.
639
640 If STMT has no operands, then return CONSTANT.
641
642    Else if undefinedness of operands of STMT causes its value to be
643 undefined, then return UNDEFINED.
644
645 Else if any operands of STMT are constants, then return CONSTANT.
646
647 Else return VARYING. */
648
649 static ccp_lattice_t
650 likely_value (gimple *stmt)
651 {
652 bool has_constant_operand, has_undefined_operand, all_undefined_operands;
653 bool has_nsa_operand;
654 tree use;
655 ssa_op_iter iter;
656 unsigned i;
657
658 enum gimple_code code = gimple_code (stmt);
659
660 /* This function appears to be called only for assignments, calls,
661 conditionals, and switches, due to the logic in visit_stmt. */
662 gcc_assert (code == GIMPLE_ASSIGN
663 || code == GIMPLE_CALL
664 || code == GIMPLE_COND
665 || code == GIMPLE_SWITCH);
666
667 /* If the statement has volatile operands, it won't fold to a
668 constant value. */
669 if (gimple_has_volatile_ops (stmt))
670 return VARYING;
671
672 /* Arrive here for more complex cases. */
673 has_constant_operand = false;
674 has_undefined_operand = false;
675 all_undefined_operands = true;
676 has_nsa_operand = false;
677 FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
678 {
679 ccp_prop_value_t *val = get_value (use);
680
681 if (val->lattice_val == UNDEFINED)
682 has_undefined_operand = true;
683 else
684 all_undefined_operands = false;
685
686 if (val->lattice_val == CONSTANT)
687 has_constant_operand = true;
688
689 if (SSA_NAME_IS_DEFAULT_DEF (use)
690 || !prop_simulate_again_p (SSA_NAME_DEF_STMT (use)))
691 has_nsa_operand = true;
692 }
693
694 /* There may be constants in regular rhs operands. For calls we
695 have to ignore lhs, fndecl and static chain, otherwise only
696 the lhs. */
697 for (i = (is_gimple_call (stmt) ? 2 : 0) + gimple_has_lhs (stmt);
698 i < gimple_num_ops (stmt); ++i)
699 {
700 tree op = gimple_op (stmt, i);
701 if (!op || TREE_CODE (op) == SSA_NAME)
702 continue;
703 if (is_gimple_min_invariant (op))
704 has_constant_operand = true;
705 }
706
707 if (has_constant_operand)
708 all_undefined_operands = false;
709
710 if (has_undefined_operand
711 && code == GIMPLE_CALL
712 && gimple_call_internal_p (stmt))
713 switch (gimple_call_internal_fn (stmt))
714 {
715 /* These 3 builtins use the first argument just as a magic
716 	 way to find out a decl uid.  */
717 case IFN_GOMP_SIMD_LANE:
718 case IFN_GOMP_SIMD_VF:
719 case IFN_GOMP_SIMD_LAST_LANE:
720 has_undefined_operand = false;
721 break;
722 default:
723 break;
724 }
725
726 /* If the operation combines operands like COMPLEX_EXPR make sure to
727 not mark the result UNDEFINED if only one part of the result is
728 undefined. */
729 if (has_undefined_operand && all_undefined_operands)
730 return UNDEFINED;
731 else if (code == GIMPLE_ASSIGN && has_undefined_operand)
732 {
733 switch (gimple_assign_rhs_code (stmt))
734 {
735 /* Unary operators are handled with all_undefined_operands. */
736 case PLUS_EXPR:
737 case MINUS_EXPR:
738 case POINTER_PLUS_EXPR:
739 /* Not MIN_EXPR, MAX_EXPR. One VARYING operand may be selected.
740 Not bitwise operators, one VARYING operand may specify the
741 result completely. Not logical operators for the same reason.
742 Not COMPLEX_EXPR as one VARYING operand makes the result partly
743 not UNDEFINED. Not *DIV_EXPR, comparisons and shifts because
744 the undefined operand may be promoted. */
745 return UNDEFINED;
746
747 case ADDR_EXPR:
748 /* If any part of an address is UNDEFINED, like the index
749 of an ARRAY_EXPR, then treat the result as UNDEFINED. */
750 return UNDEFINED;
751
752 default:
753 ;
754 }
755 }
756 /* If there was an UNDEFINED operand but the result may be not UNDEFINED
757 fall back to CONSTANT. During iteration UNDEFINED may still drop
758 to CONSTANT. */
759 if (has_undefined_operand)
760 return CONSTANT;
761
762 /* We do not consider virtual operands here -- load from read-only
763 memory may have only VARYING virtual operands, but still be
764 constant. Also we can combine the stmt with definitions from
765 operands whose definitions are not simulated again. */
766 if (has_constant_operand
767 || has_nsa_operand
768 || gimple_references_memory_p (stmt))
769 return CONSTANT;
770
771 return VARYING;
772 }
773
774 /* Returns true if STMT cannot be constant. */
775
776 static bool
777 surely_varying_stmt_p (gimple *stmt)
778 {
779 /* If the statement has operands that we cannot handle, it cannot be
780 constant. */
781 if (gimple_has_volatile_ops (stmt))
782 return true;
783
784 /* If it is a call and does not return a value or is not a
785      builtin and not an indirect call or a call to a function with
786      an assume_aligned/alloc_align attribute, it is varying.  */
787 if (is_gimple_call (stmt))
788 {
789 tree fndecl, fntype = gimple_call_fntype (stmt);
790 if (!gimple_call_lhs (stmt)
791 || ((fndecl = gimple_call_fndecl (stmt)) != NULL_TREE
792 && !DECL_BUILT_IN (fndecl)
793 && !lookup_attribute ("assume_aligned",
794 TYPE_ATTRIBUTES (fntype))
795 && !lookup_attribute ("alloc_align",
796 TYPE_ATTRIBUTES (fntype))))
797 return true;
798 }
799
800 /* Any other store operation is not interesting. */
801 else if (gimple_vdef (stmt))
802 return true;
803
804 /* Anything other than assignments and conditional jumps are not
805 interesting for CCP. */
806 if (gimple_code (stmt) != GIMPLE_ASSIGN
807 && gimple_code (stmt) != GIMPLE_COND
808 && gimple_code (stmt) != GIMPLE_SWITCH
809 && gimple_code (stmt) != GIMPLE_CALL)
810 return true;
811
812 return false;
813 }
814
815 /* Initialize local data structures for CCP. */
816
817 static void
818 ccp_initialize (void)
819 {
820 basic_block bb;
821
822 n_const_val = num_ssa_names;
823 const_val = XCNEWVEC (ccp_prop_value_t, n_const_val);
824
825 /* Initialize simulation flags for PHI nodes and statements. */
826 FOR_EACH_BB_FN (bb, cfun)
827 {
828 gimple_stmt_iterator i;
829
830 for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i))
831 {
832 gimple *stmt = gsi_stmt (i);
833 bool is_varying;
834
835 	  /* If the statement is a control insn, we must simulate it
836 	     at least once; otherwise its outgoing edges would never
837 	     get added.  */
838 if (stmt_ends_bb_p (stmt))
839 is_varying = false;
840 else
841 is_varying = surely_varying_stmt_p (stmt);
842
843 if (is_varying)
844 {
845 tree def;
846 ssa_op_iter iter;
847
848 /* If the statement will not produce a constant, mark
849 all its outputs VARYING. */
850 FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
851 set_value_varying (def);
852 }
853 prop_set_simulate_again (stmt, !is_varying);
854 }
855 }
856
857 /* Now process PHI nodes. We never clear the simulate_again flag on
858 phi nodes, since we do not know which edges are executable yet,
859 except for phi nodes for virtual operands when we do not do store ccp. */
860 FOR_EACH_BB_FN (bb, cfun)
861 {
862 gphi_iterator i;
863
864 for (i = gsi_start_phis (bb); !gsi_end_p (i); gsi_next (&i))
865 {
866 gphi *phi = i.phi ();
867
868 if (virtual_operand_p (gimple_phi_result (phi)))
869 prop_set_simulate_again (phi, false);
870 else
871 prop_set_simulate_again (phi, true);
872 }
873 }
874 }
875
876 /* Debug count support. Reset the values of ssa names to
877    VARYING when the total number of ssa names analyzed is
878    beyond the specified debug count.  */
879
880 static void
881 do_dbg_cnt (void)
882 {
883 unsigned i;
884 for (i = 0; i < num_ssa_names; i++)
885 {
886 if (!dbg_cnt (ccp))
887 {
888 const_val[i].lattice_val = VARYING;
889 const_val[i].mask = -1;
890 const_val[i].value = NULL_TREE;
891 }
892 }
893 }
894
895
896 /* Do final substitution of propagated values, cleanup the flowgraph and
897 free allocated storage.
898
899 Return TRUE when something was optimized. */
900
901 static bool
902 ccp_finalize (void)
903 {
904 bool something_changed;
905 unsigned i;
906
907 do_dbg_cnt ();
908
909 /* Derive alignment and misalignment information from partially
910 constant pointers in the lattice or nonzero bits from partially
911 constant integers. */
912 for (i = 1; i < num_ssa_names; ++i)
913 {
914 tree name = ssa_name (i);
915 ccp_prop_value_t *val;
916 unsigned int tem, align;
917
918 if (!name
919 || (!POINTER_TYPE_P (TREE_TYPE (name))
920 && (!INTEGRAL_TYPE_P (TREE_TYPE (name))
921 /* Don't record nonzero bits before IPA to avoid
922 using too much memory. */
923 || first_pass_instance)))
924 continue;
925
926 val = get_value (name);
927 if (val->lattice_val != CONSTANT
928 || TREE_CODE (val->value) != INTEGER_CST)
929 continue;
930
931 if (POINTER_TYPE_P (TREE_TYPE (name)))
932 {
933 /* Trailing mask bits specify the alignment, trailing value
934 bits the misalignment. */
935 tem = val->mask.to_uhwi ();
936 align = (tem & -tem);
937 if (align > 1)
938 set_ptr_info_alignment (get_ptr_info (name), align,
939 (TREE_INT_CST_LOW (val->value)
940 & (align - 1)));
941 }
942 else
943 {
944 unsigned int precision = TYPE_PRECISION (TREE_TYPE (val->value));
945 wide_int nonzero_bits = wide_int::from (val->mask, precision,
946 UNSIGNED) | val->value;
947 nonzero_bits &= get_nonzero_bits (name);
948 set_nonzero_bits (name, nonzero_bits);
949 }
950 }
951
952 /* Perform substitutions based on the known constant values. */
953 something_changed = substitute_and_fold (get_constant_value,
954 ccp_fold_stmt, true);
955
956 free (const_val);
957 const_val = NULL;
958   return something_changed;
959 }
960
961
962 /* Compute the meet operator between *VAL1 and *VAL2. Store the result
963 in VAL1.
964
965 any M UNDEFINED = any
966 any M VARYING = VARYING
967 Ci M Cj = Ci if (i == j)
968 Ci M Cj = VARYING if (i != j)
969 */
970
971 static void
972 ccp_lattice_meet (ccp_prop_value_t *val1, ccp_prop_value_t *val2)
973 {
974 if (val1->lattice_val == UNDEFINED
975       /* For UNDEFINED M SSA we can't always use the SSA name because
976 	 its definition may not dominate the PHI node.  Doing optimistic copy propagation
977 also causes a lot of gcc.dg/uninit-pred*.c FAILs. */
978 && (val2->lattice_val != CONSTANT
979 || TREE_CODE (val2->value) != SSA_NAME))
980 {
981 /* UNDEFINED M any = any */
982 *val1 = *val2;
983 }
984 else if (val2->lattice_val == UNDEFINED
985 /* See above. */
986 && (val1->lattice_val != CONSTANT
987 || TREE_CODE (val1->value) != SSA_NAME))
988 {
989 /* any M UNDEFINED = any
990 Nothing to do. VAL1 already contains the value we want. */
991 ;
992 }
993 else if (val1->lattice_val == VARYING
994 || val2->lattice_val == VARYING)
995 {
996 /* any M VARYING = VARYING. */
997 val1->lattice_val = VARYING;
998 val1->mask = -1;
999 val1->value = NULL_TREE;
1000 }
1001 else if (val1->lattice_val == CONSTANT
1002 && val2->lattice_val == CONSTANT
1003 && TREE_CODE (val1->value) == INTEGER_CST
1004 && TREE_CODE (val2->value) == INTEGER_CST)
1005 {
1006 /* Ci M Cj = Ci if (i == j)
1007 Ci M Cj = VARYING if (i != j)
1008
1009 For INTEGER_CSTs mask unequal bits. If no equal bits remain,
1010 drop to varying. */
1011 val1->mask = (val1->mask | val2->mask
1012 | (wi::to_widest (val1->value)
1013 ^ wi::to_widest (val2->value)));
1014 if (wi::sext (val1->mask, TYPE_PRECISION (TREE_TYPE (val1->value))) == -1)
1015 {
1016 val1->lattice_val = VARYING;
1017 val1->value = NULL_TREE;
1018 }
1019 }
1020 else if (val1->lattice_val == CONSTANT
1021 && val2->lattice_val == CONSTANT
1022 && operand_equal_p (val1->value, val2->value, 0))
1023 {
1024 /* Ci M Cj = Ci if (i == j)
1025 Ci M Cj = VARYING if (i != j)
1026
1027 VAL1 already contains the value we want for equivalent values. */
1028 }
1029 else if (val1->lattice_val == CONSTANT
1030 && val2->lattice_val == CONSTANT
1031 && (TREE_CODE (val1->value) == ADDR_EXPR
1032 || TREE_CODE (val2->value) == ADDR_EXPR))
1033 {
1034       /* When unequal addresses are involved, try meeting for
1035 alignment. */
1036 ccp_prop_value_t tem = *val2;
1037 if (TREE_CODE (val1->value) == ADDR_EXPR)
1038 *val1 = get_value_for_expr (val1->value, true);
1039 if (TREE_CODE (val2->value) == ADDR_EXPR)
1040 tem = get_value_for_expr (val2->value, true);
1041 ccp_lattice_meet (val1, &tem);
1042 }
1043 else
1044 {
1045 /* Any other combination is VARYING. */
1046 val1->lattice_val = VARYING;
1047 val1->mask = -1;
1048 val1->value = NULL_TREE;
1049 }
1050 }
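/* A worked example of the INTEGER_CST meet above: meeting the fully
   known constants 0x12 and 0x16 (both masks zero) yields
   mask = 0 | 0 | (0x12 ^ 0x16) = 0x04, so the result stays CONSTANT
   with value 0x12 and only bit 2 unknown.  Only when every bit within
   the type's precision ends up unknown does the value drop to
   VARYING.  */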
1051
1052
1053 /* Loop through the PHI_NODE's parameters for BLOCK and compare their
1054 lattice values to determine PHI_NODE's lattice value. The value of a
1055    PHI node is determined by calling ccp_lattice_meet with all the arguments
1056 of the PHI node that are incoming via executable edges. */
1057
1058 static enum ssa_prop_result
1059 ccp_visit_phi_node (gphi *phi)
1060 {
1061 unsigned i;
1062 ccp_prop_value_t new_val;
1063
1064 if (dump_file && (dump_flags & TDF_DETAILS))
1065 {
1066 fprintf (dump_file, "\nVisiting PHI node: ");
1067 print_gimple_stmt (dump_file, phi, 0, dump_flags);
1068 }
1069
1070 new_val.lattice_val = UNDEFINED;
1071 new_val.value = NULL_TREE;
1072 new_val.mask = 0;
1073
1074 bool first = true;
1075 bool non_exec_edge = false;
1076 for (i = 0; i < gimple_phi_num_args (phi); i++)
1077 {
1078 /* Compute the meet operator over all the PHI arguments flowing
1079 through executable edges. */
1080 edge e = gimple_phi_arg_edge (phi, i);
1081
1082 if (dump_file && (dump_flags & TDF_DETAILS))
1083 {
1084 fprintf (dump_file,
1085 "\n Argument #%d (%d -> %d %sexecutable)\n",
1086 i, e->src->index, e->dest->index,
1087 (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
1088 }
1089
1090       /* If the incoming edge is executable, compute the meet operator for
1091 the existing value of the PHI node and the current PHI argument. */
1092 if (e->flags & EDGE_EXECUTABLE)
1093 {
1094 tree arg = gimple_phi_arg (phi, i)->def;
1095 ccp_prop_value_t arg_val = get_value_for_expr (arg, false);
1096
1097 if (first)
1098 {
1099 new_val = arg_val;
1100 first = false;
1101 }
1102 else
1103 ccp_lattice_meet (&new_val, &arg_val);
1104
1105 if (dump_file && (dump_flags & TDF_DETAILS))
1106 {
1107 fprintf (dump_file, "\t");
1108 print_generic_expr (dump_file, arg, dump_flags);
1109 dump_lattice_value (dump_file, "\tValue: ", arg_val);
1110 fprintf (dump_file, "\n");
1111 }
1112
1113 if (new_val.lattice_val == VARYING)
1114 break;
1115 }
1116 else
1117 non_exec_edge = true;
1118 }
1119
1120   /* In case there were non-executable edges and the value is a copy,
1121      make sure its definition dominates the PHI node.  */
1122 if (non_exec_edge
1123 && new_val.lattice_val == CONSTANT
1124 && TREE_CODE (new_val.value) == SSA_NAME
1125 && ! SSA_NAME_IS_DEFAULT_DEF (new_val.value)
1126 && ! dominated_by_p (CDI_DOMINATORS, gimple_bb (phi),
1127 gimple_bb (SSA_NAME_DEF_STMT (new_val.value))))
1128 {
1129 new_val.lattice_val = VARYING;
1130 new_val.value = NULL_TREE;
1131 new_val.mask = -1;
1132 }
1133
1134 if (dump_file && (dump_flags & TDF_DETAILS))
1135 {
1136 dump_lattice_value (dump_file, "\n PHI node value: ", new_val);
1137 fprintf (dump_file, "\n\n");
1138 }
1139
1140 /* Make the transition to the new value. */
1141 if (set_lattice_value (gimple_phi_result (phi), &new_val))
1142 {
1143 if (new_val.lattice_val == VARYING)
1144 return SSA_PROP_VARYING;
1145 else
1146 return SSA_PROP_INTERESTING;
1147 }
1148 else
1149 return SSA_PROP_NOT_INTERESTING;
1150 }
1151
1152 /* Return the constant value for OP or OP otherwise. */
1153
1154 static tree
1155 valueize_op (tree op)
1156 {
1157 if (TREE_CODE (op) == SSA_NAME)
1158 {
1159 tree tem = get_constant_value (op);
1160 if (tem)
1161 return tem;
1162 }
1163 return op;
1164 }
1165
1166 /* Return the constant value for OP, but signal to not follow SSA
1167 edges if the definition may be simulated again. */
1168
1169 static tree
1170 valueize_op_1 (tree op)
1171 {
1172 if (TREE_CODE (op) == SSA_NAME)
1173 {
1174 /* If the definition may be simulated again we cannot follow
1175 this SSA edge as the SSA propagator does not necessarily
1176 re-visit the use. */
1177 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
1178 if (!gimple_nop_p (def_stmt)
1179 && prop_simulate_again_p (def_stmt))
1180 return NULL_TREE;
1181 tree tem = get_constant_value (op);
1182 if (tem)
1183 return tem;
1184 }
1185 return op;
1186 }
1187
1188 /* CCP specific front-end to the non-destructive constant folding
1189 routines.
1190
1191 Attempt to simplify the RHS of STMT knowing that one or more
1192 operands are constants.
1193
1194 If simplification is possible, return the simplified RHS,
1195 otherwise return the original RHS or NULL_TREE. */
1196
1197 static tree
1198 ccp_fold (gimple *stmt)
1199 {
1200 location_t loc = gimple_location (stmt);
1201 switch (gimple_code (stmt))
1202 {
1203 case GIMPLE_COND:
1204 {
1205 /* Handle comparison operators that can appear in GIMPLE form. */
1206 tree op0 = valueize_op (gimple_cond_lhs (stmt));
1207 tree op1 = valueize_op (gimple_cond_rhs (stmt));
1208 enum tree_code code = gimple_cond_code (stmt);
1209 return fold_binary_loc (loc, code, boolean_type_node, op0, op1);
1210 }
1211
1212 case GIMPLE_SWITCH:
1213 {
1214 /* Return the constant switch index. */
1215 return valueize_op (gimple_switch_index (as_a <gswitch *> (stmt)));
1216 }
1217
1218 case GIMPLE_ASSIGN:
1219 case GIMPLE_CALL:
1220 return gimple_fold_stmt_to_constant_1 (stmt,
1221 valueize_op, valueize_op_1);
1222
1223 default:
1224 gcc_unreachable ();
1225 }
1226 }
1227
1228 /* Apply the operation CODE in type TYPE to the value, mask pair
1229 RVAL and RMASK representing a value of type RTYPE and set
1230 the value, mask pair *VAL and *MASK to the result. */
1231
1232 static void
1233 bit_value_unop_1 (enum tree_code code, tree type,
1234 widest_int *val, widest_int *mask,
1235 tree rtype, const widest_int &rval, const widest_int &rmask)
1236 {
1237 switch (code)
1238 {
1239 case BIT_NOT_EXPR:
1240 *mask = rmask;
1241 *val = ~rval;
1242 break;
1243
1244 case NEGATE_EXPR:
1245 {
1246 widest_int temv, temm;
1247 /* Return ~rval + 1. */
1248 bit_value_unop_1 (BIT_NOT_EXPR, type, &temv, &temm, type, rval, rmask);
1249 bit_value_binop_1 (PLUS_EXPR, type, val, mask,
1250 type, temv, temm, type, 1, 0);
1251 break;
1252 }
1253
1254 CASE_CONVERT:
1255 {
1256 signop sgn;
1257
1258 /* First extend mask and value according to the original type. */
1259 sgn = TYPE_SIGN (rtype);
1260 *mask = wi::ext (rmask, TYPE_PRECISION (rtype), sgn);
1261 *val = wi::ext (rval, TYPE_PRECISION (rtype), sgn);
1262
1263 /* Then extend mask and value according to the target type. */
1264 sgn = TYPE_SIGN (type);
1265 *mask = wi::ext (*mask, TYPE_PRECISION (type), sgn);
1266 *val = wi::ext (*val, TYPE_PRECISION (type), sgn);
1267 break;
1268 }
1269
1270 default:
1271 *mask = -1;
1272 break;
1273 }
1274 }
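/* For instance, the conversion case above first extends VAL and MASK
   according to the signedness of the source type and then re-extends
   them for the destination type: converting an unsigned char with
   value == 0xf0 and mask == 0x0f to int keeps the high nibble known,
   leaves the low nibble unknown, and marks all higher bits as known
   zero.  */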
1275
1276 /* Apply the operation CODE in type TYPE to the value, mask pairs
1277    R1VAL, R1MASK and R2VAL, R2MASK representing values of type R1TYPE
1278 and R2TYPE and set the value, mask pair *VAL and *MASK to the result. */
1279
1280 static void
1281 bit_value_binop_1 (enum tree_code code, tree type,
1282 widest_int *val, widest_int *mask,
1283 tree r1type, const widest_int &r1val,
1284 const widest_int &r1mask, tree r2type,
1285 const widest_int &r2val, const widest_int &r2mask)
1286 {
1287 signop sgn = TYPE_SIGN (type);
1288 int width = TYPE_PRECISION (type);
1289 bool swap_p = false;
1290
1291   /* Assume we'll get a constant result.  Use an initial non-varying
1292      value; we fall back to varying in the end if necessary.  */
1293 *mask = -1;
1294
1295 switch (code)
1296 {
1297 case BIT_AND_EXPR:
1298 /* The mask is constant where there is a known not
1299 set bit, (m1 | m2) & ((v1 | m1) & (v2 | m2)) */
1300 *mask = (r1mask | r2mask) & (r1val | r1mask) & (r2val | r2mask);
1301 *val = r1val & r2val;
1302 break;
1303
1304 case BIT_IOR_EXPR:
1305 /* The mask is constant where there is a known
1306 set bit, (m1 | m2) & ~((v1 & ~m1) | (v2 & ~m2)). */
1307 *mask = (r1mask | r2mask)
1308 .and_not (r1val.and_not (r1mask) | r2val.and_not (r2mask));
1309 *val = r1val | r2val;
1310 break;
1311
1312 case BIT_XOR_EXPR:
1313 /* m1 | m2 */
1314 *mask = r1mask | r2mask;
1315 *val = r1val ^ r2val;
1316 break;
1317
1318 case LROTATE_EXPR:
1319 case RROTATE_EXPR:
1320 if (r2mask == 0)
1321 {
1322 widest_int shift = r2val;
1323 if (shift == 0)
1324 {
1325 *mask = r1mask;
1326 *val = r1val;
1327 }
1328 else
1329 {
1330 if (wi::neg_p (shift))
1331 {
1332 shift = -shift;
1333 if (code == RROTATE_EXPR)
1334 code = LROTATE_EXPR;
1335 else
1336 code = RROTATE_EXPR;
1337 }
1338 if (code == RROTATE_EXPR)
1339 {
1340 *mask = wi::rrotate (r1mask, shift, width);
1341 *val = wi::rrotate (r1val, shift, width);
1342 }
1343 else
1344 {
1345 *mask = wi::lrotate (r1mask, shift, width);
1346 *val = wi::lrotate (r1val, shift, width);
1347 }
1348 }
1349 }
1350 break;
1351
1352 case LSHIFT_EXPR:
1353 case RSHIFT_EXPR:
1354 /* ??? We can handle partially known shift counts if we know
1355 	 their sign.  That way we can tell that (x << (y | 8)) & 255
1356 is zero. */
1357 if (r2mask == 0)
1358 {
1359 widest_int shift = r2val;
1360 if (shift == 0)
1361 {
1362 *mask = r1mask;
1363 *val = r1val;
1364 }
1365 else
1366 {
1367 if (wi::neg_p (shift))
1368 {
1369 shift = -shift;
1370 if (code == RSHIFT_EXPR)
1371 code = LSHIFT_EXPR;
1372 else
1373 code = RSHIFT_EXPR;
1374 }
1375 if (code == RSHIFT_EXPR)
1376 {
1377 *mask = wi::rshift (wi::ext (r1mask, width, sgn), shift, sgn);
1378 *val = wi::rshift (wi::ext (r1val, width, sgn), shift, sgn);
1379 }
1380 else
1381 {
1382 *mask = wi::ext (wi::lshift (r1mask, shift), width, sgn);
1383 *val = wi::ext (wi::lshift (r1val, shift), width, sgn);
1384 }
1385 }
1386 }
1387 break;
1388
1389 case PLUS_EXPR:
1390 case POINTER_PLUS_EXPR:
1391 {
1392 /* Do the addition with unknown bits set to zero, to give carry-ins of
1393 zero wherever possible. */
1394 widest_int lo = r1val.and_not (r1mask) + r2val.and_not (r2mask);
1395 lo = wi::ext (lo, width, sgn);
1396 /* Do the addition with unknown bits set to one, to give carry-ins of
1397 one wherever possible. */
1398 widest_int hi = (r1val | r1mask) + (r2val | r2mask);
1399 hi = wi::ext (hi, width, sgn);
1400 /* Each bit in the result is known if (a) the corresponding bits in
1401 both inputs are known, and (b) the carry-in to that bit position
1402 is known. We can check condition (b) by seeing if we got the same
1403 result with minimised carries as with maximised carries. */
1404 *mask = r1mask | r2mask | (lo ^ hi);
1405 *mask = wi::ext (*mask, width, sgn);
1406 /* It shouldn't matter whether we choose lo or hi here. */
1407 *val = lo;
1408 break;
1409 }
1410
1411 case MINUS_EXPR:
1412 {
1413 widest_int temv, temm;
1414 bit_value_unop_1 (NEGATE_EXPR, r2type, &temv, &temm,
1415 r2type, r2val, r2mask);
1416 bit_value_binop_1 (PLUS_EXPR, type, val, mask,
1417 r1type, r1val, r1mask,
1418 r2type, temv, temm);
1419 break;
1420 }
1421
1422 case MULT_EXPR:
1423 {
1424 /* Just track trailing zeros in both operands and transfer
1425 them to the other. */
1426 int r1tz = wi::ctz (r1val | r1mask);
1427 int r2tz = wi::ctz (r2val | r2mask);
1428 if (r1tz + r2tz >= width)
1429 {
1430 *mask = 0;
1431 *val = 0;
1432 }
1433 else if (r1tz + r2tz > 0)
1434 {
1435 *mask = wi::ext (wi::mask <widest_int> (r1tz + r2tz, true),
1436 width, sgn);
1437 *val = 0;
1438 }
1439 break;
1440 }
1441
1442 case EQ_EXPR:
1443 case NE_EXPR:
1444 {
1445 widest_int m = r1mask | r2mask;
1446 if (r1val.and_not (m) != r2val.and_not (m))
1447 {
1448 *mask = 0;
1449 *val = ((code == EQ_EXPR) ? 0 : 1);
1450 }
1451 else
1452 {
1453 /* We know the result of a comparison is always one or zero. */
1454 *mask = 1;
1455 *val = 0;
1456 }
1457 break;
1458 }
1459
1460 case GE_EXPR:
1461 case GT_EXPR:
1462 swap_p = true;
1463 code = swap_tree_comparison (code);
1464 /* Fall through. */
1465 case LT_EXPR:
1466 case LE_EXPR:
1467 {
1468 int minmax, maxmin;
1469
1470 const widest_int &o1val = swap_p ? r2val : r1val;
1471 const widest_int &o1mask = swap_p ? r2mask : r1mask;
1472 const widest_int &o2val = swap_p ? r1val : r2val;
1473 const widest_int &o2mask = swap_p ? r1mask : r2mask;
1474
1475 /* If the most significant bits are not known we know nothing. */
1476 if (wi::neg_p (o1mask) || wi::neg_p (o2mask))
1477 break;
1478
1479 /* For comparisons the signedness is in the comparison operands. */
1480 sgn = TYPE_SIGN (r1type);
1481
1482 	/* If we know the most significant bits we know the value
1483 	   ranges by means of treating varying bits as zero
1484 or one. Do a cross comparison of the max/min pairs. */
1485 maxmin = wi::cmp (o1val | o1mask, o2val.and_not (o2mask), sgn);
1486 minmax = wi::cmp (o1val.and_not (o1mask), o2val | o2mask, sgn);
1487 if (maxmin < 0) /* o1 is less than o2. */
1488 {
1489 *mask = 0;
1490 *val = 1;
1491 }
1492 else if (minmax > 0) /* o1 is not less or equal to o2. */
1493 {
1494 *mask = 0;
1495 *val = 0;
1496 }
1497 else if (maxmin == minmax) /* o1 and o2 are equal. */
1498 {
1499 /* This probably should never happen as we'd have
1500 folded the thing during fully constant value folding. */
1501 *mask = 0;
1502 *val = (code == LE_EXPR ? 1 : 0);
1503 }
1504 else
1505 {
1506 /* We know the result of a comparison is always one or zero. */
1507 *mask = 1;
1508 *val = 0;
1509 }
1510 break;
1511 }
1512
1513 default:;
1514 }
1515 }
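/* A small example of the PLUS_EXPR carry trick above: adding the
   known constant 4 to a value with value == 0 and mask == 3
   (i.e. one of 0..3) gives lo = 4 and hi = 4 + 3 = 7, so
   mask = 3 | 0 | (4 ^ 7) = 3 and the result is known to lie in 4..7,
   with only its two low bits unknown.  */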
1516
1517 /* Return the propagation value when applying the operation CODE to
1518 the value RHS yielding type TYPE. */
1519
1520 static ccp_prop_value_t
1521 bit_value_unop (enum tree_code code, tree type, tree rhs)
1522 {
1523 ccp_prop_value_t rval = get_value_for_expr (rhs, true);
1524 widest_int value, mask;
1525 ccp_prop_value_t val;
1526
1527 if (rval.lattice_val == UNDEFINED)
1528 return rval;
1529
1530 gcc_assert ((rval.lattice_val == CONSTANT
1531 && TREE_CODE (rval.value) == INTEGER_CST)
1532 || wi::sext (rval.mask, TYPE_PRECISION (TREE_TYPE (rhs))) == -1);
1533 bit_value_unop_1 (code, type, &value, &mask,
1534 TREE_TYPE (rhs), value_to_wide_int (rval), rval.mask);
1535 if (wi::sext (mask, TYPE_PRECISION (type)) != -1)
1536 {
1537 val.lattice_val = CONSTANT;
1538 val.mask = mask;
1539 /* ??? Delay building trees here. */
1540 val.value = wide_int_to_tree (type, value);
1541 }
1542 else
1543 {
1544 val.lattice_val = VARYING;
1545 val.value = NULL_TREE;
1546 val.mask = -1;
1547 }
1548 return val;
1549 }
1550
1551 /* Return the propagation value when applying the operation CODE to
1552 the values RHS1 and RHS2 yielding type TYPE. */
1553
1554 static ccp_prop_value_t
1555 bit_value_binop (enum tree_code code, tree type, tree rhs1, tree rhs2)
1556 {
1557 ccp_prop_value_t r1val = get_value_for_expr (rhs1, true);
1558 ccp_prop_value_t r2val = get_value_for_expr (rhs2, true);
1559 widest_int value, mask;
1560 ccp_prop_value_t val;
1561
1562 if (r1val.lattice_val == UNDEFINED
1563 || r2val.lattice_val == UNDEFINED)
1564 {
1565 val.lattice_val = VARYING;
1566 val.value = NULL_TREE;
1567 val.mask = -1;
1568 return val;
1569 }
1570
1571 gcc_assert ((r1val.lattice_val == CONSTANT
1572 && TREE_CODE (r1val.value) == INTEGER_CST)
1573 || wi::sext (r1val.mask,
1574 TYPE_PRECISION (TREE_TYPE (rhs1))) == -1);
1575 gcc_assert ((r2val.lattice_val == CONSTANT
1576 && TREE_CODE (r2val.value) == INTEGER_CST)
1577 || wi::sext (r2val.mask,
1578 TYPE_PRECISION (TREE_TYPE (rhs2))) == -1);
1579 bit_value_binop_1 (code, type, &value, &mask,
1580 TREE_TYPE (rhs1), value_to_wide_int (r1val), r1val.mask,
1581 TREE_TYPE (rhs2), value_to_wide_int (r2val), r2val.mask);
1582 if (wi::sext (mask, TYPE_PRECISION (type)) != -1)
1583 {
1584 val.lattice_val = CONSTANT;
1585 val.mask = mask;
1586 /* ??? Delay building trees here. */
1587 val.value = wide_int_to_tree (type, value);
1588 }
1589 else
1590 {
1591 val.lattice_val = VARYING;
1592 val.value = NULL_TREE;
1593 val.mask = -1;
1594 }
1595 return val;
1596 }
1597
1598 /* Return the propagation value for __builtin_assume_aligned
1599 and functions with assume_aligned or alloc_aligned attribute.
1600 For __builtin_assume_aligned, ATTR is NULL_TREE,
1601 for assume_aligned attribute ATTR is non-NULL and ALLOC_ALIGNED
1602 is false, for alloc_aligned attribute ATTR is non-NULL and
1603 ALLOC_ALIGNED is true. */
1604
1605 static ccp_prop_value_t
1606 bit_value_assume_aligned (gimple *stmt, tree attr, ccp_prop_value_t ptrval,
1607 bool alloc_aligned)
1608 {
1609 tree align, misalign = NULL_TREE, type;
1610 unsigned HOST_WIDE_INT aligni, misaligni = 0;
1611 ccp_prop_value_t alignval;
1612 widest_int value, mask;
1613 ccp_prop_value_t val;
1614
1615 if (attr == NULL_TREE)
1616 {
1617 tree ptr = gimple_call_arg (stmt, 0);
1618 type = TREE_TYPE (ptr);
1619 ptrval = get_value_for_expr (ptr, true);
1620 }
1621 else
1622 {
1623 tree lhs = gimple_call_lhs (stmt);
1624 type = TREE_TYPE (lhs);
1625 }
1626
1627 if (ptrval.lattice_val == UNDEFINED)
1628 return ptrval;
1629 gcc_assert ((ptrval.lattice_val == CONSTANT
1630 && TREE_CODE (ptrval.value) == INTEGER_CST)
1631 || wi::sext (ptrval.mask, TYPE_PRECISION (type)) == -1);
1632 if (attr == NULL_TREE)
1633 {
1634 /* Get aligni and misaligni from __builtin_assume_aligned. */
1635 align = gimple_call_arg (stmt, 1);
1636 if (!tree_fits_uhwi_p (align))
1637 return ptrval;
1638 aligni = tree_to_uhwi (align);
1639 if (gimple_call_num_args (stmt) > 2)
1640 {
1641 misalign = gimple_call_arg (stmt, 2);
1642 if (!tree_fits_uhwi_p (misalign))
1643 return ptrval;
1644 misaligni = tree_to_uhwi (misalign);
1645 }
1646 }
1647 else
1648 {
1649 /* Get aligni and misaligni from assume_aligned or
1650 alloc_align attributes. */
1651 if (TREE_VALUE (attr) == NULL_TREE)
1652 return ptrval;
1653 attr = TREE_VALUE (attr);
1654 align = TREE_VALUE (attr);
1655 if (!tree_fits_uhwi_p (align))
1656 return ptrval;
1657 aligni = tree_to_uhwi (align);
1658 if (alloc_aligned)
1659 {
1660 if (aligni == 0 || aligni > gimple_call_num_args (stmt))
1661 return ptrval;
1662 align = gimple_call_arg (stmt, aligni - 1);
1663 if (!tree_fits_uhwi_p (align))
1664 return ptrval;
1665 aligni = tree_to_uhwi (align);
1666 }
1667 else if (TREE_CHAIN (attr) && TREE_VALUE (TREE_CHAIN (attr)))
1668 {
1669 misalign = TREE_VALUE (TREE_CHAIN (attr));
1670 if (!tree_fits_uhwi_p (misalign))
1671 return ptrval;
1672 misaligni = tree_to_uhwi (misalign);
1673 }
1674 }
1675 if (aligni <= 1 || (aligni & (aligni - 1)) != 0 || misaligni >= aligni)
1676 return ptrval;
1677
1678 align = build_int_cst_type (type, -aligni);
1679 alignval = get_value_for_expr (align, true);
1680 bit_value_binop_1 (BIT_AND_EXPR, type, &value, &mask,
1681 type, value_to_wide_int (ptrval), ptrval.mask,
1682 type, value_to_wide_int (alignval), alignval.mask);
1683 if (wi::sext (mask, TYPE_PRECISION (type)) != -1)
1684 {
1685 val.lattice_val = CONSTANT;
1686 val.mask = mask;
1687 gcc_assert ((mask.to_uhwi () & (aligni - 1)) == 0);
1688 gcc_assert ((value.to_uhwi () & (aligni - 1)) == 0);
1689 value |= misaligni;
1690 /* ??? Delay building trees here. */
1691 val.value = wide_int_to_tree (type, value);
1692 }
1693 else
1694 {
1695 val.lattice_val = VARYING;
1696 val.value = NULL_TREE;
1697 val.mask = -1;
1698 }
1699 return val;
1700 }
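/* For example, for __builtin_assume_aligned (p, 16, 4) the code above
   ANDs the lattice value of P with -16, clearing the four low bits of
   both value and mask, and then ORs the misalignment 4 into the value:
   the result encodes "p % 16 == 4" while leaving the higher bits of
   the pointer unknown.  */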
1701
1702 /* Evaluate statement STMT.
1703 Valid only for assignments, calls, conditionals, and switches. */
1704
1705 static ccp_prop_value_t
1706 evaluate_stmt (gimple *stmt)
1707 {
1708 ccp_prop_value_t val;
1709 tree simplified = NULL_TREE;
1710 ccp_lattice_t likelyvalue = likely_value (stmt);
1711 bool is_constant = false;
1712 unsigned int align;
1713
1714 if (dump_file && (dump_flags & TDF_DETAILS))
1715 {
1716 fprintf (dump_file, "which is likely ");
1717 switch (likelyvalue)
1718 {
1719 case CONSTANT:
1720 fprintf (dump_file, "CONSTANT");
1721 break;
1722 case UNDEFINED:
1723 fprintf (dump_file, "UNDEFINED");
1724 break;
1725 case VARYING:
1726 fprintf (dump_file, "VARYING");
1727 break;
1728 default:;
1729 }
1730 fprintf (dump_file, "\n");
1731 }
1732
1733 /* If the statement is likely to have a CONSTANT result, then try
1734 to fold the statement to determine the constant value. */
1735 /* FIXME. This is the only place that we call ccp_fold.
1736 Since likely_value never returns CONSTANT for calls, we will
1737      not attempt to fold them, including builtins that might benefit.  */
1738 if (likelyvalue == CONSTANT)
1739 {
1740 fold_defer_overflow_warnings ();
1741 simplified = ccp_fold (stmt);
1742 if (simplified && TREE_CODE (simplified) == SSA_NAME)
1743 {
1744 val = *get_value (simplified);
1745 if (val.lattice_val != VARYING)
1746 {
1747 fold_undefer_overflow_warnings (true, stmt, 0);
1748 return val;
1749 }
1750 }
1751 is_constant = simplified && is_gimple_min_invariant (simplified);
1752 fold_undefer_overflow_warnings (is_constant, stmt, 0);
1753 if (is_constant)
1754 {
1755 /* The statement produced a constant value. */
1756 val.lattice_val = CONSTANT;
1757 val.value = simplified;
1758 val.mask = 0;
1759 return val;
1760 }
1761 }
1762 /* If the statement is likely to have a VARYING result, then do not
1763 bother folding the statement. */
1764 else if (likelyvalue == VARYING)
1765 {
1766 enum gimple_code code = gimple_code (stmt);
1767 if (code == GIMPLE_ASSIGN)
1768 {
1769 enum tree_code subcode = gimple_assign_rhs_code (stmt);
1770
1771 /* Other cases cannot satisfy is_gimple_min_invariant
1772 without folding. */
1773 if (get_gimple_rhs_class (subcode) == GIMPLE_SINGLE_RHS)
1774 simplified = gimple_assign_rhs1 (stmt);
1775 }
1776 else if (code == GIMPLE_SWITCH)
1777 simplified = gimple_switch_index (as_a <gswitch *> (stmt));
1778 else
1779 /* These cannot satisfy is_gimple_min_invariant without folding. */
1780 gcc_assert (code == GIMPLE_CALL || code == GIMPLE_COND);
1781 is_constant = simplified && is_gimple_min_invariant (simplified);
1782 if (is_constant)
1783 {
1784 /* The statement produced a constant value. */
1785 val.lattice_val = CONSTANT;
1786 val.value = simplified;
1787 val.mask = 0;
1788 }
1789 }
1790 /* If the statement result is likely UNDEFINED, make it so. */
1791 else if (likelyvalue == UNDEFINED)
1792 {
1793 val.lattice_val = UNDEFINED;
1794 val.value = NULL_TREE;
1795 val.mask = 0;
1796 return val;
1797 }
1798
1799 /* Resort to simplification for bitwise tracking. */
1800 if (flag_tree_bit_ccp
1801 && (likelyvalue == CONSTANT || is_gimple_call (stmt)
1802 || (gimple_assign_single_p (stmt)
1803 && gimple_assign_rhs_code (stmt) == ADDR_EXPR))
1804 && !is_constant)
1805 {
1806 enum gimple_code code = gimple_code (stmt);
1807 val.lattice_val = VARYING;
1808 val.value = NULL_TREE;
1809 val.mask = -1;
1810 if (code == GIMPLE_ASSIGN)
1811 {
1812 enum tree_code subcode = gimple_assign_rhs_code (stmt);
1813 tree rhs1 = gimple_assign_rhs1 (stmt);
1814 tree lhs = gimple_assign_lhs (stmt);
1815 if ((INTEGRAL_TYPE_P (TREE_TYPE (lhs))
1816 || POINTER_TYPE_P (TREE_TYPE (lhs)))
1817 && (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
1818 || POINTER_TYPE_P (TREE_TYPE (rhs1))))
1819 switch (get_gimple_rhs_class (subcode))
1820 {
1821 case GIMPLE_SINGLE_RHS:
1822 val = get_value_for_expr (rhs1, true);
1823 break;
1824
1825 case GIMPLE_UNARY_RHS:
1826 val = bit_value_unop (subcode, TREE_TYPE (lhs), rhs1);
1827 break;
1828
1829 case GIMPLE_BINARY_RHS:
1830 val = bit_value_binop (subcode, TREE_TYPE (lhs), rhs1,
1831 gimple_assign_rhs2 (stmt));
1832 break;
1833
1834 default:;
1835 }
1836 }
1837 else if (code == GIMPLE_COND)
1838 {
1839 enum tree_code code = gimple_cond_code (stmt);
1840 tree rhs1 = gimple_cond_lhs (stmt);
1841 tree rhs2 = gimple_cond_rhs (stmt);
1842 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
1843 || POINTER_TYPE_P (TREE_TYPE (rhs1)))
1844 val = bit_value_binop (code, TREE_TYPE (rhs1), rhs1, rhs2);
1845 }
1846 else if (gimple_call_builtin_p (stmt, BUILT_IN_NORMAL))
1847 {
1848 tree fndecl = gimple_call_fndecl (stmt);
1849 switch (DECL_FUNCTION_CODE (fndecl))
1850 {
1851 case BUILT_IN_MALLOC:
1852 case BUILT_IN_REALLOC:
1853 case BUILT_IN_CALLOC:
1854 case BUILT_IN_STRDUP:
1855 case BUILT_IN_STRNDUP:
1856 val.lattice_val = CONSTANT;
1857 val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0);
1858 val.mask = ~((HOST_WIDE_INT) MALLOC_ABI_ALIGNMENT
1859 / BITS_PER_UNIT - 1);
1860 break;
1861
1862 case BUILT_IN_ALLOCA:
1863 case BUILT_IN_ALLOCA_WITH_ALIGN:
1864 align = (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_ALLOCA_WITH_ALIGN
1865 ? TREE_INT_CST_LOW (gimple_call_arg (stmt, 1))
1866 : BIGGEST_ALIGNMENT);
1867 val.lattice_val = CONSTANT;
1868 val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0);
1869 val.mask = ~((HOST_WIDE_INT) align / BITS_PER_UNIT - 1);
1870 break;
1871
1872 /* These builtins return their first argument, unmodified. */
1873 case BUILT_IN_MEMCPY:
1874 case BUILT_IN_MEMMOVE:
1875 case BUILT_IN_MEMSET:
1876 case BUILT_IN_STRCPY:
1877 case BUILT_IN_STRNCPY:
1878 case BUILT_IN_MEMCPY_CHK:
1879 case BUILT_IN_MEMMOVE_CHK:
1880 case BUILT_IN_MEMSET_CHK:
1881 case BUILT_IN_STRCPY_CHK:
1882 case BUILT_IN_STRNCPY_CHK:
1883 val = get_value_for_expr (gimple_call_arg (stmt, 0), true);
1884 break;
1885
1886 case BUILT_IN_ASSUME_ALIGNED:
1887 val = bit_value_assume_aligned (stmt, NULL_TREE, val, false);
1888 break;
1889
1890 case BUILT_IN_ALIGNED_ALLOC:
1891 {
1892 tree align = get_constant_value (gimple_call_arg (stmt, 0));
1893 if (align
1894 && tree_fits_uhwi_p (align))
1895 {
1896 unsigned HOST_WIDE_INT aligni = tree_to_uhwi (align);
1897 if (aligni > 1
1898 /* align must be a power of two. */
1899 && (aligni & (aligni - 1)) == 0)
1900 {
1901 val.lattice_val = CONSTANT;
1902 val.value = build_int_cst (ptr_type_node, 0);
1903 val.mask = -aligni;
1904 }
1905 }
1906 break;
1907 }
1908
1909 default:;
1910 }
1911 }
1912 if (is_gimple_call (stmt) && gimple_call_lhs (stmt))
1913 {
1914 tree fntype = gimple_call_fntype (stmt);
1915 if (fntype)
1916 {
1917 tree attrs = lookup_attribute ("assume_aligned",
1918 TYPE_ATTRIBUTES (fntype));
1919 if (attrs)
1920 val = bit_value_assume_aligned (stmt, attrs, val, false);
1921 attrs = lookup_attribute ("alloc_align",
1922 TYPE_ATTRIBUTES (fntype));
1923 if (attrs)
1924 val = bit_value_assume_aligned (stmt, attrs, val, true);
1925 }
1926 }
1927 is_constant = (val.lattice_val == CONSTANT);
1928 }
1929
1930 if (flag_tree_bit_ccp
1931 && ((is_constant && TREE_CODE (val.value) == INTEGER_CST)
1932 || !is_constant)
1933 && gimple_get_lhs (stmt)
1934 && TREE_CODE (gimple_get_lhs (stmt)) == SSA_NAME)
1935 {
1936 tree lhs = gimple_get_lhs (stmt);
1937 wide_int nonzero_bits = get_nonzero_bits (lhs);
1938 if (nonzero_bits != -1)
1939 {
1940 if (!is_constant)
1941 {
1942 val.lattice_val = CONSTANT;
1943 val.value = build_zero_cst (TREE_TYPE (lhs));
1944 val.mask = extend_mask (nonzero_bits);
1945 is_constant = true;
1946 }
1947 else
1948 {
1949 if (wi::bit_and_not (val.value, nonzero_bits) != 0)
1950 val.value = wide_int_to_tree (TREE_TYPE (lhs),
1951 nonzero_bits & val.value);
1952 if (nonzero_bits == 0)
1953 val.mask = 0;
1954 else
1955 val.mask = val.mask & extend_mask (nonzero_bits);
1956 }
1957 }
1958 }
1959
1960 /* The statement produced a nonconstant value. */
1961 if (!is_constant)
1962 {
1963 /* The statement produced a copy. */
1964 if (simplified && TREE_CODE (simplified) == SSA_NAME
1965 && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (simplified))
1966 {
1967 val.lattice_val = CONSTANT;
1968 val.value = simplified;
1969 val.mask = -1;
1970 }
1971 /* The statement is VARYING. */
1972 else
1973 {
1974 val.lattice_val = VARYING;
1975 val.value = NULL_TREE;
1976 val.mask = -1;
1977 }
1978 }
1979
1980 return val;
1981 }
1982
1983 typedef hash_table<nofree_ptr_hash<gimple> > gimple_htab;
1984
1985 /* Given a BUILT_IN_STACK_SAVE value SAVED_VAL, insert a clobber of VAR before
1986 each matching BUILT_IN_STACK_RESTORE. Mark visited phis in VISITED. */
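/* An illustrative sketch of the transformation (made-up SSA names, not
   actual pass output): given

     saved_sp_1 = __builtin_stack_save ();
     ...
     __builtin_stack_restore (saved_sp_1);

   a volatile clobber of VAR is inserted right before each restore:

     var ={v} {CLOBBER};
     __builtin_stack_restore (saved_sp_1);

   Uses of SAVED_VAL that flow through PHIs or SSA-name copies are
   followed recursively.  */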
1987
1988 static void
1989 insert_clobber_before_stack_restore (tree saved_val, tree var,
1990 gimple_htab **visited)
1991 {
1992 gimple *stmt;
1993 gassign *clobber_stmt;
1994 tree clobber;
1995 imm_use_iterator iter;
1996 gimple_stmt_iterator i;
1997 gimple **slot;
1998
1999 FOR_EACH_IMM_USE_STMT (stmt, iter, saved_val)
2000 if (gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
2001 {
2002 clobber = build_constructor (TREE_TYPE (var),
2003 NULL);
2004 TREE_THIS_VOLATILE (clobber) = 1;
2005 clobber_stmt = gimple_build_assign (var, clobber);
2006
2007 i = gsi_for_stmt (stmt);
2008 gsi_insert_before (&i, clobber_stmt, GSI_SAME_STMT);
2009 }
2010 else if (gimple_code (stmt) == GIMPLE_PHI)
2011 {
2012 if (!*visited)
2013 *visited = new gimple_htab (10);
2014
2015 slot = (*visited)->find_slot (stmt, INSERT);
2016 if (*slot != NULL)
2017 continue;
2018
2019 *slot = stmt;
2020 insert_clobber_before_stack_restore (gimple_phi_result (stmt), var,
2021 visited);
2022 }
2023 else if (gimple_assign_ssa_name_copy_p (stmt))
2024 insert_clobber_before_stack_restore (gimple_assign_lhs (stmt), var,
2025 visited);
2026 else if (chkp_gimple_call_builtin_p (stmt, BUILT_IN_CHKP_BNDRET))
2027 continue;
2028 else
2029 gcc_assert (is_gimple_debug (stmt));
2030 }
2031
2032 /* Advance the iterator to the previous non-debug gimple statement in the same
2033 or dominating basic block. */
2034
2035 static inline void
2036 gsi_prev_dom_bb_nondebug (gimple_stmt_iterator *i)
2037 {
2038 basic_block dom;
2039
2040 gsi_prev_nondebug (i);
2041 while (gsi_end_p (*i))
2042 {
2043 dom = get_immediate_dominator (CDI_DOMINATORS, i->bb);
2044 if (dom == NULL || dom == ENTRY_BLOCK_PTR_FOR_FN (cfun))
2045 return;
2046
2047 *i = gsi_last_bb (dom);
2048 }
2049 }
2050
2051 /* Find a BUILT_IN_STACK_SAVE dominating gsi_stmt (I), and insert
2052 a clobber of VAR before each matching BUILT_IN_STACK_RESTORE.
2053
2054 It is possible that BUILT_IN_STACK_SAVE cannot be found in a dominator when a
2055 previous pass (such as DOM) duplicated it along multiple paths to a BB. In
2056 that case the function gives up without inserting the clobbers. */
2057
2058 static void
2059 insert_clobbers_for_var (gimple_stmt_iterator i, tree var)
2060 {
2061 gimple *stmt;
2062 tree saved_val;
2063 gimple_htab *visited = NULL;
2064
2065 for (; !gsi_end_p (i); gsi_prev_dom_bb_nondebug (&i))
2066 {
2067 stmt = gsi_stmt (i);
2068
2069 if (!gimple_call_builtin_p (stmt, BUILT_IN_STACK_SAVE))
2070 continue;
2071
2072 saved_val = gimple_call_lhs (stmt);
2073 if (saved_val == NULL_TREE)
2074 continue;
2075
2076 insert_clobber_before_stack_restore (saved_val, var, &visited);
2077 break;
2078 }
2079
2080 delete visited;
2081 }
2082
2083 /* Detect a __builtin_alloca_with_align with a constant size argument. If one
2084 is found, declare a fixed-size array in its place and return the array's
2085 address; otherwise return NULL_TREE. */
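/* A rough sketch of the folding, with made-up names; the exact GIMPLE
   depends on the target:

     p_1 = __builtin_alloca_with_align (16, 256);

   becomes the address of a fresh local byte array whose DECL_ALIGN is
   taken from the second (bits) argument:

     unsigned char D.1234[16];        <- aligned to 256 bits
     p_1 = (char *) &D.1234;  */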
2086
2087 static tree
2088 fold_builtin_alloca_with_align (gimple *stmt)
2089 {
2090 unsigned HOST_WIDE_INT size, threshold, n_elem;
2091 tree lhs, arg, block, var, elem_type, array_type;
2092
2093 /* Get lhs. */
2094 lhs = gimple_call_lhs (stmt);
2095 if (lhs == NULL_TREE)
2096 return NULL_TREE;
2097
2098 /* Detect constant argument. */
2099 arg = get_constant_value (gimple_call_arg (stmt, 0));
2100 if (arg == NULL_TREE
2101 || TREE_CODE (arg) != INTEGER_CST
2102 || !tree_fits_uhwi_p (arg))
2103 return NULL_TREE;
2104
2105 size = tree_to_uhwi (arg);
2106
2107 /* Heuristic: don't fold large allocas. */
2108 threshold = (unsigned HOST_WIDE_INT)PARAM_VALUE (PARAM_LARGE_STACK_FRAME);
2109 /* In case the alloca is located at function entry, it has the same lifetime
2110 as a declared array, so we allow a larger size. */
2111 block = gimple_block (stmt);
2112 if (!(cfun->after_inlining
2113 && block
2114 && TREE_CODE (BLOCK_SUPERCONTEXT (block)) == FUNCTION_DECL))
2115 threshold /= 10;
2116 if (size > threshold)
2117 return NULL_TREE;
2118
2119 /* Declare array. */
2120 elem_type = build_nonstandard_integer_type (BITS_PER_UNIT, 1);
2121 n_elem = size * 8 / BITS_PER_UNIT;
2122 array_type = build_array_type_nelts (elem_type, n_elem);
2123 var = create_tmp_var (array_type);
2124 DECL_ALIGN (var) = TREE_INT_CST_LOW (gimple_call_arg (stmt, 1));
2125 {
2126 struct ptr_info_def *pi = SSA_NAME_PTR_INFO (lhs);
2127 if (pi != NULL && !pi->pt.anything)
2128 {
2129 bool singleton_p;
2130 unsigned uid;
2131 singleton_p = pt_solution_singleton_p (&pi->pt, &uid);
2132 gcc_assert (singleton_p);
2133 SET_DECL_PT_UID (var, uid);
2134 }
2135 }
2136
2137 /* Fold alloca to the address of the array. */
2138 return fold_convert (TREE_TYPE (lhs), build_fold_addr_expr (var));
2139 }
2140
2141 /* Fold the stmt at *GSI with CCP specific information that propagating
2142 and regular folding does not catch. */
2143
2144 static bool
2145 ccp_fold_stmt (gimple_stmt_iterator *gsi)
2146 {
2147 gimple *stmt = gsi_stmt (*gsi);
2148
2149 switch (gimple_code (stmt))
2150 {
2151 case GIMPLE_COND:
2152 {
2153 gcond *cond_stmt = as_a <gcond *> (stmt);
2154 ccp_prop_value_t val;
2155 /* Statement evaluation will handle type mismatches in constants
2156 more gracefully than the final propagation. This allows us to
2157 fold more conditionals here. */
2158 val = evaluate_stmt (stmt);
2159 if (val.lattice_val != CONSTANT
2160 || val.mask != 0)
2161 return false;
2162
2163 if (dump_file)
2164 {
2165 fprintf (dump_file, "Folding predicate ");
2166 print_gimple_expr (dump_file, stmt, 0, 0);
2167 fprintf (dump_file, " to ");
2168 print_generic_expr (dump_file, val.value, 0);
2169 fprintf (dump_file, "\n");
2170 }
2171
2172 if (integer_zerop (val.value))
2173 gimple_cond_make_false (cond_stmt);
2174 else
2175 gimple_cond_make_true (cond_stmt);
2176
2177 return true;
2178 }
2179
2180 case GIMPLE_CALL:
2181 {
2182 tree lhs = gimple_call_lhs (stmt);
2183 int flags = gimple_call_flags (stmt);
2184 tree val;
2185 tree argt;
2186 bool changed = false;
2187 unsigned i;
2188
2189 /* If the call was folded into a constant, make sure it goes
2190 away even if we cannot propagate into all uses because of
2191 type issues. */
2192 if (lhs
2193 && TREE_CODE (lhs) == SSA_NAME
2194 && (val = get_constant_value (lhs))
2195 /* Don't optimize away calls that have side-effects. */
2196 && (flags & (ECF_CONST|ECF_PURE)) != 0
2197 && (flags & ECF_LOOPING_CONST_OR_PURE) == 0)
2198 {
2199 tree new_rhs = unshare_expr (val);
2200 bool res;
2201 if (!useless_type_conversion_p (TREE_TYPE (lhs),
2202 TREE_TYPE (new_rhs)))
2203 new_rhs = fold_convert (TREE_TYPE (lhs), new_rhs);
2204 res = update_call_from_tree (gsi, new_rhs);
2205 gcc_assert (res);
2206 return true;
2207 }
2208
2209 /* Internal calls provide no argument types, so the extra laxity
2210 for normal calls does not apply. */
2211 if (gimple_call_internal_p (stmt))
2212 return false;
2213
2214 /* The heuristic of fold_builtin_alloca_with_align differs before and
2215 after inlining, so we don't require the argument to have been changed
2216 into a constant for folding; it just has to be constant. */
2217 if (gimple_call_builtin_p (stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
2218 {
2219 tree new_rhs = fold_builtin_alloca_with_align (stmt);
2220 if (new_rhs)
2221 {
2222 bool res = update_call_from_tree (gsi, new_rhs);
2223 tree var = TREE_OPERAND (TREE_OPERAND (new_rhs, 0), 0);
2224 gcc_assert (res);
2225 insert_clobbers_for_var (*gsi, var);
2226 return true;
2227 }
2228 }
2229
2230 /* Propagate into the call arguments. Compared to replace_uses_in
2231 this can use the argument slot types for type verification
2232 instead of the current argument type. We can also safely
2233 drop qualifiers here as we are dealing with constants anyway. */
2234 argt = TYPE_ARG_TYPES (gimple_call_fntype (stmt));
2235 for (i = 0; i < gimple_call_num_args (stmt) && argt;
2236 ++i, argt = TREE_CHAIN (argt))
2237 {
2238 tree arg = gimple_call_arg (stmt, i);
2239 if (TREE_CODE (arg) == SSA_NAME
2240 && (val = get_constant_value (arg))
2241 && useless_type_conversion_p
2242 (TYPE_MAIN_VARIANT (TREE_VALUE (argt)),
2243 TYPE_MAIN_VARIANT (TREE_TYPE (val))))
2244 {
2245 gimple_call_set_arg (stmt, i, unshare_expr (val));
2246 changed = true;
2247 }
2248 }
2249
2250 return changed;
2251 }
2252
2253 case GIMPLE_ASSIGN:
2254 {
2255 tree lhs = gimple_assign_lhs (stmt);
2256 tree val;
2257
2258 /* If we have a load that turned out to be constant, replace it,
2259 as we cannot propagate into all uses in all cases. */
2260 if (gimple_assign_single_p (stmt)
2261 && TREE_CODE (lhs) == SSA_NAME
2262 && (val = get_constant_value (lhs)))
2263 {
2264 tree rhs = unshare_expr (val);
2265 if (!useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (rhs)))
2266 rhs = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (lhs), rhs);
2267 gimple_assign_set_rhs_from_tree (gsi, rhs);
2268 return true;
2269 }
2270
2271 return false;
2272 }
2273
2274 default:
2275 return false;
2276 }
2277 }
2278
2279 /* Visit the assignment statement STMT. Set the value of its LHS to the
2280 value computed by the RHS and store LHS in *OUTPUT_P. If STMT
2281 creates virtual definitions, set the value of each new name to that
2282 of the RHS (if we can derive a constant out of the RHS).
2283 Value-returning call statements also perform an assignment, and
2284 are handled here. */
2285
2286 static enum ssa_prop_result
2287 visit_assignment (gimple *stmt, tree *output_p)
2288 {
2289 ccp_prop_value_t val;
2290 enum ssa_prop_result retval = SSA_PROP_NOT_INTERESTING;
2291
2292 tree lhs = gimple_get_lhs (stmt);
2293 if (TREE_CODE (lhs) == SSA_NAME)
2294 {
2295 /* Evaluate the statement, which could be
2296 either a GIMPLE_ASSIGN or a GIMPLE_CALL. */
2297 val = evaluate_stmt (stmt);
2298
2299 /* If STMT is an assignment to an SSA_NAME, we only have one
2300 value to set. */
2301 if (set_lattice_value (lhs, &val))
2302 {
2303 *output_p = lhs;
2304 if (val.lattice_val == VARYING)
2305 retval = SSA_PROP_VARYING;
2306 else
2307 retval = SSA_PROP_INTERESTING;
2308 }
2309 }
2310
2311 return retval;
2312 }
2313
2314
2315 /* Visit the conditional statement STMT. Return SSA_PROP_INTERESTING
2316 if it can determine which edge will be taken. Otherwise, return
2317 SSA_PROP_VARYING. */
2318
2319 static enum ssa_prop_result
2320 visit_cond_stmt (gimple *stmt, edge *taken_edge_p)
2321 {
2322 ccp_prop_value_t val;
2323 basic_block block;
2324
2325 block = gimple_bb (stmt);
2326 val = evaluate_stmt (stmt);
2327 if (val.lattice_val != CONSTANT
2328 || val.mask != 0)
2329 return SSA_PROP_VARYING;
2330
2331 /* Find which edge out of the conditional block will be taken and add it
2332 to the worklist. If no single edge can be determined statically,
2333 return SSA_PROP_VARYING to feed all the outgoing edges to the
2334 propagation engine. */
2335 *taken_edge_p = find_taken_edge (block, val.value);
2336 if (*taken_edge_p)
2337 return SSA_PROP_INTERESTING;
2338 else
2339 return SSA_PROP_VARYING;
2340 }
2341
2342
2343 /* Evaluate statement STMT. If the statement produces an output value and
2344 its evaluation changes the lattice value of its output, return
2345 SSA_PROP_INTERESTING and set *OUTPUT_P to the SSA_NAME holding the
2346 output value.
2347
2348 If STMT is a conditional branch and we can determine its truth
2349 value, set *TAKEN_EDGE_P accordingly. If STMT produces a varying
2350 value, return SSA_PROP_VARYING. */
2351
2352 static enum ssa_prop_result
2353 ccp_visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
2354 {
2355 tree def;
2356 ssa_op_iter iter;
2357
2358 if (dump_file && (dump_flags & TDF_DETAILS))
2359 {
2360 fprintf (dump_file, "\nVisiting statement:\n");
2361 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
2362 }
2363
2364 switch (gimple_code (stmt))
2365 {
2366 case GIMPLE_ASSIGN:
2367 /* If the statement is an assignment that produces a single
2368 output value, evaluate its RHS to see if the lattice value of
2369 its output has changed. */
2370 return visit_assignment (stmt, output_p);
2371
2372 case GIMPLE_CALL:
2373 /* A value-returning call also performs an assignment. */
2374 if (gimple_call_lhs (stmt) != NULL_TREE)
2375 return visit_assignment (stmt, output_p);
2376 break;
2377
2378 case GIMPLE_COND:
2379 case GIMPLE_SWITCH:
2380 /* If STMT is a conditional branch, see if we can determine
2381 which branch will be taken. */
2382 /* FIXME. It appears that we should be able to optimize
2383 computed GOTOs here as well. */
2384 return visit_cond_stmt (stmt, taken_edge_p);
2385
2386 default:
2387 break;
2388 }
2389
2390 /* Any other kind of statement is not interesting for constant
2391 propagation and, therefore, not worth simulating. */
2392 if (dump_file && (dump_flags & TDF_DETAILS))
2393 fprintf (dump_file, "No interesting values produced. Marked VARYING.\n");
2394
2395 /* Definitions made by statements other than assignments to
2396 SSA_NAMEs represent unknown modifications to their outputs.
2397 Mark them VARYING. */
2398 FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
2399 set_value_varying (def);
2400
2401 return SSA_PROP_VARYING;
2402 }
2403
2404
2405 /* Main entry point for SSA Conditional Constant Propagation. */
2406
2407 static unsigned int
2408 do_ssa_ccp (void)
2409 {
2410 unsigned int todo = 0;
2411 calculate_dominance_info (CDI_DOMINATORS);
2412 ccp_initialize ();
2413 ssa_propagate (ccp_visit_stmt, ccp_visit_phi_node);
2414 if (ccp_finalize ())
2415 todo = (TODO_cleanup_cfg | TODO_update_ssa);
2416 free_dominance_info (CDI_DOMINATORS);
2417 return todo;
2418 }
2419
2420
2421 namespace {
2422
2423 const pass_data pass_data_ccp =
2424 {
2425 GIMPLE_PASS, /* type */
2426 "ccp", /* name */
2427 OPTGROUP_NONE, /* optinfo_flags */
2428 TV_TREE_CCP, /* tv_id */
2429 ( PROP_cfg | PROP_ssa ), /* properties_required */
2430 0, /* properties_provided */
2431 0, /* properties_destroyed */
2432 0, /* todo_flags_start */
2433 TODO_update_address_taken, /* todo_flags_finish */
2434 };
2435
2436 class pass_ccp : public gimple_opt_pass
2437 {
2438 public:
2439 pass_ccp (gcc::context *ctxt)
2440 : gimple_opt_pass (pass_data_ccp, ctxt)
2441 {}
2442
2443 /* opt_pass methods: */
2444 opt_pass * clone () { return new pass_ccp (m_ctxt); }
2445 virtual bool gate (function *) { return flag_tree_ccp != 0; }
2446 virtual unsigned int execute (function *) { return do_ssa_ccp (); }
2447
2448 }; // class pass_ccp
2449
2450 } // anon namespace
2451
2452 gimple_opt_pass *
2453 make_pass_ccp (gcc::context *ctxt)
2454 {
2455 return new pass_ccp (ctxt);
2456 }
2457
2458
2459
2460 /* Try to optimize out __builtin_stack_restore. Optimize it out
2461 if there is another __builtin_stack_restore in the same basic
2462 block and no calls or ASM_EXPRs are in between, or if this block's
2463 only outgoing edge is to EXIT_BLOCK and there are no calls or
2464 ASM_EXPRs after this __builtin_stack_restore. */
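/* An illustrative sketch (made-up names) of the first case handled here:
   a restore that is shadowed by a later restore in the same block, with
   no intervening calls or asm, is dead:

     __builtin_stack_restore (saved_sp_1);   <- removable
     x_2 = y_1 + 1;
     __builtin_stack_restore (saved_sp_2);

   The second case removes a restore after which only call-free and
   asm-free statements remain and the block falls straight through to the
   function's exit.  */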
2465
2466 static tree
2467 optimize_stack_restore (gimple_stmt_iterator i)
2468 {
2469 tree callee;
2470 gimple *stmt;
2471
2472 basic_block bb = gsi_bb (i);
2473 gimple *call = gsi_stmt (i);
2474
2475 if (gimple_code (call) != GIMPLE_CALL
2476 || gimple_call_num_args (call) != 1
2477 || TREE_CODE (gimple_call_arg (call, 0)) != SSA_NAME
2478 || !POINTER_TYPE_P (TREE_TYPE (gimple_call_arg (call, 0))))
2479 return NULL_TREE;
2480
2481 for (gsi_next (&i); !gsi_end_p (i); gsi_next (&i))
2482 {
2483 stmt = gsi_stmt (i);
2484 if (gimple_code (stmt) == GIMPLE_ASM)
2485 return NULL_TREE;
2486 if (gimple_code (stmt) != GIMPLE_CALL)
2487 continue;
2488
2489 callee = gimple_call_fndecl (stmt);
2490 if (!callee
2491 || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL
2492 /* All regular builtins are ok, just obviously not alloca. */
2493 || DECL_FUNCTION_CODE (callee) == BUILT_IN_ALLOCA
2494 || DECL_FUNCTION_CODE (callee) == BUILT_IN_ALLOCA_WITH_ALIGN)
2495 return NULL_TREE;
2496
2497 if (DECL_FUNCTION_CODE (callee) == BUILT_IN_STACK_RESTORE)
2498 goto second_stack_restore;
2499 }
2500
2501 if (!gsi_end_p (i))
2502 return NULL_TREE;
2503
2504 /* Allow a single successor that leads to the exit block, or no successors at all. */
2505 switch (EDGE_COUNT (bb->succs))
2506 {
2507 case 0:
2508 break;
2509 case 1:
2510 if (single_succ_edge (bb)->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
2511 return NULL_TREE;
2512 break;
2513 default:
2514 return NULL_TREE;
2515 }
2516 second_stack_restore:
2517
2518 /* If there's exactly one use, then zap the call to __builtin_stack_save.
2519 If there are multiple uses, then the last one should remove the call.
2520 In any case, whether the call to __builtin_stack_save can be removed
2521 or not is irrelevant to removing the call to __builtin_stack_restore. */
2522 if (has_single_use (gimple_call_arg (call, 0)))
2523 {
2524 gimple *stack_save = SSA_NAME_DEF_STMT (gimple_call_arg (call, 0));
2525 if (is_gimple_call (stack_save))
2526 {
2527 callee = gimple_call_fndecl (stack_save);
2528 if (callee
2529 && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
2530 && DECL_FUNCTION_CODE (callee) == BUILT_IN_STACK_SAVE)
2531 {
2532 gimple_stmt_iterator stack_save_gsi;
2533 tree rhs;
2534
2535 stack_save_gsi = gsi_for_stmt (stack_save);
2536 rhs = build_int_cst (TREE_TYPE (gimple_call_arg (call, 0)), 0);
2537 update_call_from_tree (&stack_save_gsi, rhs);
2538 }
2539 }
2540 }
2541
2542 /* No effect, so the statement will be deleted. */
2543 return integer_zero_node;
2544 }
2545
2546 /* If the va_list type is a simple pointer and nothing special is needed,
2547 optimize __builtin_va_start (&ap, 0) into ap = __builtin_next_arg (0),
2548 optimize __builtin_va_end (&ap) out as a no-op, and turn __builtin_va_copy
2549 into a simple pointer assignment. */
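/* A hedged source-level sketch of what the folding corresponds to,
   assuming the target's va_list really is a plain pointer:

     void f (int n, ...)
     {
       va_list ap;
       va_start (ap, n);     becomes   ap = __builtin_next_arg (0);
       ...
       va_end (ap);          becomes   (nothing)
     }  */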
2550
2551 static tree
2552 optimize_stdarg_builtin (gimple *call)
2553 {
2554 tree callee, lhs, rhs, cfun_va_list;
2555 bool va_list_simple_ptr;
2556 location_t loc = gimple_location (call);
2557
2558 if (gimple_code (call) != GIMPLE_CALL)
2559 return NULL_TREE;
2560
2561 callee = gimple_call_fndecl (call);
2562
2563 cfun_va_list = targetm.fn_abi_va_list (callee);
2564 va_list_simple_ptr = POINTER_TYPE_P (cfun_va_list)
2565 && (TREE_TYPE (cfun_va_list) == void_type_node
2566 || TREE_TYPE (cfun_va_list) == char_type_node);
2567
2568 switch (DECL_FUNCTION_CODE (callee))
2569 {
2570 case BUILT_IN_VA_START:
2571 if (!va_list_simple_ptr
2572 || targetm.expand_builtin_va_start != NULL
2573 || !builtin_decl_explicit_p (BUILT_IN_NEXT_ARG))
2574 return NULL_TREE;
2575
2576 if (gimple_call_num_args (call) != 2)
2577 return NULL_TREE;
2578
2579 lhs = gimple_call_arg (call, 0);
2580 if (!POINTER_TYPE_P (TREE_TYPE (lhs))
2581 || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs)))
2582 != TYPE_MAIN_VARIANT (cfun_va_list))
2583 return NULL_TREE;
2584
2585 lhs = build_fold_indirect_ref_loc (loc, lhs);
2586 rhs = build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_NEXT_ARG),
2587 1, integer_zero_node);
2588 rhs = fold_convert_loc (loc, TREE_TYPE (lhs), rhs);
2589 return build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, rhs);
2590
2591 case BUILT_IN_VA_COPY:
2592 if (!va_list_simple_ptr)
2593 return NULL_TREE;
2594
2595 if (gimple_call_num_args (call) != 2)
2596 return NULL_TREE;
2597
2598 lhs = gimple_call_arg (call, 0);
2599 if (!POINTER_TYPE_P (TREE_TYPE (lhs))
2600 || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs)))
2601 != TYPE_MAIN_VARIANT (cfun_va_list))
2602 return NULL_TREE;
2603
2604 lhs = build_fold_indirect_ref_loc (loc, lhs);
2605 rhs = gimple_call_arg (call, 1);
2606 if (TYPE_MAIN_VARIANT (TREE_TYPE (rhs))
2607 != TYPE_MAIN_VARIANT (cfun_va_list))
2608 return NULL_TREE;
2609
2610 rhs = fold_convert_loc (loc, TREE_TYPE (lhs), rhs);
2611 return build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, rhs);
2612
2613 case BUILT_IN_VA_END:
2614 /* No effect, so the statement will be deleted. */
2615 return integer_zero_node;
2616
2617 default:
2618 gcc_unreachable ();
2619 }
2620 }
2621
2622 /* Attempt to make the block containing __builtin_unreachable at I unreachable
2623 by changing the incoming jumps. Return true if at least one jump was changed. */
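/* An illustrative sketch with made-up block numbers: if <bb 3> starts
   with __builtin_unreachable () and is reached by the EDGE_TRUE_VALUE edge
   of

     if (x_1 > 10) goto <bb 3>; else goto <bb 4>;

   the condition is rewritten via gimple_cond_make_false so the edge into
   <bb 3> can never be taken, letting CFG cleanup remove the block.  */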
2624
2625 static bool
2626 optimize_unreachable (gimple_stmt_iterator i)
2627 {
2628 basic_block bb = gsi_bb (i);
2629 gimple_stmt_iterator gsi;
2630 gimple *stmt;
2631 edge_iterator ei;
2632 edge e;
2633 bool ret;
2634
2635 if (flag_sanitize & SANITIZE_UNREACHABLE)
2636 return false;
2637
2638 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2639 {
2640 stmt = gsi_stmt (gsi);
2641
2642 if (is_gimple_debug (stmt))
2643 continue;
2644
2645 if (glabel *label_stmt = dyn_cast <glabel *> (stmt))
2646 {
2647 /* Verify we do not need to preserve the label. */
2648 if (FORCED_LABEL (gimple_label_label (label_stmt)))
2649 return false;
2650
2651 continue;
2652 }
2653
2654 /* Only handle the case that __builtin_unreachable is the first statement
2655 in the block. We rely on DCE to remove stmts without side-effects
2656 before __builtin_unreachable. */
2657 if (gsi_stmt (gsi) != gsi_stmt (i))
2658 return false;
2659 }
2660
2661 ret = false;
2662 FOR_EACH_EDGE (e, ei, bb->preds)
2663 {
2664 gsi = gsi_last_bb (e->src);
2665 if (gsi_end_p (gsi))
2666 continue;
2667
2668 stmt = gsi_stmt (gsi);
2669 if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
2670 {
2671 if (e->flags & EDGE_TRUE_VALUE)
2672 gimple_cond_make_false (cond_stmt);
2673 else if (e->flags & EDGE_FALSE_VALUE)
2674 gimple_cond_make_true (cond_stmt);
2675 else
2676 gcc_unreachable ();
2677 update_stmt (cond_stmt);
2678 }
2679 else
2680 {
2681 /* TODO: handle other cases, e.g. a switch statement. */
2682 continue;
2683 }
2684
2685 ret = true;
2686 }
2687
2688 return ret;
2689 }
2690
2691 /* A simple pass that attempts to fold all builtin functions. This pass
2692 is run after we've propagated as many constants as we can. */
2693
2694 namespace {
2695
2696 const pass_data pass_data_fold_builtins =
2697 {
2698 GIMPLE_PASS, /* type */
2699 "fab", /* name */
2700 OPTGROUP_NONE, /* optinfo_flags */
2701 TV_NONE, /* tv_id */
2702 ( PROP_cfg | PROP_ssa ), /* properties_required */
2703 0, /* properties_provided */
2704 0, /* properties_destroyed */
2705 0, /* todo_flags_start */
2706 TODO_update_ssa, /* todo_flags_finish */
2707 };
2708
2709 class pass_fold_builtins : public gimple_opt_pass
2710 {
2711 public:
2712 pass_fold_builtins (gcc::context *ctxt)
2713 : gimple_opt_pass (pass_data_fold_builtins, ctxt)
2714 {}
2715
2716 /* opt_pass methods: */
2717 opt_pass * clone () { return new pass_fold_builtins (m_ctxt); }
2718 virtual unsigned int execute (function *);
2719
2720 }; // class pass_fold_builtins
2721
2722 unsigned int
2723 pass_fold_builtins::execute (function *fun)
2724 {
2725 bool cfg_changed = false;
2726 basic_block bb;
2727 unsigned int todoflags = 0;
2728
2729 FOR_EACH_BB_FN (bb, fun)
2730 {
2731 gimple_stmt_iterator i;
2732 for (i = gsi_start_bb (bb); !gsi_end_p (i); )
2733 {
2734 gimple *stmt, *old_stmt;
2735 tree callee;
2736 enum built_in_function fcode;
2737
2738 stmt = gsi_stmt (i);
2739
2740 if (gimple_code (stmt) != GIMPLE_CALL)
2741 {
2742 /* Remove all *ssaname_N ={v} {CLOBBER}; stmts: after the
2743 last GIMPLE DSE they aren't needed and might unnecessarily
2744 keep the SSA_NAMEs live. */
2745 if (gimple_clobber_p (stmt))
2746 {
2747 tree lhs = gimple_assign_lhs (stmt);
2748 if (TREE_CODE (lhs) == MEM_REF
2749 && TREE_CODE (TREE_OPERAND (lhs, 0)) == SSA_NAME)
2750 {
2751 unlink_stmt_vdef (stmt);
2752 gsi_remove (&i, true);
2753 release_defs (stmt);
2754 continue;
2755 }
2756 }
2757 gsi_next (&i);
2758 continue;
2759 }
2760
2761 callee = gimple_call_fndecl (stmt);
2762 if (!callee || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL)
2763 {
2764 gsi_next (&i);
2765 continue;
2766 }
2767
2768 fcode = DECL_FUNCTION_CODE (callee);
2769 if (fold_stmt (&i))
2770 ;
2771 else
2772 {
2773 tree result = NULL_TREE;
2774 switch (DECL_FUNCTION_CODE (callee))
2775 {
2776 case BUILT_IN_CONSTANT_P:
2777 /* Resolve __builtin_constant_p. If it hasn't been
2778 folded to integer_one_node by now, it's fairly
2779 certain that the value simply isn't constant. */
2780 result = integer_zero_node;
2781 break;
2782
2783 case BUILT_IN_ASSUME_ALIGNED:
2784 /* Remove __builtin_assume_aligned. */
2785 result = gimple_call_arg (stmt, 0);
2786 break;
2787
2788 case BUILT_IN_STACK_RESTORE:
2789 result = optimize_stack_restore (i);
2790 if (result)
2791 break;
2792 gsi_next (&i);
2793 continue;
2794
2795 case BUILT_IN_UNREACHABLE:
2796 if (optimize_unreachable (i))
2797 cfg_changed = true;
2798 break;
2799
2800 case BUILT_IN_VA_START:
2801 case BUILT_IN_VA_END:
2802 case BUILT_IN_VA_COPY:
2803 /* These shouldn't be folded before pass_stdarg. */
2804 result = optimize_stdarg_builtin (stmt);
2805 if (result)
2806 break;
2807 /* FALLTHRU */
2808
2809 default:;
2810 }
2811
2812 if (!result)
2813 {
2814 gsi_next (&i);
2815 continue;
2816 }
2817
2818 if (!update_call_from_tree (&i, result))
2819 gimplify_and_update_call_from_tree (&i, result);
2820 }
2821
2822 todoflags |= TODO_update_address_taken;
2823
2824 if (dump_file && (dump_flags & TDF_DETAILS))
2825 {
2826 fprintf (dump_file, "Simplified\n ");
2827 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
2828 }
2829
2830 old_stmt = stmt;
2831 stmt = gsi_stmt (i);
2832 update_stmt (stmt);
2833
2834 if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt)
2835 && gimple_purge_dead_eh_edges (bb))
2836 cfg_changed = true;
2837
2838 if (dump_file && (dump_flags & TDF_DETAILS))
2839 {
2840 fprintf (dump_file, "to\n ");
2841 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
2842 fprintf (dump_file, "\n");
2843 }
2844
2845 /* Retry the same statement if it changed into another
2846 builtin; there might be new opportunities now. */
2847 if (gimple_code (stmt) != GIMPLE_CALL)
2848 {
2849 gsi_next (&i);
2850 continue;
2851 }
2852 callee = gimple_call_fndecl (stmt);
2853 if (!callee
2854 || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL
2855 || DECL_FUNCTION_CODE (callee) == fcode)
2856 gsi_next (&i);
2857 }
2858 }
2859
2860 /* Delete unreachable blocks. */
2861 if (cfg_changed)
2862 todoflags |= TODO_cleanup_cfg;
2863
2864 return todoflags;
2865 }
2866
2867 } // anon namespace
2868
2869 gimple_opt_pass *
2870 make_pass_fold_builtins (gcc::context *ctxt)
2871 {
2872 return new pass_fold_builtins (ctxt);
2873 }