gcc/tree-ssa-ccp.c
1 /* Conditional constant propagation pass for the GNU compiler.
2 Copyright (C) 2000-2015 Free Software Foundation, Inc.
3 Adapted from original RTL SSA-CCP by Daniel Berlin <dberlin@dberlin.org>
4 Adapted to GIMPLE trees by Diego Novillo <dnovillo@redhat.com>
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it
9 under the terms of the GNU General Public License as published by the
10 Free Software Foundation; either version 3, or (at your option) any
11 later version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT
14 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 /* Conditional constant propagation (CCP) is based on the SSA
23 propagation engine (tree-ssa-propagate.c). Constant assignments of
24 the form VAR = CST are propagated from the assignments into uses of
25 VAR, which in turn may generate new constants. The simulation uses
26 a four level lattice to keep track of constant values associated
27 with SSA names. Given an SSA name V_i, it may take one of the
28 following values:
29
30 UNINITIALIZED -> the initial state of the value. This value
31 is replaced with a correct initial value
32 the first time the value is used, so the
33 rest of the pass does not need to care about
34 it. Using this value simplifies initialization
35 of the pass, and prevents us from needlessly
36 scanning statements that are never reached.
37
38 UNDEFINED -> V_i is a local variable whose definition
39 has not been processed yet. Therefore we
40 don't yet know if its value is a constant
41 or not.
42
43 CONSTANT -> V_i has been found to hold a constant
44 value C.
45
46 VARYING -> V_i cannot take a constant value, or if it
47 does, it is not possible to determine it
48 at compile time.
49
50 The core of SSA-CCP is in ccp_visit_stmt and ccp_visit_phi_node:
51
52 1- In ccp_visit_stmt, we are interested in assignments whose RHS
53 evaluates into a constant and conditional jumps whose predicate
54 evaluates into a boolean true or false. When an assignment of
55 the form V_i = CONST is found, V_i's lattice value is set to
56 CONSTANT and CONST is associated with it. This causes the
57 propagation engine to add all the SSA edges coming out of the
58 assignment into the worklists, so that statements that use V_i
59 can be visited.
60
61 If the statement is a conditional with a constant predicate, we
62 mark the outgoing edges as executable or not executable
63 depending on the predicate's value. This is then used when
64 visiting PHI nodes to know when a PHI argument can be ignored.
65
66
67 2- In ccp_visit_phi_node, if all the PHI arguments evaluate to the
68 same constant C, then the LHS of the PHI is set to C. This
69 evaluation is known as the "meet operation". Since one of the
70 goals of this evaluation is to optimistically return constant
71 values as often as possible, it uses two main short cuts:
72
73 - If an argument is flowing in through a non-executable edge, it
74 is ignored. This is useful in cases like this:
75
76 if (PRED)
77 a_9 = 3;
78 else
79 a_10 = 100;
80 a_11 = PHI (a_9, a_10)
81
82 If PRED is known to always evaluate to false, then we can
83 assume that a_11 will always take its value from a_10, meaning
84 that instead of considering it VARYING (a_9 and a_10 have
85 different values), we can consider it CONSTANT 100.
86
87 - If an argument has an UNDEFINED value, then it does not affect
88 the outcome of the meet operation. If a variable V_i has an
89 UNDEFINED value, it means that either its defining statement
90 hasn't been visited yet or V_i has no defining statement, in
91 which case the original symbol 'V' is being used
92 uninitialized. Since 'V' is a local variable, the compiler
93 may assume any initial value for it.
94
95
96 After propagation, every variable V_i that ends up with a lattice
97 value of CONSTANT will have the associated constant value in the
98 array CONST_VAL[i].VALUE. That is fed into substitute_and_fold for
99 final substitution and folding.
100
101 This algorithm uses wide-ints at the max precision of the target.
102 This means that, with one uninteresting exception, variables with
103 UNSIGNED types never go to VARYING because the bits above the
104 precision of the type of the variable are always zero. The
105 uninteresting case is a variable of UNSIGNED type that has the
106 maximum precision of the target. Such variables can go to VARYING,
107 but this causes no loss of information since these variables will
108 never be extended.
109
110 References:
111
112 Constant propagation with conditional branches,
113 Wegman and Zadeck, ACM TOPLAS 13(2):181-210.
114
115 Building an Optimizing Compiler,
116 Robert Morgan, Butterworth-Heinemann, 1998, Section 8.9.
117
118 Advanced Compiler Design and Implementation,
119 Steven Muchnick, Morgan Kaufmann, 1997, Section 12.6 */
120
121 #include "config.h"
122 #include "system.h"
123 #include "coretypes.h"
124 #include "tm.h"
125 #include "input.h"
126 #include "alias.h"
127 #include "symtab.h"
128 #include "tree.h"
129 #include "fold-const.h"
130 #include "stor-layout.h"
131 #include "flags.h"
132 #include "tm_p.h"
133 #include "predict.h"
134 #include "hard-reg-set.h"
135 #include "input.h"
136 #include "function.h"
137 #include "dominance.h"
138 #include "cfg.h"
139 #include "basic-block.h"
140 #include "gimple-pretty-print.h"
141 #include "tree-ssa-alias.h"
142 #include "internal-fn.h"
143 #include "gimple-fold.h"
144 #include "tree-eh.h"
145 #include "gimple-expr.h"
146 #include "is-a.h"
147 #include "gimple.h"
148 #include "gimplify.h"
149 #include "gimple-iterator.h"
150 #include "gimple-ssa.h"
151 #include "tree-cfg.h"
152 #include "tree-phinodes.h"
153 #include "ssa-iterators.h"
154 #include "stringpool.h"
155 #include "tree-ssanames.h"
156 #include "tree-pass.h"
157 #include "tree-ssa-propagate.h"
158 #include "value-prof.h"
159 #include "langhooks.h"
160 #include "target.h"
161 #include "diagnostic-core.h"
162 #include "dbgcnt.h"
163 #include "params.h"
164 #include "wide-int-print.h"
165 #include "builtins.h"
166 #include "tree-chkp.h"
167
168
169 /* Possible lattice values. */
170 typedef enum
171 {
172 UNINITIALIZED,
173 UNDEFINED,
174 CONSTANT,
175 VARYING
176 } ccp_lattice_t;
177
178 struct ccp_prop_value_t {
179 /* Lattice value. */
180 ccp_lattice_t lattice_val;
181
182 /* Propagated value. */
183 tree value;
184
185 /* Mask that applies to the propagated value during CCP. For X
186 with a CONSTANT lattice value, X & ~mask == value & ~mask. The
187 zero bits in the mask cover constant values. The one bits mean no
188 information. */
189 widest_int mask;
190 };
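/* A worked example of this encoding, to make the struct above concrete:
   VALUE == 0x10 with MASK == 0xf says that every bit from bit 4 upwards is
   known to equal the corresponding bit of VALUE while bits 0-3 are unknown,
   so the runtime value is known to lie in [0x10, 0x1f].  MASK == 0 means the
   value is fully known; an all-ones MASK carries no bit information.  */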
191
192 /* Array of propagated constant values. After propagation,
193 CONST_VAL[I].VALUE holds the constant value for SSA_NAME(I). If
194 the constant is held in an SSA name representing a memory store
195 (i.e., a VDEF), CONST_VAL[I].MEM_REF will contain the actual
196 memory reference used to store (i.e., the LHS of the assignment
197 doing the store). */
198 static ccp_prop_value_t *const_val;
199 static unsigned n_const_val;
200
201 static void canonicalize_value (ccp_prop_value_t *);
202 static bool ccp_fold_stmt (gimple_stmt_iterator *);
203 static void ccp_lattice_meet (ccp_prop_value_t *, ccp_prop_value_t *);
204
205 /* Dump constant propagation value VAL to file OUTF prefixed by PREFIX. */
206
207 static void
208 dump_lattice_value (FILE *outf, const char *prefix, ccp_prop_value_t val)
209 {
210 switch (val.lattice_val)
211 {
212 case UNINITIALIZED:
213 fprintf (outf, "%sUNINITIALIZED", prefix);
214 break;
215 case UNDEFINED:
216 fprintf (outf, "%sUNDEFINED", prefix);
217 break;
218 case VARYING:
219 fprintf (outf, "%sVARYING", prefix);
220 break;
221 case CONSTANT:
222 if (TREE_CODE (val.value) != INTEGER_CST
223 || val.mask == 0)
224 {
225 fprintf (outf, "%sCONSTANT ", prefix);
226 print_generic_expr (outf, val.value, dump_flags);
227 }
228 else
229 {
230 widest_int cval = wi::bit_and_not (wi::to_widest (val.value),
231 val.mask);
232 fprintf (outf, "%sCONSTANT ", prefix);
233 print_hex (cval, outf);
234 fprintf (outf, " (");
235 print_hex (val.mask, outf);
236 fprintf (outf, ")");
237 }
238 break;
239 default:
240 gcc_unreachable ();
241 }
242 }
243
244
245 /* Print lattice value VAL to stderr. */
246
247 void debug_lattice_value (ccp_prop_value_t val);
248
249 DEBUG_FUNCTION void
250 debug_lattice_value (ccp_prop_value_t val)
251 {
252 dump_lattice_value (stderr, "", val);
253 fprintf (stderr, "\n");
254 }
255
256 /* Extend NONZERO_BITS to a full mask, with the upper bits being set. */
257
258 static widest_int
259 extend_mask (const wide_int &nonzero_bits)
260 {
261 return (wi::mask <widest_int> (wi::get_precision (nonzero_bits), true)
262 | widest_int::from (nonzero_bits, UNSIGNED));
263 }
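/* For example, for a 32-bit variable whose recorded nonzero bits are 0xff,
   extend_mask returns a widest_int whose low 32 bits are 0xff and whose bits
   above the type's precision are all set; paired with a zero value this
   encodes "bits 8-31 are known zero, bits 0-7 are unknown".  */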
264
265 /* Compute a default value for variable VAR and store it in the
266 CONST_VAL array. The following rules are used to get default
267 values:
268
269 1- Global and static variables that are declared constant are
270 considered CONSTANT.
271
272 2- Any other value is considered UNDEFINED. This is useful when
273 considering PHI nodes. PHI arguments that are undefined do not
274 change the constant value of the PHI node, which allows for more
275 constants to be propagated.
276
277 3- Variables defined by statements other than assignments and PHI
278 nodes are considered VARYING.
279
280 4- Initial values of variables that are not GIMPLE registers are
281 considered VARYING. */
282
283 static ccp_prop_value_t
284 get_default_value (tree var)
285 {
286 ccp_prop_value_t val = { UNINITIALIZED, NULL_TREE, 0 };
287 gimple stmt;
288
289 stmt = SSA_NAME_DEF_STMT (var);
290
291 if (gimple_nop_p (stmt))
292 {
293 /* Variables defined by an empty statement are those used
294 before being initialized. If VAR is a local variable, we
295 can assume initially that it is UNDEFINED, otherwise we must
296 consider it VARYING. */
297 if (!virtual_operand_p (var)
298 && TREE_CODE (SSA_NAME_VAR (var)) == VAR_DECL)
299 val.lattice_val = UNDEFINED;
300 else
301 {
302 val.lattice_val = VARYING;
303 val.mask = -1;
304 if (flag_tree_bit_ccp)
305 {
306 wide_int nonzero_bits = get_nonzero_bits (var);
307 if (nonzero_bits != -1)
308 {
309 val.lattice_val = CONSTANT;
310 val.value = build_zero_cst (TREE_TYPE (var));
311 val.mask = extend_mask (nonzero_bits);
312 }
313 }
314 }
315 }
316 else if (is_gimple_assign (stmt))
317 {
318 tree cst;
319 if (gimple_assign_single_p (stmt)
320 && DECL_P (gimple_assign_rhs1 (stmt))
321 && (cst = get_symbol_constant_value (gimple_assign_rhs1 (stmt))))
322 {
323 val.lattice_val = CONSTANT;
324 val.value = cst;
325 }
326 else
327 {
328 /* Any other variable defined by an assignment is considered
329 UNDEFINED. */
330 val.lattice_val = UNDEFINED;
331 }
332 }
333 else if ((is_gimple_call (stmt)
334 && gimple_call_lhs (stmt) != NULL_TREE)
335 || gimple_code (stmt) == GIMPLE_PHI)
336 {
337 /* A variable defined by a call or a PHI node is considered
338 UNDEFINED. */
339 val.lattice_val = UNDEFINED;
340 }
341 else
342 {
343 /* Otherwise, VAR will never take on a constant value. */
344 val.lattice_val = VARYING;
345 val.mask = -1;
346 }
347
348 return val;
349 }
350
351
352 /* Get the constant value associated with variable VAR. */
353
354 static inline ccp_prop_value_t *
355 get_value (tree var)
356 {
357 ccp_prop_value_t *val;
358
359 if (const_val == NULL
360 || SSA_NAME_VERSION (var) >= n_const_val)
361 return NULL;
362
363 val = &const_val[SSA_NAME_VERSION (var)];
364 if (val->lattice_val == UNINITIALIZED)
365 *val = get_default_value (var);
366
367 canonicalize_value (val);
368
369 return val;
370 }
371
372 /* Return the constant tree value associated with VAR. */
373
374 static inline tree
375 get_constant_value (tree var)
376 {
377 ccp_prop_value_t *val;
378 if (TREE_CODE (var) != SSA_NAME)
379 {
380 if (is_gimple_min_invariant (var))
381 return var;
382 return NULL_TREE;
383 }
384 val = get_value (var);
385 if (val
386 && val->lattice_val == CONSTANT
387 && (TREE_CODE (val->value) != INTEGER_CST
388 || val->mask == 0))
389 return val->value;
390 return NULL_TREE;
391 }
392
393 /* Sets the value associated with VAR to VARYING. */
394
395 static inline void
396 set_value_varying (tree var)
397 {
398 ccp_prop_value_t *val = &const_val[SSA_NAME_VERSION (var)];
399
400 val->lattice_val = VARYING;
401 val->value = NULL_TREE;
402 val->mask = -1;
403 }
404
405 /* For integer constants, make sure to drop TREE_OVERFLOW. */
406
407 static void
408 canonicalize_value (ccp_prop_value_t *val)
409 {
410 if (val->lattice_val != CONSTANT)
411 return;
412
413 if (TREE_OVERFLOW_P (val->value))
414 val->value = drop_tree_overflow (val->value);
415 }
416
417 /* Return whether the lattice transition is valid. */
418
419 static bool
420 valid_lattice_transition (ccp_prop_value_t old_val, ccp_prop_value_t new_val)
421 {
422 /* Lattice transitions must always be monotonically increasing in
423 value. */
424 if (old_val.lattice_val < new_val.lattice_val)
425 return true;
426
427 if (old_val.lattice_val != new_val.lattice_val)
428 return false;
429
430 if (!old_val.value && !new_val.value)
431 return true;
432
433 /* Now both lattice values are CONSTANT. */
434
435 /* Allow arbitrary copy changes as we might look through PHI <a_1, ...>
436 when only a single copy edge is executable. */
437 if (TREE_CODE (old_val.value) == SSA_NAME
438 && TREE_CODE (new_val.value) == SSA_NAME)
439 return true;
440
441 /* Allow transitioning from a constant to a copy. */
442 if (is_gimple_min_invariant (old_val.value)
443 && TREE_CODE (new_val.value) == SSA_NAME)
444 return true;
445
446 /* Allow transitioning from PHI <&x, not executable> == &x
447 to PHI <&x, &y> == common alignment. */
448 if (TREE_CODE (old_val.value) != INTEGER_CST
449 && TREE_CODE (new_val.value) == INTEGER_CST)
450 return true;
451
452 /* Bit-lattices have to agree in the still valid bits. */
453 if (TREE_CODE (old_val.value) == INTEGER_CST
454 && TREE_CODE (new_val.value) == INTEGER_CST)
455 return (wi::bit_and_not (wi::to_widest (old_val.value), new_val.mask)
456 == wi::bit_and_not (wi::to_widest (new_val.value), new_val.mask));
457
458 /* Otherwise constant values have to agree. */
459 if (operand_equal_p (old_val.value, new_val.value, 0))
460 return true;
461
462 /* At least the kinds and types should agree now. */
463 if (TREE_CODE (old_val.value) != TREE_CODE (new_val.value)
464 || !types_compatible_p (TREE_TYPE (old_val.value),
465 TREE_TYPE (new_val.value)))
466 return false;
467
468 /* For floats and !HONOR_NANS allow transitions from (partial) NaN
469 to non-NaN. */
470 tree type = TREE_TYPE (new_val.value);
471 if (SCALAR_FLOAT_TYPE_P (type)
472 && !HONOR_NANS (type))
473 {
474 if (REAL_VALUE_ISNAN (TREE_REAL_CST (old_val.value)))
475 return true;
476 }
477 else if (VECTOR_FLOAT_TYPE_P (type)
478 && !HONOR_NANS (type))
479 {
480 for (unsigned i = 0; i < VECTOR_CST_NELTS (old_val.value); ++i)
481 if (!REAL_VALUE_ISNAN
482 (TREE_REAL_CST (VECTOR_CST_ELT (old_val.value, i)))
483 && !operand_equal_p (VECTOR_CST_ELT (old_val.value, i),
484 VECTOR_CST_ELT (new_val.value, i), 0))
485 return false;
486 return true;
487 }
488 else if (COMPLEX_FLOAT_TYPE_P (type)
489 && !HONOR_NANS (type))
490 {
491 if (!REAL_VALUE_ISNAN (TREE_REAL_CST (TREE_REALPART (old_val.value)))
492 && !operand_equal_p (TREE_REALPART (old_val.value),
493 TREE_REALPART (new_val.value), 0))
494 return false;
495 if (!REAL_VALUE_ISNAN (TREE_REAL_CST (TREE_IMAGPART (old_val.value)))
496 && !operand_equal_p (TREE_IMAGPART (old_val.value),
497 TREE_IMAGPART (new_val.value), 0))
498 return false;
499 return true;
500 }
501 return false;
502 }
503
504 /* Set the value for variable VAR to NEW_VAL. Return true if the new
505 value is different from VAR's previous value. */
506
507 static bool
508 set_lattice_value (tree var, ccp_prop_value_t *new_val)
509 {
510 /* We can deal with old UNINITIALIZED values just fine here. */
511 ccp_prop_value_t *old_val = &const_val[SSA_NAME_VERSION (var)];
512
513 canonicalize_value (new_val);
514
515 /* We have to be careful to not go up the bitwise lattice
516 represented by the mask. Instead of dropping to VARYING
517 use the meet operator to retain a conservative value.
518 Missed optimizations like PR65851 make this necessary.
519 It also ensures we converge to a stable lattice solution. */
520 if (new_val->lattice_val == CONSTANT
521 && old_val->lattice_val == CONSTANT
522 && TREE_CODE (new_val->value) != SSA_NAME)
523 ccp_lattice_meet (new_val, old_val);
524
525 gcc_checking_assert (valid_lattice_transition (*old_val, *new_val));
526
527 /* If *OLD_VAL and NEW_VAL are the same, return false to inform the
528 caller that this was a non-transition. */
529 if (old_val->lattice_val != new_val->lattice_val
530 || (new_val->lattice_val == CONSTANT
531 && (TREE_CODE (new_val->value) != TREE_CODE (old_val->value)
532 || (TREE_CODE (new_val->value) == INTEGER_CST
533 && (new_val->mask != old_val->mask
534 || (wi::bit_and_not (wi::to_widest (old_val->value),
535 new_val->mask)
536 != wi::bit_and_not (wi::to_widest (new_val->value),
537 new_val->mask))))
538 || (TREE_CODE (new_val->value) != INTEGER_CST
539 && !operand_equal_p (new_val->value, old_val->value, 0)))))
540 {
541 /* ??? We would like to delay creation of INTEGER_CSTs from
542 partially constant values here. */
543
544 if (dump_file && (dump_flags & TDF_DETAILS))
545 {
546 dump_lattice_value (dump_file, "Lattice value changed to ", *new_val);
547 fprintf (dump_file, ". Adding SSA edges to worklist.\n");
548 }
549
550 *old_val = *new_val;
551
552 gcc_assert (new_val->lattice_val != UNINITIALIZED);
553 return true;
554 }
555
556 return false;
557 }
558
559 static ccp_prop_value_t get_value_for_expr (tree, bool);
560 static ccp_prop_value_t bit_value_binop (enum tree_code, tree, tree, tree);
561 static void bit_value_binop_1 (enum tree_code, tree, widest_int *, widest_int *,
562 tree, const widest_int &, const widest_int &,
563 tree, const widest_int &, const widest_int &);
564
565 /* Return a widest_int that can be used for bitwise simplifications
566 from VAL. */
567
568 static widest_int
569 value_to_wide_int (ccp_prop_value_t val)
570 {
571 if (val.value
572 && TREE_CODE (val.value) == INTEGER_CST)
573 return wi::to_widest (val.value);
574
575 return 0;
576 }
577
578 /* Return the value for the address expression EXPR based on alignment
579 information. */
580
581 static ccp_prop_value_t
582 get_value_from_alignment (tree expr)
583 {
584 tree type = TREE_TYPE (expr);
585 ccp_prop_value_t val;
586 unsigned HOST_WIDE_INT bitpos;
587 unsigned int align;
588
589 gcc_assert (TREE_CODE (expr) == ADDR_EXPR);
590
591 get_pointer_alignment_1 (expr, &align, &bitpos);
592 val.mask = (POINTER_TYPE_P (type) || TYPE_UNSIGNED (type)
593 ? wi::mask <widest_int> (TYPE_PRECISION (type), false)
594 : -1).and_not (align / BITS_PER_UNIT - 1);
595 val.lattice_val
596 = wi::sext (val.mask, TYPE_PRECISION (type)) == -1 ? VARYING : CONSTANT;
597 if (val.lattice_val == CONSTANT)
598 val.value = build_int_cstu (type, bitpos / BITS_PER_UNIT);
599 else
600 val.value = NULL_TREE;
601
602 return val;
603 }
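/* As a concrete example: for an address known to be 16-byte aligned with a
   byte offset of 4 (ALIGN == 128 bits, BITPOS == 32 bits), the result is
   CONSTANT with value 4 and a mask whose low four bits are clear, i.e. the
   pointer is known to be congruent to 4 modulo 16 while its remaining bits
   stay unknown.  */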
604
605 /* Return the value for the tree operand EXPR. If FOR_BITS_P is true
606 return constant bits extracted from alignment information for
607 invariant addresses. */
608
609 static ccp_prop_value_t
610 get_value_for_expr (tree expr, bool for_bits_p)
611 {
612 ccp_prop_value_t val;
613
614 if (TREE_CODE (expr) == SSA_NAME)
615 {
616 val = *get_value (expr);
617 if (for_bits_p
618 && val.lattice_val == CONSTANT
619 && TREE_CODE (val.value) == ADDR_EXPR)
620 val = get_value_from_alignment (val.value);
621 /* Fall back to a copy value. */
622 if (!for_bits_p
623 && val.lattice_val == VARYING
624 && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (expr))
625 {
626 val.lattice_val = CONSTANT;
627 val.value = expr;
628 val.mask = -1;
629 }
630 }
631 else if (is_gimple_min_invariant (expr)
632 && (!for_bits_p || TREE_CODE (expr) != ADDR_EXPR))
633 {
634 val.lattice_val = CONSTANT;
635 val.value = expr;
636 val.mask = 0;
637 canonicalize_value (&val);
638 }
639 else if (TREE_CODE (expr) == ADDR_EXPR)
640 val = get_value_from_alignment (expr);
641 else
642 {
643 val.lattice_val = VARYING;
644 val.mask = -1;
645 val.value = NULL_TREE;
646 }
647 return val;
648 }
649
650 /* Return the likely CCP lattice value for STMT.
651
652 If STMT has no operands, then return CONSTANT.
653
654 Else if undefinedness of operands of STMT causes its value to be
655 undefined, then return UNDEFINED.
656
657 Else if any operands of STMT are constants, then return CONSTANT.
658
659 Else return VARYING. */
660
661 static ccp_lattice_t
662 likely_value (gimple stmt)
663 {
664 bool has_constant_operand, has_undefined_operand, all_undefined_operands;
665 bool has_nsa_operand;
666 tree use;
667 ssa_op_iter iter;
668 unsigned i;
669
670 enum gimple_code code = gimple_code (stmt);
671
672 /* This function appears to be called only for assignments, calls,
673 conditionals, and switches, due to the logic in visit_stmt. */
674 gcc_assert (code == GIMPLE_ASSIGN
675 || code == GIMPLE_CALL
676 || code == GIMPLE_COND
677 || code == GIMPLE_SWITCH);
678
679 /* If the statement has volatile operands, it won't fold to a
680 constant value. */
681 if (gimple_has_volatile_ops (stmt))
682 return VARYING;
683
684 /* Arrive here for more complex cases. */
685 has_constant_operand = false;
686 has_undefined_operand = false;
687 all_undefined_operands = true;
688 has_nsa_operand = false;
689 FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
690 {
691 ccp_prop_value_t *val = get_value (use);
692
693 if (val->lattice_val == UNDEFINED)
694 has_undefined_operand = true;
695 else
696 all_undefined_operands = false;
697
698 if (val->lattice_val == CONSTANT)
699 has_constant_operand = true;
700
701 if (SSA_NAME_IS_DEFAULT_DEF (use)
702 || !prop_simulate_again_p (SSA_NAME_DEF_STMT (use)))
703 has_nsa_operand = true;
704 }
705
706 /* There may be constants in regular rhs operands. For calls we
707 have to ignore the lhs, fndecl and static chain; for other
708 statements, only the lhs. */
709 for (i = (is_gimple_call (stmt) ? 2 : 0) + gimple_has_lhs (stmt);
710 i < gimple_num_ops (stmt); ++i)
711 {
712 tree op = gimple_op (stmt, i);
713 if (!op || TREE_CODE (op) == SSA_NAME)
714 continue;
715 if (is_gimple_min_invariant (op))
716 has_constant_operand = true;
717 }
718
719 if (has_constant_operand)
720 all_undefined_operands = false;
721
722 if (has_undefined_operand
723 && code == GIMPLE_CALL
724 && gimple_call_internal_p (stmt))
725 switch (gimple_call_internal_fn (stmt))
726 {
727 /* These 3 builtins use the first argument just as a magic
728 way to find out a decl uid. */
729 case IFN_GOMP_SIMD_LANE:
730 case IFN_GOMP_SIMD_VF:
731 case IFN_GOMP_SIMD_LAST_LANE:
732 has_undefined_operand = false;
733 break;
734 default:
735 break;
736 }
737
738 /* If the operation combines operands like COMPLEX_EXPR make sure to
739 not mark the result UNDEFINED if only one part of the result is
740 undefined. */
741 if (has_undefined_operand && all_undefined_operands)
742 return UNDEFINED;
743 else if (code == GIMPLE_ASSIGN && has_undefined_operand)
744 {
745 switch (gimple_assign_rhs_code (stmt))
746 {
747 /* Unary operators are handled with all_undefined_operands. */
748 case PLUS_EXPR:
749 case MINUS_EXPR:
750 case POINTER_PLUS_EXPR:
751 /* Not MIN_EXPR, MAX_EXPR. One VARYING operand may be selected.
752 Not bitwise operators, one VARYING operand may specify the
753 result completely. Not logical operators for the same reason.
754 Not COMPLEX_EXPR as one VARYING operand makes the result partly
755 not UNDEFINED. Not *DIV_EXPR, comparisons and shifts because
756 the undefined operand may be promoted. */
757 return UNDEFINED;
758
759 case ADDR_EXPR:
760 /* If any part of an address is UNDEFINED, like the index
761 of an ARRAY_EXPR, then treat the result as UNDEFINED. */
762 return UNDEFINED;
763
764 default:
765 ;
766 }
767 }
768 /* If there was an UNDEFINED operand but the result may not be UNDEFINED,
769 fall back to CONSTANT. During iteration UNDEFINED may still drop
770 to CONSTANT. */
771 if (has_undefined_operand)
772 return CONSTANT;
773
774 /* We do not consider virtual operands here -- load from read-only
775 memory may have only VARYING virtual operands, but still be
776 constant. Also we can combine the stmt with definitions from
777 operands whose definitions are not simulated again. */
778 if (has_constant_operand
779 || has_nsa_operand
780 || gimple_references_memory_p (stmt))
781 return CONSTANT;
782
783 return VARYING;
784 }
785
786 /* Returns true if STMT cannot be constant. */
787
788 static bool
789 surely_varying_stmt_p (gimple stmt)
790 {
791 /* If the statement has operands that we cannot handle, it cannot be
792 constant. */
793 if (gimple_has_volatile_ops (stmt))
794 return true;
795
796 /* If it is a call that does not return a value, or a direct call
797 to a function that is neither a builtin nor declared with the
798 assume_aligned/alloc_align attributes, it is varying. */
799 if (is_gimple_call (stmt))
800 {
801 tree fndecl, fntype = gimple_call_fntype (stmt);
802 if (!gimple_call_lhs (stmt)
803 || ((fndecl = gimple_call_fndecl (stmt)) != NULL_TREE
804 && !DECL_BUILT_IN (fndecl)
805 && !lookup_attribute ("assume_aligned",
806 TYPE_ATTRIBUTES (fntype))
807 && !lookup_attribute ("alloc_align",
808 TYPE_ATTRIBUTES (fntype))))
809 return true;
810 }
811
812 /* Any other store operation is not interesting. */
813 else if (gimple_vdef (stmt))
814 return true;
815
816 /* Anything other than assignments and conditional jumps are not
817 interesting for CCP. */
818 if (gimple_code (stmt) != GIMPLE_ASSIGN
819 && gimple_code (stmt) != GIMPLE_COND
820 && gimple_code (stmt) != GIMPLE_SWITCH
821 && gimple_code (stmt) != GIMPLE_CALL)
822 return true;
823
824 return false;
825 }
826
827 /* Initialize local data structures for CCP. */
828
829 static void
830 ccp_initialize (void)
831 {
832 basic_block bb;
833
834 n_const_val = num_ssa_names;
835 const_val = XCNEWVEC (ccp_prop_value_t, n_const_val);
836
837 /* Initialize simulation flags for PHI nodes and statements. */
838 FOR_EACH_BB_FN (bb, cfun)
839 {
840 gimple_stmt_iterator i;
841
842 for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i))
843 {
844 gimple stmt = gsi_stmt (i);
845 bool is_varying;
846
847 /* If the statement is a control insn, then we always want to
848 simulate it at least once; failing to do so means its
849 outgoing edges will never get added. */
850 if (stmt_ends_bb_p (stmt))
851 is_varying = false;
852 else
853 is_varying = surely_varying_stmt_p (stmt);
854
855 if (is_varying)
856 {
857 tree def;
858 ssa_op_iter iter;
859
860 /* If the statement will not produce a constant, mark
861 all its outputs VARYING. */
862 FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
863 set_value_varying (def);
864 }
865 prop_set_simulate_again (stmt, !is_varying);
866 }
867 }
868
869 /* Now process PHI nodes. We never clear the simulate_again flag on
870 phi nodes, since we do not know which edges are executable yet,
871 except for phi nodes for virtual operands when we do not do store ccp. */
872 FOR_EACH_BB_FN (bb, cfun)
873 {
874 gphi_iterator i;
875
876 for (i = gsi_start_phis (bb); !gsi_end_p (i); gsi_next (&i))
877 {
878 gphi *phi = i.phi ();
879
880 if (virtual_operand_p (gimple_phi_result (phi)))
881 prop_set_simulate_again (phi, false);
882 else
883 prop_set_simulate_again (phi, true);
884 }
885 }
886 }
887
888 /* Debug count support. Reset the values of ssa names to
889 VARYING when the total number of ssa names analyzed is
890 beyond the specified debug count. */
891
892 static void
893 do_dbg_cnt (void)
894 {
895 unsigned i;
896 for (i = 0; i < num_ssa_names; i++)
897 {
898 if (!dbg_cnt (ccp))
899 {
900 const_val[i].lattice_val = VARYING;
901 const_val[i].mask = -1;
902 const_val[i].value = NULL_TREE;
903 }
904 }
905 }
906
907
908 /* Do final substitution of propagated values, cleanup the flowgraph and
909 free allocated storage.
910
911 Return TRUE when something was optimized. */
912
913 static bool
914 ccp_finalize (void)
915 {
916 bool something_changed;
917 unsigned i;
918
919 do_dbg_cnt ();
920
921 /* Derive alignment and misalignment information from partially
922 constant pointers in the lattice or nonzero bits from partially
923 constant integers. */
924 for (i = 1; i < num_ssa_names; ++i)
925 {
926 tree name = ssa_name (i);
927 ccp_prop_value_t *val;
928 unsigned int tem, align;
929
930 if (!name
931 || (!POINTER_TYPE_P (TREE_TYPE (name))
932 && (!INTEGRAL_TYPE_P (TREE_TYPE (name))
933 /* Don't record nonzero bits before IPA to avoid
934 using too much memory. */
935 || first_pass_instance)))
936 continue;
937
938 val = get_value (name);
939 if (val->lattice_val != CONSTANT
940 || TREE_CODE (val->value) != INTEGER_CST)
941 continue;
942
943 if (POINTER_TYPE_P (TREE_TYPE (name)))
944 {
945 /* Trailing mask bits specify the alignment, trailing value
946 bits the misalignment. */
947 tem = val->mask.to_uhwi ();
948 align = (tem & -tem);
949 if (align > 1)
950 set_ptr_info_alignment (get_ptr_info (name), align,
951 (TREE_INT_CST_LOW (val->value)
952 & (align - 1)));
953 }
954 else
955 {
956 unsigned int precision = TYPE_PRECISION (TREE_TYPE (val->value));
957 wide_int nonzero_bits = wide_int::from (val->mask, precision,
958 UNSIGNED) | val->value;
959 nonzero_bits &= get_nonzero_bits (name);
960 set_nonzero_bits (name, nonzero_bits);
961 }
962 }
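/* For the pointer case above, a lattice entry with value 0x...4 and mask
   ~15 (low four bits known) gives TEM & -TEM == 16, so we record a 16-byte
   alignment with misalignment 4 for the pointer.  */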
963
964 /* Perform substitutions based on the known constant values. */
965 something_changed = substitute_and_fold (get_constant_value,
966 ccp_fold_stmt, true);
967
968 free (const_val);
969 const_val = NULL;
970 return something_changed;
971 }
972
973
974 /* Compute the meet operator between *VAL1 and *VAL2. Store the result
975 in VAL1.
976
977 any M UNDEFINED = any
978 any M VARYING = VARYING
979 Ci M Cj = Ci if (i == j)
980 Ci M Cj = VARYING if (i != j)
981 */
982
983 static void
984 ccp_lattice_meet (ccp_prop_value_t *val1, ccp_prop_value_t *val2)
985 {
986 if (val1->lattice_val == UNDEFINED
987 /* For UNDEFINED M SSA we can't always use the SSA name because its
988 definition may not dominate the PHI node. Doing optimistic copy
989 propagation also causes a lot of gcc.dg/uninit-pred*.c FAILs. */
990 && (val2->lattice_val != CONSTANT
991 || TREE_CODE (val2->value) != SSA_NAME))
992 {
993 /* UNDEFINED M any = any */
994 *val1 = *val2;
995 }
996 else if (val2->lattice_val == UNDEFINED
997 /* See above. */
998 && (val1->lattice_val != CONSTANT
999 || TREE_CODE (val1->value) != SSA_NAME))
1000 {
1001 /* any M UNDEFINED = any
1002 Nothing to do. VAL1 already contains the value we want. */
1003 ;
1004 }
1005 else if (val1->lattice_val == VARYING
1006 || val2->lattice_val == VARYING)
1007 {
1008 /* any M VARYING = VARYING. */
1009 val1->lattice_val = VARYING;
1010 val1->mask = -1;
1011 val1->value = NULL_TREE;
1012 }
1013 else if (val1->lattice_val == CONSTANT
1014 && val2->lattice_val == CONSTANT
1015 && TREE_CODE (val1->value) == INTEGER_CST
1016 && TREE_CODE (val2->value) == INTEGER_CST)
1017 {
1018 /* Ci M Cj = Ci if (i == j)
1019 Ci M Cj = VARYING if (i != j)
1020
1021 For INTEGER_CSTs mask unequal bits. If no equal bits remain,
1022 drop to varying. */
1023 val1->mask = (val1->mask | val2->mask
1024 | (wi::to_widest (val1->value)
1025 ^ wi::to_widest (val2->value)));
1026 if (wi::sext (val1->mask, TYPE_PRECISION (TREE_TYPE (val1->value))) == -1)
1027 {
1028 val1->lattice_val = VARYING;
1029 val1->value = NULL_TREE;
1030 }
1031 }
1032 else if (val1->lattice_val == CONSTANT
1033 && val2->lattice_val == CONSTANT
1034 && operand_equal_p (val1->value, val2->value, 0))
1035 {
1036 /* Ci M Cj = Ci if (i == j)
1037 Ci M Cj = VARYING if (i != j)
1038
1039 VAL1 already contains the value we want for equivalent values. */
1040 }
1041 else if (val1->lattice_val == CONSTANT
1042 && val2->lattice_val == CONSTANT
1043 && (TREE_CODE (val1->value) == ADDR_EXPR
1044 || TREE_CODE (val2->value) == ADDR_EXPR))
1045 {
1046 /* When unequal addresses are involved, try meeting for
1047 alignment. */
1048 ccp_prop_value_t tem = *val2;
1049 if (TREE_CODE (val1->value) == ADDR_EXPR)
1050 *val1 = get_value_for_expr (val1->value, true);
1051 if (TREE_CODE (val2->value) == ADDR_EXPR)
1052 tem = get_value_for_expr (val2->value, true);
1053 ccp_lattice_meet (val1, &tem);
1054 }
1055 else
1056 {
1057 /* Any other combination is VARYING. */
1058 val1->lattice_val = VARYING;
1059 val1->mask = -1;
1060 val1->value = NULL_TREE;
1061 }
1062 }
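/* A small example of the INTEGER_CST meet above: meeting the fully known
   constants 4 (0b100) and 6 (0b110) gives mask 0 | 0 | (4 ^ 6) == 0b010 and
   keeps value 4, i.e. bit 1 becomes unknown while the other bits stay known;
   only when no known bits remain does the result drop to VARYING.  */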
1063
1064
1065 /* Loop through the PHI_NODE's parameters for BLOCK and compare their
1066 lattice values to determine PHI_NODE's lattice value. The value of a
1067 PHI node is determined by calling ccp_lattice_meet with all the arguments
1068 of the PHI node that are incoming via executable edges. */
1069
1070 static enum ssa_prop_result
1071 ccp_visit_phi_node (gphi *phi)
1072 {
1073 unsigned i;
1074 ccp_prop_value_t new_val;
1075
1076 if (dump_file && (dump_flags & TDF_DETAILS))
1077 {
1078 fprintf (dump_file, "\nVisiting PHI node: ");
1079 print_gimple_stmt (dump_file, phi, 0, dump_flags);
1080 }
1081
1082 new_val.lattice_val = UNDEFINED;
1083 new_val.value = NULL_TREE;
1084 new_val.mask = 0;
1085
1086 bool first = true;
1087 for (i = 0; i < gimple_phi_num_args (phi); i++)
1088 {
1089 /* Compute the meet operator over all the PHI arguments flowing
1090 through executable edges. */
1091 edge e = gimple_phi_arg_edge (phi, i);
1092
1093 if (dump_file && (dump_flags & TDF_DETAILS))
1094 {
1095 fprintf (dump_file,
1096 "\n Argument #%d (%d -> %d %sexecutable)\n",
1097 i, e->src->index, e->dest->index,
1098 (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
1099 }
1100
1101 /* If the incoming edge is executable, compute the meet operator for
1102 the existing value of the PHI node and the current PHI argument. */
1103 if (e->flags & EDGE_EXECUTABLE)
1104 {
1105 tree arg = gimple_phi_arg (phi, i)->def;
1106 ccp_prop_value_t arg_val = get_value_for_expr (arg, false);
1107
1108 if (first)
1109 {
1110 new_val = arg_val;
1111 first = false;
1112 }
1113 else
1114 ccp_lattice_meet (&new_val, &arg_val);
1115
1116 if (dump_file && (dump_flags & TDF_DETAILS))
1117 {
1118 fprintf (dump_file, "\t");
1119 print_generic_expr (dump_file, arg, dump_flags);
1120 dump_lattice_value (dump_file, "\tValue: ", arg_val);
1121 fprintf (dump_file, "\n");
1122 }
1123
1124 if (new_val.lattice_val == VARYING)
1125 break;
1126 }
1127 }
1128
1129 if (dump_file && (dump_flags & TDF_DETAILS))
1130 {
1131 dump_lattice_value (dump_file, "\n PHI node value: ", new_val);
1132 fprintf (dump_file, "\n\n");
1133 }
1134
1135 /* Make the transition to the new value. */
1136 if (set_lattice_value (gimple_phi_result (phi), &new_val))
1137 {
1138 if (new_val.lattice_val == VARYING)
1139 return SSA_PROP_VARYING;
1140 else
1141 return SSA_PROP_INTERESTING;
1142 }
1143 else
1144 return SSA_PROP_NOT_INTERESTING;
1145 }
1146
1147 /* Return the constant value for OP, or OP itself if there is none. */
1148
1149 static tree
1150 valueize_op (tree op)
1151 {
1152 if (TREE_CODE (op) == SSA_NAME)
1153 {
1154 tree tem = get_constant_value (op);
1155 if (tem)
1156 return tem;
1157 }
1158 return op;
1159 }
1160
1161 /* Return the constant value for OP, but signal to not follow SSA
1162 edges if the definition may be simulated again. */
1163
1164 static tree
1165 valueize_op_1 (tree op)
1166 {
1167 if (TREE_CODE (op) == SSA_NAME)
1168 {
1169 /* If the definition may be simulated again we cannot follow
1170 this SSA edge as the SSA propagator does not necessarily
1171 re-visit the use. */
1172 gimple def_stmt = SSA_NAME_DEF_STMT (op);
1173 if (!gimple_nop_p (def_stmt)
1174 && prop_simulate_again_p (def_stmt))
1175 return NULL_TREE;
1176 tree tem = get_constant_value (op);
1177 if (tem)
1178 return tem;
1179 }
1180 return op;
1181 }
1182
1183 /* CCP specific front-end to the non-destructive constant folding
1184 routines.
1185
1186 Attempt to simplify the RHS of STMT knowing that one or more
1187 operands are constants.
1188
1189 If simplification is possible, return the simplified RHS,
1190 otherwise return the original RHS or NULL_TREE. */
1191
1192 static tree
1193 ccp_fold (gimple stmt)
1194 {
1195 location_t loc = gimple_location (stmt);
1196 switch (gimple_code (stmt))
1197 {
1198 case GIMPLE_COND:
1199 {
1200 /* Handle comparison operators that can appear in GIMPLE form. */
1201 tree op0 = valueize_op (gimple_cond_lhs (stmt));
1202 tree op1 = valueize_op (gimple_cond_rhs (stmt));
1203 enum tree_code code = gimple_cond_code (stmt);
1204 return fold_binary_loc (loc, code, boolean_type_node, op0, op1);
1205 }
1206
1207 case GIMPLE_SWITCH:
1208 {
1209 /* Return the constant switch index. */
1210 return valueize_op (gimple_switch_index (as_a <gswitch *> (stmt)));
1211 }
1212
1213 case GIMPLE_ASSIGN:
1214 case GIMPLE_CALL:
1215 return gimple_fold_stmt_to_constant_1 (stmt,
1216 valueize_op, valueize_op_1);
1217
1218 default:
1219 gcc_unreachable ();
1220 }
1221 }
1222
1223 /* Apply the operation CODE in type TYPE to the value, mask pair
1224 RVAL and RMASK representing a value of type RTYPE and set
1225 the value, mask pair *VAL and *MASK to the result. */
1226
1227 static void
1228 bit_value_unop_1 (enum tree_code code, tree type,
1229 widest_int *val, widest_int *mask,
1230 tree rtype, const widest_int &rval, const widest_int &rmask)
1231 {
1232 switch (code)
1233 {
1234 case BIT_NOT_EXPR:
1235 *mask = rmask;
1236 *val = ~rval;
1237 break;
1238
1239 case NEGATE_EXPR:
1240 {
1241 widest_int temv, temm;
1242 /* Return ~rval + 1. */
1243 bit_value_unop_1 (BIT_NOT_EXPR, type, &temv, &temm, type, rval, rmask);
1244 bit_value_binop_1 (PLUS_EXPR, type, val, mask,
1245 type, temv, temm, type, 1, 0);
1246 break;
1247 }
1248
1249 CASE_CONVERT:
1250 {
1251 signop sgn;
1252
1253 /* First extend mask and value according to the original type. */
1254 sgn = TYPE_SIGN (rtype);
1255 *mask = wi::ext (rmask, TYPE_PRECISION (rtype), sgn);
1256 *val = wi::ext (rval, TYPE_PRECISION (rtype), sgn);
1257
1258 /* Then extend mask and value according to the target type. */
1259 sgn = TYPE_SIGN (type);
1260 *mask = wi::ext (*mask, TYPE_PRECISION (type), sgn);
1261 *val = wi::ext (*val, TYPE_PRECISION (type), sgn);
1262 break;
1263 }
1264
1265 default:
1266 *mask = -1;
1267 break;
1268 }
1269 }
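/* For instance, converting value 0b1000 / mask 0b0001 from a 4-bit signed
   type to an 8-bit signed type sign-extends both from bit 3, yielding value
   0b11111000 and mask 0b00000001: bits 1-7 of the result are known and only
   bit 0 remains unknown.  */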
1270
1271 /* Apply the operation CODE in type TYPE to the value, mask pairs
1272 R1VAL, R1MASK and R2VAL, R2MASK representing values of type R1TYPE
1273 and R2TYPE and set the value, mask pair *VAL and *MASK to the result. */
1274
1275 static void
1276 bit_value_binop_1 (enum tree_code code, tree type,
1277 widest_int *val, widest_int *mask,
1278 tree r1type, const widest_int &r1val,
1279 const widest_int &r1mask, tree r2type,
1280 const widest_int &r2val, const widest_int &r2mask)
1281 {
1282 signop sgn = TYPE_SIGN (type);
1283 int width = TYPE_PRECISION (type);
1284 bool swap_p = false;
1285
1286 /* Assume we'll get a constant result. Use an initial non varying
1287 value; we fall back to varying in the end if necessary. */
1288 *mask = -1;
1289
1290 switch (code)
1291 {
1292 case BIT_AND_EXPR:
1293 /* The mask is constant where there is a known not
1294 set bit, (m1 | m2) & ((v1 | m1) & (v2 | m2)) */
1295 *mask = (r1mask | r2mask) & (r1val | r1mask) & (r2val | r2mask);
1296 *val = r1val & r2val;
1297 break;
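/* Worked example for the AND rule: value 0b1010 / mask 0b0101 (bits 0 and
   2 unknown) ANDed with the constant 0b0011 gives *mask == 0b0001 and
   *val == 0b0010: bit 1 is known one, bits 2 and 3 are known zero, and only
   bit 0 stays unknown.  */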
1298
1299 case BIT_IOR_EXPR:
1300 /* The mask is constant where there is a known
1301 set bit, (m1 | m2) & ~((v1 & ~m1) | (v2 & ~m2)). */
1302 *mask = (r1mask | r2mask)
1303 .and_not (r1val.and_not (r1mask) | r2val.and_not (r2mask));
1304 *val = r1val | r2val;
1305 break;
1306
1307 case BIT_XOR_EXPR:
1308 /* m1 | m2 */
1309 *mask = r1mask | r2mask;
1310 *val = r1val ^ r2val;
1311 break;
1312
1313 case LROTATE_EXPR:
1314 case RROTATE_EXPR:
1315 if (r2mask == 0)
1316 {
1317 widest_int shift = r2val;
1318 if (shift == 0)
1319 {
1320 *mask = r1mask;
1321 *val = r1val;
1322 }
1323 else
1324 {
1325 if (wi::neg_p (shift))
1326 {
1327 shift = -shift;
1328 if (code == RROTATE_EXPR)
1329 code = LROTATE_EXPR;
1330 else
1331 code = RROTATE_EXPR;
1332 }
1333 if (code == RROTATE_EXPR)
1334 {
1335 *mask = wi::rrotate (r1mask, shift, width);
1336 *val = wi::rrotate (r1val, shift, width);
1337 }
1338 else
1339 {
1340 *mask = wi::lrotate (r1mask, shift, width);
1341 *val = wi::lrotate (r1val, shift, width);
1342 }
1343 }
1344 }
1345 break;
1346
1347 case LSHIFT_EXPR:
1348 case RSHIFT_EXPR:
1349 /* ??? We can handle partially known shift counts if we know
1350 their sign. That way we can tell that (x << (y | 8)) & 255
1351 is zero. */
1352 if (r2mask == 0)
1353 {
1354 widest_int shift = r2val;
1355 if (shift == 0)
1356 {
1357 *mask = r1mask;
1358 *val = r1val;
1359 }
1360 else
1361 {
1362 if (wi::neg_p (shift))
1363 {
1364 shift = -shift;
1365 if (code == RSHIFT_EXPR)
1366 code = LSHIFT_EXPR;
1367 else
1368 code = RSHIFT_EXPR;
1369 }
1370 if (code == RSHIFT_EXPR)
1371 {
1372 *mask = wi::rshift (wi::ext (r1mask, width, sgn), shift, sgn);
1373 *val = wi::rshift (wi::ext (r1val, width, sgn), shift, sgn);
1374 }
1375 else
1376 {
1377 *mask = wi::ext (wi::lshift (r1mask, shift), width, sgn);
1378 *val = wi::ext (wi::lshift (r1val, shift), width, sgn);
1379 }
1380 }
1381 }
1382 break;
1383
1384 case PLUS_EXPR:
1385 case POINTER_PLUS_EXPR:
1386 {
1387 /* Do the addition with unknown bits set to zero, to give carry-ins of
1388 zero wherever possible. */
1389 widest_int lo = r1val.and_not (r1mask) + r2val.and_not (r2mask);
1390 lo = wi::ext (lo, width, sgn);
1391 /* Do the addition with unknown bits set to one, to give carry-ins of
1392 one wherever possible. */
1393 widest_int hi = (r1val | r1mask) + (r2val | r2mask);
1394 hi = wi::ext (hi, width, sgn);
1395 /* Each bit in the result is known if (a) the corresponding bits in
1396 both inputs are known, and (b) the carry-in to that bit position
1397 is known. We can check condition (b) by seeing if we got the same
1398 result with minimised carries as with maximised carries. */
1399 *mask = r1mask | r2mask | (lo ^ hi);
1400 *mask = wi::ext (*mask, width, sgn);
1401 /* It shouldn't matter whether we choose lo or hi here. */
1402 *val = lo;
1403 break;
1404 }
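/* Example of the carry trick above: adding value 0b1000 / mask 0b0011 (a
   quantity in [8, 11]) to the constant 4 gives LO == 12 and HI == 15, so
   *mask == 0b0011 | (12 ^ 15) == 0b0011 and *val == 12: the two low bits
   stay unknown and the sum is known to lie in [12, 15].  */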
1405
1406 case MINUS_EXPR:
1407 {
1408 widest_int temv, temm;
1409 bit_value_unop_1 (NEGATE_EXPR, r2type, &temv, &temm,
1410 r2type, r2val, r2mask);
1411 bit_value_binop_1 (PLUS_EXPR, type, val, mask,
1412 r1type, r1val, r1mask,
1413 r2type, temv, temm);
1414 break;
1415 }
1416
1417 case MULT_EXPR:
1418 {
1419 /* Just track trailing zeros in both operands and transfer
1420 them to the other. */
1421 int r1tz = wi::ctz (r1val | r1mask);
1422 int r2tz = wi::ctz (r2val | r2mask);
1423 if (r1tz + r2tz >= width)
1424 {
1425 *mask = 0;
1426 *val = 0;
1427 }
1428 else if (r1tz + r2tz > 0)
1429 {
1430 *mask = wi::ext (wi::mask <widest_int> (r1tz + r2tz, true),
1431 width, sgn);
1432 *val = 0;
1433 }
1434 break;
1435 }
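/* For the multiplication rule: if one factor is a known multiple of 4
   (two trailing known-zero bits) and the other a known multiple of 8 (three
   trailing known-zero bits), the product gets five trailing known-zero bits,
   i.e. it is a known multiple of 32; everything above those bits is unknown.  */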
1436
1437 case EQ_EXPR:
1438 case NE_EXPR:
1439 {
1440 widest_int m = r1mask | r2mask;
1441 if (r1val.and_not (m) != r2val.and_not (m))
1442 {
1443 *mask = 0;
1444 *val = ((code == EQ_EXPR) ? 0 : 1);
1445 }
1446 else
1447 {
1448 /* We know the result of a comparison is always one or zero. */
1449 *mask = 1;
1450 *val = 0;
1451 }
1452 break;
1453 }
1454
1455 case GE_EXPR:
1456 case GT_EXPR:
1457 swap_p = true;
1458 code = swap_tree_comparison (code);
1459 /* Fall through. */
1460 case LT_EXPR:
1461 case LE_EXPR:
1462 {
1463 int minmax, maxmin;
1464
1465 const widest_int &o1val = swap_p ? r2val : r1val;
1466 const widest_int &o1mask = swap_p ? r2mask : r1mask;
1467 const widest_int &o2val = swap_p ? r1val : r2val;
1468 const widest_int &o2mask = swap_p ? r1mask : r2mask;
1469
1470 /* If the most significant bits are not known we know nothing. */
1471 if (wi::neg_p (o1mask) || wi::neg_p (o2mask))
1472 break;
1473
1474 /* For comparisons the signedness is in the comparison operands. */
1475 sgn = TYPE_SIGN (r1type);
1476
1477 /* If we know the most significant bits we know the value
1478 ranges by means of treating varying bits as zero
1479 or one. Do a cross comparison of the max/min pairs. */
1480 maxmin = wi::cmp (o1val | o1mask, o2val.and_not (o2mask), sgn);
1481 minmax = wi::cmp (o1val.and_not (o1mask), o2val | o2mask, sgn);
1482 if (maxmin < 0) /* o1 is less than o2. */
1483 {
1484 *mask = 0;
1485 *val = 1;
1486 }
1487 else if (minmax > 0) /* o1 is not less or equal to o2. */
1488 {
1489 *mask = 0;
1490 *val = 0;
1491 }
1492 else if (maxmin == minmax) /* o1 and o2 are equal. */
1493 {
1494 /* This probably should never happen as we'd have
1495 folded the thing during fully constant value folding. */
1496 *mask = 0;
1497 *val = (code == LE_EXPR ? 1 : 0);
1498 }
1499 else
1500 {
1501 /* We know the result of a comparison is always one or zero. */
1502 *mask = 1;
1503 *val = 0;
1504 }
1505 break;
1506 }
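/* For instance, comparing value 0b0100 / mask 0b0011 (a quantity in [4, 7])
   against the constant 8 with LT_EXPR: MAXMIN compares 7 with 8 and is
   negative, so the comparison is the known constant 1 (true) with mask 0.  */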
1507
1508 default:;
1509 }
1510 }
1511
1512 /* Return the propagation value when applying the operation CODE to
1513 the value RHS yielding type TYPE. */
1514
1515 static ccp_prop_value_t
1516 bit_value_unop (enum tree_code code, tree type, tree rhs)
1517 {
1518 ccp_prop_value_t rval = get_value_for_expr (rhs, true);
1519 widest_int value, mask;
1520 ccp_prop_value_t val;
1521
1522 if (rval.lattice_val == UNDEFINED)
1523 return rval;
1524
1525 gcc_assert ((rval.lattice_val == CONSTANT
1526 && TREE_CODE (rval.value) == INTEGER_CST)
1527 || wi::sext (rval.mask, TYPE_PRECISION (TREE_TYPE (rhs))) == -1);
1528 bit_value_unop_1 (code, type, &value, &mask,
1529 TREE_TYPE (rhs), value_to_wide_int (rval), rval.mask);
1530 if (wi::sext (mask, TYPE_PRECISION (type)) != -1)
1531 {
1532 val.lattice_val = CONSTANT;
1533 val.mask = mask;
1534 /* ??? Delay building trees here. */
1535 val.value = wide_int_to_tree (type, value);
1536 }
1537 else
1538 {
1539 val.lattice_val = VARYING;
1540 val.value = NULL_TREE;
1541 val.mask = -1;
1542 }
1543 return val;
1544 }
1545
1546 /* Return the propagation value when applying the operation CODE to
1547 the values RHS1 and RHS2 yielding type TYPE. */
1548
1549 static ccp_prop_value_t
1550 bit_value_binop (enum tree_code code, tree type, tree rhs1, tree rhs2)
1551 {
1552 ccp_prop_value_t r1val = get_value_for_expr (rhs1, true);
1553 ccp_prop_value_t r2val = get_value_for_expr (rhs2, true);
1554 widest_int value, mask;
1555 ccp_prop_value_t val;
1556
1557 if (r1val.lattice_val == UNDEFINED
1558 || r2val.lattice_val == UNDEFINED)
1559 {
1560 val.lattice_val = VARYING;
1561 val.value = NULL_TREE;
1562 val.mask = -1;
1563 return val;
1564 }
1565
1566 gcc_assert ((r1val.lattice_val == CONSTANT
1567 && TREE_CODE (r1val.value) == INTEGER_CST)
1568 || wi::sext (r1val.mask,
1569 TYPE_PRECISION (TREE_TYPE (rhs1))) == -1);
1570 gcc_assert ((r2val.lattice_val == CONSTANT
1571 && TREE_CODE (r2val.value) == INTEGER_CST)
1572 || wi::sext (r2val.mask,
1573 TYPE_PRECISION (TREE_TYPE (rhs2))) == -1);
1574 bit_value_binop_1 (code, type, &value, &mask,
1575 TREE_TYPE (rhs1), value_to_wide_int (r1val), r1val.mask,
1576 TREE_TYPE (rhs2), value_to_wide_int (r2val), r2val.mask);
1577 if (wi::sext (mask, TYPE_PRECISION (type)) != -1)
1578 {
1579 val.lattice_val = CONSTANT;
1580 val.mask = mask;
1581 /* ??? Delay building trees here. */
1582 val.value = wide_int_to_tree (type, value);
1583 }
1584 else
1585 {
1586 val.lattice_val = VARYING;
1587 val.value = NULL_TREE;
1588 val.mask = -1;
1589 }
1590 return val;
1591 }
1592
1593 /* Return the propagation value for __builtin_assume_aligned
1594 and functions with assume_aligned or alloc_aligned attribute.
1595 For __builtin_assume_aligned, ATTR is NULL_TREE,
1596 for assume_aligned attribute ATTR is non-NULL and ALLOC_ALIGNED
1597 is false, for alloc_aligned attribute ATTR is non-NULL and
1598 ALLOC_ALIGNED is true. */
1599
1600 static ccp_prop_value_t
1601 bit_value_assume_aligned (gimple stmt, tree attr, ccp_prop_value_t ptrval,
1602 bool alloc_aligned)
1603 {
1604 tree align, misalign = NULL_TREE, type;
1605 unsigned HOST_WIDE_INT aligni, misaligni = 0;
1606 ccp_prop_value_t alignval;
1607 widest_int value, mask;
1608 ccp_prop_value_t val;
1609
1610 if (attr == NULL_TREE)
1611 {
1612 tree ptr = gimple_call_arg (stmt, 0);
1613 type = TREE_TYPE (ptr);
1614 ptrval = get_value_for_expr (ptr, true);
1615 }
1616 else
1617 {
1618 tree lhs = gimple_call_lhs (stmt);
1619 type = TREE_TYPE (lhs);
1620 }
1621
1622 if (ptrval.lattice_val == UNDEFINED)
1623 return ptrval;
1624 gcc_assert ((ptrval.lattice_val == CONSTANT
1625 && TREE_CODE (ptrval.value) == INTEGER_CST)
1626 || wi::sext (ptrval.mask, TYPE_PRECISION (type)) == -1);
1627 if (attr == NULL_TREE)
1628 {
1629 /* Get aligni and misaligni from __builtin_assume_aligned. */
1630 align = gimple_call_arg (stmt, 1);
1631 if (!tree_fits_uhwi_p (align))
1632 return ptrval;
1633 aligni = tree_to_uhwi (align);
1634 if (gimple_call_num_args (stmt) > 2)
1635 {
1636 misalign = gimple_call_arg (stmt, 2);
1637 if (!tree_fits_uhwi_p (misalign))
1638 return ptrval;
1639 misaligni = tree_to_uhwi (misalign);
1640 }
1641 }
1642 else
1643 {
1644 /* Get aligni and misaligni from assume_aligned or
1645 alloc_align attributes. */
1646 if (TREE_VALUE (attr) == NULL_TREE)
1647 return ptrval;
1648 attr = TREE_VALUE (attr);
1649 align = TREE_VALUE (attr);
1650 if (!tree_fits_uhwi_p (align))
1651 return ptrval;
1652 aligni = tree_to_uhwi (align);
1653 if (alloc_aligned)
1654 {
1655 if (aligni == 0 || aligni > gimple_call_num_args (stmt))
1656 return ptrval;
1657 align = gimple_call_arg (stmt, aligni - 1);
1658 if (!tree_fits_uhwi_p (align))
1659 return ptrval;
1660 aligni = tree_to_uhwi (align);
1661 }
1662 else if (TREE_CHAIN (attr) && TREE_VALUE (TREE_CHAIN (attr)))
1663 {
1664 misalign = TREE_VALUE (TREE_CHAIN (attr));
1665 if (!tree_fits_uhwi_p (misalign))
1666 return ptrval;
1667 misaligni = tree_to_uhwi (misalign);
1668 }
1669 }
1670 if (aligni <= 1 || (aligni & (aligni - 1)) != 0 || misaligni >= aligni)
1671 return ptrval;
1672
1673 align = build_int_cst_type (type, -aligni);
1674 alignval = get_value_for_expr (align, true);
1675 bit_value_binop_1 (BIT_AND_EXPR, type, &value, &mask,
1676 type, value_to_wide_int (ptrval), ptrval.mask,
1677 type, value_to_wide_int (alignval), alignval.mask);
1678 if (wi::sext (mask, TYPE_PRECISION (type)) != -1)
1679 {
1680 val.lattice_val = CONSTANT;
1681 val.mask = mask;
1682 gcc_assert ((mask.to_uhwi () & (aligni - 1)) == 0);
1683 gcc_assert ((value.to_uhwi () & (aligni - 1)) == 0);
1684 value |= misaligni;
1685 /* ??? Delay building trees here. */
1686 val.value = wide_int_to_tree (type, value);
1687 }
1688 else
1689 {
1690 val.lattice_val = VARYING;
1691 val.value = NULL_TREE;
1692 val.mask = -1;
1693 }
1694 return val;
1695 }
1696
1697 /* Evaluate statement STMT.
1698 Valid only for assignments, calls, conditionals, and switches. */
1699
1700 static ccp_prop_value_t
1701 evaluate_stmt (gimple stmt)
1702 {
1703 ccp_prop_value_t val;
1704 tree simplified = NULL_TREE;
1705 ccp_lattice_t likelyvalue = likely_value (stmt);
1706 bool is_constant = false;
1707 unsigned int align;
1708
1709 if (dump_file && (dump_flags & TDF_DETAILS))
1710 {
1711 fprintf (dump_file, "which is likely ");
1712 switch (likelyvalue)
1713 {
1714 case CONSTANT:
1715 fprintf (dump_file, "CONSTANT");
1716 break;
1717 case UNDEFINED:
1718 fprintf (dump_file, "UNDEFINED");
1719 break;
1720 case VARYING:
1721 fprintf (dump_file, "VARYING");
1722 break;
1723 default:;
1724 }
1725 fprintf (dump_file, "\n");
1726 }
1727
1728 /* If the statement is likely to have a CONSTANT result, then try
1729 to fold the statement to determine the constant value. */
1730 /* FIXME. This is the only place that we call ccp_fold.
1731 Since likely_value never returns CONSTANT for calls, we will
1732 not attempt to fold them, including builtins that may profit. */
1733 if (likelyvalue == CONSTANT)
1734 {
1735 fold_defer_overflow_warnings ();
1736 simplified = ccp_fold (stmt);
1737 if (simplified && TREE_CODE (simplified) == SSA_NAME)
1738 {
1739 val = *get_value (simplified);
1740 if (val.lattice_val != VARYING)
1741 {
1742 fold_undefer_overflow_warnings (true, stmt, 0);
1743 return val;
1744 }
1745 }
1746 is_constant = simplified && is_gimple_min_invariant (simplified);
1747 fold_undefer_overflow_warnings (is_constant, stmt, 0);
1748 if (is_constant)
1749 {
1750 /* The statement produced a constant value. */
1751 val.lattice_val = CONSTANT;
1752 val.value = simplified;
1753 val.mask = 0;
1754 return val;
1755 }
1756 }
1757 /* If the statement is likely to have a VARYING result, then do not
1758 bother folding the statement. */
1759 else if (likelyvalue == VARYING)
1760 {
1761 enum gimple_code code = gimple_code (stmt);
1762 if (code == GIMPLE_ASSIGN)
1763 {
1764 enum tree_code subcode = gimple_assign_rhs_code (stmt);
1765
1766 /* Other cases cannot satisfy is_gimple_min_invariant
1767 without folding. */
1768 if (get_gimple_rhs_class (subcode) == GIMPLE_SINGLE_RHS)
1769 simplified = gimple_assign_rhs1 (stmt);
1770 }
1771 else if (code == GIMPLE_SWITCH)
1772 simplified = gimple_switch_index (as_a <gswitch *> (stmt));
1773 else
1774 /* These cannot satisfy is_gimple_min_invariant without folding. */
1775 gcc_assert (code == GIMPLE_CALL || code == GIMPLE_COND);
1776 is_constant = simplified && is_gimple_min_invariant (simplified);
1777 if (is_constant)
1778 {
1779 /* The statement produced a constant value. */
1780 val.lattice_val = CONSTANT;
1781 val.value = simplified;
1782 val.mask = 0;
1783 }
1784 }
1785 /* If the statement result is likely UNDEFINED, make it so. */
1786 else if (likelyvalue == UNDEFINED)
1787 {
1788 val.lattice_val = UNDEFINED;
1789 val.value = NULL_TREE;
1790 val.mask = 0;
1791 return val;
1792 }
1793
1794 /* Resort to simplification for bitwise tracking. */
1795 if (flag_tree_bit_ccp
1796 && (likelyvalue == CONSTANT || is_gimple_call (stmt)
1797 || (gimple_assign_single_p (stmt)
1798 && gimple_assign_rhs_code (stmt) == ADDR_EXPR))
1799 && !is_constant)
1800 {
1801 enum gimple_code code = gimple_code (stmt);
1802 val.lattice_val = VARYING;
1803 val.value = NULL_TREE;
1804 val.mask = -1;
1805 if (code == GIMPLE_ASSIGN)
1806 {
1807 enum tree_code subcode = gimple_assign_rhs_code (stmt);
1808 tree rhs1 = gimple_assign_rhs1 (stmt);
1809 tree lhs = gimple_assign_lhs (stmt);
1810 if ((INTEGRAL_TYPE_P (TREE_TYPE (lhs))
1811 || POINTER_TYPE_P (TREE_TYPE (lhs)))
1812 && (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
1813 || POINTER_TYPE_P (TREE_TYPE (rhs1))))
1814 switch (get_gimple_rhs_class (subcode))
1815 {
1816 case GIMPLE_SINGLE_RHS:
1817 val = get_value_for_expr (rhs1, true);
1818 break;
1819
1820 case GIMPLE_UNARY_RHS:
1821 val = bit_value_unop (subcode, TREE_TYPE (lhs), rhs1);
1822 break;
1823
1824 case GIMPLE_BINARY_RHS:
1825 val = bit_value_binop (subcode, TREE_TYPE (lhs), rhs1,
1826 gimple_assign_rhs2 (stmt));
1827 break;
1828
1829 default:;
1830 }
1831 }
1832 else if (code == GIMPLE_COND)
1833 {
1834 enum tree_code code = gimple_cond_code (stmt);
1835 tree rhs1 = gimple_cond_lhs (stmt);
1836 tree rhs2 = gimple_cond_rhs (stmt);
1837 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
1838 || POINTER_TYPE_P (TREE_TYPE (rhs1)))
1839 val = bit_value_binop (code, TREE_TYPE (rhs1), rhs1, rhs2);
1840 }
1841 else if (gimple_call_builtin_p (stmt, BUILT_IN_NORMAL))
1842 {
1843 tree fndecl = gimple_call_fndecl (stmt);
1844 switch (DECL_FUNCTION_CODE (fndecl))
1845 {
1846 case BUILT_IN_MALLOC:
1847 case BUILT_IN_REALLOC:
1848 case BUILT_IN_CALLOC:
1849 case BUILT_IN_STRDUP:
1850 case BUILT_IN_STRNDUP:
1851 val.lattice_val = CONSTANT;
1852 val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0);
1853 val.mask = ~((HOST_WIDE_INT) MALLOC_ABI_ALIGNMENT
1854 / BITS_PER_UNIT - 1);
1855 break;
1856
1857 case BUILT_IN_ALLOCA:
1858 case BUILT_IN_ALLOCA_WITH_ALIGN:
1859 align = (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_ALLOCA_WITH_ALIGN
1860 ? TREE_INT_CST_LOW (gimple_call_arg (stmt, 1))
1861 : BIGGEST_ALIGNMENT);
1862 val.lattice_val = CONSTANT;
1863 val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0);
1864 val.mask = ~((HOST_WIDE_INT) align / BITS_PER_UNIT - 1);
1865 break;
1866
1867 /* These builtins return their first argument, unmodified. */
1868 case BUILT_IN_MEMCPY:
1869 case BUILT_IN_MEMMOVE:
1870 case BUILT_IN_MEMSET:
1871 case BUILT_IN_STRCPY:
1872 case BUILT_IN_STRNCPY:
1873 case BUILT_IN_MEMCPY_CHK:
1874 case BUILT_IN_MEMMOVE_CHK:
1875 case BUILT_IN_MEMSET_CHK:
1876 case BUILT_IN_STRCPY_CHK:
1877 case BUILT_IN_STRNCPY_CHK:
1878 val = get_value_for_expr (gimple_call_arg (stmt, 0), true);
1879 break;
1880
1881 case BUILT_IN_ASSUME_ALIGNED:
1882 val = bit_value_assume_aligned (stmt, NULL_TREE, val, false);
1883 break;
1884
1885 case BUILT_IN_ALIGNED_ALLOC:
1886 {
1887 tree align = get_constant_value (gimple_call_arg (stmt, 0));
1888 if (align
1889 && tree_fits_uhwi_p (align))
1890 {
1891 unsigned HOST_WIDE_INT aligni = tree_to_uhwi (align);
1892 if (aligni > 1
1893 		     /* align must be a power of two.  */
1894 && (aligni & (aligni - 1)) == 0)
1895 {
1896 val.lattice_val = CONSTANT;
1897 val.value = build_int_cst (ptr_type_node, 0);
1898 val.mask = -aligni;
1899 }
1900 }
1901 break;
1902 }
1903
1904 default:;
1905 }
1906 }
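      /* Also honor assume_aligned and alloc_align attributes on the called
	 function's type; both constrain the alignment of the returned
	 pointer in the same way as the builtins handled above.  */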
1907 if (is_gimple_call (stmt) && gimple_call_lhs (stmt))
1908 {
1909 tree fntype = gimple_call_fntype (stmt);
1910 if (fntype)
1911 {
1912 tree attrs = lookup_attribute ("assume_aligned",
1913 TYPE_ATTRIBUTES (fntype));
1914 if (attrs)
1915 val = bit_value_assume_aligned (stmt, attrs, val, false);
1916 attrs = lookup_attribute ("alloc_align",
1917 TYPE_ATTRIBUTES (fntype));
1918 if (attrs)
1919 val = bit_value_assume_aligned (stmt, attrs, val, true);
1920 }
1921 }
1922 is_constant = (val.lattice_val == CONSTANT);
1923 }
1924
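  /* If earlier analyses recorded nonzero-bits information for the LHS
     (see get_nonzero_bits), combine it into the lattice value: any bit
     known to be zero there is known to be zero here as well.  */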
1925 if (flag_tree_bit_ccp
1926 && ((is_constant && TREE_CODE (val.value) == INTEGER_CST)
1927 || !is_constant)
1928 && gimple_get_lhs (stmt)
1929 && TREE_CODE (gimple_get_lhs (stmt)) == SSA_NAME)
1930 {
1931 tree lhs = gimple_get_lhs (stmt);
1932 wide_int nonzero_bits = get_nonzero_bits (lhs);
1933 if (nonzero_bits != -1)
1934 {
1935 if (!is_constant)
1936 {
1937 val.lattice_val = CONSTANT;
1938 val.value = build_zero_cst (TREE_TYPE (lhs));
1939 val.mask = extend_mask (nonzero_bits);
1940 is_constant = true;
1941 }
1942 else
1943 {
1944 if (wi::bit_and_not (val.value, nonzero_bits) != 0)
1945 val.value = wide_int_to_tree (TREE_TYPE (lhs),
1946 nonzero_bits & val.value);
1947 if (nonzero_bits == 0)
1948 val.mask = 0;
1949 else
1950 val.mask = val.mask & extend_mask (nonzero_bits);
1951 }
1952 }
1953 }
1954
1955 /* The statement produced a nonconstant value. */
1956 if (!is_constant)
1957 {
1958 /* The statement produced a copy. */
1959 if (simplified && TREE_CODE (simplified) == SSA_NAME
1960 && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (simplified))
1961 {
1962 val.lattice_val = CONSTANT;
1963 val.value = simplified;
1964 val.mask = -1;
1965 }
1966 /* The statement is VARYING. */
1967 else
1968 {
1969 val.lattice_val = VARYING;
1970 val.value = NULL_TREE;
1971 val.mask = -1;
1972 }
1973 }
1974
1975 return val;
1976 }
1977
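/* Hash table of gimple statements.  Used below to remember which PHI nodes
   have already been visited while inserting clobbers for a stack-save
   value.  */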
1978 typedef hash_table<pointer_hash<gimple_statement_base> > gimple_htab;
1979
1980 /* Given a BUILT_IN_STACK_SAVE value SAVED_VAL, insert a clobber of VAR before
1981 each matching BUILT_IN_STACK_RESTORE. Mark visited phis in VISITED. */
1982
1983 static void
1984 insert_clobber_before_stack_restore (tree saved_val, tree var,
1985 gimple_htab **visited)
1986 {
1987 gimple stmt;
1988 gassign *clobber_stmt;
1989 tree clobber;
1990 imm_use_iterator iter;
1991 gimple_stmt_iterator i;
1992 gimple *slot;
1993
1994 FOR_EACH_IMM_USE_STMT (stmt, iter, saved_val)
1995 if (gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
1996 {
1997 clobber = build_constructor (TREE_TYPE (var),
1998 NULL);
1999 TREE_THIS_VOLATILE (clobber) = 1;
2000 clobber_stmt = gimple_build_assign (var, clobber);
2001
2002 i = gsi_for_stmt (stmt);
2003 gsi_insert_before (&i, clobber_stmt, GSI_SAME_STMT);
2004 }
2005 else if (gimple_code (stmt) == GIMPLE_PHI)
2006 {
2007 if (!*visited)
2008 *visited = new gimple_htab (10);
2009
2010 slot = (*visited)->find_slot (stmt, INSERT);
2011 if (*slot != NULL)
2012 continue;
2013
2014 *slot = stmt;
2015 insert_clobber_before_stack_restore (gimple_phi_result (stmt), var,
2016 visited);
2017 }
2018 else if (gimple_assign_ssa_name_copy_p (stmt))
2019 insert_clobber_before_stack_restore (gimple_assign_lhs (stmt), var,
2020 visited);
2021 else if (chkp_gimple_call_builtin_p (stmt, BUILT_IN_CHKP_BNDRET))
2022 continue;
2023 else
2024 gcc_assert (is_gimple_debug (stmt));
2025 }
2026
2027 /* Advance the iterator to the previous non-debug gimple statement in the same
2028 or dominating basic block. */
2029
2030 static inline void
2031 gsi_prev_dom_bb_nondebug (gimple_stmt_iterator *i)
2032 {
2033 basic_block dom;
2034
2035 gsi_prev_nondebug (i);
2036 while (gsi_end_p (*i))
2037 {
2038 dom = get_immediate_dominator (CDI_DOMINATORS, i->bb);
2039 if (dom == NULL || dom == ENTRY_BLOCK_PTR_FOR_FN (cfun))
2040 return;
2041
2042 *i = gsi_last_bb (dom);
2043 }
2044 }
2045
2046 /* Find a BUILT_IN_STACK_SAVE dominating gsi_stmt (I), and insert
2047 a clobber of VAR before each matching BUILT_IN_STACK_RESTORE.
2048
2049    It is possible that BUILT_IN_STACK_SAVE cannot be found in a dominator when a
2050 previous pass (such as DOM) duplicated it along multiple paths to a BB. In
2051 that case the function gives up without inserting the clobbers. */
2052
2053 static void
2054 insert_clobbers_for_var (gimple_stmt_iterator i, tree var)
2055 {
2056 gimple stmt;
2057 tree saved_val;
2058 gimple_htab *visited = NULL;
2059
2060 for (; !gsi_end_p (i); gsi_prev_dom_bb_nondebug (&i))
2061 {
2062 stmt = gsi_stmt (i);
2063
2064 if (!gimple_call_builtin_p (stmt, BUILT_IN_STACK_SAVE))
2065 continue;
2066
2067 saved_val = gimple_call_lhs (stmt);
2068 if (saved_val == NULL_TREE)
2069 continue;
2070
2071 insert_clobber_before_stack_restore (saved_val, var, &visited);
2072 break;
2073 }
2074
2075 delete visited;
2076 }
2077
2078 /* Detect a __builtin_alloca_with_align with a constant size argument.  If one
2079    is found, declare a fixed-size array with that size and return its address;
2080    otherwise return NULL_TREE.  */
2081
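/* As a hypothetical example of the transformation below, a call such as
     ptr_1 = __builtin_alloca_with_align (16, A)
   whose size argument has the constant value 16 is replaced by the address
   of a 16-byte local array declared with alignment A, provided 16 is below
   the large-stack-frame threshold.  */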
2082 static tree
2083 fold_builtin_alloca_with_align (gimple stmt)
2084 {
2085 unsigned HOST_WIDE_INT size, threshold, n_elem;
2086 tree lhs, arg, block, var, elem_type, array_type;
2087
2088 /* Get lhs. */
2089 lhs = gimple_call_lhs (stmt);
2090 if (lhs == NULL_TREE)
2091 return NULL_TREE;
2092
2093 /* Detect constant argument. */
2094 arg = get_constant_value (gimple_call_arg (stmt, 0));
2095 if (arg == NULL_TREE
2096 || TREE_CODE (arg) != INTEGER_CST
2097 || !tree_fits_uhwi_p (arg))
2098 return NULL_TREE;
2099
2100 size = tree_to_uhwi (arg);
2101
2102 /* Heuristic: don't fold large allocas. */
2103 threshold = (unsigned HOST_WIDE_INT)PARAM_VALUE (PARAM_LARGE_STACK_FRAME);
2104   /* If the alloca is located at function entry, it has the same lifetime
2105      as a declared array, so we allow a larger size.  */
2106 block = gimple_block (stmt);
2107 if (!(cfun->after_inlining
2108 && TREE_CODE (BLOCK_SUPERCONTEXT (block)) == FUNCTION_DECL))
2109 threshold /= 10;
2110 if (size > threshold)
2111 return NULL_TREE;
2112
2113 /* Declare array. */
2114 elem_type = build_nonstandard_integer_type (BITS_PER_UNIT, 1);
2115 n_elem = size * 8 / BITS_PER_UNIT;
2116 array_type = build_array_type_nelts (elem_type, n_elem);
2117 var = create_tmp_var (array_type);
2118 DECL_ALIGN (var) = TREE_INT_CST_LOW (gimple_call_arg (stmt, 1));
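  /* If the alloca result carried points-to information it must describe a
     singleton, namely the allocated storage; give the replacement variable
     the same points-to UID so alias information stays precise.  */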
2119 {
2120 struct ptr_info_def *pi = SSA_NAME_PTR_INFO (lhs);
2121 if (pi != NULL && !pi->pt.anything)
2122 {
2123 bool singleton_p;
2124 unsigned uid;
2125 singleton_p = pt_solution_singleton_p (&pi->pt, &uid);
2126 gcc_assert (singleton_p);
2127 SET_DECL_PT_UID (var, uid);
2128 }
2129 }
2130
2131 /* Fold alloca to the address of the array. */
2132 return fold_convert (TREE_TYPE (lhs), build_fold_addr_expr (var));
2133 }
2134
2135 /* Fold the stmt at *GSI with CCP specific information that propagating
2136 and regular folding does not catch. */
2137
2138 static bool
2139 ccp_fold_stmt (gimple_stmt_iterator *gsi)
2140 {
2141 gimple stmt = gsi_stmt (*gsi);
2142
2143 switch (gimple_code (stmt))
2144 {
2145 case GIMPLE_COND:
2146 {
2147 gcond *cond_stmt = as_a <gcond *> (stmt);
2148 ccp_prop_value_t val;
2149 /* Statement evaluation will handle type mismatches in constants
2150 more gracefully than the final propagation. This allows us to
2151 fold more conditionals here. */
2152 val = evaluate_stmt (stmt);
2153 if (val.lattice_val != CONSTANT
2154 || val.mask != 0)
2155 return false;
2156
2157 if (dump_file)
2158 {
2159 fprintf (dump_file, "Folding predicate ");
2160 print_gimple_expr (dump_file, stmt, 0, 0);
2161 fprintf (dump_file, " to ");
2162 print_generic_expr (dump_file, val.value, 0);
2163 fprintf (dump_file, "\n");
2164 }
2165
2166 if (integer_zerop (val.value))
2167 gimple_cond_make_false (cond_stmt);
2168 else
2169 gimple_cond_make_true (cond_stmt);
2170
2171 return true;
2172 }
2173
2174 case GIMPLE_CALL:
2175 {
2176 tree lhs = gimple_call_lhs (stmt);
2177 int flags = gimple_call_flags (stmt);
2178 tree val;
2179 tree argt;
2180 bool changed = false;
2181 unsigned i;
2182
2183 /* If the call was folded into a constant make sure it goes
2184 away even if we cannot propagate into all uses because of
2185 type issues. */
2186 if (lhs
2187 && TREE_CODE (lhs) == SSA_NAME
2188 && (val = get_constant_value (lhs))
2189 /* Don't optimize away calls that have side-effects. */
2190 && (flags & (ECF_CONST|ECF_PURE)) != 0
2191 && (flags & ECF_LOOPING_CONST_OR_PURE) == 0)
2192 {
2193 tree new_rhs = unshare_expr (val);
2194 bool res;
2195 if (!useless_type_conversion_p (TREE_TYPE (lhs),
2196 TREE_TYPE (new_rhs)))
2197 new_rhs = fold_convert (TREE_TYPE (lhs), new_rhs);
2198 res = update_call_from_tree (gsi, new_rhs);
2199 gcc_assert (res);
2200 return true;
2201 }
2202
2203 /* Internal calls provide no argument types, so the extra laxity
2204 for normal calls does not apply. */
2205 if (gimple_call_internal_p (stmt))
2206 return false;
2207
2208 	/* The heuristic of fold_builtin_alloca_with_align differs before and
2209 	   after inlining, so we don't require the arg to have been changed
2210 	   into a constant here; it only needs to have a constant value.  */
2211 if (gimple_call_builtin_p (stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
2212 {
2213 tree new_rhs = fold_builtin_alloca_with_align (stmt);
2214 if (new_rhs)
2215 {
2216 bool res = update_call_from_tree (gsi, new_rhs);
2217 tree var = TREE_OPERAND (TREE_OPERAND (new_rhs, 0),0);
2218 gcc_assert (res);
2219 insert_clobbers_for_var (*gsi, var);
2220 return true;
2221 }
2222 }
2223
2224 /* Propagate into the call arguments. Compared to replace_uses_in
2225 this can use the argument slot types for type verification
2226 instead of the current argument type. We also can safely
2227 drop qualifiers here as we are dealing with constants anyway. */
2228 argt = TYPE_ARG_TYPES (gimple_call_fntype (stmt));
2229 for (i = 0; i < gimple_call_num_args (stmt) && argt;
2230 ++i, argt = TREE_CHAIN (argt))
2231 {
2232 tree arg = gimple_call_arg (stmt, i);
2233 if (TREE_CODE (arg) == SSA_NAME
2234 && (val = get_constant_value (arg))
2235 && useless_type_conversion_p
2236 (TYPE_MAIN_VARIANT (TREE_VALUE (argt)),
2237 TYPE_MAIN_VARIANT (TREE_TYPE (val))))
2238 {
2239 gimple_call_set_arg (stmt, i, unshare_expr (val));
2240 changed = true;
2241 }
2242 }
2243
2244 return changed;
2245 }
2246
2247 case GIMPLE_ASSIGN:
2248 {
2249 tree lhs = gimple_assign_lhs (stmt);
2250 tree val;
2251
2252 	/* If we have a load that turned out to be constant, replace it,
2253 	   as we cannot propagate into all uses in all cases.  */
2254 if (gimple_assign_single_p (stmt)
2255 && TREE_CODE (lhs) == SSA_NAME
2256 && (val = get_constant_value (lhs)))
2257 {
2258 tree rhs = unshare_expr (val);
2259 if (!useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (rhs)))
2260 rhs = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (lhs), rhs);
2261 gimple_assign_set_rhs_from_tree (gsi, rhs);
2262 return true;
2263 }
2264
2265 return false;
2266 }
2267
2268 default:
2269 return false;
2270 }
2271 }
2272
2273 /* Visit the assignment statement STMT. Set the value of its LHS to the
2274 value computed by the RHS and store LHS in *OUTPUT_P. If STMT
2275 creates virtual definitions, set the value of each new name to that
2276 of the RHS (if we can derive a constant out of the RHS).
2277 Value-returning call statements also perform an assignment, and
2278 are handled here. */
2279
2280 static enum ssa_prop_result
2281 visit_assignment (gimple stmt, tree *output_p)
2282 {
2283 ccp_prop_value_t val;
2284 enum ssa_prop_result retval = SSA_PROP_NOT_INTERESTING;
2285
2286 tree lhs = gimple_get_lhs (stmt);
2287 if (TREE_CODE (lhs) == SSA_NAME)
2288 {
2289 /* Evaluate the statement, which could be
2290 either a GIMPLE_ASSIGN or a GIMPLE_CALL. */
2291 val = evaluate_stmt (stmt);
2292
2293 /* If STMT is an assignment to an SSA_NAME, we only have one
2294 value to set. */
2295 if (set_lattice_value (lhs, &val))
2296 {
2297 *output_p = lhs;
2298 if (val.lattice_val == VARYING)
2299 retval = SSA_PROP_VARYING;
2300 else
2301 retval = SSA_PROP_INTERESTING;
2302 }
2303 }
2304
2305 return retval;
2306 }
2307
2308
2309 /* Visit the conditional statement STMT. Return SSA_PROP_INTERESTING
2310 if it can determine which edge will be taken. Otherwise, return
2311 SSA_PROP_VARYING. */
2312
2313 static enum ssa_prop_result
2314 visit_cond_stmt (gimple stmt, edge *taken_edge_p)
2315 {
2316 ccp_prop_value_t val;
2317 basic_block block;
2318
2319 block = gimple_bb (stmt);
2320 val = evaluate_stmt (stmt);
2321 if (val.lattice_val != CONSTANT
2322 || val.mask != 0)
2323 return SSA_PROP_VARYING;
2324
2325 /* Find which edge out of the conditional block will be taken and add it
2326 to the worklist. If no single edge can be determined statically,
2327 return SSA_PROP_VARYING to feed all the outgoing edges to the
2328 propagation engine. */
2329 *taken_edge_p = find_taken_edge (block, val.value);
2330 if (*taken_edge_p)
2331 return SSA_PROP_INTERESTING;
2332 else
2333 return SSA_PROP_VARYING;
2334 }
2335
2336
2337 /* Evaluate statement STMT. If the statement produces an output value and
2338 its evaluation changes the lattice value of its output, return
2339 SSA_PROP_INTERESTING and set *OUTPUT_P to the SSA_NAME holding the
2340 output value.
2341
2342 If STMT is a conditional branch and we can determine its truth
2343 value, set *TAKEN_EDGE_P accordingly. If STMT produces a varying
2344 value, return SSA_PROP_VARYING. */
2345
2346 static enum ssa_prop_result
2347 ccp_visit_stmt (gimple stmt, edge *taken_edge_p, tree *output_p)
2348 {
2349 tree def;
2350 ssa_op_iter iter;
2351
2352 if (dump_file && (dump_flags & TDF_DETAILS))
2353 {
2354 fprintf (dump_file, "\nVisiting statement:\n");
2355 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
2356 }
2357
2358 switch (gimple_code (stmt))
2359 {
2360 case GIMPLE_ASSIGN:
2361 /* If the statement is an assignment that produces a single
2362 output value, evaluate its RHS to see if the lattice value of
2363 its output has changed. */
2364 return visit_assignment (stmt, output_p);
2365
2366 case GIMPLE_CALL:
2367 /* A value-returning call also performs an assignment. */
2368 if (gimple_call_lhs (stmt) != NULL_TREE)
2369 return visit_assignment (stmt, output_p);
2370 break;
2371
2372 case GIMPLE_COND:
2373 case GIMPLE_SWITCH:
2374 /* If STMT is a conditional branch, see if we can determine
2375 which branch will be taken. */
2376 /* FIXME. It appears that we should be able to optimize
2377 computed GOTOs here as well. */
2378 return visit_cond_stmt (stmt, taken_edge_p);
2379
2380 default:
2381 break;
2382 }
2383
2384 /* Any other kind of statement is not interesting for constant
2385 propagation and, therefore, not worth simulating. */
2386 if (dump_file && (dump_flags & TDF_DETAILS))
2387 fprintf (dump_file, "No interesting values produced. Marked VARYING.\n");
2388
2389 /* Definitions made by statements other than assignments to
2390 SSA_NAMEs represent unknown modifications to their outputs.
2391 Mark them VARYING. */
2392 FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
2393 set_value_varying (def);
2394
2395 return SSA_PROP_VARYING;
2396 }
2397
2398
2399 /* Main entry point for SSA Conditional Constant Propagation. */
2400
2401 static unsigned int
2402 do_ssa_ccp (void)
2403 {
2404 unsigned int todo = 0;
2405 calculate_dominance_info (CDI_DOMINATORS);
2406 ccp_initialize ();
2407 ssa_propagate (ccp_visit_stmt, ccp_visit_phi_node);
2408 if (ccp_finalize ())
2409 todo = (TODO_cleanup_cfg | TODO_update_ssa);
2410 free_dominance_info (CDI_DOMINATORS);
2411 return todo;
2412 }
2413
2414
2415 namespace {
2416
2417 const pass_data pass_data_ccp =
2418 {
2419 GIMPLE_PASS, /* type */
2420 "ccp", /* name */
2421 OPTGROUP_NONE, /* optinfo_flags */
2422 TV_TREE_CCP, /* tv_id */
2423 ( PROP_cfg | PROP_ssa ), /* properties_required */
2424 0, /* properties_provided */
2425 0, /* properties_destroyed */
2426 0, /* todo_flags_start */
2427 TODO_update_address_taken, /* todo_flags_finish */
2428 };
2429
2430 class pass_ccp : public gimple_opt_pass
2431 {
2432 public:
2433 pass_ccp (gcc::context *ctxt)
2434 : gimple_opt_pass (pass_data_ccp, ctxt)
2435 {}
2436
2437 /* opt_pass methods: */
2438 opt_pass * clone () { return new pass_ccp (m_ctxt); }
2439 virtual bool gate (function *) { return flag_tree_ccp != 0; }
2440 virtual unsigned int execute (function *) { return do_ssa_ccp (); }
2441
2442 }; // class pass_ccp
2443
2444 } // anon namespace
2445
2446 gimple_opt_pass *
2447 make_pass_ccp (gcc::context *ctxt)
2448 {
2449 return new pass_ccp (ctxt);
2450 }
2451
2452
2453
2454 /* Try to optimize out __builtin_stack_restore.  It can be removed
2455    if there is another __builtin_stack_restore in the same basic
2456    block and no calls or ASM_EXPRs are in between, or if this block's
2457    only outgoing edge is to EXIT_BLOCK and there are no calls or
2458    ASM_EXPRs after this __builtin_stack_restore.  */
2459
2460 static tree
2461 optimize_stack_restore (gimple_stmt_iterator i)
2462 {
2463 tree callee;
2464 gimple stmt;
2465
2466 basic_block bb = gsi_bb (i);
2467 gimple call = gsi_stmt (i);
2468
2469 if (gimple_code (call) != GIMPLE_CALL
2470 || gimple_call_num_args (call) != 1
2471 || TREE_CODE (gimple_call_arg (call, 0)) != SSA_NAME
2472 || !POINTER_TYPE_P (TREE_TYPE (gimple_call_arg (call, 0))))
2473 return NULL_TREE;
2474
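  /* Scan forward through the block: an ASM or a call that is not a harmless
     builtin (in particular an alloca) forces us to keep this restore, while
     a second __builtin_stack_restore makes it redundant.  */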
2475 for (gsi_next (&i); !gsi_end_p (i); gsi_next (&i))
2476 {
2477 stmt = gsi_stmt (i);
2478 if (gimple_code (stmt) == GIMPLE_ASM)
2479 return NULL_TREE;
2480 if (gimple_code (stmt) != GIMPLE_CALL)
2481 continue;
2482
2483 callee = gimple_call_fndecl (stmt);
2484 if (!callee
2485 || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL
2486 /* All regular builtins are ok, just obviously not alloca. */
2487 || DECL_FUNCTION_CODE (callee) == BUILT_IN_ALLOCA
2488 || DECL_FUNCTION_CODE (callee) == BUILT_IN_ALLOCA_WITH_ALIGN)
2489 return NULL_TREE;
2490
2491 if (DECL_FUNCTION_CODE (callee) == BUILT_IN_STACK_RESTORE)
2492 goto second_stack_restore;
2493 }
2494
2495 if (!gsi_end_p (i))
2496 return NULL_TREE;
2497
2498   /* Allow no successors, or a single successor edge that goes to the exit block.  */
2499 switch (EDGE_COUNT (bb->succs))
2500 {
2501 case 0:
2502 break;
2503 case 1:
2504 if (single_succ_edge (bb)->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
2505 return NULL_TREE;
2506 break;
2507 default:
2508 return NULL_TREE;
2509 }
2510 second_stack_restore:
2511
2512 /* If there's exactly one use, then zap the call to __builtin_stack_save.
2513 If there are multiple uses, then the last one should remove the call.
2514 In any case, whether the call to __builtin_stack_save can be removed
2515 or not is irrelevant to removing the call to __builtin_stack_restore. */
2516 if (has_single_use (gimple_call_arg (call, 0)))
2517 {
2518 gimple stack_save = SSA_NAME_DEF_STMT (gimple_call_arg (call, 0));
2519 if (is_gimple_call (stack_save))
2520 {
2521 callee = gimple_call_fndecl (stack_save);
2522 if (callee
2523 && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
2524 && DECL_FUNCTION_CODE (callee) == BUILT_IN_STACK_SAVE)
2525 {
2526 gimple_stmt_iterator stack_save_gsi;
2527 tree rhs;
2528
2529 stack_save_gsi = gsi_for_stmt (stack_save);
2530 rhs = build_int_cst (TREE_TYPE (gimple_call_arg (call, 0)), 0);
2531 update_call_from_tree (&stack_save_gsi, rhs);
2532 }
2533 }
2534 }
2535
2536 /* No effect, so the statement will be deleted. */
2537 return integer_zero_node;
2538 }
2539
2540 /* If the va_list type is a simple pointer and nothing special is needed,
2541    optimize __builtin_va_start (&ap, 0) into ap = __builtin_next_arg (0),
2542    remove __builtin_va_end (&ap) as a no-op, and turn __builtin_va_copy
2543    into a simple pointer assignment.  */
2544
2545 static tree
2546 optimize_stdarg_builtin (gimple call)
2547 {
2548 tree callee, lhs, rhs, cfun_va_list;
2549 bool va_list_simple_ptr;
2550 location_t loc = gimple_location (call);
2551
2552 if (gimple_code (call) != GIMPLE_CALL)
2553 return NULL_TREE;
2554
2555 callee = gimple_call_fndecl (call);
2556
2557 cfun_va_list = targetm.fn_abi_va_list (callee);
2558 va_list_simple_ptr = POINTER_TYPE_P (cfun_va_list)
2559 && (TREE_TYPE (cfun_va_list) == void_type_node
2560 || TREE_TYPE (cfun_va_list) == char_type_node);
2561
2562 switch (DECL_FUNCTION_CODE (callee))
2563 {
2564 case BUILT_IN_VA_START:
2565 if (!va_list_simple_ptr
2566 || targetm.expand_builtin_va_start != NULL
2567 || !builtin_decl_explicit_p (BUILT_IN_NEXT_ARG))
2568 return NULL_TREE;
2569
2570 if (gimple_call_num_args (call) != 2)
2571 return NULL_TREE;
2572
2573 lhs = gimple_call_arg (call, 0);
2574 if (!POINTER_TYPE_P (TREE_TYPE (lhs))
2575 || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs)))
2576 != TYPE_MAIN_VARIANT (cfun_va_list))
2577 return NULL_TREE;
2578
2579 lhs = build_fold_indirect_ref_loc (loc, lhs);
2580 rhs = build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_NEXT_ARG),
2581 1, integer_zero_node);
2582 rhs = fold_convert_loc (loc, TREE_TYPE (lhs), rhs);
2583 return build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, rhs);
2584
2585 case BUILT_IN_VA_COPY:
2586 if (!va_list_simple_ptr)
2587 return NULL_TREE;
2588
2589 if (gimple_call_num_args (call) != 2)
2590 return NULL_TREE;
2591
2592 lhs = gimple_call_arg (call, 0);
2593 if (!POINTER_TYPE_P (TREE_TYPE (lhs))
2594 || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs)))
2595 != TYPE_MAIN_VARIANT (cfun_va_list))
2596 return NULL_TREE;
2597
2598 lhs = build_fold_indirect_ref_loc (loc, lhs);
2599 rhs = gimple_call_arg (call, 1);
2600 if (TYPE_MAIN_VARIANT (TREE_TYPE (rhs))
2601 != TYPE_MAIN_VARIANT (cfun_va_list))
2602 return NULL_TREE;
2603
2604 rhs = fold_convert_loc (loc, TREE_TYPE (lhs), rhs);
2605 return build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, rhs);
2606
2607 case BUILT_IN_VA_END:
2608 /* No effect, so the statement will be deleted. */
2609 return integer_zero_node;
2610
2611 default:
2612 gcc_unreachable ();
2613 }
2614 }
2615
2616 /* Attempt to make the block of the __builtin_unreachable call I unreachable by
2617    changing the incoming jumps.  Return true if at least one jump was changed.  */
2618
2619 static bool
2620 optimize_unreachable (gimple_stmt_iterator i)
2621 {
2622 basic_block bb = gsi_bb (i);
2623 gimple_stmt_iterator gsi;
2624 gimple stmt;
2625 edge_iterator ei;
2626 edge e;
2627 bool ret;
2628
2629 if (flag_sanitize & SANITIZE_UNREACHABLE)
2630 return false;
2631
2632 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2633 {
2634 stmt = gsi_stmt (gsi);
2635
2636 if (is_gimple_debug (stmt))
2637 continue;
2638
2639 if (glabel *label_stmt = dyn_cast <glabel *> (stmt))
2640 {
2641 /* Verify we do not need to preserve the label. */
2642 if (FORCED_LABEL (gimple_label_label (label_stmt)))
2643 return false;
2644
2645 continue;
2646 }
2647
2648 /* Only handle the case that __builtin_unreachable is the first statement
2649 in the block. We rely on DCE to remove stmts without side-effects
2650 before __builtin_unreachable. */
2651 if (gsi_stmt (gsi) != gsi_stmt (i))
2652 return false;
2653 }
2654
2655 ret = false;
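  /* Rewrite the condition in each predecessor so that the edge leading
     into this block is never taken.  */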
2656 FOR_EACH_EDGE (e, ei, bb->preds)
2657 {
2658 gsi = gsi_last_bb (e->src);
2659 if (gsi_end_p (gsi))
2660 continue;
2661
2662 stmt = gsi_stmt (gsi);
2663 if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
2664 {
2665 if (e->flags & EDGE_TRUE_VALUE)
2666 gimple_cond_make_false (cond_stmt);
2667 else if (e->flags & EDGE_FALSE_VALUE)
2668 gimple_cond_make_true (cond_stmt);
2669 else
2670 gcc_unreachable ();
2671 update_stmt (cond_stmt);
2672 }
2673 else
2674 {
2675 	  /* TODO: handle other cases, e.g. a switch statement.  */
2676 continue;
2677 }
2678
2679 ret = true;
2680 }
2681
2682 return ret;
2683 }
2684
2685 /* A simple pass that attempts to fold all builtin functions. This pass
2686 is run after we've propagated as many constants as we can. */
2687
2688 namespace {
2689
2690 const pass_data pass_data_fold_builtins =
2691 {
2692 GIMPLE_PASS, /* type */
2693 "fab", /* name */
2694 OPTGROUP_NONE, /* optinfo_flags */
2695 TV_NONE, /* tv_id */
2696 ( PROP_cfg | PROP_ssa ), /* properties_required */
2697 0, /* properties_provided */
2698 0, /* properties_destroyed */
2699 0, /* todo_flags_start */
2700 TODO_update_ssa, /* todo_flags_finish */
2701 };
2702
2703 class pass_fold_builtins : public gimple_opt_pass
2704 {
2705 public:
2706 pass_fold_builtins (gcc::context *ctxt)
2707 : gimple_opt_pass (pass_data_fold_builtins, ctxt)
2708 {}
2709
2710 /* opt_pass methods: */
2711 opt_pass * clone () { return new pass_fold_builtins (m_ctxt); }
2712 virtual unsigned int execute (function *);
2713
2714 }; // class pass_fold_builtins
2715
2716 unsigned int
2717 pass_fold_builtins::execute (function *fun)
2718 {
2719 bool cfg_changed = false;
2720 basic_block bb;
2721 unsigned int todoflags = 0;
2722
2723 FOR_EACH_BB_FN (bb, fun)
2724 {
2725 gimple_stmt_iterator i;
2726 for (i = gsi_start_bb (bb); !gsi_end_p (i); )
2727 {
2728 gimple stmt, old_stmt;
2729 tree callee;
2730 enum built_in_function fcode;
2731
2732 stmt = gsi_stmt (i);
2733
2734 if (gimple_code (stmt) != GIMPLE_CALL)
2735 {
2736 	      /* Remove all *ssaname_N ={v} {CLOBBER}; stmts;
2737 		 after the last GIMPLE DSE they aren't needed and might
2738 		 unnecessarily keep the SSA_NAMEs live.  */
2739 if (gimple_clobber_p (stmt))
2740 {
2741 tree lhs = gimple_assign_lhs (stmt);
2742 if (TREE_CODE (lhs) == MEM_REF
2743 && TREE_CODE (TREE_OPERAND (lhs, 0)) == SSA_NAME)
2744 {
2745 unlink_stmt_vdef (stmt);
2746 gsi_remove (&i, true);
2747 release_defs (stmt);
2748 continue;
2749 }
2750 }
2751 gsi_next (&i);
2752 continue;
2753 }
2754
2755 callee = gimple_call_fndecl (stmt);
2756 if (!callee || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL)
2757 {
2758 gsi_next (&i);
2759 continue;
2760 }
2761
2762 fcode = DECL_FUNCTION_CODE (callee);
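	  /* Let the generic folders have the first try; the builtin-specific
	     cases below are only considered if fold_stmt did not change the
	     statement.  */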
2763 if (fold_stmt (&i))
2764 ;
2765 else
2766 {
2767 tree result = NULL_TREE;
2768 switch (DECL_FUNCTION_CODE (callee))
2769 {
2770 case BUILT_IN_CONSTANT_P:
2771 /* Resolve __builtin_constant_p. If it hasn't been
2772 folded to integer_one_node by now, it's fairly
2773 certain that the value simply isn't constant. */
2774 result = integer_zero_node;
2775 break;
2776
2777 case BUILT_IN_ASSUME_ALIGNED:
2778 /* Remove __builtin_assume_aligned. */
2779 result = gimple_call_arg (stmt, 0);
2780 break;
2781
2782 case BUILT_IN_STACK_RESTORE:
2783 result = optimize_stack_restore (i);
2784 if (result)
2785 break;
2786 gsi_next (&i);
2787 continue;
2788
2789 case BUILT_IN_UNREACHABLE:
2790 if (optimize_unreachable (i))
2791 cfg_changed = true;
2792 break;
2793
2794 case BUILT_IN_VA_START:
2795 case BUILT_IN_VA_END:
2796 case BUILT_IN_VA_COPY:
2797 /* These shouldn't be folded before pass_stdarg. */
2798 result = optimize_stdarg_builtin (stmt);
2799 if (result)
2800 break;
2801 /* FALLTHRU */
2802
2803 default:;
2804 }
2805
2806 if (!result)
2807 {
2808 gsi_next (&i);
2809 continue;
2810 }
2811
2812 if (!update_call_from_tree (&i, result))
2813 gimplify_and_update_call_from_tree (&i, result);
2814 }
2815
2816 todoflags |= TODO_update_address_taken;
2817
2818 if (dump_file && (dump_flags & TDF_DETAILS))
2819 {
2820 fprintf (dump_file, "Simplified\n ");
2821 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
2822 }
2823
2824 old_stmt = stmt;
2825 stmt = gsi_stmt (i);
2826 update_stmt (stmt);
2827
2828 if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt)
2829 && gimple_purge_dead_eh_edges (bb))
2830 cfg_changed = true;
2831
2832 if (dump_file && (dump_flags & TDF_DETAILS))
2833 {
2834 fprintf (dump_file, "to\n ");
2835 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
2836 fprintf (dump_file, "\n");
2837 }
2838
2839 	  /* Retry the same statement if it changed into another
2840 	     builtin; there might be new opportunities now.  */
2841 if (gimple_code (stmt) != GIMPLE_CALL)
2842 {
2843 gsi_next (&i);
2844 continue;
2845 }
2846 callee = gimple_call_fndecl (stmt);
2847 if (!callee
2848 || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL
2849 || DECL_FUNCTION_CODE (callee) == fcode)
2850 gsi_next (&i);
2851 }
2852 }
2853
2854 /* Delete unreachable blocks. */
2855 if (cfg_changed)
2856 todoflags |= TODO_cleanup_cfg;
2857
2858 return todoflags;
2859 }
2860
2861 } // anon namespace
2862
2863 gimple_opt_pass *
2864 make_pass_fold_builtins (gcc::context *ctxt)
2865 {
2866 return new pass_fold_builtins (ctxt);
2867 }