switch from gimple to gimple*
gcc/tree-ssa-ccp.c
1 /* Conditional constant propagation pass for the GNU compiler.
2 Copyright (C) 2000-2015 Free Software Foundation, Inc.
3 Adapted from original RTL SSA-CCP by Daniel Berlin <dberlin@dberlin.org>
4 Adapted to GIMPLE trees by Diego Novillo <dnovillo@redhat.com>
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it
9 under the terms of the GNU General Public License as published by the
10 Free Software Foundation; either version 3, or (at your option) any
11 later version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT
14 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 /* Conditional constant propagation (CCP) is based on the SSA
23 propagation engine (tree-ssa-propagate.c). Constant assignments of
24 the form VAR = CST are propagated from the assignments into uses of
25 VAR, which in turn may generate new constants. The simulation uses
26 a four level lattice to keep track of constant values associated
27 with SSA names. Given an SSA name V_i, it may take one of the
28 following values:
29
30 UNINITIALIZED -> the initial state of the value. This value
31 is replaced with a correct initial value
32 the first time the value is used, so the
33 rest of the pass does not need to care about
34 it. Using this value simplifies initialization
35 of the pass, and prevents us from needlessly
36 scanning statements that are never reached.
37
38 UNDEFINED -> V_i is a local variable whose definition
39 has not been processed yet. Therefore we
40 don't yet know if its value is a constant
41 or not.
42
43 CONSTANT -> V_i has been found to hold a constant
44 value C.
45
46 VARYING -> V_i cannot take a constant value, or if it
47 does, it is not possible to determine it
48 at compile time.
49
50 The core of SSA-CCP is in ccp_visit_stmt and ccp_visit_phi_node:
51
52 1- In ccp_visit_stmt, we are interested in assignments whose RHS
53 evaluates into a constant and conditional jumps whose predicate
54 evaluates into a boolean true or false. When an assignment of
55 the form V_i = CONST is found, V_i's lattice value is set to
56 CONSTANT and CONST is associated with it. This causes the
 57 propagation engine to add all the SSA edges coming out of the
58 assignment into the worklists, so that statements that use V_i
59 can be visited.
60
61 If the statement is a conditional with a constant predicate, we
62 mark the outgoing edges as executable or not executable
63 depending on the predicate's value. This is then used when
64 visiting PHI nodes to know when a PHI argument can be ignored.
65
66
67 2- In ccp_visit_phi_node, if all the PHI arguments evaluate to the
68 same constant C, then the LHS of the PHI is set to C. This
69 evaluation is known as the "meet operation". Since one of the
70 goals of this evaluation is to optimistically return constant
 71 values as often as possible, it uses two main shortcuts:
72
73 - If an argument is flowing in through a non-executable edge, it
74 is ignored. This is useful in cases like this:
75
76 if (PRED)
77 a_9 = 3;
78 else
79 a_10 = 100;
80 a_11 = PHI (a_9, a_10)
81
82 If PRED is known to always evaluate to false, then we can
83 assume that a_11 will always take its value from a_10, meaning
 84 that instead of considering it VARYING (a_9 and a_10 have
85 different values), we can consider it CONSTANT 100.
86
87 - If an argument has an UNDEFINED value, then it does not affect
88 the outcome of the meet operation. If a variable V_i has an
89 UNDEFINED value, it means that either its defining statement
90 hasn't been visited yet or V_i has no defining statement, in
91 which case the original symbol 'V' is being used
92 uninitialized. Since 'V' is a local variable, the compiler
93 may assume any initial value for it.
94
95
96 After propagation, every variable V_i that ends up with a lattice
97 value of CONSTANT will have the associated constant value in the
98 array CONST_VAL[i].VALUE. That is fed into substitute_and_fold for
99 final substitution and folding.
100
101 This algorithm uses wide-ints at the max precision of the target.
102 This means that, with one uninteresting exception, variables with
103 UNSIGNED types never go to VARYING because the bits above the
104 precision of the type of the variable are always zero. The
105 uninteresting case is a variable of UNSIGNED type that has the
106 maximum precision of the target. Such variables can go to VARYING,
 107 but this causes no loss of information since these variables will
108 never be extended.
109
110 References:
111
112 Constant propagation with conditional branches,
113 Wegman and Zadeck, ACM TOPLAS 13(2):181-210.
114
115 Building an Optimizing Compiler,
116 Robert Morgan, Butterworth-Heinemann, 1998, Section 8.9.
117
118 Advanced Compiler Design and Implementation,
119 Steven Muchnick, Morgan Kaufmann, 1997, Section 12.6 */
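
/* Illustrative sketch (not part of the pass): a minimal stand-alone model
   of the lattice meet described above, replaying the a_9/a_10/a_11 PHI
   example.  The type and function names (toy_value, toy_meet) are
   hypothetical and only mirror the rules that ccp_lattice_meet implements
   later in this file; the block is guarded by #if 0 so it is never
   compiled.  */
#if 0
#include <stdio.h>

enum toy_lattice { TOY_UNDEFINED, TOY_CONSTANT, TOY_VARYING };

struct toy_value
{
  enum toy_lattice kind;
  long cst;		/* Meaningful only when kind == TOY_CONSTANT.  */
};

/* any M UNDEFINED = any;  any M VARYING = VARYING;
   Ci M Cj = Ci if i == j, VARYING otherwise.  */
static struct toy_value
toy_meet (struct toy_value a, struct toy_value b)
{
  struct toy_value varying = { TOY_VARYING, 0 };
  if (a.kind == TOY_UNDEFINED)
    return b;
  if (b.kind == TOY_UNDEFINED)
    return a;
  if (a.kind == TOY_VARYING || b.kind == TOY_VARYING || a.cst != b.cst)
    return varying;
  return a;
}

int
main (void)
{
  struct toy_value a_9 = { TOY_CONSTANT, 3 };
  struct toy_value a_10 = { TOY_CONSTANT, 100 };

  /* Both edges executable: 3 M 100 has no common constant, so the PHI
     result a_11 is VARYING.  */
  printf ("both edges: kind %d\n", toy_meet (a_9, a_10).kind);

  /* If PRED is known false, the edge carrying a_9 is not executable; its
     argument is skipped and a_11 is simply CONSTANT 100.  */
  printf ("one edge:   constant %ld\n", a_10.cst);
  return 0;
}
#endif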
120
121 #include "config.h"
122 #include "system.h"
123 #include "coretypes.h"
124 #include "backend.h"
125 #include "tree.h"
126 #include "gimple.h"
127 #include "hard-reg-set.h"
128 #include "ssa.h"
129 #include "alias.h"
130 #include "fold-const.h"
131 #include "stor-layout.h"
132 #include "flags.h"
133 #include "tm_p.h"
134 #include "gimple-pretty-print.h"
135 #include "internal-fn.h"
136 #include "gimple-fold.h"
137 #include "tree-eh.h"
138 #include "gimplify.h"
139 #include "gimple-iterator.h"
140 #include "tree-cfg.h"
141 #include "tree-pass.h"
142 #include "tree-ssa-propagate.h"
143 #include "value-prof.h"
144 #include "langhooks.h"
145 #include "target.h"
146 #include "diagnostic-core.h"
147 #include "dbgcnt.h"
148 #include "params.h"
149 #include "wide-int-print.h"
150 #include "builtins.h"
151 #include "tree-chkp.h"
152
153
154 /* Possible lattice values. */
155 typedef enum
156 {
157 UNINITIALIZED,
158 UNDEFINED,
159 CONSTANT,
160 VARYING
161 } ccp_lattice_t;
162
163 struct ccp_prop_value_t {
164 /* Lattice value. */
165 ccp_lattice_t lattice_val;
166
167 /* Propagated value. */
168 tree value;
169
170 /* Mask that applies to the propagated value during CCP. For X
 171 with a CONSTANT lattice value, X & ~mask == value & ~mask. The
172 zero bits in the mask cover constant values. The ones mean no
173 information. */
174 widest_int mask;
175 };
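
/* Illustrative reading of the VALUE/MASK pair above (an example, not a
   definition): for a 32-bit X known only to be a multiple of 8, the
   lattice entry would be lattice_val == CONSTANT, value == 0 and
   mask == -8; the three low bits are known to equal the corresponding
   (zero) bits of VALUE and every other bit is unknown.  The invariant
   X & ~mask == value & ~mask then reduces to (X & 7) == 0.  */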
176
177 /* Array of propagated constant values. After propagation,
178 CONST_VAL[I].VALUE holds the constant value for SSA_NAME(I). If
179 the constant is held in an SSA name representing a memory store
180 (i.e., a VDEF), CONST_VAL[I].MEM_REF will contain the actual
181 memory reference used to store (i.e., the LHS of the assignment
182 doing the store). */
183 static ccp_prop_value_t *const_val;
184 static unsigned n_const_val;
185
186 static void canonicalize_value (ccp_prop_value_t *);
187 static bool ccp_fold_stmt (gimple_stmt_iterator *);
188 static void ccp_lattice_meet (ccp_prop_value_t *, ccp_prop_value_t *);
189
190 /* Dump constant propagation value VAL to file OUTF prefixed by PREFIX. */
191
192 static void
193 dump_lattice_value (FILE *outf, const char *prefix, ccp_prop_value_t val)
194 {
195 switch (val.lattice_val)
196 {
197 case UNINITIALIZED:
198 fprintf (outf, "%sUNINITIALIZED", prefix);
199 break;
200 case UNDEFINED:
201 fprintf (outf, "%sUNDEFINED", prefix);
202 break;
203 case VARYING:
204 fprintf (outf, "%sVARYING", prefix);
205 break;
206 case CONSTANT:
207 if (TREE_CODE (val.value) != INTEGER_CST
208 || val.mask == 0)
209 {
210 fprintf (outf, "%sCONSTANT ", prefix);
211 print_generic_expr (outf, val.value, dump_flags);
212 }
213 else
214 {
215 widest_int cval = wi::bit_and_not (wi::to_widest (val.value),
216 val.mask);
217 fprintf (outf, "%sCONSTANT ", prefix);
218 print_hex (cval, outf);
219 fprintf (outf, " (");
220 print_hex (val.mask, outf);
221 fprintf (outf, ")");
222 }
223 break;
224 default:
225 gcc_unreachable ();
226 }
227 }
228
229
230 /* Print lattice value VAL to stderr. */
231
232 void debug_lattice_value (ccp_prop_value_t val);
233
234 DEBUG_FUNCTION void
235 debug_lattice_value (ccp_prop_value_t val)
236 {
237 dump_lattice_value (stderr, "", val);
238 fprintf (stderr, "\n");
239 }
240
241 /* Extend NONZERO_BITS to a full mask, with the upper bits being set. */
242
243 static widest_int
244 extend_mask (const wide_int &nonzero_bits)
245 {
246 return (wi::mask <widest_int> (wi::get_precision (nonzero_bits), true)
247 | widest_int::from (nonzero_bits, UNSIGNED));
248 }
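
/* For instance (illustrative only): if get_nonzero_bits reports 0xff for
   a 32-bit variable, bits 8..31 are known to be zero, so the extended
   mask has its low byte set (those bits stay unknown), bits 8..31 clear
   (known zero, matching a zero VALUE) and every bit above the type's
   precision set, i.e. treated as unknown.  */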
249
250 /* Compute a default value for variable VAR and store it in the
251 CONST_VAL array. The following rules are used to get default
252 values:
253
254 1- Global and static variables that are declared constant are
255 considered CONSTANT.
256
257 2- Any other value is considered UNDEFINED. This is useful when
258 considering PHI nodes. PHI arguments that are undefined do not
259 change the constant value of the PHI node, which allows for more
260 constants to be propagated.
261
262 3- Variables defined by statements other than assignments and PHI
263 nodes are considered VARYING.
264
265 4- Initial values of variables that are not GIMPLE registers are
266 considered VARYING. */
267
268 static ccp_prop_value_t
269 get_default_value (tree var)
270 {
271 ccp_prop_value_t val = { UNINITIALIZED, NULL_TREE, 0 };
272 gimple *stmt;
273
274 stmt = SSA_NAME_DEF_STMT (var);
275
276 if (gimple_nop_p (stmt))
277 {
278 /* Variables defined by an empty statement are those used
279 before being initialized. If VAR is a local variable, we
280 can assume initially that it is UNDEFINED, otherwise we must
281 consider it VARYING. */
282 if (!virtual_operand_p (var)
283 && TREE_CODE (SSA_NAME_VAR (var)) == VAR_DECL)
284 val.lattice_val = UNDEFINED;
285 else
286 {
287 val.lattice_val = VARYING;
288 val.mask = -1;
289 if (flag_tree_bit_ccp)
290 {
291 wide_int nonzero_bits = get_nonzero_bits (var);
292 if (nonzero_bits != -1)
293 {
294 val.lattice_val = CONSTANT;
295 val.value = build_zero_cst (TREE_TYPE (var));
296 val.mask = extend_mask (nonzero_bits);
297 }
298 }
299 }
300 }
301 else if (is_gimple_assign (stmt))
302 {
303 tree cst;
304 if (gimple_assign_single_p (stmt)
305 && DECL_P (gimple_assign_rhs1 (stmt))
306 && (cst = get_symbol_constant_value (gimple_assign_rhs1 (stmt))))
307 {
308 val.lattice_val = CONSTANT;
309 val.value = cst;
310 }
311 else
312 {
313 /* Any other variable defined by an assignment is considered
314 UNDEFINED. */
315 val.lattice_val = UNDEFINED;
316 }
317 }
318 else if ((is_gimple_call (stmt)
319 && gimple_call_lhs (stmt) != NULL_TREE)
320 || gimple_code (stmt) == GIMPLE_PHI)
321 {
322 /* A variable defined by a call or a PHI node is considered
323 UNDEFINED. */
324 val.lattice_val = UNDEFINED;
325 }
326 else
327 {
328 /* Otherwise, VAR will never take on a constant value. */
329 val.lattice_val = VARYING;
330 val.mask = -1;
331 }
332
333 return val;
334 }
335
336
337 /* Get the constant value associated with variable VAR. */
338
339 static inline ccp_prop_value_t *
340 get_value (tree var)
341 {
342 ccp_prop_value_t *val;
343
344 if (const_val == NULL
345 || SSA_NAME_VERSION (var) >= n_const_val)
346 return NULL;
347
348 val = &const_val[SSA_NAME_VERSION (var)];
349 if (val->lattice_val == UNINITIALIZED)
350 *val = get_default_value (var);
351
352 canonicalize_value (val);
353
354 return val;
355 }
356
357 /* Return the constant tree value associated with VAR. */
358
359 static inline tree
360 get_constant_value (tree var)
361 {
362 ccp_prop_value_t *val;
363 if (TREE_CODE (var) != SSA_NAME)
364 {
365 if (is_gimple_min_invariant (var))
366 return var;
367 return NULL_TREE;
368 }
369 val = get_value (var);
370 if (val
371 && val->lattice_val == CONSTANT
372 && (TREE_CODE (val->value) != INTEGER_CST
373 || val->mask == 0))
374 return val->value;
375 return NULL_TREE;
376 }
377
378 /* Sets the value associated with VAR to VARYING. */
379
380 static inline void
381 set_value_varying (tree var)
382 {
383 ccp_prop_value_t *val = &const_val[SSA_NAME_VERSION (var)];
384
385 val->lattice_val = VARYING;
386 val->value = NULL_TREE;
387 val->mask = -1;
388 }
389
390 /* For integer constants, make sure to drop TREE_OVERFLOW. */
391
392 static void
393 canonicalize_value (ccp_prop_value_t *val)
394 {
395 if (val->lattice_val != CONSTANT)
396 return;
397
398 if (TREE_OVERFLOW_P (val->value))
399 val->value = drop_tree_overflow (val->value);
400 }
401
402 /* Return whether the lattice transition is valid. */
403
404 static bool
405 valid_lattice_transition (ccp_prop_value_t old_val, ccp_prop_value_t new_val)
406 {
407 /* Lattice transitions must always be monotonically increasing in
408 value. */
409 if (old_val.lattice_val < new_val.lattice_val)
410 return true;
411
412 if (old_val.lattice_val != new_val.lattice_val)
413 return false;
414
415 if (!old_val.value && !new_val.value)
416 return true;
417
418 /* Now both lattice values are CONSTANT. */
419
420 /* Allow arbitrary copy changes as we might look through PHI <a_1, ...>
421 when only a single copy edge is executable. */
422 if (TREE_CODE (old_val.value) == SSA_NAME
423 && TREE_CODE (new_val.value) == SSA_NAME)
424 return true;
425
426 /* Allow transitioning from a constant to a copy. */
427 if (is_gimple_min_invariant (old_val.value)
428 && TREE_CODE (new_val.value) == SSA_NAME)
429 return true;
430
431 /* Allow transitioning from PHI <&x, not executable> == &x
432 to PHI <&x, &y> == common alignment. */
433 if (TREE_CODE (old_val.value) != INTEGER_CST
434 && TREE_CODE (new_val.value) == INTEGER_CST)
435 return true;
436
437 /* Bit-lattices have to agree in the still valid bits. */
438 if (TREE_CODE (old_val.value) == INTEGER_CST
439 && TREE_CODE (new_val.value) == INTEGER_CST)
440 return (wi::bit_and_not (wi::to_widest (old_val.value), new_val.mask)
441 == wi::bit_and_not (wi::to_widest (new_val.value), new_val.mask));
442
443 /* Otherwise constant values have to agree. */
444 if (operand_equal_p (old_val.value, new_val.value, 0))
445 return true;
446
447 /* At least the kinds and types should agree now. */
448 if (TREE_CODE (old_val.value) != TREE_CODE (new_val.value)
449 || !types_compatible_p (TREE_TYPE (old_val.value),
450 TREE_TYPE (new_val.value)))
451 return false;
452
453 /* For floats and !HONOR_NANS allow transitions from (partial) NaN
454 to non-NaN. */
455 tree type = TREE_TYPE (new_val.value);
456 if (SCALAR_FLOAT_TYPE_P (type)
457 && !HONOR_NANS (type))
458 {
459 if (REAL_VALUE_ISNAN (TREE_REAL_CST (old_val.value)))
460 return true;
461 }
462 else if (VECTOR_FLOAT_TYPE_P (type)
463 && !HONOR_NANS (type))
464 {
465 for (unsigned i = 0; i < VECTOR_CST_NELTS (old_val.value); ++i)
466 if (!REAL_VALUE_ISNAN
467 (TREE_REAL_CST (VECTOR_CST_ELT (old_val.value, i)))
468 && !operand_equal_p (VECTOR_CST_ELT (old_val.value, i),
469 VECTOR_CST_ELT (new_val.value, i), 0))
470 return false;
471 return true;
472 }
473 else if (COMPLEX_FLOAT_TYPE_P (type)
474 && !HONOR_NANS (type))
475 {
476 if (!REAL_VALUE_ISNAN (TREE_REAL_CST (TREE_REALPART (old_val.value)))
477 && !operand_equal_p (TREE_REALPART (old_val.value),
478 TREE_REALPART (new_val.value), 0))
479 return false;
480 if (!REAL_VALUE_ISNAN (TREE_REAL_CST (TREE_IMAGPART (old_val.value)))
481 && !operand_equal_p (TREE_IMAGPART (old_val.value),
482 TREE_IMAGPART (new_val.value), 0))
483 return false;
484 return true;
485 }
486 return false;
487 }
488
489 /* Set the value for variable VAR to NEW_VAL. Return true if the new
490 value is different from VAR's previous value. */
491
492 static bool
493 set_lattice_value (tree var, ccp_prop_value_t *new_val)
494 {
495 /* We can deal with old UNINITIALIZED values just fine here. */
496 ccp_prop_value_t *old_val = &const_val[SSA_NAME_VERSION (var)];
497
498 canonicalize_value (new_val);
499
500 /* We have to be careful to not go up the bitwise lattice
501 represented by the mask. Instead of dropping to VARYING
502 use the meet operator to retain a conservative value.
 503 Missed optimizations like PR65851 make this necessary.
504 It also ensures we converge to a stable lattice solution. */
505 if (new_val->lattice_val == CONSTANT
506 && old_val->lattice_val == CONSTANT
507 && TREE_CODE (new_val->value) != SSA_NAME)
508 ccp_lattice_meet (new_val, old_val);
509
510 gcc_checking_assert (valid_lattice_transition (*old_val, *new_val));
511
512 /* If *OLD_VAL and NEW_VAL are the same, return false to inform the
513 caller that this was a non-transition. */
514 if (old_val->lattice_val != new_val->lattice_val
515 || (new_val->lattice_val == CONSTANT
516 && (TREE_CODE (new_val->value) != TREE_CODE (old_val->value)
517 || (TREE_CODE (new_val->value) == INTEGER_CST
518 && (new_val->mask != old_val->mask
519 || (wi::bit_and_not (wi::to_widest (old_val->value),
520 new_val->mask)
521 != wi::bit_and_not (wi::to_widest (new_val->value),
522 new_val->mask))))
523 || (TREE_CODE (new_val->value) != INTEGER_CST
524 && !operand_equal_p (new_val->value, old_val->value, 0)))))
525 {
526 /* ??? We would like to delay creation of INTEGER_CSTs from
 527 partially constant values here. */
528
529 if (dump_file && (dump_flags & TDF_DETAILS))
530 {
531 dump_lattice_value (dump_file, "Lattice value changed to ", *new_val);
532 fprintf (dump_file, ". Adding SSA edges to worklist.\n");
533 }
534
535 *old_val = *new_val;
536
537 gcc_assert (new_val->lattice_val != UNINITIALIZED);
538 return true;
539 }
540
541 return false;
542 }
543
544 static ccp_prop_value_t get_value_for_expr (tree, bool);
545 static ccp_prop_value_t bit_value_binop (enum tree_code, tree, tree, tree);
546 static void bit_value_binop_1 (enum tree_code, tree, widest_int *, widest_int *,
547 tree, const widest_int &, const widest_int &,
548 tree, const widest_int &, const widest_int &);
549
550 /* Return a widest_int that can be used for bitwise simplifications
551 from VAL. */
552
553 static widest_int
554 value_to_wide_int (ccp_prop_value_t val)
555 {
556 if (val.value
557 && TREE_CODE (val.value) == INTEGER_CST)
558 return wi::to_widest (val.value);
559
560 return 0;
561 }
562
563 /* Return the value for the address expression EXPR based on alignment
564 information. */
565
566 static ccp_prop_value_t
567 get_value_from_alignment (tree expr)
568 {
569 tree type = TREE_TYPE (expr);
570 ccp_prop_value_t val;
571 unsigned HOST_WIDE_INT bitpos;
572 unsigned int align;
573
574 gcc_assert (TREE_CODE (expr) == ADDR_EXPR);
575
576 get_pointer_alignment_1 (expr, &align, &bitpos);
577 val.mask = (POINTER_TYPE_P (type) || TYPE_UNSIGNED (type)
578 ? wi::mask <widest_int> (TYPE_PRECISION (type), false)
579 : -1).and_not (align / BITS_PER_UNIT - 1);
580 val.lattice_val
581 = wi::sext (val.mask, TYPE_PRECISION (type)) == -1 ? VARYING : CONSTANT;
582 if (val.lattice_val == CONSTANT)
583 val.value = build_int_cstu (type, bitpos / BITS_PER_UNIT);
584 else
585 val.value = NULL_TREE;
586
587 return val;
588 }
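
/* Worked example (illustrative, with hypothetical names): for an address
   like &a[1] where 'a' is known to be 16-byte aligned and the element
   size is 4, get_pointer_alignment_1 reports ALIGN == 128 bits and
   BITPOS == 32 bits.  The mask above then has its four low bits cleared
   (the low 4 bits of the pointer are known) and VALUE becomes 4, i.e. the
   pointer is known to equal 4 modulo 16.  */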
589
590 /* Return the value for the tree operand EXPR. If FOR_BITS_P is true
591 return constant bits extracted from alignment information for
592 invariant addresses. */
593
594 static ccp_prop_value_t
595 get_value_for_expr (tree expr, bool for_bits_p)
596 {
597 ccp_prop_value_t val;
598
599 if (TREE_CODE (expr) == SSA_NAME)
600 {
601 val = *get_value (expr);
602 if (for_bits_p
603 && val.lattice_val == CONSTANT
604 && TREE_CODE (val.value) == ADDR_EXPR)
605 val = get_value_from_alignment (val.value);
606 /* Fall back to a copy value. */
607 if (!for_bits_p
608 && val.lattice_val == VARYING
609 && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (expr))
610 {
611 val.lattice_val = CONSTANT;
612 val.value = expr;
613 val.mask = -1;
614 }
615 }
616 else if (is_gimple_min_invariant (expr)
617 && (!for_bits_p || TREE_CODE (expr) != ADDR_EXPR))
618 {
619 val.lattice_val = CONSTANT;
620 val.value = expr;
621 val.mask = 0;
622 canonicalize_value (&val);
623 }
624 else if (TREE_CODE (expr) == ADDR_EXPR)
625 val = get_value_from_alignment (expr);
626 else
627 {
628 val.lattice_val = VARYING;
629 val.mask = -1;
630 val.value = NULL_TREE;
631 }
632 return val;
633 }
634
635 /* Return the likely CCP lattice value for STMT.
636
637 If STMT has no operands, then return CONSTANT.
638
639 Else if undefinedness of operands of STMT cause its value to be
640 undefined, then return UNDEFINED.
641
642 Else if any operands of STMT are constants, then return CONSTANT.
643
644 Else return VARYING. */
645
646 static ccp_lattice_t
647 likely_value (gimple *stmt)
648 {
649 bool has_constant_operand, has_undefined_operand, all_undefined_operands;
650 bool has_nsa_operand;
651 tree use;
652 ssa_op_iter iter;
653 unsigned i;
654
655 enum gimple_code code = gimple_code (stmt);
656
657 /* This function appears to be called only for assignments, calls,
658 conditionals, and switches, due to the logic in visit_stmt. */
659 gcc_assert (code == GIMPLE_ASSIGN
660 || code == GIMPLE_CALL
661 || code == GIMPLE_COND
662 || code == GIMPLE_SWITCH);
663
664 /* If the statement has volatile operands, it won't fold to a
665 constant value. */
666 if (gimple_has_volatile_ops (stmt))
667 return VARYING;
668
669 /* Arrive here for more complex cases. */
670 has_constant_operand = false;
671 has_undefined_operand = false;
672 all_undefined_operands = true;
673 has_nsa_operand = false;
674 FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
675 {
676 ccp_prop_value_t *val = get_value (use);
677
678 if (val->lattice_val == UNDEFINED)
679 has_undefined_operand = true;
680 else
681 all_undefined_operands = false;
682
683 if (val->lattice_val == CONSTANT)
684 has_constant_operand = true;
685
686 if (SSA_NAME_IS_DEFAULT_DEF (use)
687 || !prop_simulate_again_p (SSA_NAME_DEF_STMT (use)))
688 has_nsa_operand = true;
689 }
690
691 /* There may be constants in regular rhs operands. For calls we
692 have to ignore lhs, fndecl and static chain, otherwise only
693 the lhs. */
694 for (i = (is_gimple_call (stmt) ? 2 : 0) + gimple_has_lhs (stmt);
695 i < gimple_num_ops (stmt); ++i)
696 {
697 tree op = gimple_op (stmt, i);
698 if (!op || TREE_CODE (op) == SSA_NAME)
699 continue;
700 if (is_gimple_min_invariant (op))
701 has_constant_operand = true;
702 }
703
704 if (has_constant_operand)
705 all_undefined_operands = false;
706
707 if (has_undefined_operand
708 && code == GIMPLE_CALL
709 && gimple_call_internal_p (stmt))
710 switch (gimple_call_internal_fn (stmt))
711 {
712 /* These 3 builtins use the first argument just as a magic
 713 way to find a decl uid. */
714 case IFN_GOMP_SIMD_LANE:
715 case IFN_GOMP_SIMD_VF:
716 case IFN_GOMP_SIMD_LAST_LANE:
717 has_undefined_operand = false;
718 break;
719 default:
720 break;
721 }
722
723 /* If the operation combines operands like COMPLEX_EXPR make sure to
724 not mark the result UNDEFINED if only one part of the result is
725 undefined. */
726 if (has_undefined_operand && all_undefined_operands)
727 return UNDEFINED;
728 else if (code == GIMPLE_ASSIGN && has_undefined_operand)
729 {
730 switch (gimple_assign_rhs_code (stmt))
731 {
732 /* Unary operators are handled with all_undefined_operands. */
733 case PLUS_EXPR:
734 case MINUS_EXPR:
735 case POINTER_PLUS_EXPR:
736 /* Not MIN_EXPR, MAX_EXPR. One VARYING operand may be selected.
737 Not bitwise operators, one VARYING operand may specify the
738 result completely. Not logical operators for the same reason.
739 Not COMPLEX_EXPR as one VARYING operand makes the result partly
740 not UNDEFINED. Not *DIV_EXPR, comparisons and shifts because
741 the undefined operand may be promoted. */
742 return UNDEFINED;
743
744 case ADDR_EXPR:
745 /* If any part of an address is UNDEFINED, like the index
746 of an ARRAY_EXPR, then treat the result as UNDEFINED. */
747 return UNDEFINED;
748
749 default:
750 ;
751 }
752 }
 753 /* If there was an UNDEFINED operand but the result may not be UNDEFINED,
754 fall back to CONSTANT. During iteration UNDEFINED may still drop
755 to CONSTANT. */
756 if (has_undefined_operand)
757 return CONSTANT;
758
 759 /* We do not consider virtual operands here -- a load from read-only
760 memory may have only VARYING virtual operands, but still be
761 constant. Also we can combine the stmt with definitions from
762 operands whose definitions are not simulated again. */
763 if (has_constant_operand
764 || has_nsa_operand
765 || gimple_references_memory_p (stmt))
766 return CONSTANT;
767
768 return VARYING;
769 }
770
771 /* Returns true if STMT cannot be constant. */
772
773 static bool
774 surely_varying_stmt_p (gimple *stmt)
775 {
776 /* If the statement has operands that we cannot handle, it cannot be
777 constant. */
778 if (gimple_has_volatile_ops (stmt))
779 return true;
780
 781 /* If it is a call that does not return a value, or whose callee
 782 is known but is neither a builtin nor a function with the
 783 assume_aligned/alloc_align attribute, it is varying. */
784 if (is_gimple_call (stmt))
785 {
786 tree fndecl, fntype = gimple_call_fntype (stmt);
787 if (!gimple_call_lhs (stmt)
788 || ((fndecl = gimple_call_fndecl (stmt)) != NULL_TREE
789 && !DECL_BUILT_IN (fndecl)
790 && !lookup_attribute ("assume_aligned",
791 TYPE_ATTRIBUTES (fntype))
792 && !lookup_attribute ("alloc_align",
793 TYPE_ATTRIBUTES (fntype))))
794 return true;
795 }
796
797 /* Any other store operation is not interesting. */
798 else if (gimple_vdef (stmt))
799 return true;
800
801 /* Anything other than assignments and conditional jumps are not
802 interesting for CCP. */
803 if (gimple_code (stmt) != GIMPLE_ASSIGN
804 && gimple_code (stmt) != GIMPLE_COND
805 && gimple_code (stmt) != GIMPLE_SWITCH
806 && gimple_code (stmt) != GIMPLE_CALL)
807 return true;
808
809 return false;
810 }
811
812 /* Initialize local data structures for CCP. */
813
814 static void
815 ccp_initialize (void)
816 {
817 basic_block bb;
818
819 n_const_val = num_ssa_names;
820 const_val = XCNEWVEC (ccp_prop_value_t, n_const_val);
821
822 /* Initialize simulation flags for PHI nodes and statements. */
823 FOR_EACH_BB_FN (bb, cfun)
824 {
825 gimple_stmt_iterator i;
826
827 for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i))
828 {
829 gimple *stmt = gsi_stmt (i);
830 bool is_varying;
831
 832 /* If the statement is a control insn, we always want to
 833 simulate it at least once. Failing to do so means that
 834 its outgoing edges will never get added. */
835 if (stmt_ends_bb_p (stmt))
836 is_varying = false;
837 else
838 is_varying = surely_varying_stmt_p (stmt);
839
840 if (is_varying)
841 {
842 tree def;
843 ssa_op_iter iter;
844
845 /* If the statement will not produce a constant, mark
846 all its outputs VARYING. */
847 FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
848 set_value_varying (def);
849 }
850 prop_set_simulate_again (stmt, !is_varying);
851 }
852 }
853
854 /* Now process PHI nodes. We never clear the simulate_again flag on
855 phi nodes, since we do not know which edges are executable yet,
856 except for phi nodes for virtual operands when we do not do store ccp. */
857 FOR_EACH_BB_FN (bb, cfun)
858 {
859 gphi_iterator i;
860
861 for (i = gsi_start_phis (bb); !gsi_end_p (i); gsi_next (&i))
862 {
863 gphi *phi = i.phi ();
864
865 if (virtual_operand_p (gimple_phi_result (phi)))
866 prop_set_simulate_again (phi, false);
867 else
868 prop_set_simulate_again (phi, true);
869 }
870 }
871 }
872
 873 /* Debug count support. Reset the values of SSA names to
 874 VARYING when the total number of SSA names analyzed is
 875 beyond the specified debug count. */
876
877 static void
878 do_dbg_cnt (void)
879 {
880 unsigned i;
881 for (i = 0; i < num_ssa_names; i++)
882 {
883 if (!dbg_cnt (ccp))
884 {
885 const_val[i].lattice_val = VARYING;
886 const_val[i].mask = -1;
887 const_val[i].value = NULL_TREE;
888 }
889 }
890 }
891
892
893 /* Do final substitution of propagated values, cleanup the flowgraph and
894 free allocated storage.
895
896 Return TRUE when something was optimized. */
897
898 static bool
899 ccp_finalize (void)
900 {
901 bool something_changed;
902 unsigned i;
903
904 do_dbg_cnt ();
905
906 /* Derive alignment and misalignment information from partially
907 constant pointers in the lattice or nonzero bits from partially
908 constant integers. */
909 for (i = 1; i < num_ssa_names; ++i)
910 {
911 tree name = ssa_name (i);
912 ccp_prop_value_t *val;
913 unsigned int tem, align;
914
915 if (!name
916 || (!POINTER_TYPE_P (TREE_TYPE (name))
917 && (!INTEGRAL_TYPE_P (TREE_TYPE (name))
918 /* Don't record nonzero bits before IPA to avoid
919 using too much memory. */
920 || first_pass_instance)))
921 continue;
922
923 val = get_value (name);
924 if (val->lattice_val != CONSTANT
925 || TREE_CODE (val->value) != INTEGER_CST)
926 continue;
927
928 if (POINTER_TYPE_P (TREE_TYPE (name)))
929 {
930 /* Trailing mask bits specify the alignment, trailing value
931 bits the misalignment. */
932 tem = val->mask.to_uhwi ();
933 align = (tem & -tem);
934 if (align > 1)
935 set_ptr_info_alignment (get_ptr_info (name), align,
936 (TREE_INT_CST_LOW (val->value)
937 & (align - 1)));
938 }
939 else
940 {
941 unsigned int precision = TYPE_PRECISION (TREE_TYPE (val->value));
942 wide_int nonzero_bits = wide_int::from (val->mask, precision,
943 UNSIGNED) | val->value;
944 nonzero_bits &= get_nonzero_bits (name);
945 set_nonzero_bits (name, nonzero_bits);
946 }
947 }
948
949 /* Perform substitutions based on the known constant values. */
950 something_changed = substitute_and_fold (get_constant_value,
951 ccp_fold_stmt, true);
952
953 free (const_val);
954 const_val = NULL;
 955 return something_changed;
956 }
957
958
959 /* Compute the meet operator between *VAL1 and *VAL2. Store the result
960 in VAL1.
961
962 any M UNDEFINED = any
963 any M VARYING = VARYING
964 Ci M Cj = Ci if (i == j)
965 Ci M Cj = VARYING if (i != j)
966 */
967
968 static void
969 ccp_lattice_meet (ccp_prop_value_t *val1, ccp_prop_value_t *val2)
970 {
971 if (val1->lattice_val == UNDEFINED
 972 /* For UNDEFINED M SSA we can't always use the SSA name because its
 973 definition may not dominate the PHI node. Doing optimistic copy
 974 propagation also causes a lot of gcc.dg/uninit-pred*.c FAILs. */
975 && (val2->lattice_val != CONSTANT
976 || TREE_CODE (val2->value) != SSA_NAME))
977 {
978 /* UNDEFINED M any = any */
979 *val1 = *val2;
980 }
981 else if (val2->lattice_val == UNDEFINED
982 /* See above. */
983 && (val1->lattice_val != CONSTANT
984 || TREE_CODE (val1->value) != SSA_NAME))
985 {
986 /* any M UNDEFINED = any
987 Nothing to do. VAL1 already contains the value we want. */
988 ;
989 }
990 else if (val1->lattice_val == VARYING
991 || val2->lattice_val == VARYING)
992 {
993 /* any M VARYING = VARYING. */
994 val1->lattice_val = VARYING;
995 val1->mask = -1;
996 val1->value = NULL_TREE;
997 }
998 else if (val1->lattice_val == CONSTANT
999 && val2->lattice_val == CONSTANT
1000 && TREE_CODE (val1->value) == INTEGER_CST
1001 && TREE_CODE (val2->value) == INTEGER_CST)
1002 {
1003 /* Ci M Cj = Ci if (i == j)
1004 Ci M Cj = VARYING if (i != j)
1005
1006 For INTEGER_CSTs mask unequal bits. If no equal bits remain,
1007 drop to varying. */
1008 val1->mask = (val1->mask | val2->mask
1009 | (wi::to_widest (val1->value)
1010 ^ wi::to_widest (val2->value)));
1011 if (wi::sext (val1->mask, TYPE_PRECISION (TREE_TYPE (val1->value))) == -1)
1012 {
1013 val1->lattice_val = VARYING;
1014 val1->value = NULL_TREE;
1015 }
1016 }
1017 else if (val1->lattice_val == CONSTANT
1018 && val2->lattice_val == CONSTANT
1019 && operand_equal_p (val1->value, val2->value, 0))
1020 {
1021 /* Ci M Cj = Ci if (i == j)
1022 Ci M Cj = VARYING if (i != j)
1023
1024 VAL1 already contains the value we want for equivalent values. */
1025 }
1026 else if (val1->lattice_val == CONSTANT
1027 && val2->lattice_val == CONSTANT
1028 && (TREE_CODE (val1->value) == ADDR_EXPR
1029 || TREE_CODE (val2->value) == ADDR_EXPR))
1030 {
 1031 /* When unequal addresses are involved, try meeting for
1032 alignment. */
1033 ccp_prop_value_t tem = *val2;
1034 if (TREE_CODE (val1->value) == ADDR_EXPR)
1035 *val1 = get_value_for_expr (val1->value, true);
1036 if (TREE_CODE (val2->value) == ADDR_EXPR)
1037 tem = get_value_for_expr (val2->value, true);
1038 ccp_lattice_meet (val1, &tem);
1039 }
1040 else
1041 {
1042 /* Any other combination is VARYING. */
1043 val1->lattice_val = VARYING;
1044 val1->mask = -1;
1045 val1->value = NULL_TREE;
1046 }
1047 }
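
/* Worked example for the INTEGER_CST case above (illustrative): meeting
   CONSTANT 4 (mask 0) with CONSTANT 6 (mask 0) gives
   mask = 0 | 0 | (4 ^ 6) = 2, so only bit 1 becomes unknown and the
   result stays CONSTANT with value 4 and mask 2: the inputs agree on
   every bit except bit 1.  Meeting 0 with -1 instead sets every bit of
   the mask, the sign-extended mask is -1, and the result drops to
   VARYING.  */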
1048
1049
 1050 /* Loop through the arguments of PHI node PHI and compare their
 1051 lattice values to determine PHI's lattice value. The value of a
 1052 PHI node is determined by calling ccp_lattice_meet with all the
 1053 arguments of the PHI node that are incoming via executable edges. */
1054
1055 static enum ssa_prop_result
1056 ccp_visit_phi_node (gphi *phi)
1057 {
1058 unsigned i;
1059 ccp_prop_value_t new_val;
1060
1061 if (dump_file && (dump_flags & TDF_DETAILS))
1062 {
1063 fprintf (dump_file, "\nVisiting PHI node: ");
1064 print_gimple_stmt (dump_file, phi, 0, dump_flags);
1065 }
1066
1067 new_val.lattice_val = UNDEFINED;
1068 new_val.value = NULL_TREE;
1069 new_val.mask = 0;
1070
1071 bool first = true;
1072 bool non_exec_edge = false;
1073 for (i = 0; i < gimple_phi_num_args (phi); i++)
1074 {
1075 /* Compute the meet operator over all the PHI arguments flowing
1076 through executable edges. */
1077 edge e = gimple_phi_arg_edge (phi, i);
1078
1079 if (dump_file && (dump_flags & TDF_DETAILS))
1080 {
1081 fprintf (dump_file,
1082 "\n Argument #%d (%d -> %d %sexecutable)\n",
1083 i, e->src->index, e->dest->index,
1084 (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
1085 }
1086
 1087 /* If the incoming edge is executable, compute the meet operator for
1088 the existing value of the PHI node and the current PHI argument. */
1089 if (e->flags & EDGE_EXECUTABLE)
1090 {
1091 tree arg = gimple_phi_arg (phi, i)->def;
1092 ccp_prop_value_t arg_val = get_value_for_expr (arg, false);
1093
1094 if (first)
1095 {
1096 new_val = arg_val;
1097 first = false;
1098 }
1099 else
1100 ccp_lattice_meet (&new_val, &arg_val);
1101
1102 if (dump_file && (dump_flags & TDF_DETAILS))
1103 {
1104 fprintf (dump_file, "\t");
1105 print_generic_expr (dump_file, arg, dump_flags);
1106 dump_lattice_value (dump_file, "\tValue: ", arg_val);
1107 fprintf (dump_file, "\n");
1108 }
1109
1110 if (new_val.lattice_val == VARYING)
1111 break;
1112 }
1113 else
1114 non_exec_edge = true;
1115 }
1116
1117 /* In case there were non-executable edges and the value is a copy
1118 make sure its definition dominates the PHI node. */
1119 if (non_exec_edge
1120 && new_val.lattice_val == CONSTANT
1121 && TREE_CODE (new_val.value) == SSA_NAME
1122 && ! SSA_NAME_IS_DEFAULT_DEF (new_val.value)
1123 && ! dominated_by_p (CDI_DOMINATORS, gimple_bb (phi),
1124 gimple_bb (SSA_NAME_DEF_STMT (new_val.value))))
1125 {
1126 new_val.lattice_val = VARYING;
1127 new_val.value = NULL_TREE;
1128 new_val.mask = -1;
1129 }
1130
1131 if (dump_file && (dump_flags & TDF_DETAILS))
1132 {
1133 dump_lattice_value (dump_file, "\n PHI node value: ", new_val);
1134 fprintf (dump_file, "\n\n");
1135 }
1136
1137 /* Make the transition to the new value. */
1138 if (set_lattice_value (gimple_phi_result (phi), &new_val))
1139 {
1140 if (new_val.lattice_val == VARYING)
1141 return SSA_PROP_VARYING;
1142 else
1143 return SSA_PROP_INTERESTING;
1144 }
1145 else
1146 return SSA_PROP_NOT_INTERESTING;
1147 }
1148
 1149 /* Return the constant value for OP, or OP itself otherwise. */
1150
1151 static tree
1152 valueize_op (tree op)
1153 {
1154 if (TREE_CODE (op) == SSA_NAME)
1155 {
1156 tree tem = get_constant_value (op);
1157 if (tem)
1158 return tem;
1159 }
1160 return op;
1161 }
1162
1163 /* Return the constant value for OP, but signal to not follow SSA
1164 edges if the definition may be simulated again. */
1165
1166 static tree
1167 valueize_op_1 (tree op)
1168 {
1169 if (TREE_CODE (op) == SSA_NAME)
1170 {
1171 /* If the definition may be simulated again we cannot follow
1172 this SSA edge as the SSA propagator does not necessarily
1173 re-visit the use. */
1174 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
1175 if (!gimple_nop_p (def_stmt)
1176 && prop_simulate_again_p (def_stmt))
1177 return NULL_TREE;
1178 tree tem = get_constant_value (op);
1179 if (tem)
1180 return tem;
1181 }
1182 return op;
1183 }
1184
1185 /* CCP specific front-end to the non-destructive constant folding
1186 routines.
1187
1188 Attempt to simplify the RHS of STMT knowing that one or more
1189 operands are constants.
1190
1191 If simplification is possible, return the simplified RHS,
1192 otherwise return the original RHS or NULL_TREE. */
1193
1194 static tree
1195 ccp_fold (gimple *stmt)
1196 {
1197 location_t loc = gimple_location (stmt);
1198 switch (gimple_code (stmt))
1199 {
1200 case GIMPLE_COND:
1201 {
1202 /* Handle comparison operators that can appear in GIMPLE form. */
1203 tree op0 = valueize_op (gimple_cond_lhs (stmt));
1204 tree op1 = valueize_op (gimple_cond_rhs (stmt));
1205 enum tree_code code = gimple_cond_code (stmt);
1206 return fold_binary_loc (loc, code, boolean_type_node, op0, op1);
1207 }
1208
1209 case GIMPLE_SWITCH:
1210 {
1211 /* Return the constant switch index. */
1212 return valueize_op (gimple_switch_index (as_a <gswitch *> (stmt)));
1213 }
1214
1215 case GIMPLE_ASSIGN:
1216 case GIMPLE_CALL:
1217 return gimple_fold_stmt_to_constant_1 (stmt,
1218 valueize_op, valueize_op_1);
1219
1220 default:
1221 gcc_unreachable ();
1222 }
1223 }
1224
1225 /* Apply the operation CODE in type TYPE to the value, mask pair
1226 RVAL and RMASK representing a value of type RTYPE and set
1227 the value, mask pair *VAL and *MASK to the result. */
1228
1229 static void
1230 bit_value_unop_1 (enum tree_code code, tree type,
1231 widest_int *val, widest_int *mask,
1232 tree rtype, const widest_int &rval, const widest_int &rmask)
1233 {
1234 switch (code)
1235 {
1236 case BIT_NOT_EXPR:
1237 *mask = rmask;
1238 *val = ~rval;
1239 break;
1240
1241 case NEGATE_EXPR:
1242 {
1243 widest_int temv, temm;
1244 /* Return ~rval + 1. */
1245 bit_value_unop_1 (BIT_NOT_EXPR, type, &temv, &temm, type, rval, rmask);
1246 bit_value_binop_1 (PLUS_EXPR, type, val, mask,
1247 type, temv, temm, type, 1, 0);
1248 break;
1249 }
1250
1251 CASE_CONVERT:
1252 {
1253 signop sgn;
1254
1255 /* First extend mask and value according to the original type. */
1256 sgn = TYPE_SIGN (rtype);
1257 *mask = wi::ext (rmask, TYPE_PRECISION (rtype), sgn);
1258 *val = wi::ext (rval, TYPE_PRECISION (rtype), sgn);
1259
1260 /* Then extend mask and value according to the target type. */
1261 sgn = TYPE_SIGN (type);
1262 *mask = wi::ext (*mask, TYPE_PRECISION (type), sgn);
1263 *val = wi::ext (*val, TYPE_PRECISION (type), sgn);
1264 break;
1265 }
1266
1267 default:
1268 *mask = -1;
1269 break;
1270 }
1271 }
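
/* Example for the CASE_CONVERT handling above (illustrative): converting
   an unsigned char whose value/mask pair is 0xf0/0x0f to a 32-bit int
   first zero-extends both according to the source type and then extends
   to the target type, leaving the low four bits unknown, bits 4..7 known
   to be one, and all higher bits known to be zero.  */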
1272
1273 /* Apply the operation CODE in type TYPE to the value, mask pairs
 1274 R1VAL, R1MASK and R2VAL, R2MASK representing values of type R1TYPE
1275 and R2TYPE and set the value, mask pair *VAL and *MASK to the result. */
1276
1277 static void
1278 bit_value_binop_1 (enum tree_code code, tree type,
1279 widest_int *val, widest_int *mask,
1280 tree r1type, const widest_int &r1val,
1281 const widest_int &r1mask, tree r2type,
1282 const widest_int &r2val, const widest_int &r2mask)
1283 {
1284 signop sgn = TYPE_SIGN (type);
1285 int width = TYPE_PRECISION (type);
1286 bool swap_p = false;
1287
 1288 /* Assume we'll get a constant result. Use an initial non-varying
 1289 value; we fall back to varying in the end if necessary. */
1290 *mask = -1;
1291
1292 switch (code)
1293 {
1294 case BIT_AND_EXPR:
 1295 /* A result bit is unknown only where some operand bit is unknown and
 1296 no operand bit is known to be zero: (m1 | m2) & ((v1 | m1) & (v2 | m2)). */
1297 *mask = (r1mask | r2mask) & (r1val | r1mask) & (r2val | r2mask);
1298 *val = r1val & r2val;
1299 break;
1300
1301 case BIT_IOR_EXPR:
 1302 /* A result bit is unknown only where some operand bit is unknown and
 1303 no operand bit is known to be one: (m1 | m2) & ~((v1 & ~m1) | (v2 & ~m2)). */
1304 *mask = (r1mask | r2mask)
1305 .and_not (r1val.and_not (r1mask) | r2val.and_not (r2mask));
1306 *val = r1val | r2val;
1307 break;
1308
1309 case BIT_XOR_EXPR:
1310 /* m1 | m2 */
1311 *mask = r1mask | r2mask;
1312 *val = r1val ^ r2val;
1313 break;
1314
1315 case LROTATE_EXPR:
1316 case RROTATE_EXPR:
1317 if (r2mask == 0)
1318 {
1319 widest_int shift = r2val;
1320 if (shift == 0)
1321 {
1322 *mask = r1mask;
1323 *val = r1val;
1324 }
1325 else
1326 {
1327 if (wi::neg_p (shift))
1328 {
1329 shift = -shift;
1330 if (code == RROTATE_EXPR)
1331 code = LROTATE_EXPR;
1332 else
1333 code = RROTATE_EXPR;
1334 }
1335 if (code == RROTATE_EXPR)
1336 {
1337 *mask = wi::rrotate (r1mask, shift, width);
1338 *val = wi::rrotate (r1val, shift, width);
1339 }
1340 else
1341 {
1342 *mask = wi::lrotate (r1mask, shift, width);
1343 *val = wi::lrotate (r1val, shift, width);
1344 }
1345 }
1346 }
1347 break;
1348
1349 case LSHIFT_EXPR:
1350 case RSHIFT_EXPR:
 1351 /* ??? We could handle partially known shift counts if we knew
 1352 their sign. That way we can tell that (x << (y | 8)) & 255
1353 is zero. */
1354 if (r2mask == 0)
1355 {
1356 widest_int shift = r2val;
1357 if (shift == 0)
1358 {
1359 *mask = r1mask;
1360 *val = r1val;
1361 }
1362 else
1363 {
1364 if (wi::neg_p (shift))
1365 {
1366 shift = -shift;
1367 if (code == RSHIFT_EXPR)
1368 code = LSHIFT_EXPR;
1369 else
1370 code = RSHIFT_EXPR;
1371 }
1372 if (code == RSHIFT_EXPR)
1373 {
1374 *mask = wi::rshift (wi::ext (r1mask, width, sgn), shift, sgn);
1375 *val = wi::rshift (wi::ext (r1val, width, sgn), shift, sgn);
1376 }
1377 else
1378 {
1379 *mask = wi::ext (wi::lshift (r1mask, shift), width, sgn);
1380 *val = wi::ext (wi::lshift (r1val, shift), width, sgn);
1381 }
1382 }
1383 }
1384 break;
1385
1386 case PLUS_EXPR:
1387 case POINTER_PLUS_EXPR:
1388 {
1389 /* Do the addition with unknown bits set to zero, to give carry-ins of
1390 zero wherever possible. */
1391 widest_int lo = r1val.and_not (r1mask) + r2val.and_not (r2mask);
1392 lo = wi::ext (lo, width, sgn);
1393 /* Do the addition with unknown bits set to one, to give carry-ins of
1394 one wherever possible. */
1395 widest_int hi = (r1val | r1mask) + (r2val | r2mask);
1396 hi = wi::ext (hi, width, sgn);
1397 /* Each bit in the result is known if (a) the corresponding bits in
1398 both inputs are known, and (b) the carry-in to that bit position
1399 is known. We can check condition (b) by seeing if we got the same
1400 result with minimised carries as with maximised carries. */
1401 *mask = r1mask | r2mask | (lo ^ hi);
1402 *mask = wi::ext (*mask, width, sgn);
1403 /* It shouldn't matter whether we choose lo or hi here. */
1404 *val = lo;
1405 break;
1406 }
1407
1408 case MINUS_EXPR:
1409 {
1410 widest_int temv, temm;
1411 bit_value_unop_1 (NEGATE_EXPR, r2type, &temv, &temm,
1412 r2type, r2val, r2mask);
1413 bit_value_binop_1 (PLUS_EXPR, type, val, mask,
1414 r1type, r1val, r1mask,
1415 r2type, temv, temm);
1416 break;
1417 }
1418
1419 case MULT_EXPR:
1420 {
 1421 /* Just track trailing zeros in both operands: the product has at
 1422 least as many trailing zeros as the two operands combined. */
1423 int r1tz = wi::ctz (r1val | r1mask);
1424 int r2tz = wi::ctz (r2val | r2mask);
1425 if (r1tz + r2tz >= width)
1426 {
1427 *mask = 0;
1428 *val = 0;
1429 }
1430 else if (r1tz + r2tz > 0)
1431 {
1432 *mask = wi::ext (wi::mask <widest_int> (r1tz + r2tz, true),
1433 width, sgn);
1434 *val = 0;
1435 }
1436 break;
1437 }
1438
1439 case EQ_EXPR:
1440 case NE_EXPR:
1441 {
1442 widest_int m = r1mask | r2mask;
1443 if (r1val.and_not (m) != r2val.and_not (m))
1444 {
1445 *mask = 0;
1446 *val = ((code == EQ_EXPR) ? 0 : 1);
1447 }
1448 else
1449 {
1450 /* We know the result of a comparison is always one or zero. */
1451 *mask = 1;
1452 *val = 0;
1453 }
1454 break;
1455 }
1456
1457 case GE_EXPR:
1458 case GT_EXPR:
1459 swap_p = true;
1460 code = swap_tree_comparison (code);
1461 /* Fall through. */
1462 case LT_EXPR:
1463 case LE_EXPR:
1464 {
1465 int minmax, maxmin;
1466
1467 const widest_int &o1val = swap_p ? r2val : r1val;
1468 const widest_int &o1mask = swap_p ? r2mask : r1mask;
1469 const widest_int &o2val = swap_p ? r1val : r2val;
1470 const widest_int &o2mask = swap_p ? r1mask : r2mask;
1471
1472 /* If the most significant bits are not known we know nothing. */
1473 if (wi::neg_p (o1mask) || wi::neg_p (o2mask))
1474 break;
1475
1476 /* For comparisons the signedness is in the comparison operands. */
1477 sgn = TYPE_SIGN (r1type);
1478
 1479 /* If we know the most significant bits we can bound each operand's
 1480 value range by treating the varying bits as all zeros or all ones.
 1481 Do a cross comparison of the max/min pairs. */
1482 maxmin = wi::cmp (o1val | o1mask, o2val.and_not (o2mask), sgn);
1483 minmax = wi::cmp (o1val.and_not (o1mask), o2val | o2mask, sgn);
1484 if (maxmin < 0) /* o1 is less than o2. */
1485 {
1486 *mask = 0;
1487 *val = 1;
1488 }
1489 else if (minmax > 0) /* o1 is not less or equal to o2. */
1490 {
1491 *mask = 0;
1492 *val = 0;
1493 }
1494 else if (maxmin == minmax) /* o1 and o2 are equal. */
1495 {
1496 /* This probably should never happen as we'd have
1497 folded the thing during fully constant value folding. */
1498 *mask = 0;
1499 *val = (code == LE_EXPR ? 1 : 0);
1500 }
1501 else
1502 {
1503 /* We know the result of a comparison is always one or zero. */
1504 *mask = 1;
1505 *val = 0;
1506 }
1507 break;
1508 }
1509
1510 default:;
1511 }
1512 }
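
/* Illustrative sketch (not part of the pass): the PLUS_EXPR case above
   finds the known bits of a sum by adding once with every unknown bit
   forced to zero and once with every unknown bit forced to one; a result
   bit is known only where both input bits and the carry into that
   position are known, i.e. where the two sums agree.  The stand-alone
   model below replays this on 32-bit values with hypothetical inputs
   (known_bits, known_bits_add) and is guarded by #if 0.  */
#if 0
#include <stdio.h>
#include <stdint.h>

/* Known-bits pair: bits set in MASK are unknown, all others equal VAL.  */
struct known_bits { uint32_t val, mask; };

static struct known_bits
known_bits_add (struct known_bits a, struct known_bits b)
{
  uint32_t lo = (a.val & ~a.mask) + (b.val & ~b.mask);	/* carries minimised */
  uint32_t hi = (a.val | a.mask) + (b.val | b.mask);	/* carries maximised */
  struct known_bits r;
  r.mask = a.mask | b.mask | (lo ^ hi);
  r.val = lo;
  return r;
}

int
main (void)
{
  /* x is only known to be a multiple of 8, y is exactly 4:
     x + y must be 4 modulo 8.  */
  struct known_bits x = { 0, 0xfffffff8u };
  struct known_bits y = { 4, 0 };
  struct known_bits s = known_bits_add (x, y);
  /* Prints val=0x4 mask=0xfffffff8.  */
  printf ("val=%#x mask=%#x\n", (unsigned) s.val, (unsigned) s.mask);
  return 0;
}
#endif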
1513
1514 /* Return the propagation value when applying the operation CODE to
1515 the value RHS yielding type TYPE. */
1516
1517 static ccp_prop_value_t
1518 bit_value_unop (enum tree_code code, tree type, tree rhs)
1519 {
1520 ccp_prop_value_t rval = get_value_for_expr (rhs, true);
1521 widest_int value, mask;
1522 ccp_prop_value_t val;
1523
1524 if (rval.lattice_val == UNDEFINED)
1525 return rval;
1526
1527 gcc_assert ((rval.lattice_val == CONSTANT
1528 && TREE_CODE (rval.value) == INTEGER_CST)
1529 || wi::sext (rval.mask, TYPE_PRECISION (TREE_TYPE (rhs))) == -1);
1530 bit_value_unop_1 (code, type, &value, &mask,
1531 TREE_TYPE (rhs), value_to_wide_int (rval), rval.mask);
1532 if (wi::sext (mask, TYPE_PRECISION (type)) != -1)
1533 {
1534 val.lattice_val = CONSTANT;
1535 val.mask = mask;
1536 /* ??? Delay building trees here. */
1537 val.value = wide_int_to_tree (type, value);
1538 }
1539 else
1540 {
1541 val.lattice_val = VARYING;
1542 val.value = NULL_TREE;
1543 val.mask = -1;
1544 }
1545 return val;
1546 }
1547
1548 /* Return the propagation value when applying the operation CODE to
1549 the values RHS1 and RHS2 yielding type TYPE. */
1550
1551 static ccp_prop_value_t
1552 bit_value_binop (enum tree_code code, tree type, tree rhs1, tree rhs2)
1553 {
1554 ccp_prop_value_t r1val = get_value_for_expr (rhs1, true);
1555 ccp_prop_value_t r2val = get_value_for_expr (rhs2, true);
1556 widest_int value, mask;
1557 ccp_prop_value_t val;
1558
1559 if (r1val.lattice_val == UNDEFINED
1560 || r2val.lattice_val == UNDEFINED)
1561 {
1562 val.lattice_val = VARYING;
1563 val.value = NULL_TREE;
1564 val.mask = -1;
1565 return val;
1566 }
1567
1568 gcc_assert ((r1val.lattice_val == CONSTANT
1569 && TREE_CODE (r1val.value) == INTEGER_CST)
1570 || wi::sext (r1val.mask,
1571 TYPE_PRECISION (TREE_TYPE (rhs1))) == -1);
1572 gcc_assert ((r2val.lattice_val == CONSTANT
1573 && TREE_CODE (r2val.value) == INTEGER_CST)
1574 || wi::sext (r2val.mask,
1575 TYPE_PRECISION (TREE_TYPE (rhs2))) == -1);
1576 bit_value_binop_1 (code, type, &value, &mask,
1577 TREE_TYPE (rhs1), value_to_wide_int (r1val), r1val.mask,
1578 TREE_TYPE (rhs2), value_to_wide_int (r2val), r2val.mask);
1579 if (wi::sext (mask, TYPE_PRECISION (type)) != -1)
1580 {
1581 val.lattice_val = CONSTANT;
1582 val.mask = mask;
1583 /* ??? Delay building trees here. */
1584 val.value = wide_int_to_tree (type, value);
1585 }
1586 else
1587 {
1588 val.lattice_val = VARYING;
1589 val.value = NULL_TREE;
1590 val.mask = -1;
1591 }
1592 return val;
1593 }
1594
1595 /* Return the propagation value for __builtin_assume_aligned
1596 and functions with assume_aligned or alloc_aligned attribute.
1597 For __builtin_assume_aligned, ATTR is NULL_TREE,
1598 for assume_aligned attribute ATTR is non-NULL and ALLOC_ALIGNED
1599 is false, for alloc_aligned attribute ATTR is non-NULL and
1600 ALLOC_ALIGNED is true. */
1601
1602 static ccp_prop_value_t
1603 bit_value_assume_aligned (gimple *stmt, tree attr, ccp_prop_value_t ptrval,
1604 bool alloc_aligned)
1605 {
1606 tree align, misalign = NULL_TREE, type;
1607 unsigned HOST_WIDE_INT aligni, misaligni = 0;
1608 ccp_prop_value_t alignval;
1609 widest_int value, mask;
1610 ccp_prop_value_t val;
1611
1612 if (attr == NULL_TREE)
1613 {
1614 tree ptr = gimple_call_arg (stmt, 0);
1615 type = TREE_TYPE (ptr);
1616 ptrval = get_value_for_expr (ptr, true);
1617 }
1618 else
1619 {
1620 tree lhs = gimple_call_lhs (stmt);
1621 type = TREE_TYPE (lhs);
1622 }
1623
1624 if (ptrval.lattice_val == UNDEFINED)
1625 return ptrval;
1626 gcc_assert ((ptrval.lattice_val == CONSTANT
1627 && TREE_CODE (ptrval.value) == INTEGER_CST)
1628 || wi::sext (ptrval.mask, TYPE_PRECISION (type)) == -1);
1629 if (attr == NULL_TREE)
1630 {
1631 /* Get aligni and misaligni from __builtin_assume_aligned. */
1632 align = gimple_call_arg (stmt, 1);
1633 if (!tree_fits_uhwi_p (align))
1634 return ptrval;
1635 aligni = tree_to_uhwi (align);
1636 if (gimple_call_num_args (stmt) > 2)
1637 {
1638 misalign = gimple_call_arg (stmt, 2);
1639 if (!tree_fits_uhwi_p (misalign))
1640 return ptrval;
1641 misaligni = tree_to_uhwi (misalign);
1642 }
1643 }
1644 else
1645 {
1646 /* Get aligni and misaligni from assume_aligned or
1647 alloc_align attributes. */
1648 if (TREE_VALUE (attr) == NULL_TREE)
1649 return ptrval;
1650 attr = TREE_VALUE (attr);
1651 align = TREE_VALUE (attr);
1652 if (!tree_fits_uhwi_p (align))
1653 return ptrval;
1654 aligni = tree_to_uhwi (align);
1655 if (alloc_aligned)
1656 {
1657 if (aligni == 0 || aligni > gimple_call_num_args (stmt))
1658 return ptrval;
1659 align = gimple_call_arg (stmt, aligni - 1);
1660 if (!tree_fits_uhwi_p (align))
1661 return ptrval;
1662 aligni = tree_to_uhwi (align);
1663 }
1664 else if (TREE_CHAIN (attr) && TREE_VALUE (TREE_CHAIN (attr)))
1665 {
1666 misalign = TREE_VALUE (TREE_CHAIN (attr));
1667 if (!tree_fits_uhwi_p (misalign))
1668 return ptrval;
1669 misaligni = tree_to_uhwi (misalign);
1670 }
1671 }
1672 if (aligni <= 1 || (aligni & (aligni - 1)) != 0 || misaligni >= aligni)
1673 return ptrval;
1674
1675 align = build_int_cst_type (type, -aligni);
1676 alignval = get_value_for_expr (align, true);
1677 bit_value_binop_1 (BIT_AND_EXPR, type, &value, &mask,
1678 type, value_to_wide_int (ptrval), ptrval.mask,
1679 type, value_to_wide_int (alignval), alignval.mask);
1680 if (wi::sext (mask, TYPE_PRECISION (type)) != -1)
1681 {
1682 val.lattice_val = CONSTANT;
1683 val.mask = mask;
1684 gcc_assert ((mask.to_uhwi () & (aligni - 1)) == 0);
1685 gcc_assert ((value.to_uhwi () & (aligni - 1)) == 0);
1686 value |= misaligni;
1687 /* ??? Delay building trees here. */
1688 val.value = wide_int_to_tree (type, value);
1689 }
1690 else
1691 {
1692 val.lattice_val = VARYING;
1693 val.value = NULL_TREE;
1694 val.mask = -1;
1695 }
1696 return val;
1697 }
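
/* Worked example (illustrative): for p2 = __builtin_assume_aligned (p, 16, 4)
   the routine above meets the (unknown) value of P with the constant -16
   using BIT_AND_EXPR, giving value 0 with mask -16, and then ORs in the
   misalignment, so the result is value 4 with mask -16: P2 is known to
   equal 4 modulo 16.  */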
1698
1699 /* Evaluate statement STMT.
1700 Valid only for assignments, calls, conditionals, and switches. */
1701
1702 static ccp_prop_value_t
1703 evaluate_stmt (gimple *stmt)
1704 {
1705 ccp_prop_value_t val;
1706 tree simplified = NULL_TREE;
1707 ccp_lattice_t likelyvalue = likely_value (stmt);
1708 bool is_constant = false;
1709 unsigned int align;
1710
1711 if (dump_file && (dump_flags & TDF_DETAILS))
1712 {
1713 fprintf (dump_file, "which is likely ");
1714 switch (likelyvalue)
1715 {
1716 case CONSTANT:
1717 fprintf (dump_file, "CONSTANT");
1718 break;
1719 case UNDEFINED:
1720 fprintf (dump_file, "UNDEFINED");
1721 break;
1722 case VARYING:
1723 fprintf (dump_file, "VARYING");
1724 break;
1725 default:;
1726 }
1727 fprintf (dump_file, "\n");
1728 }
1729
1730 /* If the statement is likely to have a CONSTANT result, then try
1731 to fold the statement to determine the constant value. */
1732 /* FIXME. This is the only place that we call ccp_fold.
1733 Since likely_value never returns CONSTANT for calls, we will
 1734 not attempt to fold them, including builtins that might profit from it. */
1735 if (likelyvalue == CONSTANT)
1736 {
1737 fold_defer_overflow_warnings ();
1738 simplified = ccp_fold (stmt);
1739 if (simplified && TREE_CODE (simplified) == SSA_NAME)
1740 {
1741 val = *get_value (simplified);
1742 if (val.lattice_val != VARYING)
1743 {
1744 fold_undefer_overflow_warnings (true, stmt, 0);
1745 return val;
1746 }
1747 }
1748 is_constant = simplified && is_gimple_min_invariant (simplified);
1749 fold_undefer_overflow_warnings (is_constant, stmt, 0);
1750 if (is_constant)
1751 {
1752 /* The statement produced a constant value. */
1753 val.lattice_val = CONSTANT;
1754 val.value = simplified;
1755 val.mask = 0;
1756 return val;
1757 }
1758 }
1759 /* If the statement is likely to have a VARYING result, then do not
1760 bother folding the statement. */
1761 else if (likelyvalue == VARYING)
1762 {
1763 enum gimple_code code = gimple_code (stmt);
1764 if (code == GIMPLE_ASSIGN)
1765 {
1766 enum tree_code subcode = gimple_assign_rhs_code (stmt);
1767
1768 /* Other cases cannot satisfy is_gimple_min_invariant
1769 without folding. */
1770 if (get_gimple_rhs_class (subcode) == GIMPLE_SINGLE_RHS)
1771 simplified = gimple_assign_rhs1 (stmt);
1772 }
1773 else if (code == GIMPLE_SWITCH)
1774 simplified = gimple_switch_index (as_a <gswitch *> (stmt));
1775 else
1776 /* These cannot satisfy is_gimple_min_invariant without folding. */
1777 gcc_assert (code == GIMPLE_CALL || code == GIMPLE_COND);
1778 is_constant = simplified && is_gimple_min_invariant (simplified);
1779 if (is_constant)
1780 {
1781 /* The statement produced a constant value. */
1782 val.lattice_val = CONSTANT;
1783 val.value = simplified;
1784 val.mask = 0;
1785 }
1786 }
1787 /* If the statement result is likely UNDEFINED, make it so. */
1788 else if (likelyvalue == UNDEFINED)
1789 {
1790 val.lattice_val = UNDEFINED;
1791 val.value = NULL_TREE;
1792 val.mask = 0;
1793 return val;
1794 }
1795
1796 /* Resort to simplification for bitwise tracking. */
1797 if (flag_tree_bit_ccp
1798 && (likelyvalue == CONSTANT || is_gimple_call (stmt)
1799 || (gimple_assign_single_p (stmt)
1800 && gimple_assign_rhs_code (stmt) == ADDR_EXPR))
1801 && !is_constant)
1802 {
1803 enum gimple_code code = gimple_code (stmt);
1804 val.lattice_val = VARYING;
1805 val.value = NULL_TREE;
1806 val.mask = -1;
1807 if (code == GIMPLE_ASSIGN)
1808 {
1809 enum tree_code subcode = gimple_assign_rhs_code (stmt);
1810 tree rhs1 = gimple_assign_rhs1 (stmt);
1811 tree lhs = gimple_assign_lhs (stmt);
1812 if ((INTEGRAL_TYPE_P (TREE_TYPE (lhs))
1813 || POINTER_TYPE_P (TREE_TYPE (lhs)))
1814 && (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
1815 || POINTER_TYPE_P (TREE_TYPE (rhs1))))
1816 switch (get_gimple_rhs_class (subcode))
1817 {
1818 case GIMPLE_SINGLE_RHS:
1819 val = get_value_for_expr (rhs1, true);
1820 break;
1821
1822 case GIMPLE_UNARY_RHS:
1823 val = bit_value_unop (subcode, TREE_TYPE (lhs), rhs1);
1824 break;
1825
1826 case GIMPLE_BINARY_RHS:
1827 val = bit_value_binop (subcode, TREE_TYPE (lhs), rhs1,
1828 gimple_assign_rhs2 (stmt));
1829 break;
1830
1831 default:;
1832 }
1833 }
1834 else if (code == GIMPLE_COND)
1835 {
1836 enum tree_code code = gimple_cond_code (stmt);
1837 tree rhs1 = gimple_cond_lhs (stmt);
1838 tree rhs2 = gimple_cond_rhs (stmt);
1839 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
1840 || POINTER_TYPE_P (TREE_TYPE (rhs1)))
1841 val = bit_value_binop (code, TREE_TYPE (rhs1), rhs1, rhs2);
1842 }
1843 else if (gimple_call_builtin_p (stmt, BUILT_IN_NORMAL))
1844 {
1845 tree fndecl = gimple_call_fndecl (stmt);
1846 switch (DECL_FUNCTION_CODE (fndecl))
1847 {
1848 case BUILT_IN_MALLOC:
1849 case BUILT_IN_REALLOC:
1850 case BUILT_IN_CALLOC:
1851 case BUILT_IN_STRDUP:
1852 case BUILT_IN_STRNDUP:
1853 val.lattice_val = CONSTANT;
1854 val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0);
1855 val.mask = ~((HOST_WIDE_INT) MALLOC_ABI_ALIGNMENT
1856 / BITS_PER_UNIT - 1);
1857 break;
1858
1859 case BUILT_IN_ALLOCA:
1860 case BUILT_IN_ALLOCA_WITH_ALIGN:
1861 align = (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_ALLOCA_WITH_ALIGN
1862 ? TREE_INT_CST_LOW (gimple_call_arg (stmt, 1))
1863 : BIGGEST_ALIGNMENT);
1864 val.lattice_val = CONSTANT;
1865 val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0);
1866 val.mask = ~((HOST_WIDE_INT) align / BITS_PER_UNIT - 1);
1867 break;
1868
1869 /* These builtins return their first argument, unmodified. */
1870 case BUILT_IN_MEMCPY:
1871 case BUILT_IN_MEMMOVE:
1872 case BUILT_IN_MEMSET:
1873 case BUILT_IN_STRCPY:
1874 case BUILT_IN_STRNCPY:
1875 case BUILT_IN_MEMCPY_CHK:
1876 case BUILT_IN_MEMMOVE_CHK:
1877 case BUILT_IN_MEMSET_CHK:
1878 case BUILT_IN_STRCPY_CHK:
1879 case BUILT_IN_STRNCPY_CHK:
1880 val = get_value_for_expr (gimple_call_arg (stmt, 0), true);
1881 break;
1882
1883 case BUILT_IN_ASSUME_ALIGNED:
1884 val = bit_value_assume_aligned (stmt, NULL_TREE, val, false);
1885 break;
1886
1887 case BUILT_IN_ALIGNED_ALLOC:
1888 {
1889 tree align = get_constant_value (gimple_call_arg (stmt, 0));
1890 if (align
1891 && tree_fits_uhwi_p (align))
1892 {
1893 unsigned HOST_WIDE_INT aligni = tree_to_uhwi (align);
1894 if (aligni > 1
1895 /* align must be a power of two. */
1896 && (aligni & (aligni - 1)) == 0)
1897 {
1898 val.lattice_val = CONSTANT;
1899 val.value = build_int_cst (ptr_type_node, 0);
1900 val.mask = -aligni;
1901 }
1902 }
1903 break;
1904 }
1905
1906 default:;
1907 }
1908 }
1909 if (is_gimple_call (stmt) && gimple_call_lhs (stmt))
1910 {
1911 tree fntype = gimple_call_fntype (stmt);
1912 if (fntype)
1913 {
1914 tree attrs = lookup_attribute ("assume_aligned",
1915 TYPE_ATTRIBUTES (fntype));
1916 if (attrs)
1917 val = bit_value_assume_aligned (stmt, attrs, val, false);
1918 attrs = lookup_attribute ("alloc_align",
1919 TYPE_ATTRIBUTES (fntype));
1920 if (attrs)
1921 val = bit_value_assume_aligned (stmt, attrs, val, true);
1922 }
1923 }
1924 is_constant = (val.lattice_val == CONSTANT);
1925 }
1926
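  /* Additionally fold in what get_nonzero_bits recorded for the LHS: any
     bit that is not set there is known to be zero, which can refine both a
     constant value and the bit-lattice mask computed above.  */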
1927 if (flag_tree_bit_ccp
1928 && ((is_constant && TREE_CODE (val.value) == INTEGER_CST)
1929 || !is_constant)
1930 && gimple_get_lhs (stmt)
1931 && TREE_CODE (gimple_get_lhs (stmt)) == SSA_NAME)
1932 {
1933 tree lhs = gimple_get_lhs (stmt);
1934 wide_int nonzero_bits = get_nonzero_bits (lhs);
1935 if (nonzero_bits != -1)
1936 {
1937 if (!is_constant)
1938 {
1939 val.lattice_val = CONSTANT;
1940 val.value = build_zero_cst (TREE_TYPE (lhs));
1941 val.mask = extend_mask (nonzero_bits);
1942 is_constant = true;
1943 }
1944 else
1945 {
1946 if (wi::bit_and_not (val.value, nonzero_bits) != 0)
1947 val.value = wide_int_to_tree (TREE_TYPE (lhs),
1948 nonzero_bits & val.value);
1949 if (nonzero_bits == 0)
1950 val.mask = 0;
1951 else
1952 val.mask = val.mask & extend_mask (nonzero_bits);
1953 }
1954 }
1955 }
1956
1957 /* The statement produced a nonconstant value. */
1958 if (!is_constant)
1959 {
1960 /* The statement produced a copy. */
1961 if (simplified && TREE_CODE (simplified) == SSA_NAME
1962 && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (simplified))
1963 {
1964 val.lattice_val = CONSTANT;
1965 val.value = simplified;
1966 val.mask = -1;
1967 }
1968 /* The statement is VARYING. */
1969 else
1970 {
1971 val.lattice_val = VARYING;
1972 val.value = NULL_TREE;
1973 val.mask = -1;
1974 }
1975 }
1976
1977 return val;
1978 }
1979
1980 typedef hash_table<nofree_ptr_hash<gimple> > gimple_htab;
1981
1982 /* Given a BUILT_IN_STACK_SAVE value SAVED_VAL, insert a clobber of VAR before
1983 each matching BUILT_IN_STACK_RESTORE. Mark visited phis in VISITED. */
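/* As an illustration (hypothetical GIMPLE, names invented), the walk turns

     saved_1 = __builtin_stack_save ();
     ...
     __builtin_stack_restore (saved_1);

   into

     saved_1 = __builtin_stack_save ();
     ...
     var ={v} {CLOBBER};
     __builtin_stack_restore (saved_1);

   following copies and PHIs of SAVED_VAL so that restores reached only
   through them are clobbered as well.  */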
1984
1985 static void
1986 insert_clobber_before_stack_restore (tree saved_val, tree var,
1987 gimple_htab **visited)
1988 {
1989 gimple *stmt;
1990 gassign *clobber_stmt;
1991 tree clobber;
1992 imm_use_iterator iter;
1993 gimple_stmt_iterator i;
1994 gimple **slot;
1995
1996 FOR_EACH_IMM_USE_STMT (stmt, iter, saved_val)
1997 if (gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
1998 {
1999 clobber = build_constructor (TREE_TYPE (var),
2000 NULL);
2001 TREE_THIS_VOLATILE (clobber) = 1;
2002 clobber_stmt = gimple_build_assign (var, clobber);
2003
2004 i = gsi_for_stmt (stmt);
2005 gsi_insert_before (&i, clobber_stmt, GSI_SAME_STMT);
2006 }
2007 else if (gimple_code (stmt) == GIMPLE_PHI)
2008 {
2009 if (!*visited)
2010 *visited = new gimple_htab (10);
2011
2012 slot = (*visited)->find_slot (stmt, INSERT);
2013 if (*slot != NULL)
2014 continue;
2015
2016 *slot = stmt;
2017 insert_clobber_before_stack_restore (gimple_phi_result (stmt), var,
2018 visited);
2019 }
2020 else if (gimple_assign_ssa_name_copy_p (stmt))
2021 insert_clobber_before_stack_restore (gimple_assign_lhs (stmt), var,
2022 visited);
2023 else if (chkp_gimple_call_builtin_p (stmt, BUILT_IN_CHKP_BNDRET))
2024 continue;
2025 else
2026 gcc_assert (is_gimple_debug (stmt));
2027 }
2028
2029 /* Advance the iterator to the previous non-debug gimple statement in the same
2030 or a dominating basic block. */
2031
2032 static inline void
2033 gsi_prev_dom_bb_nondebug (gimple_stmt_iterator *i)
2034 {
2035 basic_block dom;
2036
2037 gsi_prev_nondebug (i);
2038 while (gsi_end_p (*i))
2039 {
2040 dom = get_immediate_dominator (CDI_DOMINATORS, i->bb);
2041 if (dom == NULL || dom == ENTRY_BLOCK_PTR_FOR_FN (cfun))
2042 return;
2043
2044 *i = gsi_last_bb (dom);
2045 }
2046 }
2047
2048 /* Find a BUILT_IN_STACK_SAVE dominating gsi_stmt (I), and insert
2049 a clobber of VAR before each matching BUILT_IN_STACK_RESTORE.
2050
2051 It is possible that a BUILT_IN_STACK_SAVE cannot be found in a dominator when
2052 a previous pass (such as DOM) has duplicated it along multiple paths to a BB.
2053 In that case the function gives up without inserting the clobbers. */
2054
2055 static void
2056 insert_clobbers_for_var (gimple_stmt_iterator i, tree var)
2057 {
2058 gimple *stmt;
2059 tree saved_val;
2060 gimple_htab *visited = NULL;
2061
2062 for (; !gsi_end_p (i); gsi_prev_dom_bb_nondebug (&i))
2063 {
2064 stmt = gsi_stmt (i);
2065
2066 if (!gimple_call_builtin_p (stmt, BUILT_IN_STACK_SAVE))
2067 continue;
2068
2069 saved_val = gimple_call_lhs (stmt);
2070 if (saved_val == NULL_TREE)
2071 continue;
2072
2073 insert_clobber_before_stack_restore (saved_val, var, &visited);
2074 break;
2075 }
2076
2077 delete visited;
2078 }
2079
2080 /* Detect a __builtin_alloca_with_align with a constant size argument. If one
2081 is found, declare a fixed-size array in its place and return the address of
2082 that array; otherwise return NULL_TREE. */
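/* As a sketch (hypothetical GIMPLE and invented declaration name), a call

     p_1 = __builtin_alloca_with_align (16, 32);

   may be replaced by the address of a new 16-byte local array D.2345 whose
   DECL_ALIGN is 32 bits, i.e. the folded RHS is effectively &D.2345
   converted to the type of p_1.  */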
2083
2084 static tree
2085 fold_builtin_alloca_with_align (gimple *stmt)
2086 {
2087 unsigned HOST_WIDE_INT size, threshold, n_elem;
2088 tree lhs, arg, block, var, elem_type, array_type;
2089
2090 /* Get lhs. */
2091 lhs = gimple_call_lhs (stmt);
2092 if (lhs == NULL_TREE)
2093 return NULL_TREE;
2094
2095 /* Detect constant argument. */
2096 arg = get_constant_value (gimple_call_arg (stmt, 0));
2097 if (arg == NULL_TREE
2098 || TREE_CODE (arg) != INTEGER_CST
2099 || !tree_fits_uhwi_p (arg))
2100 return NULL_TREE;
2101
2102 size = tree_to_uhwi (arg);
2103
2104 /* Heuristic: don't fold large allocas. */
2105 threshold = (unsigned HOST_WIDE_INT) PARAM_VALUE (PARAM_LARGE_STACK_FRAME);
2106 /* In case the alloca is located at function entry, it has the same lifetime
2107 as a declared array, so we allow a larger size. */
2108 block = gimple_block (stmt);
2109 if (!(cfun->after_inlining
2110 && block
2111 && TREE_CODE (BLOCK_SUPERCONTEXT (block)) == FUNCTION_DECL))
2112 threshold /= 10;
2113 if (size > threshold)
2114 return NULL_TREE;
2115
2116 /* Declare array. */
2117 elem_type = build_nonstandard_integer_type (BITS_PER_UNIT, 1);
2118 n_elem = size * 8 / BITS_PER_UNIT;
2119 array_type = build_array_type_nelts (elem_type, n_elem);
2120 var = create_tmp_var (array_type);
2121 DECL_ALIGN (var) = TREE_INT_CST_LOW (gimple_call_arg (stmt, 1));
2122 {
2123 struct ptr_info_def *pi = SSA_NAME_PTR_INFO (lhs);
2124 if (pi != NULL && !pi->pt.anything)
2125 {
2126 bool singleton_p;
2127 unsigned uid;
2128 singleton_p = pt_solution_singleton_p (&pi->pt, &uid);
2129 gcc_assert (singleton_p);
2130 SET_DECL_PT_UID (var, uid);
2131 }
2132 }
2133
2134 /* Fold alloca to the address of the array. */
2135 return fold_convert (TREE_TYPE (lhs), build_fold_addr_expr (var));
2136 }
2137
2138 /* Fold the stmt at *GSI with CCP-specific information that propagation
2139 and regular folding do not catch. */
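/* For instance (hypothetical GIMPLE), when evaluate_stmt proves the
   predicate of "if (x_3 != 0)" to be the constant zero with an all-zero
   mask, the GIMPLE_COND is rewritten into a trivially false condition so
   that later CFG cleanup can remove the dead branch.  */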
2140
2141 static bool
2142 ccp_fold_stmt (gimple_stmt_iterator *gsi)
2143 {
2144 gimple *stmt = gsi_stmt (*gsi);
2145
2146 switch (gimple_code (stmt))
2147 {
2148 case GIMPLE_COND:
2149 {
2150 gcond *cond_stmt = as_a <gcond *> (stmt);
2151 ccp_prop_value_t val;
2152 /* Statement evaluation will handle type mismatches in constants
2153 more gracefully than the final propagation. This allows us to
2154 fold more conditionals here. */
2155 val = evaluate_stmt (stmt);
2156 if (val.lattice_val != CONSTANT
2157 || val.mask != 0)
2158 return false;
2159
2160 if (dump_file)
2161 {
2162 fprintf (dump_file, "Folding predicate ");
2163 print_gimple_expr (dump_file, stmt, 0, 0);
2164 fprintf (dump_file, " to ");
2165 print_generic_expr (dump_file, val.value, 0);
2166 fprintf (dump_file, "\n");
2167 }
2168
2169 if (integer_zerop (val.value))
2170 gimple_cond_make_false (cond_stmt);
2171 else
2172 gimple_cond_make_true (cond_stmt);
2173
2174 return true;
2175 }
2176
2177 case GIMPLE_CALL:
2178 {
2179 tree lhs = gimple_call_lhs (stmt);
2180 int flags = gimple_call_flags (stmt);
2181 tree val;
2182 tree argt;
2183 bool changed = false;
2184 unsigned i;
2185
2186 /* If the call was folded into a constant, make sure it goes
2187 away even if we cannot propagate into all uses because of
2188 type issues. */
2189 if (lhs
2190 && TREE_CODE (lhs) == SSA_NAME
2191 && (val = get_constant_value (lhs))
2192 /* Don't optimize away calls that have side-effects. */
2193 && (flags & (ECF_CONST|ECF_PURE)) != 0
2194 && (flags & ECF_LOOPING_CONST_OR_PURE) == 0)
2195 {
2196 tree new_rhs = unshare_expr (val);
2197 bool res;
2198 if (!useless_type_conversion_p (TREE_TYPE (lhs),
2199 TREE_TYPE (new_rhs)))
2200 new_rhs = fold_convert (TREE_TYPE (lhs), new_rhs);
2201 res = update_call_from_tree (gsi, new_rhs);
2202 gcc_assert (res);
2203 return true;
2204 }
2205
2206 /* Internal calls provide no argument types, so the extra laxity
2207 for normal calls does not apply. */
2208 if (gimple_call_internal_p (stmt))
2209 return false;
2210
2211 /* The heuristic of fold_builtin_alloca_with_align differs before and
2212 after inlining, so we don't require the arg to have been replaced by a
2213 constant in the IL; it only needs to have a known constant lattice value. */
2214 if (gimple_call_builtin_p (stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
2215 {
2216 tree new_rhs = fold_builtin_alloca_with_align (stmt);
2217 if (new_rhs)
2218 {
2219 bool res = update_call_from_tree (gsi, new_rhs);
2220 tree var = TREE_OPERAND (TREE_OPERAND (new_rhs, 0), 0);
2221 gcc_assert (res);
2222 insert_clobbers_for_var (*gsi, var);
2223 return true;
2224 }
2225 }
2226
2227 /* Propagate into the call arguments. Compared to replace_uses_in
2228 this can use the argument slot types for type verification
2229 instead of the current argument type. We can also safely
2230 drop qualifiers here, as we are dealing with constants anyway. */
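	/* E.g. a call foo (x_5), where x_5 is known to be the constant 7
	   and the parameter slot has a compatible type, becomes foo (7)
	   (names purely illustrative).  */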
2231 argt = TYPE_ARG_TYPES (gimple_call_fntype (stmt));
2232 for (i = 0; i < gimple_call_num_args (stmt) && argt;
2233 ++i, argt = TREE_CHAIN (argt))
2234 {
2235 tree arg = gimple_call_arg (stmt, i);
2236 if (TREE_CODE (arg) == SSA_NAME
2237 && (val = get_constant_value (arg))
2238 && useless_type_conversion_p
2239 (TYPE_MAIN_VARIANT (TREE_VALUE (argt)),
2240 TYPE_MAIN_VARIANT (TREE_TYPE (val))))
2241 {
2242 gimple_call_set_arg (stmt, i, unshare_expr (val));
2243 changed = true;
2244 }
2245 }
2246
2247 return changed;
2248 }
2249
2250 case GIMPLE_ASSIGN:
2251 {
2252 tree lhs = gimple_assign_lhs (stmt);
2253 tree val;
2254
2255 /* If we have a load that turned out to be constant, replace it,
2256 as we cannot propagate into all uses in all cases. */
2257 if (gimple_assign_single_p (stmt)
2258 && TREE_CODE (lhs) == SSA_NAME
2259 && (val = get_constant_value (lhs)))
2260 {
2261 tree rhs = unshare_expr (val);
2262 if (!useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (rhs)))
2263 rhs = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (lhs), rhs);
2264 gimple_assign_set_rhs_from_tree (gsi, rhs);
2265 return true;
2266 }
2267
2268 return false;
2269 }
2270
2271 default:
2272 return false;
2273 }
2274 }
2275
2276 /* Visit the assignment statement STMT. Set the value of its LHS to the
2277 value computed by the RHS and store LHS in *OUTPUT_P. If STMT
2278 creates virtual definitions, set the value of each new name to that
2279 of the RHS (if we can derive a constant out of the RHS).
2280 Value-returning call statements also perform an assignment, and
2281 are handled here. */
2282
2283 static enum ssa_prop_result
2284 visit_assignment (gimple *stmt, tree *output_p)
2285 {
2286 ccp_prop_value_t val;
2287 enum ssa_prop_result retval = SSA_PROP_NOT_INTERESTING;
2288
2289 tree lhs = gimple_get_lhs (stmt);
2290 if (TREE_CODE (lhs) == SSA_NAME)
2291 {
2292 /* Evaluate the statement, which could be
2293 either a GIMPLE_ASSIGN or a GIMPLE_CALL. */
2294 val = evaluate_stmt (stmt);
2295
2296 /* If STMT is an assignment to an SSA_NAME, we only have one
2297 value to set. */
2298 if (set_lattice_value (lhs, &val))
2299 {
2300 *output_p = lhs;
2301 if (val.lattice_val == VARYING)
2302 retval = SSA_PROP_VARYING;
2303 else
2304 retval = SSA_PROP_INTERESTING;
2305 }
2306 }
2307
2308 return retval;
2309 }
2310
2311
2312 /* Visit the conditional statement STMT. Return SSA_PROP_INTERESTING
2313 if it can determine which edge will be taken. Otherwise, return
2314 SSA_PROP_VARYING. */
2315
2316 static enum ssa_prop_result
2317 visit_cond_stmt (gimple *stmt, edge *taken_edge_p)
2318 {
2319 ccp_prop_value_t val;
2320 basic_block block;
2321
2322 block = gimple_bb (stmt);
2323 val = evaluate_stmt (stmt);
2324 if (val.lattice_val != CONSTANT
2325 || val.mask != 0)
2326 return SSA_PROP_VARYING;
2327
2328 /* Find which edge out of the conditional block will be taken and add it
2329 to the worklist. If no single edge can be determined statically,
2330 return SSA_PROP_VARYING to feed all the outgoing edges to the
2331 propagation engine. */
2332 *taken_edge_p = find_taken_edge (block, val.value);
2333 if (*taken_edge_p)
2334 return SSA_PROP_INTERESTING;
2335 else
2336 return SSA_PROP_VARYING;
2337 }
2338
2339
2340 /* Evaluate statement STMT. If the statement produces an output value and
2341 its evaluation changes the lattice value of its output, return
2342 SSA_PROP_INTERESTING and set *OUTPUT_P to the SSA_NAME holding the
2343 output value.
2344
2345 If STMT is a conditional branch and we can determine its truth
2346 value, set *TAKEN_EDGE_P accordingly. If STMT produces a varying
2347 value, return SSA_PROP_VARYING. */
2348
2349 static enum ssa_prop_result
2350 ccp_visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
2351 {
2352 tree def;
2353 ssa_op_iter iter;
2354
2355 if (dump_file && (dump_flags & TDF_DETAILS))
2356 {
2357 fprintf (dump_file, "\nVisiting statement:\n");
2358 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
2359 }
2360
2361 switch (gimple_code (stmt))
2362 {
2363 case GIMPLE_ASSIGN:
2364 /* If the statement is an assignment that produces a single
2365 output value, evaluate its RHS to see if the lattice value of
2366 its output has changed. */
2367 return visit_assignment (stmt, output_p);
2368
2369 case GIMPLE_CALL:
2370 /* A value-returning call also performs an assignment. */
2371 if (gimple_call_lhs (stmt) != NULL_TREE)
2372 return visit_assignment (stmt, output_p);
2373 break;
2374
2375 case GIMPLE_COND:
2376 case GIMPLE_SWITCH:
2377 /* If STMT is a conditional branch, see if we can determine
2378 which branch will be taken. */
2379 /* FIXME. It appears that we should be able to optimize
2380 computed GOTOs here as well. */
2381 return visit_cond_stmt (stmt, taken_edge_p);
2382
2383 default:
2384 break;
2385 }
2386
2387 /* Any other kind of statement is not interesting for constant
2388 propagation and, therefore, not worth simulating. */
2389 if (dump_file && (dump_flags & TDF_DETAILS))
2390 fprintf (dump_file, "No interesting values produced. Marked VARYING.\n");
2391
2392 /* Definitions made by statements other than assignments to
2393 SSA_NAMEs represent unknown modifications to their outputs.
2394 Mark them VARYING. */
2395 FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
2396 set_value_varying (def);
2397
2398 return SSA_PROP_VARYING;
2399 }
2400
2401
2402 /* Main entry point for SSA Conditional Constant Propagation. */
2403
2404 static unsigned int
2405 do_ssa_ccp (void)
2406 {
2407 unsigned int todo = 0;
2408 calculate_dominance_info (CDI_DOMINATORS);
2409 ccp_initialize ();
2410 ssa_propagate (ccp_visit_stmt, ccp_visit_phi_node);
2411 if (ccp_finalize ())
2412 todo = (TODO_cleanup_cfg | TODO_update_ssa);
2413 free_dominance_info (CDI_DOMINATORS);
2414 return todo;
2415 }
2416
2417
2418 namespace {
2419
2420 const pass_data pass_data_ccp =
2421 {
2422 GIMPLE_PASS, /* type */
2423 "ccp", /* name */
2424 OPTGROUP_NONE, /* optinfo_flags */
2425 TV_TREE_CCP, /* tv_id */
2426 ( PROP_cfg | PROP_ssa ), /* properties_required */
2427 0, /* properties_provided */
2428 0, /* properties_destroyed */
2429 0, /* todo_flags_start */
2430 TODO_update_address_taken, /* todo_flags_finish */
2431 };
2432
2433 class pass_ccp : public gimple_opt_pass
2434 {
2435 public:
2436 pass_ccp (gcc::context *ctxt)
2437 : gimple_opt_pass (pass_data_ccp, ctxt)
2438 {}
2439
2440 /* opt_pass methods: */
2441 opt_pass * clone () { return new pass_ccp (m_ctxt); }
2442 virtual bool gate (function *) { return flag_tree_ccp != 0; }
2443 virtual unsigned int execute (function *) { return do_ssa_ccp (); }
2444
2445 }; // class pass_ccp
2446
2447 } // anon namespace
2448
2449 gimple_opt_pass *
2450 make_pass_ccp (gcc::context *ctxt)
2451 {
2452 return new pass_ccp (ctxt);
2453 }
2454
2455
2456
2457 /* Try to optimize out __builtin_stack_restore. Optimize it out
2458 if there is another __builtin_stack_restore in the same basic
2459 block and no calls or ASM_EXPRs are in between, or if this block's
2460 only outgoing edge is to EXIT_BLOCK and there are no calls or
2461 ASM_EXPRs after this __builtin_stack_restore. */
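/* A hypothetical instance of the first case:

     __builtin_stack_restore (saved_1);
     x_2 = y_3 + z_4;
     __builtin_stack_restore (saved_5);

   The first restore is redundant: nothing between the two can grow the
   stack (no calls, no asm), so the second restore makes it a no-op and it
   can be deleted.  */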
2462
2463 static tree
2464 optimize_stack_restore (gimple_stmt_iterator i)
2465 {
2466 tree callee;
2467 gimple *stmt;
2468
2469 basic_block bb = gsi_bb (i);
2470 gimple *call = gsi_stmt (i);
2471
2472 if (gimple_code (call) != GIMPLE_CALL
2473 || gimple_call_num_args (call) != 1
2474 || TREE_CODE (gimple_call_arg (call, 0)) != SSA_NAME
2475 || !POINTER_TYPE_P (TREE_TYPE (gimple_call_arg (call, 0))))
2476 return NULL_TREE;
2477
2478 for (gsi_next (&i); !gsi_end_p (i); gsi_next (&i))
2479 {
2480 stmt = gsi_stmt (i);
2481 if (gimple_code (stmt) == GIMPLE_ASM)
2482 return NULL_TREE;
2483 if (gimple_code (stmt) != GIMPLE_CALL)
2484 continue;
2485
2486 callee = gimple_call_fndecl (stmt);
2487 if (!callee
2488 || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL
2489 /* All regular builtins are ok, just obviously not alloca. */
2490 || DECL_FUNCTION_CODE (callee) == BUILT_IN_ALLOCA
2491 || DECL_FUNCTION_CODE (callee) == BUILT_IN_ALLOCA_WITH_ALIGN)
2492 return NULL_TREE;
2493
2494 if (DECL_FUNCTION_CODE (callee) == BUILT_IN_STACK_RESTORE)
2495 goto second_stack_restore;
2496 }
2497
2498 if (!gsi_end_p (i))
2499 return NULL_TREE;
2500
2501 /* Allow either no successors at all, or a single successor that is the exit block. */
2502 switch (EDGE_COUNT (bb->succs))
2503 {
2504 case 0:
2505 break;
2506 case 1:
2507 if (single_succ_edge (bb)->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
2508 return NULL_TREE;
2509 break;
2510 default:
2511 return NULL_TREE;
2512 }
2513 second_stack_restore:
2514
2515 /* If there's exactly one use, then zap the call to __builtin_stack_save.
2516 If there are multiple uses, then the last one should remove the call.
2517 In any case, whether the call to __builtin_stack_save can be removed
2518 or not is irrelevant to removing the call to __builtin_stack_restore. */
2519 if (has_single_use (gimple_call_arg (call, 0)))
2520 {
2521 gimple *stack_save = SSA_NAME_DEF_STMT (gimple_call_arg (call, 0));
2522 if (is_gimple_call (stack_save))
2523 {
2524 callee = gimple_call_fndecl (stack_save);
2525 if (callee
2526 && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
2527 && DECL_FUNCTION_CODE (callee) == BUILT_IN_STACK_SAVE)
2528 {
2529 gimple_stmt_iterator stack_save_gsi;
2530 tree rhs;
2531
2532 stack_save_gsi = gsi_for_stmt (stack_save);
2533 rhs = build_int_cst (TREE_TYPE (gimple_call_arg (call, 0)), 0);
2534 update_call_from_tree (&stack_save_gsi, rhs);
2535 }
2536 }
2537 }
2538
2539 /* No effect, so the statement will be deleted. */
2540 return integer_zero_node;
2541 }
2542
2543 /* If the va_list type is a simple pointer and nothing special is needed,
2544 optimize __builtin_va_start (&ap, 0) into ap = __builtin_next_arg (0),
2545 optimize __builtin_va_end (&ap) out as a no-op, and turn __builtin_va_copy
2546 into a simple pointer assignment. */
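/* Roughly, on targets where va_list is such a plain pointer (illustrative
   source-level view):

     __builtin_va_start (&ap, 0)   ->   ap = __builtin_next_arg (0)
     __builtin_va_copy (&d, s)     ->   d = s
     __builtin_va_end (&ap)        ->   deleted as a no-op  */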
2547
2548 static tree
2549 optimize_stdarg_builtin (gimple *call)
2550 {
2551 tree callee, lhs, rhs, cfun_va_list;
2552 bool va_list_simple_ptr;
2553 location_t loc = gimple_location (call);
2554
2555 if (gimple_code (call) != GIMPLE_CALL)
2556 return NULL_TREE;
2557
2558 callee = gimple_call_fndecl (call);
2559
2560 cfun_va_list = targetm.fn_abi_va_list (callee);
2561 va_list_simple_ptr = POINTER_TYPE_P (cfun_va_list)
2562 && (TREE_TYPE (cfun_va_list) == void_type_node
2563 || TREE_TYPE (cfun_va_list) == char_type_node);
2564
2565 switch (DECL_FUNCTION_CODE (callee))
2566 {
2567 case BUILT_IN_VA_START:
2568 if (!va_list_simple_ptr
2569 || targetm.expand_builtin_va_start != NULL
2570 || !builtin_decl_explicit_p (BUILT_IN_NEXT_ARG))
2571 return NULL_TREE;
2572
2573 if (gimple_call_num_args (call) != 2)
2574 return NULL_TREE;
2575
2576 lhs = gimple_call_arg (call, 0);
2577 if (!POINTER_TYPE_P (TREE_TYPE (lhs))
2578 || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs)))
2579 != TYPE_MAIN_VARIANT (cfun_va_list))
2580 return NULL_TREE;
2581
2582 lhs = build_fold_indirect_ref_loc (loc, lhs);
2583 rhs = build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_NEXT_ARG),
2584 1, integer_zero_node);
2585 rhs = fold_convert_loc (loc, TREE_TYPE (lhs), rhs);
2586 return build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, rhs);
2587
2588 case BUILT_IN_VA_COPY:
2589 if (!va_list_simple_ptr)
2590 return NULL_TREE;
2591
2592 if (gimple_call_num_args (call) != 2)
2593 return NULL_TREE;
2594
2595 lhs = gimple_call_arg (call, 0);
2596 if (!POINTER_TYPE_P (TREE_TYPE (lhs))
2597 || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs)))
2598 != TYPE_MAIN_VARIANT (cfun_va_list))
2599 return NULL_TREE;
2600
2601 lhs = build_fold_indirect_ref_loc (loc, lhs);
2602 rhs = gimple_call_arg (call, 1);
2603 if (TYPE_MAIN_VARIANT (TREE_TYPE (rhs))
2604 != TYPE_MAIN_VARIANT (cfun_va_list))
2605 return NULL_TREE;
2606
2607 rhs = fold_convert_loc (loc, TREE_TYPE (lhs), rhs);
2608 return build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, rhs);
2609
2610 case BUILT_IN_VA_END:
2611 /* No effect, so the statement will be deleted. */
2612 return integer_zero_node;
2613
2614 default:
2615 gcc_unreachable ();
2616 }
2617 }
2618
2619 /* Attempt to make the block containing the __builtin_unreachable call at I
2620 unreachable by changing its incoming jumps. Return true if at least one jump was changed. */
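/* Illustrative (hypothetical) CFG fragment:

     if (x_2 > 10)
       goto <bb 7>;   <-- bb 7 starts with __builtin_unreachable ()
     else
       goto <bb 5>;

   The conditional is forced to the value that avoids bb 7, so the edge into
   the unreachable block can never be taken and the block becomes dead.  */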
2621
2622 static bool
2623 optimize_unreachable (gimple_stmt_iterator i)
2624 {
2625 basic_block bb = gsi_bb (i);
2626 gimple_stmt_iterator gsi;
2627 gimple *stmt;
2628 edge_iterator ei;
2629 edge e;
2630 bool ret;
2631
2632 if (flag_sanitize & SANITIZE_UNREACHABLE)
2633 return false;
2634
2635 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2636 {
2637 stmt = gsi_stmt (gsi);
2638
2639 if (is_gimple_debug (stmt))
2640 continue;
2641
2642 if (glabel *label_stmt = dyn_cast <glabel *> (stmt))
2643 {
2644 /* Verify we do not need to preserve the label. */
2645 if (FORCED_LABEL (gimple_label_label (label_stmt)))
2646 return false;
2647
2648 continue;
2649 }
2650
2651 /* Only handle the case that __builtin_unreachable is the first statement
2652 in the block. We rely on DCE to remove stmts without side-effects
2653 before __builtin_unreachable. */
2654 if (gsi_stmt (gsi) != gsi_stmt (i))
2655 return false;
2656 }
2657
2658 ret = false;
2659 FOR_EACH_EDGE (e, ei, bb->preds)
2660 {
2661 gsi = gsi_last_bb (e->src);
2662 if (gsi_end_p (gsi))
2663 continue;
2664
2665 stmt = gsi_stmt (gsi);
2666 if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
2667 {
2668 if (e->flags & EDGE_TRUE_VALUE)
2669 gimple_cond_make_false (cond_stmt);
2670 else if (e->flags & EDGE_FALSE_VALUE)
2671 gimple_cond_make_true (cond_stmt);
2672 else
2673 gcc_unreachable ();
2674 update_stmt (cond_stmt);
2675 }
2676 else
2677 {
2678 /* TODO: handle other cases, e.g. switch statements. */
2679 continue;
2680 }
2681
2682 ret = true;
2683 }
2684
2685 return ret;
2686 }
2687
2688 /* A simple pass that attempts to fold all builtin functions. This pass
2689 is run after we've propagated as many constants as we can. */
2690
2691 namespace {
2692
2693 const pass_data pass_data_fold_builtins =
2694 {
2695 GIMPLE_PASS, /* type */
2696 "fab", /* name */
2697 OPTGROUP_NONE, /* optinfo_flags */
2698 TV_NONE, /* tv_id */
2699 ( PROP_cfg | PROP_ssa ), /* properties_required */
2700 0, /* properties_provided */
2701 0, /* properties_destroyed */
2702 0, /* todo_flags_start */
2703 TODO_update_ssa, /* todo_flags_finish */
2704 };
2705
2706 class pass_fold_builtins : public gimple_opt_pass
2707 {
2708 public:
2709 pass_fold_builtins (gcc::context *ctxt)
2710 : gimple_opt_pass (pass_data_fold_builtins, ctxt)
2711 {}
2712
2713 /* opt_pass methods: */
2714 opt_pass * clone () { return new pass_fold_builtins (m_ctxt); }
2715 virtual unsigned int execute (function *);
2716
2717 }; // class pass_fold_builtins
2718
2719 unsigned int
2720 pass_fold_builtins::execute (function *fun)
2721 {
2722 bool cfg_changed = false;
2723 basic_block bb;
2724 unsigned int todoflags = 0;
2725
2726 FOR_EACH_BB_FN (bb, fun)
2727 {
2728 gimple_stmt_iterator i;
2729 for (i = gsi_start_bb (bb); !gsi_end_p (i); )
2730 {
2731 gimple *stmt, *old_stmt;
2732 tree callee;
2733 enum built_in_function fcode;
2734
2735 stmt = gsi_stmt (i);
2736
2737 if (gimple_code (stmt) != GIMPLE_CALL)
2738 {
2739 /* Remove all *ssaname_N ={v} {CLOBBER}; stmts;
2740 after the last GIMPLE DSE they aren't needed and might
2741 unnecessarily keep the SSA_NAMEs live. */
2742 if (gimple_clobber_p (stmt))
2743 {
2744 tree lhs = gimple_assign_lhs (stmt);
2745 if (TREE_CODE (lhs) == MEM_REF
2746 && TREE_CODE (TREE_OPERAND (lhs, 0)) == SSA_NAME)
2747 {
2748 unlink_stmt_vdef (stmt);
2749 gsi_remove (&i, true);
2750 release_defs (stmt);
2751 continue;
2752 }
2753 }
2754 gsi_next (&i);
2755 continue;
2756 }
2757
2758 callee = gimple_call_fndecl (stmt);
2759 if (!callee || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL)
2760 {
2761 gsi_next (&i);
2762 continue;
2763 }
2764
2765 fcode = DECL_FUNCTION_CODE (callee);
2766 if (fold_stmt (&i))
2767 ;
2768 else
2769 {
2770 tree result = NULL_TREE;
2771 switch (DECL_FUNCTION_CODE (callee))
2772 {
2773 case BUILT_IN_CONSTANT_P:
2774 /* Resolve __builtin_constant_p. If it hasn't been
2775 folded to integer_one_node by now, it's fairly
2776 certain that the value simply isn't constant. */
2777 result = integer_zero_node;
2778 break;
2779
2780 case BUILT_IN_ASSUME_ALIGNED:
2781 /* Remove __builtin_assume_aligned. */
2782 result = gimple_call_arg (stmt, 0);
2783 break;
2784
2785 case BUILT_IN_STACK_RESTORE:
2786 result = optimize_stack_restore (i);
2787 if (result)
2788 break;
2789 gsi_next (&i);
2790 continue;
2791
2792 case BUILT_IN_UNREACHABLE:
2793 if (optimize_unreachable (i))
2794 cfg_changed = true;
2795 break;
2796
2797 case BUILT_IN_VA_START:
2798 case BUILT_IN_VA_END:
2799 case BUILT_IN_VA_COPY:
2800 /* These shouldn't be folded before pass_stdarg. */
2801 result = optimize_stdarg_builtin (stmt);
2802 if (result)
2803 break;
2804 /* FALLTHRU */
2805
2806 default:;
2807 }
2808
2809 if (!result)
2810 {
2811 gsi_next (&i);
2812 continue;
2813 }
2814
2815 if (!update_call_from_tree (&i, result))
2816 gimplify_and_update_call_from_tree (&i, result);
2817 }
2818
2819 todoflags |= TODO_update_address_taken;
2820
2821 if (dump_file && (dump_flags & TDF_DETAILS))
2822 {
2823 fprintf (dump_file, "Simplified\n ");
2824 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
2825 }
2826
2827 old_stmt = stmt;
2828 stmt = gsi_stmt (i);
2829 update_stmt (stmt);
2830
2831 if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt)
2832 && gimple_purge_dead_eh_edges (bb))
2833 cfg_changed = true;
2834
2835 if (dump_file && (dump_flags & TDF_DETAILS))
2836 {
2837 fprintf (dump_file, "to\n ");
2838 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
2839 fprintf (dump_file, "\n");
2840 }
2841
2842 /* Retry the same statement if it changed into another
2843 builtin; there might be new opportunities now. */
2844 if (gimple_code (stmt) != GIMPLE_CALL)
2845 {
2846 gsi_next (&i);
2847 continue;
2848 }
2849 callee = gimple_call_fndecl (stmt);
2850 if (!callee
2851 || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL
2852 || DECL_FUNCTION_CODE (callee) == fcode)
2853 gsi_next (&i);
2854 }
2855 }
2856
2857 /* Delete unreachable blocks. */
2858 if (cfg_changed)
2859 todoflags |= TODO_cleanup_cfg;
2860
2861 return todoflags;
2862 }
2863
2864 } // anon namespace
2865
2866 gimple_opt_pass *
2867 make_pass_fold_builtins (gcc::context *ctxt)
2868 {
2869 return new pass_fold_builtins (ctxt);
2870 }