gcc/tree-ssa-ccp.c
1 /* Conditional constant propagation pass for the GNU compiler.
2 Copyright (C) 2000-2015 Free Software Foundation, Inc.
3 Adapted from original RTL SSA-CCP by Daniel Berlin <dberlin@dberlin.org>
4 Adapted to GIMPLE trees by Diego Novillo <dnovillo@redhat.com>
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it
9 under the terms of the GNU General Public License as published by the
10 Free Software Foundation; either version 3, or (at your option) any
11 later version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT
14 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 /* Conditional constant propagation (CCP) is based on the SSA
23 propagation engine (tree-ssa-propagate.c). Constant assignments of
24 the form VAR = CST are propagated from the assignments into uses of
25 VAR, which in turn may generate new constants. The simulation uses
26 a four-level lattice to keep track of constant values associated
27 with SSA names. Given an SSA name V_i, it may take one of the
28 following values:
29
30 UNINITIALIZED -> the initial state of the value. This value
31 is replaced with a correct initial value
32 the first time the value is used, so the
33 rest of the pass does not need to care about
34 it. Using this value simplifies initialization
35 of the pass, and prevents us from needlessly
36 scanning statements that are never reached.
37
38 UNDEFINED -> V_i is a local variable whose definition
39 has not been processed yet. Therefore we
40 don't yet know if its value is a constant
41 or not.
42
43 CONSTANT -> V_i has been found to hold a constant
44 value C.
45
46 VARYING -> V_i cannot take a constant value, or if it
47 does, it is not possible to determine it
48 at compile time.
49
50 The core of SSA-CCP is in ccp_visit_stmt and ccp_visit_phi_node:
51
52 1- In ccp_visit_stmt, we are interested in assignments whose RHS
53 evaluates into a constant and conditional jumps whose predicate
54 evaluates into a boolean true or false. When an assignment of
55 the form V_i = CONST is found, V_i's lattice value is set to
56 CONSTANT and CONST is associated with it. This causes the
57 propagation engine to add all the SSA edges coming out of the
58 assignment into the worklists, so that statements that use V_i
59 can be visited.
60
61 If the statement is a conditional with a constant predicate, we
62 mark the outgoing edges as executable or not executable
63 depending on the predicate's value. This is then used when
64 visiting PHI nodes to know when a PHI argument can be ignored.
65
66
67 2- In ccp_visit_phi_node, if all the PHI arguments evaluate to the
68 same constant C, then the LHS of the PHI is set to C. This
69 evaluation is known as the "meet operation". Since one of the
70 goals of this evaluation is to optimistically return constant
71 values as often as possible, it uses two main short cuts:
72
73 - If an argument is flowing in through a non-executable edge, it
74 is ignored. This is useful in cases like this:
75
76 if (PRED)
77 a_9 = 3;
78 else
79 a_10 = 100;
80 a_11 = PHI (a_9, a_10)
81
82 If PRED is known to always evaluate to false, then we can
83 assume that a_11 will always take its value from a_10, meaning
84 that instead of considering it VARYING (a_9 and a_10 have
85 different values), we can consider it CONSTANT 100.
86
87 - If an argument has an UNDEFINED value, then it does not affect
88 the outcome of the meet operation. If a variable V_i has an
89 UNDEFINED value, it means that either its defining statement
90 hasn't been visited yet or V_i has no defining statement, in
91 which case the original symbol 'V' is being used
92 uninitialized. Since 'V' is a local variable, the compiler
93 may assume any initial value for it.
94
95
96 After propagation, every variable V_i that ends up with a lattice
97 value of CONSTANT will have the associated constant value in the
98 array CONST_VAL[i].VALUE. That is fed into substitute_and_fold for
99 final substitution and folding.
100
101 This algorithm uses wide-ints at the max precision of the target.
102 This means that, with one uninteresting exception, variables with
103 UNSIGNED types never go to VARYING because the bits above the
104 precision of the type of the variable are always zero. The
105 uninteresting case is a variable of UNSIGNED type that has the
106 maximum precision of the target. Such variables can go to VARYING,
107 but this causes no loss of information since these variables will
108 never be extended.
109
110 References:
111
112 Constant propagation with conditional branches,
113 Wegman and Zadeck, ACM TOPLAS 13(2):181-210.
114
115 Building an Optimizing Compiler,
116 Robert Morgan, Butterworth-Heinemann, 1998, Section 8.9.
117
118 Advanced Compiler Design and Implementation,
119 Steven Muchnick, Morgan Kaufmann, 1997, Section 12.6 */
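
/* An editorial worked example (a sketch added for illustration, not
   part of the original sources) of the propagation described above:

       x_1 = 4;
       if (x_1 > 10)          <- predicate folds to false; the true
         y_2 = x_1 + 1;          edge is marked not executable
       y_3 = PHI <y_2, x_1>   <- meet sees only the executable edge

   x_1 becomes CONSTANT 4, the conditional folds, the PHI meet ignores
   y_2, so y_3 is CONSTANT 4 and substitute_and_fold can replace its
   uses.  */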
120
121 #include "config.h"
122 #include "system.h"
123 #include "coretypes.h"
124 #include "backend.h"
125 #include "target.h"
126 #include "tree.h"
127 #include "gimple.h"
128 #include "tree-pass.h"
129 #include "ssa.h"
130 #include "gimple-pretty-print.h"
131 #include "fold-const.h"
132 #include "gimple-fold.h"
133 #include "tree-eh.h"
134 #include "gimplify.h"
135 #include "gimple-iterator.h"
136 #include "tree-cfg.h"
137 #include "tree-ssa-propagate.h"
138 #include "dbgcnt.h"
139 #include "params.h"
140 #include "builtins.h"
141 #include "tree-chkp.h"
142
143
144 /* Possible lattice values. */
145 typedef enum
146 {
147 UNINITIALIZED,
148 UNDEFINED,
149 CONSTANT,
150 VARYING
151 } ccp_lattice_t;
152
153 struct ccp_prop_value_t {
154 /* Lattice value. */
155 ccp_lattice_t lattice_val;
156
157 /* Propagated value. */
158 tree value;
159
160 /* Mask that applies to the propagated value during CCP. For X
161 with a CONSTANT lattice value X & ~mask == value & ~mask. The
162 zero bits in the mask cover constant values. The ones mean no
163 information. */
164 widest_int mask;
165 };
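
/* An editorial illustration (not in the original sources) of the mask
   semantics above: lattice_val == CONSTANT, value == 8 and mask == 7
   describe a value X with X & ~7 == 8 & ~7, i.e. X is 0b1??? -- bit 3
   is known set, bits 0-2 are unknown, and all higher bits are known
   zero.  */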
166
167 /* Array of propagated constant values. After propagation,
168 CONST_VAL[I].VALUE holds the constant value for SSA_NAME(I). */
173 static ccp_prop_value_t *const_val;
174 static unsigned n_const_val;
175
176 static void canonicalize_value (ccp_prop_value_t *);
177 static bool ccp_fold_stmt (gimple_stmt_iterator *);
178 static void ccp_lattice_meet (ccp_prop_value_t *, ccp_prop_value_t *);
179
180 /* Dump constant propagation value VAL to file OUTF prefixed by PREFIX. */
181
182 static void
183 dump_lattice_value (FILE *outf, const char *prefix, ccp_prop_value_t val)
184 {
185 switch (val.lattice_val)
186 {
187 case UNINITIALIZED:
188 fprintf (outf, "%sUNINITIALIZED", prefix);
189 break;
190 case UNDEFINED:
191 fprintf (outf, "%sUNDEFINED", prefix);
192 break;
193 case VARYING:
194 fprintf (outf, "%sVARYING", prefix);
195 break;
196 case CONSTANT:
197 if (TREE_CODE (val.value) != INTEGER_CST
198 || val.mask == 0)
199 {
200 fprintf (outf, "%sCONSTANT ", prefix);
201 print_generic_expr (outf, val.value, dump_flags);
202 }
203 else
204 {
205 widest_int cval = wi::bit_and_not (wi::to_widest (val.value),
206 val.mask);
207 fprintf (outf, "%sCONSTANT ", prefix);
208 print_hex (cval, outf);
209 fprintf (outf, " (");
210 print_hex (val.mask, outf);
211 fprintf (outf, ")");
212 }
213 break;
214 default:
215 gcc_unreachable ();
216 }
217 }
218
219
220 /* Print lattice value VAL to stderr. */
221
222 void debug_lattice_value (ccp_prop_value_t val);
223
224 DEBUG_FUNCTION void
225 debug_lattice_value (ccp_prop_value_t val)
226 {
227 dump_lattice_value (stderr, "", val);
228 fprintf (stderr, "\n");
229 }
230
231 /* Extend NONZERO_BITS to a full mask, with the upper bits being set. */
232
233 static widest_int
234 extend_mask (const wide_int &nonzero_bits)
235 {
236 return (wi::mask <widest_int> (wi::get_precision (nonzero_bits), true)
237 | widest_int::from (nonzero_bits, UNSIGNED));
238 }
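
/* Editorial example (a sketch, not in the original sources): for a
   32-bit NONZERO_BITS of 0xff, extend_mask returns a widest_int whose
   low 32 bits are 0xff and whose remaining high bits are all ones:
   the low byte may be nonzero, bits 8-31 are known zero, and nothing
   is claimed about bits beyond the precision of the original
   wide_int.  */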
239
240 /* Compute a default value for variable VAR and store it in the
241 CONST_VAL array. The following rules are used to get default
242 values:
243
244 1- Global and static variables that are declared constant are
245 considered CONSTANT.
246
247 2- Any other value is considered UNDEFINED. This is useful when
248 considering PHI nodes. PHI arguments that are undefined do not
249 change the constant value of the PHI node, which allows for more
250 constants to be propagated.
251
252 3- Variables defined by statements other than assignments and PHI
253 nodes are considered VARYING.
254
255 4- Initial values of variables that are not GIMPLE registers are
256 considered VARYING. */
257
258 static ccp_prop_value_t
259 get_default_value (tree var)
260 {
261 ccp_prop_value_t val = { UNINITIALIZED, NULL_TREE, 0 };
262 gimple *stmt;
263
264 stmt = SSA_NAME_DEF_STMT (var);
265
266 if (gimple_nop_p (stmt))
267 {
268 /* Variables defined by an empty statement are those used
269 before being initialized. If VAR is a local variable, we
270 can assume initially that it is UNDEFINED, otherwise we must
271 consider it VARYING. */
272 if (!virtual_operand_p (var)
273 && TREE_CODE (SSA_NAME_VAR (var)) == VAR_DECL)
274 val.lattice_val = UNDEFINED;
275 else
276 {
277 val.lattice_val = VARYING;
278 val.mask = -1;
279 if (flag_tree_bit_ccp)
280 {
281 wide_int nonzero_bits = get_nonzero_bits (var);
282 if (nonzero_bits != -1)
283 {
284 val.lattice_val = CONSTANT;
285 val.value = build_zero_cst (TREE_TYPE (var));
286 val.mask = extend_mask (nonzero_bits);
287 }
288 }
289 }
290 }
291 else if (is_gimple_assign (stmt))
292 {
293 tree cst;
294 if (gimple_assign_single_p (stmt)
295 && DECL_P (gimple_assign_rhs1 (stmt))
296 && (cst = get_symbol_constant_value (gimple_assign_rhs1 (stmt))))
297 {
298 val.lattice_val = CONSTANT;
299 val.value = cst;
300 }
301 else
302 {
303 /* Any other variable defined by an assignment is considered
304 UNDEFINED. */
305 val.lattice_val = UNDEFINED;
306 }
307 }
308 else if ((is_gimple_call (stmt)
309 && gimple_call_lhs (stmt) != NULL_TREE)
310 || gimple_code (stmt) == GIMPLE_PHI)
311 {
312 /* A variable defined by a call or a PHI node is considered
313 UNDEFINED. */
314 val.lattice_val = UNDEFINED;
315 }
316 else
317 {
318 /* Otherwise, VAR will never take on a constant value. */
319 val.lattice_val = VARYING;
320 val.mask = -1;
321 }
322
323 return val;
324 }
325
326
327 /* Get the constant value associated with variable VAR. */
328
329 static inline ccp_prop_value_t *
330 get_value (tree var)
331 {
332 ccp_prop_value_t *val;
333
334 if (const_val == NULL
335 || SSA_NAME_VERSION (var) >= n_const_val)
336 return NULL;
337
338 val = &const_val[SSA_NAME_VERSION (var)];
339 if (val->lattice_val == UNINITIALIZED)
340 *val = get_default_value (var);
341
342 canonicalize_value (val);
343
344 return val;
345 }
346
347 /* Return the constant tree value associated with VAR. */
348
349 static inline tree
350 get_constant_value (tree var)
351 {
352 ccp_prop_value_t *val;
353 if (TREE_CODE (var) != SSA_NAME)
354 {
355 if (is_gimple_min_invariant (var))
356 return var;
357 return NULL_TREE;
358 }
359 val = get_value (var);
360 if (val
361 && val->lattice_val == CONSTANT
362 && (TREE_CODE (val->value) != INTEGER_CST
363 || val->mask == 0))
364 return val->value;
365 return NULL_TREE;
366 }
367
368 /* Sets the value associated with VAR to VARYING. */
369
370 static inline void
371 set_value_varying (tree var)
372 {
373 ccp_prop_value_t *val = &const_val[SSA_NAME_VERSION (var)];
374
375 val->lattice_val = VARYING;
376 val->value = NULL_TREE;
377 val->mask = -1;
378 }
379
380 /* For integer constants, make sure to drop TREE_OVERFLOW. */
381
382 static void
383 canonicalize_value (ccp_prop_value_t *val)
384 {
385 if (val->lattice_val != CONSTANT)
386 return;
387
388 if (TREE_OVERFLOW_P (val->value))
389 val->value = drop_tree_overflow (val->value);
390 }
391
392 /* Return whether the lattice transition is valid. */
393
394 static bool
395 valid_lattice_transition (ccp_prop_value_t old_val, ccp_prop_value_t new_val)
396 {
397 /* Lattice transitions must always be monotonically increasing in
398 value. */
399 if (old_val.lattice_val < new_val.lattice_val)
400 return true;
401
402 if (old_val.lattice_val != new_val.lattice_val)
403 return false;
404
405 if (!old_val.value && !new_val.value)
406 return true;
407
408 /* Now both lattice values are CONSTANT. */
409
410 /* Allow arbitrary copy changes as we might look through PHI <a_1, ...>
411 when only a single copy edge is executable. */
412 if (TREE_CODE (old_val.value) == SSA_NAME
413 && TREE_CODE (new_val.value) == SSA_NAME)
414 return true;
415
416 /* Allow transitioning from a constant to a copy. */
417 if (is_gimple_min_invariant (old_val.value)
418 && TREE_CODE (new_val.value) == SSA_NAME)
419 return true;
420
421 /* Allow transitioning from PHI <&x, not executable> == &x
422 to PHI <&x, &y> == common alignment. */
423 if (TREE_CODE (old_val.value) != INTEGER_CST
424 && TREE_CODE (new_val.value) == INTEGER_CST)
425 return true;
426
427 /* Bit-lattices have to agree in the still valid bits. */
428 if (TREE_CODE (old_val.value) == INTEGER_CST
429 && TREE_CODE (new_val.value) == INTEGER_CST)
430 return (wi::bit_and_not (wi::to_widest (old_val.value), new_val.mask)
431 == wi::bit_and_not (wi::to_widest (new_val.value), new_val.mask));
432
433 /* Otherwise constant values have to agree. */
434 if (operand_equal_p (old_val.value, new_val.value, 0))
435 return true;
436
437 /* At least the kinds and types should agree now. */
438 if (TREE_CODE (old_val.value) != TREE_CODE (new_val.value)
439 || !types_compatible_p (TREE_TYPE (old_val.value),
440 TREE_TYPE (new_val.value)))
441 return false;
442
443 /* For floats and !HONOR_NANS allow transitions from (partial) NaN
444 to non-NaN. */
445 tree type = TREE_TYPE (new_val.value);
446 if (SCALAR_FLOAT_TYPE_P (type)
447 && !HONOR_NANS (type))
448 {
449 if (REAL_VALUE_ISNAN (TREE_REAL_CST (old_val.value)))
450 return true;
451 }
452 else if (VECTOR_FLOAT_TYPE_P (type)
453 && !HONOR_NANS (type))
454 {
455 for (unsigned i = 0; i < VECTOR_CST_NELTS (old_val.value); ++i)
456 if (!REAL_VALUE_ISNAN
457 (TREE_REAL_CST (VECTOR_CST_ELT (old_val.value, i)))
458 && !operand_equal_p (VECTOR_CST_ELT (old_val.value, i),
459 VECTOR_CST_ELT (new_val.value, i), 0))
460 return false;
461 return true;
462 }
463 else if (COMPLEX_FLOAT_TYPE_P (type)
464 && !HONOR_NANS (type))
465 {
466 if (!REAL_VALUE_ISNAN (TREE_REAL_CST (TREE_REALPART (old_val.value)))
467 && !operand_equal_p (TREE_REALPART (old_val.value),
468 TREE_REALPART (new_val.value), 0))
469 return false;
470 if (!REAL_VALUE_ISNAN (TREE_REAL_CST (TREE_IMAGPART (old_val.value)))
471 && !operand_equal_p (TREE_IMAGPART (old_val.value),
472 TREE_IMAGPART (new_val.value), 0))
473 return false;
474 return true;
475 }
476 return false;
477 }
478
479 /* Set the value for variable VAR to NEW_VAL. Return true if the new
480 value is different from VAR's previous value. */
481
482 static bool
483 set_lattice_value (tree var, ccp_prop_value_t *new_val)
484 {
485 /* We can deal with old UNINITIALIZED values just fine here. */
486 ccp_prop_value_t *old_val = &const_val[SSA_NAME_VERSION (var)];
487
488 canonicalize_value (new_val);
489
490 /* We have to be careful to not go up the bitwise lattice
491 represented by the mask. Instead of dropping to VARYING
492 use the meet operator to retain a conservative value.
493 Missed optimizations like PR65851 make this necessary.
494 It also ensures we converge to a stable lattice solution. */
495 if (new_val->lattice_val == CONSTANT
496 && old_val->lattice_val == CONSTANT
497 && TREE_CODE (new_val->value) != SSA_NAME)
498 ccp_lattice_meet (new_val, old_val);
499
500 gcc_checking_assert (valid_lattice_transition (*old_val, *new_val));
501
502 /* If *OLD_VAL and NEW_VAL are the same, return false to inform the
503 caller that this was a non-transition. */
504 if (old_val->lattice_val != new_val->lattice_val
505 || (new_val->lattice_val == CONSTANT
506 && (TREE_CODE (new_val->value) != TREE_CODE (old_val->value)
507 || (TREE_CODE (new_val->value) == INTEGER_CST
508 && (new_val->mask != old_val->mask
509 || (wi::bit_and_not (wi::to_widest (old_val->value),
510 new_val->mask)
511 != wi::bit_and_not (wi::to_widest (new_val->value),
512 new_val->mask))))
513 || (TREE_CODE (new_val->value) != INTEGER_CST
514 && !operand_equal_p (new_val->value, old_val->value, 0)))))
515 {
516 /* ??? We would like to delay creation of INTEGER_CSTs from
517 partially constant values here. */
518
519 if (dump_file && (dump_flags & TDF_DETAILS))
520 {
521 dump_lattice_value (dump_file, "Lattice value changed to ", *new_val);
522 fprintf (dump_file, ". Adding SSA edges to worklist.\n");
523 }
524
525 *old_val = *new_val;
526
527 gcc_assert (new_val->lattice_val != UNINITIALIZED);
528 return true;
529 }
530
531 return false;
532 }
533
534 static ccp_prop_value_t get_value_for_expr (tree, bool);
535 static ccp_prop_value_t bit_value_binop (enum tree_code, tree, tree, tree);
536 static void bit_value_binop_1 (enum tree_code, tree, widest_int *, widest_int *,
537 tree, const widest_int &, const widest_int &,
538 tree, const widest_int &, const widest_int &);
539
540 /* Return a widest_int that can be used for bitwise simplifications
541 from VAL. */
542
543 static widest_int
544 value_to_wide_int (ccp_prop_value_t val)
545 {
546 if (val.value
547 && TREE_CODE (val.value) == INTEGER_CST)
548 return wi::to_widest (val.value);
549
550 return 0;
551 }
552
553 /* Return the value for the address expression EXPR based on alignment
554 information. */
555
556 static ccp_prop_value_t
557 get_value_from_alignment (tree expr)
558 {
559 tree type = TREE_TYPE (expr);
560 ccp_prop_value_t val;
561 unsigned HOST_WIDE_INT bitpos;
562 unsigned int align;
563
564 gcc_assert (TREE_CODE (expr) == ADDR_EXPR);
565
566 get_pointer_alignment_1 (expr, &align, &bitpos);
567 val.mask = (POINTER_TYPE_P (type) || TYPE_UNSIGNED (type)
568 ? wi::mask <widest_int> (TYPE_PRECISION (type), false)
569 : -1).and_not (align / BITS_PER_UNIT - 1);
570 val.lattice_val
571 = wi::sext (val.mask, TYPE_PRECISION (type)) == -1 ? VARYING : CONSTANT;
572 if (val.lattice_val == CONSTANT)
573 val.value = build_int_cstu (type, bitpos / BITS_PER_UNIT);
574 else
575 val.value = NULL_TREE;
576
577 return val;
578 }
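
/* Editorial example (not in the original sources, and assuming
   BITS_PER_UNIT == 8): if EXPR is &a and get_pointer_alignment_1
   reports align == 128 bits and bitpos == 32, then
   align / BITS_PER_UNIT - 1 == 15, the low four bits of the mask are
   cleared, and the result is CONSTANT with value 4 (bitpos in bytes)
   and the low four bits known: the pointer is 4 mod 16.  */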
579
580 /* Return the value for the tree operand EXPR. If FOR_BITS_P is true
581 return constant bits extracted from alignment information for
582 invariant addresses. */
583
584 static ccp_prop_value_t
585 get_value_for_expr (tree expr, bool for_bits_p)
586 {
587 ccp_prop_value_t val;
588
589 if (TREE_CODE (expr) == SSA_NAME)
590 {
591 val = *get_value (expr);
592 if (for_bits_p
593 && val.lattice_val == CONSTANT
594 && TREE_CODE (val.value) == ADDR_EXPR)
595 val = get_value_from_alignment (val.value);
596 /* Fall back to a copy value. */
597 if (!for_bits_p
598 && val.lattice_val == VARYING
599 && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (expr))
600 {
601 val.lattice_val = CONSTANT;
602 val.value = expr;
603 val.mask = -1;
604 }
605 }
606 else if (is_gimple_min_invariant (expr)
607 && (!for_bits_p || TREE_CODE (expr) != ADDR_EXPR))
608 {
609 val.lattice_val = CONSTANT;
610 val.value = expr;
611 val.mask = 0;
612 canonicalize_value (&val);
613 }
614 else if (TREE_CODE (expr) == ADDR_EXPR)
615 val = get_value_from_alignment (expr);
616 else
617 {
618 val.lattice_val = VARYING;
619 val.mask = -1;
620 val.value = NULL_TREE;
621 }
622
623 if (val.lattice_val == VARYING
624 && TYPE_UNSIGNED (TREE_TYPE (expr)))
625 val.mask = wi::zext (val.mask, TYPE_PRECISION (TREE_TYPE (expr)));
626
627 return val;
628 }
629
630 /* Return the likely CCP lattice value for STMT.
631
632 If STMT has no operands, then return CONSTANT.
633
634 Else if undefinedness of operands of STMT causes its value to be
635 undefined, then return UNDEFINED.
636
637 Else if any operands of STMT are constants, then return CONSTANT.
638
639 Else return VARYING. */
640
641 static ccp_lattice_t
642 likely_value (gimple *stmt)
643 {
644 bool has_constant_operand, has_undefined_operand, all_undefined_operands;
645 bool has_nsa_operand;
646 tree use;
647 ssa_op_iter iter;
648 unsigned i;
649
650 enum gimple_code code = gimple_code (stmt);
651
652 /* This function appears to be called only for assignments, calls,
653 conditionals, and switches, due to the logic in visit_stmt. */
654 gcc_assert (code == GIMPLE_ASSIGN
655 || code == GIMPLE_CALL
656 || code == GIMPLE_COND
657 || code == GIMPLE_SWITCH);
658
659 /* If the statement has volatile operands, it won't fold to a
660 constant value. */
661 if (gimple_has_volatile_ops (stmt))
662 return VARYING;
663
664 /* Arrive here for more complex cases. */
665 has_constant_operand = false;
666 has_undefined_operand = false;
667 all_undefined_operands = true;
668 has_nsa_operand = false;
669 FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
670 {
671 ccp_prop_value_t *val = get_value (use);
672
673 if (val->lattice_val == UNDEFINED)
674 has_undefined_operand = true;
675 else
676 all_undefined_operands = false;
677
678 if (val->lattice_val == CONSTANT)
679 has_constant_operand = true;
680
681 if (SSA_NAME_IS_DEFAULT_DEF (use)
682 || !prop_simulate_again_p (SSA_NAME_DEF_STMT (use)))
683 has_nsa_operand = true;
684 }
685
686 /* There may be constants in regular rhs operands. For calls we
687 have to ignore the lhs, fndecl and static chain; for other
688 statements only the lhs. */
689 for (i = (is_gimple_call (stmt) ? 2 : 0) + gimple_has_lhs (stmt);
690 i < gimple_num_ops (stmt); ++i)
691 {
692 tree op = gimple_op (stmt, i);
693 if (!op || TREE_CODE (op) == SSA_NAME)
694 continue;
695 if (is_gimple_min_invariant (op))
696 has_constant_operand = true;
697 }
698
699 if (has_constant_operand)
700 all_undefined_operands = false;
701
702 if (has_undefined_operand
703 && code == GIMPLE_CALL
704 && gimple_call_internal_p (stmt))
705 switch (gimple_call_internal_fn (stmt))
706 {
707 /* These 3 internal functions use the first argument just as a
708 magic way to find the decl uid. */
709 case IFN_GOMP_SIMD_LANE:
710 case IFN_GOMP_SIMD_VF:
711 case IFN_GOMP_SIMD_LAST_LANE:
712 has_undefined_operand = false;
713 break;
714 default:
715 break;
716 }
717
718 /* If the operation combines operands like COMPLEX_EXPR, make sure
719 not to mark the result UNDEFINED if only one part of the result is
720 undefined. */
721 if (has_undefined_operand && all_undefined_operands)
722 return UNDEFINED;
723 else if (code == GIMPLE_ASSIGN && has_undefined_operand)
724 {
725 switch (gimple_assign_rhs_code (stmt))
726 {
727 /* Unary operators are handled with all_undefined_operands. */
728 case PLUS_EXPR:
729 case MINUS_EXPR:
730 case POINTER_PLUS_EXPR:
731 /* Not MIN_EXPR, MAX_EXPR. One VARYING operand may be selected.
732 Not bitwise operators, one VARYING operand may specify the
733 result completely. Not logical operators for the same reason.
734 Not COMPLEX_EXPR as one VARYING operand makes the result partly
735 not UNDEFINED. Not *DIV_EXPR, comparisons and shifts because
736 the undefined operand may be promoted. */
737 return UNDEFINED;
738
739 case ADDR_EXPR:
740 /* If any part of an address is UNDEFINED, like the index
741 of an ARRAY_EXPR, then treat the result as UNDEFINED. */
742 return UNDEFINED;
743
744 default:
745 ;
746 }
747 }
748 /* If there was an UNDEFINED operand but the result may not be UNDEFINED,
749 fall back to CONSTANT. During iteration UNDEFINED may still drop
750 to CONSTANT. */
751 if (has_undefined_operand)
752 return CONSTANT;
753
754 /* We do not consider virtual operands here -- load from read-only
755 memory may have only VARYING virtual operands, but still be
756 constant. Also we can combine the stmt with definitions from
757 operands whose definitions are not simulated again. */
758 if (has_constant_operand
759 || has_nsa_operand
760 || gimple_references_memory_p (stmt))
761 return CONSTANT;
762
763 return VARYING;
764 }
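
/* Editorial example (not in the original sources): for
   z_3 = x_1 + y_2 with x_1 CONSTANT and y_2 UNDEFINED, the switch
   above returns UNDEFINED -- the compiler may still assume any value
   for y_2.  For z_3 = x_1 & y_2 it returns CONSTANT instead: x_1
   alone may specify the result completely (e.g. x_1 == 0), so the
   result must not be treated as undefined.  */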
765
766 /* Returns true if STMT cannot be constant. */
767
768 static bool
769 surely_varying_stmt_p (gimple *stmt)
770 {
771 /* If the statement has operands that we cannot handle, it cannot be
772 constant. */
773 if (gimple_has_volatile_ops (stmt))
774 return true;
775
776 /* A call is varying if it does not return a value, or if it is
777 a direct call to a function that is not a builtin and lacks the
778 assume_aligned and alloc_align attributes. */
779 if (is_gimple_call (stmt))
780 {
781 tree fndecl, fntype = gimple_call_fntype (stmt);
782 if (!gimple_call_lhs (stmt)
783 || ((fndecl = gimple_call_fndecl (stmt)) != NULL_TREE
784 && !DECL_BUILT_IN (fndecl)
785 && !lookup_attribute ("assume_aligned",
786 TYPE_ATTRIBUTES (fntype))
787 && !lookup_attribute ("alloc_align",
788 TYPE_ATTRIBUTES (fntype))))
789 return true;
790 }
791
792 /* Any other store operation is not interesting. */
793 else if (gimple_vdef (stmt))
794 return true;
795
796 /* Anything other than assignments and conditional jumps are not
797 interesting for CCP. */
798 if (gimple_code (stmt) != GIMPLE_ASSIGN
799 && gimple_code (stmt) != GIMPLE_COND
800 && gimple_code (stmt) != GIMPLE_SWITCH
801 && gimple_code (stmt) != GIMPLE_CALL)
802 return true;
803
804 return false;
805 }
806
807 /* Initialize local data structures for CCP. */
808
809 static void
810 ccp_initialize (void)
811 {
812 basic_block bb;
813
814 n_const_val = num_ssa_names;
815 const_val = XCNEWVEC (ccp_prop_value_t, n_const_val);
816
817 /* Initialize simulation flags for PHI nodes and statements. */
818 FOR_EACH_BB_FN (bb, cfun)
819 {
820 gimple_stmt_iterator i;
821
822 for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i))
823 {
824 gimple *stmt = gsi_stmt (i);
825 bool is_varying;
826
827 /* If the statement is a control insn, we must simulate
828 it at least once; otherwise its outgoing edges would
829 never be added. */
830 if (stmt_ends_bb_p (stmt))
831 is_varying = false;
832 else
833 is_varying = surely_varying_stmt_p (stmt);
834
835 if (is_varying)
836 {
837 tree def;
838 ssa_op_iter iter;
839
840 /* If the statement will not produce a constant, mark
841 all its outputs VARYING. */
842 FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
843 set_value_varying (def);
844 }
845 prop_set_simulate_again (stmt, !is_varying);
846 }
847 }
848
849 /* Now process PHI nodes. We never clear the simulate_again flag on
850 phi nodes, since we do not know which edges are executable yet,
851 except for phi nodes for virtual operands when we do not do store ccp. */
852 FOR_EACH_BB_FN (bb, cfun)
853 {
854 gphi_iterator i;
855
856 for (i = gsi_start_phis (bb); !gsi_end_p (i); gsi_next (&i))
857 {
858 gphi *phi = i.phi ();
859
860 if (virtual_operand_p (gimple_phi_result (phi)))
861 prop_set_simulate_again (phi, false);
862 else
863 prop_set_simulate_again (phi, true);
864 }
865 }
866 }
867
868 /* Debug count support. Reset the values of ssa names to
869 VARYING once the number of ssa names analyzed goes
870 beyond the specified debug count. */
871
872 static void
873 do_dbg_cnt (void)
874 {
875 unsigned i;
876 for (i = 0; i < num_ssa_names; i++)
877 {
878 if (!dbg_cnt (ccp))
879 {
880 const_val[i].lattice_val = VARYING;
881 const_val[i].mask = -1;
882 const_val[i].value = NULL_TREE;
883 }
884 }
885 }
886
887
888 /* Do final substitution of propagated values, cleanup the flowgraph and
889 free allocated storage. If NONZERO_P, record nonzero bits.
890
891 Return TRUE when something was optimized. */
892
893 static bool
894 ccp_finalize (bool nonzero_p)
895 {
896 bool something_changed;
897 unsigned i;
898
899 do_dbg_cnt ();
900
901 /* Derive alignment and misalignment information from partially
902 constant pointers in the lattice or nonzero bits from partially
903 constant integers. */
904 for (i = 1; i < num_ssa_names; ++i)
905 {
906 tree name = ssa_name (i);
907 ccp_prop_value_t *val;
908 unsigned int tem, align;
909
910 if (!name
911 || (!POINTER_TYPE_P (TREE_TYPE (name))
912 && (!INTEGRAL_TYPE_P (TREE_TYPE (name))
913 /* Don't record nonzero bits before IPA to avoid
914 using too much memory. */
915 || !nonzero_p)))
916 continue;
917
918 val = get_value (name);
919 if (val->lattice_val != CONSTANT
920 || TREE_CODE (val->value) != INTEGER_CST)
921 continue;
922
923 if (POINTER_TYPE_P (TREE_TYPE (name)))
924 {
925 /* Trailing mask bits specify the alignment, trailing value
926 bits the misalignment. */
927 tem = val->mask.to_uhwi ();
928 align = (tem & -tem);
929 if (align > 1)
930 set_ptr_info_alignment (get_ptr_info (name), align,
931 (TREE_INT_CST_LOW (val->value)
932 & (align - 1)));
933 }
934 else
935 {
936 unsigned int precision = TYPE_PRECISION (TREE_TYPE (val->value));
937 wide_int nonzero_bits = wide_int::from (val->mask, precision,
938 UNSIGNED) | val->value;
939 nonzero_bits &= get_nonzero_bits (name);
940 set_nonzero_bits (name, nonzero_bits);
941 }
942 }
943
944 /* Perform substitutions based on the known constant values. */
945 something_changed = substitute_and_fold (get_constant_value,
946 ccp_fold_stmt, true);
947
948 free (const_val);
949 const_val = NULL;
950 return something_changed;
951 }
952
953
954 /* Compute the meet operator between *VAL1 and *VAL2. Store the result
955 in VAL1.
956
957 any M UNDEFINED = any
958 any M VARYING = VARYING
959 Ci M Cj = Ci if (i == j)
960 Ci M Cj = VARYING if (i != j)
961 */
962
963 static void
964 ccp_lattice_meet (ccp_prop_value_t *val1, ccp_prop_value_t *val2)
965 {
966 if (val1->lattice_val == UNDEFINED
967 /* For UNDEFINED M SSA we can't always return the SSA name, as its definition
968 may not dominate the PHI node. Doing optimistic copy propagation
969 also causes a lot of gcc.dg/uninit-pred*.c FAILs. */
970 && (val2->lattice_val != CONSTANT
971 || TREE_CODE (val2->value) != SSA_NAME))
972 {
973 /* UNDEFINED M any = any */
974 *val1 = *val2;
975 }
976 else if (val2->lattice_val == UNDEFINED
977 /* See above. */
978 && (val1->lattice_val != CONSTANT
979 || TREE_CODE (val1->value) != SSA_NAME))
980 {
981 /* any M UNDEFINED = any
982 Nothing to do. VAL1 already contains the value we want. */
983 ;
984 }
985 else if (val1->lattice_val == VARYING
986 || val2->lattice_val == VARYING)
987 {
988 /* any M VARYING = VARYING. */
989 val1->lattice_val = VARYING;
990 val1->mask = -1;
991 val1->value = NULL_TREE;
992 }
993 else if (val1->lattice_val == CONSTANT
994 && val2->lattice_val == CONSTANT
995 && TREE_CODE (val1->value) == INTEGER_CST
996 && TREE_CODE (val2->value) == INTEGER_CST)
997 {
998 /* Ci M Cj = Ci if (i == j)
999 Ci M Cj = VARYING if (i != j)
1000
1001 For INTEGER_CSTs mask unequal bits. If no equal bits remain,
1002 drop to varying. */
1003 val1->mask = (val1->mask | val2->mask
1004 | (wi::to_widest (val1->value)
1005 ^ wi::to_widest (val2->value)));
1006 if (wi::sext (val1->mask, TYPE_PRECISION (TREE_TYPE (val1->value))) == -1)
1007 {
1008 val1->lattice_val = VARYING;
1009 val1->value = NULL_TREE;
1010 }
1011 }
1012 else if (val1->lattice_val == CONSTANT
1013 && val2->lattice_val == CONSTANT
1014 && operand_equal_p (val1->value, val2->value, 0))
1015 {
1016 /* Ci M Cj = Ci if (i == j)
1017 Ci M Cj = VARYING if (i != j)
1018
1019 VAL1 already contains the value we want for equivalent values. */
1020 }
1021 else if (val1->lattice_val == CONSTANT
1022 && val2->lattice_val == CONSTANT
1023 && (TREE_CODE (val1->value) == ADDR_EXPR
1024 || TREE_CODE (val2->value) == ADDR_EXPR))
1025 {
1026 /* When unequal addresses are involved, try meeting their
1027 alignment information instead. */
1028 ccp_prop_value_t tem = *val2;
1029 if (TREE_CODE (val1->value) == ADDR_EXPR)
1030 *val1 = get_value_for_expr (val1->value, true);
1031 if (TREE_CODE (val2->value) == ADDR_EXPR)
1032 tem = get_value_for_expr (val2->value, true);
1033 ccp_lattice_meet (val1, &tem);
1034 }
1035 else
1036 {
1037 /* Any other combination is VARYING. */
1038 val1->lattice_val = VARYING;
1039 val1->mask = -1;
1040 val1->value = NULL_TREE;
1041 }
1042 }
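
/* Editorial worked example (not in the original sources) of the
   INTEGER_CST meet above: meeting CONSTANT 0x10 (mask 0) with
   CONSTANT 0x12 (mask 0) gives mask = 0 | 0 | (0x10 ^ 0x12) == 0x2,
   so the result is value 0x10 with only bit 1 unknown, instead of
   dropping all the way to VARYING.  */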
1043
1044
1045 /* Loop through the PHI_NODE's parameters for BLOCK and compare their
1046 lattice values to determine PHI_NODE's lattice value. The value of a
1047 PHI node is determined by calling ccp_lattice_meet with all the arguments
1048 of the PHI node that are incoming via executable edges. */
1049
1050 static enum ssa_prop_result
1051 ccp_visit_phi_node (gphi *phi)
1052 {
1053 unsigned i;
1054 ccp_prop_value_t new_val;
1055
1056 if (dump_file && (dump_flags & TDF_DETAILS))
1057 {
1058 fprintf (dump_file, "\nVisiting PHI node: ");
1059 print_gimple_stmt (dump_file, phi, 0, dump_flags);
1060 }
1061
1062 new_val.lattice_val = UNDEFINED;
1063 new_val.value = NULL_TREE;
1064 new_val.mask = 0;
1065
1066 bool first = true;
1067 bool non_exec_edge = false;
1068 for (i = 0; i < gimple_phi_num_args (phi); i++)
1069 {
1070 /* Compute the meet operator over all the PHI arguments flowing
1071 through executable edges. */
1072 edge e = gimple_phi_arg_edge (phi, i);
1073
1074 if (dump_file && (dump_flags & TDF_DETAILS))
1075 {
1076 fprintf (dump_file,
1077 "\n Argument #%d (%d -> %d %sexecutable)\n",
1078 i, e->src->index, e->dest->index,
1079 (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
1080 }
1081
1082 /* If the incoming edge is executable, compute the meet operator for
1083 the existing value of the PHI node and the current PHI argument. */
1084 if (e->flags & EDGE_EXECUTABLE)
1085 {
1086 tree arg = gimple_phi_arg (phi, i)->def;
1087 ccp_prop_value_t arg_val = get_value_for_expr (arg, false);
1088
1089 if (first)
1090 {
1091 new_val = arg_val;
1092 first = false;
1093 }
1094 else
1095 ccp_lattice_meet (&new_val, &arg_val);
1096
1097 if (dump_file && (dump_flags & TDF_DETAILS))
1098 {
1099 fprintf (dump_file, "\t");
1100 print_generic_expr (dump_file, arg, dump_flags);
1101 dump_lattice_value (dump_file, "\tValue: ", arg_val);
1102 fprintf (dump_file, "\n");
1103 }
1104
1105 if (new_val.lattice_val == VARYING)
1106 break;
1107 }
1108 else
1109 non_exec_edge = true;
1110 }
1111
1112 /* In case there were non-executable edges and the value is a copy,
1113 make sure its definition dominates the PHI node. */
1114 if (non_exec_edge
1115 && new_val.lattice_val == CONSTANT
1116 && TREE_CODE (new_val.value) == SSA_NAME
1117 && ! SSA_NAME_IS_DEFAULT_DEF (new_val.value)
1118 && ! dominated_by_p (CDI_DOMINATORS, gimple_bb (phi),
1119 gimple_bb (SSA_NAME_DEF_STMT (new_val.value))))
1120 {
1121 new_val.lattice_val = VARYING;
1122 new_val.value = NULL_TREE;
1123 new_val.mask = -1;
1124 }
1125
1126 if (dump_file && (dump_flags & TDF_DETAILS))
1127 {
1128 dump_lattice_value (dump_file, "\n PHI node value: ", new_val);
1129 fprintf (dump_file, "\n\n");
1130 }
1131
1132 /* Make the transition to the new value. */
1133 if (set_lattice_value (gimple_phi_result (phi), &new_val))
1134 {
1135 if (new_val.lattice_val == VARYING)
1136 return SSA_PROP_VARYING;
1137 else
1138 return SSA_PROP_INTERESTING;
1139 }
1140 else
1141 return SSA_PROP_NOT_INTERESTING;
1142 }
1143
1144 /* Return the constant value for OP, or OP itself otherwise. */
1145
1146 static tree
1147 valueize_op (tree op)
1148 {
1149 if (TREE_CODE (op) == SSA_NAME)
1150 {
1151 tree tem = get_constant_value (op);
1152 if (tem)
1153 return tem;
1154 }
1155 return op;
1156 }
1157
1158 /* Return the constant value for OP, but signal to not follow SSA
1159 edges if the definition may be simulated again. */
1160
1161 static tree
1162 valueize_op_1 (tree op)
1163 {
1164 if (TREE_CODE (op) == SSA_NAME)
1165 {
1166 /* If the definition may be simulated again we cannot follow
1167 this SSA edge as the SSA propagator does not necessarily
1168 re-visit the use. */
1169 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
1170 if (!gimple_nop_p (def_stmt)
1171 && prop_simulate_again_p (def_stmt))
1172 return NULL_TREE;
1173 tree tem = get_constant_value (op);
1174 if (tem)
1175 return tem;
1176 }
1177 return op;
1178 }
1179
1180 /* CCP specific front-end to the non-destructive constant folding
1181 routines.
1182
1183 Attempt to simplify the RHS of STMT knowing that one or more
1184 operands are constants.
1185
1186 If simplification is possible, return the simplified RHS,
1187 otherwise return the original RHS or NULL_TREE. */
1188
1189 static tree
1190 ccp_fold (gimple *stmt)
1191 {
1192 location_t loc = gimple_location (stmt);
1193 switch (gimple_code (stmt))
1194 {
1195 case GIMPLE_COND:
1196 {
1197 /* Handle comparison operators that can appear in GIMPLE form. */
1198 tree op0 = valueize_op (gimple_cond_lhs (stmt));
1199 tree op1 = valueize_op (gimple_cond_rhs (stmt));
1200 enum tree_code code = gimple_cond_code (stmt);
1201 return fold_binary_loc (loc, code, boolean_type_node, op0, op1);
1202 }
1203
1204 case GIMPLE_SWITCH:
1205 {
1206 /* Return the constant switch index. */
1207 return valueize_op (gimple_switch_index (as_a <gswitch *> (stmt)));
1208 }
1209
1210 case GIMPLE_ASSIGN:
1211 case GIMPLE_CALL:
1212 return gimple_fold_stmt_to_constant_1 (stmt,
1213 valueize_op, valueize_op_1);
1214
1215 default:
1216 gcc_unreachable ();
1217 }
1218 }
1219
1220 /* Apply the operation CODE in type TYPE to the value, mask pair
1221 RVAL and RMASK representing a value of type RTYPE and set
1222 the value, mask pair *VAL and *MASK to the result. */
1223
1224 static void
1225 bit_value_unop_1 (enum tree_code code, tree type,
1226 widest_int *val, widest_int *mask,
1227 tree rtype, const widest_int &rval, const widest_int &rmask)
1228 {
1229 switch (code)
1230 {
1231 case BIT_NOT_EXPR:
1232 *mask = rmask;
1233 *val = ~rval;
1234 break;
1235
1236 case NEGATE_EXPR:
1237 {
1238 widest_int temv, temm;
1239 /* Return ~rval + 1. */
1240 bit_value_unop_1 (BIT_NOT_EXPR, type, &temv, &temm, type, rval, rmask);
1241 bit_value_binop_1 (PLUS_EXPR, type, val, mask,
1242 type, temv, temm, type, 1, 0);
1243 break;
1244 }
1245
1246 CASE_CONVERT:
1247 {
1248 signop sgn;
1249
1250 /* First extend mask and value according to the original type. */
1251 sgn = TYPE_SIGN (rtype);
1252 *mask = wi::ext (rmask, TYPE_PRECISION (rtype), sgn);
1253 *val = wi::ext (rval, TYPE_PRECISION (rtype), sgn);
1254
1255 /* Then extend mask and value according to the target type. */
1256 sgn = TYPE_SIGN (type);
1257 *mask = wi::ext (*mask, TYPE_PRECISION (type), sgn);
1258 *val = wi::ext (*val, TYPE_PRECISION (type), sgn);
1259 break;
1260 }
1261
1262 default:
1263 *mask = -1;
1264 break;
1265 }
1266 }
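
/* Editorial example (not in the original sources) for the conversion
   case above: truncating RVAL == 0x1f4 with RMASK == 0 from a 16-bit
   unsigned RTYPE to an 8-bit unsigned TYPE first zero-extends both at
   16 bits, then re-extends at 8 bits, yielding *VAL == 0xf4 and
   *MASK == 0 -- the low byte remains fully known.  */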
1267
1268 /* Apply the operation CODE in type TYPE to the value, mask pairs
1269 R1VAL, R1MASK and R2VAL, R2MASK representing values of type R1TYPE
1270 and R2TYPE and set the value, mask pair *VAL and *MASK to the result. */
1271
1272 static void
1273 bit_value_binop_1 (enum tree_code code, tree type,
1274 widest_int *val, widest_int *mask,
1275 tree r1type, const widest_int &r1val,
1276 const widest_int &r1mask, tree r2type,
1277 const widest_int &r2val, const widest_int &r2mask)
1278 {
1279 signop sgn = TYPE_SIGN (type);
1280 int width = TYPE_PRECISION (type);
1281 bool swap_p = false;
1282
1283 /* The cases below assume a constant result and overwrite *mask;
1284 start from all-unknown so unhandled codes fall back to varying. */
1285 *mask = -1;
1286
1287 switch (code)
1288 {
1289 case BIT_AND_EXPR:
1290 /* The mask is constant where there is a known not
1291 set bit, (m1 | m2) & ((v1 | m1) & (v2 | m2)) */
1292 *mask = (r1mask | r2mask) & (r1val | r1mask) & (r2val | r2mask);
1293 *val = r1val & r2val;
1294 break;
1295
1296 case BIT_IOR_EXPR:
1297 /* The mask is constant where there is a known
1298 set bit, (m1 | m2) & ~((v1 & ~m1) | (v2 & ~m2)). */
1299 *mask = (r1mask | r2mask)
1300 .and_not (r1val.and_not (r1mask) | r2val.and_not (r2mask));
1301 *val = r1val | r2val;
1302 break;
1303
1304 case BIT_XOR_EXPR:
1305 /* m1 | m2 */
1306 *mask = r1mask | r2mask;
1307 *val = r1val ^ r2val;
1308 break;
1309
1310 case LROTATE_EXPR:
1311 case RROTATE_EXPR:
1312 if (r2mask == 0)
1313 {
1314 widest_int shift = r2val;
1315 if (shift == 0)
1316 {
1317 *mask = r1mask;
1318 *val = r1val;
1319 }
1320 else
1321 {
1322 if (wi::neg_p (shift))
1323 {
1324 shift = -shift;
1325 if (code == RROTATE_EXPR)
1326 code = LROTATE_EXPR;
1327 else
1328 code = RROTATE_EXPR;
1329 }
1330 if (code == RROTATE_EXPR)
1331 {
1332 *mask = wi::rrotate (r1mask, shift, width);
1333 *val = wi::rrotate (r1val, shift, width);
1334 }
1335 else
1336 {
1337 *mask = wi::lrotate (r1mask, shift, width);
1338 *val = wi::lrotate (r1val, shift, width);
1339 }
1340 }
1341 }
1342 break;
1343
1344 case LSHIFT_EXPR:
1345 case RSHIFT_EXPR:
1346 /* ??? We can handle a partially known shift count if we know
1347 its sign. That way we can tell that (x << (y | 8)) & 255
1348 is zero. */
1349 if (r2mask == 0)
1350 {
1351 widest_int shift = r2val;
1352 if (shift == 0)
1353 {
1354 *mask = r1mask;
1355 *val = r1val;
1356 }
1357 else
1358 {
1359 if (wi::neg_p (shift))
1360 {
1361 shift = -shift;
1362 if (code == RSHIFT_EXPR)
1363 code = LSHIFT_EXPR;
1364 else
1365 code = RSHIFT_EXPR;
1366 }
1367 if (code == RSHIFT_EXPR)
1368 {
1369 *mask = wi::rshift (wi::ext (r1mask, width, sgn), shift, sgn);
1370 *val = wi::rshift (wi::ext (r1val, width, sgn), shift, sgn);
1371 }
1372 else
1373 {
1374 *mask = wi::ext (wi::lshift (r1mask, shift), width, sgn);
1375 *val = wi::ext (wi::lshift (r1val, shift), width, sgn);
1376 }
1377 }
1378 }
1379 break;
1380
1381 case PLUS_EXPR:
1382 case POINTER_PLUS_EXPR:
1383 {
1384 /* Do the addition with unknown bits set to zero, to give carry-ins of
1385 zero wherever possible. */
1386 widest_int lo = r1val.and_not (r1mask) + r2val.and_not (r2mask);
1387 lo = wi::ext (lo, width, sgn);
1388 /* Do the addition with unknown bits set to one, to give carry-ins of
1389 one wherever possible. */
1390 widest_int hi = (r1val | r1mask) + (r2val | r2mask);
1391 hi = wi::ext (hi, width, sgn);
1392 /* Each bit in the result is known if (a) the corresponding bits in
1393 both inputs are known, and (b) the carry-in to that bit position
1394 is known. We can check condition (b) by seeing if we got the same
1395 result with minimised carries as with maximised carries. */
1396 *mask = r1mask | r2mask | (lo ^ hi);
1397 *mask = wi::ext (*mask, width, sgn);
1398 /* It shouldn't matter whether we choose lo or hi here. */
1399 *val = lo;
1400 break;
1401 }
1402
1403 case MINUS_EXPR:
1404 {
1405 widest_int temv, temm;
1406 bit_value_unop_1 (NEGATE_EXPR, r2type, &temv, &temm,
1407 r2type, r2val, r2mask);
1408 bit_value_binop_1 (PLUS_EXPR, type, val, mask,
1409 r1type, r1val, r1mask,
1410 r2type, temv, temm);
1411 break;
1412 }
1413
1414 case MULT_EXPR:
1415 {
1416 /* Just track trailing zeros in both operands; the product has
1417 at least r1tz + r2tz trailing zero bits. */
1418 int r1tz = wi::ctz (r1val | r1mask);
1419 int r2tz = wi::ctz (r2val | r2mask);
1420 if (r1tz + r2tz >= width)
1421 {
1422 *mask = 0;
1423 *val = 0;
1424 }
1425 else if (r1tz + r2tz > 0)
1426 {
1427 *mask = wi::ext (wi::mask <widest_int> (r1tz + r2tz, true),
1428 width, sgn);
1429 *val = 0;
1430 }
1431 break;
1432 }
1433
1434 case EQ_EXPR:
1435 case NE_EXPR:
1436 {
1437 widest_int m = r1mask | r2mask;
1438 if (r1val.and_not (m) != r2val.and_not (m))
1439 {
1440 *mask = 0;
1441 *val = ((code == EQ_EXPR) ? 0 : 1);
1442 }
1443 else
1444 {
1445 /* We know the result of a comparison is always one or zero. */
1446 *mask = 1;
1447 *val = 0;
1448 }
1449 break;
1450 }
1451
1452 case GE_EXPR:
1453 case GT_EXPR:
1454 swap_p = true;
1455 code = swap_tree_comparison (code);
1456 /* Fall through. */
1457 case LT_EXPR:
1458 case LE_EXPR:
1459 {
1460 int minmax, maxmin;
1461
1462 const widest_int &o1val = swap_p ? r2val : r1val;
1463 const widest_int &o1mask = swap_p ? r2mask : r1mask;
1464 const widest_int &o2val = swap_p ? r1val : r2val;
1465 const widest_int &o2mask = swap_p ? r1mask : r2mask;
1466
1467 /* If the most significant bits are not known we know nothing. */
1468 if (wi::neg_p (o1mask) || wi::neg_p (o2mask))
1469 break;
1470
1471 /* For comparisons the signedness is in the comparison operands. */
1472 sgn = TYPE_SIGN (r1type);
1473
1474 /* If we know the most significant bits we know the value
1475 ranges by means of treating varying bits as zero
1476 or one. Do a cross comparison of the max/min pairs. */
1477 maxmin = wi::cmp (o1val | o1mask, o2val.and_not (o2mask), sgn);
1478 minmax = wi::cmp (o1val.and_not (o1mask), o2val | o2mask, sgn);
1479 if (maxmin < 0) /* o1 is less than o2. */
1480 {
1481 *mask = 0;
1482 *val = 1;
1483 }
1484 else if (minmax > 0) /* o1 is not less or equal to o2. */
1485 {
1486 *mask = 0;
1487 *val = 0;
1488 }
1489 else if (maxmin == minmax) /* o1 and o2 are equal. */
1490 {
1491 /* This probably should never happen as we'd have
1492 folded the thing during fully constant value folding. */
1493 *mask = 0;
1494 *val = (code == LE_EXPR ? 1 : 0);
1495 }
1496 else
1497 {
1498 /* We know the result of a comparison is always one or zero. */
1499 *mask = 1;
1500 *val = 0;
1501 }
1502 break;
1503 }
1504
1505 default:;
1506 }
1507 }
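
/* Editorial worked example (not in the original sources) for the
   PLUS_EXPR case above: adding r1 = value 0x4, mask 0x3 (some value
   in 4..7) to r2 = value 0x8, mask 0 (exactly 8) gives
   lo == 0x4 + 0x8 == 0xc and hi == 0x7 + 0x8 == 0xf, so
   *mask == 0x3 | 0 | (0xc ^ 0xf) == 0x3 and *val == 0xc: the sum is
   known to be of the form 0b11??, i.e. in 12..15.  */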
1508
1509 /* Return the propagation value when applying the operation CODE to
1510 the value RHS yielding type TYPE. */
1511
1512 static ccp_prop_value_t
1513 bit_value_unop (enum tree_code code, tree type, tree rhs)
1514 {
1515 ccp_prop_value_t rval = get_value_for_expr (rhs, true);
1516 widest_int value, mask;
1517 ccp_prop_value_t val;
1518
1519 if (rval.lattice_val == UNDEFINED)
1520 return rval;
1521
1522 gcc_assert ((rval.lattice_val == CONSTANT
1523 && TREE_CODE (rval.value) == INTEGER_CST)
1524 || wi::sext (rval.mask, TYPE_PRECISION (TREE_TYPE (rhs))) == -1);
1525 bit_value_unop_1 (code, type, &value, &mask,
1526 TREE_TYPE (rhs), value_to_wide_int (rval), rval.mask);
1527 if (wi::sext (mask, TYPE_PRECISION (type)) != -1)
1528 {
1529 val.lattice_val = CONSTANT;
1530 val.mask = mask;
1531 /* ??? Delay building trees here. */
1532 val.value = wide_int_to_tree (type, value);
1533 }
1534 else
1535 {
1536 val.lattice_val = VARYING;
1537 val.value = NULL_TREE;
1538 val.mask = -1;
1539 }
1540 return val;
1541 }
1542
1543 /* Return the propagation value when applying the operation CODE to
1544 the values RHS1 and RHS2 yielding type TYPE. */
1545
1546 static ccp_prop_value_t
1547 bit_value_binop (enum tree_code code, tree type, tree rhs1, tree rhs2)
1548 {
1549 ccp_prop_value_t r1val = get_value_for_expr (rhs1, true);
1550 ccp_prop_value_t r2val = get_value_for_expr (rhs2, true);
1551 widest_int value, mask;
1552 ccp_prop_value_t val;
1553
1554 if (r1val.lattice_val == UNDEFINED
1555 || r2val.lattice_val == UNDEFINED)
1556 {
1557 val.lattice_val = VARYING;
1558 val.value = NULL_TREE;
1559 val.mask = -1;
1560 return val;
1561 }
1562
1563 gcc_assert ((r1val.lattice_val == CONSTANT
1564 && TREE_CODE (r1val.value) == INTEGER_CST)
1565 || wi::sext (r1val.mask,
1566 TYPE_PRECISION (TREE_TYPE (rhs1))) == -1);
1567 gcc_assert ((r2val.lattice_val == CONSTANT
1568 && TREE_CODE (r2val.value) == INTEGER_CST)
1569 || wi::sext (r2val.mask,
1570 TYPE_PRECISION (TREE_TYPE (rhs2))) == -1);
1571 bit_value_binop_1 (code, type, &value, &mask,
1572 TREE_TYPE (rhs1), value_to_wide_int (r1val), r1val.mask,
1573 TREE_TYPE (rhs2), value_to_wide_int (r2val), r2val.mask);
1574 if (wi::sext (mask, TYPE_PRECISION (type)) != -1)
1575 {
1576 val.lattice_val = CONSTANT;
1577 val.mask = mask;
1578 /* ??? Delay building trees here. */
1579 val.value = wide_int_to_tree (type, value);
1580 }
1581 else
1582 {
1583 val.lattice_val = VARYING;
1584 val.value = NULL_TREE;
1585 val.mask = -1;
1586 }
1587 return val;
1588 }
1589
1590 /* Return the propagation value for __builtin_assume_aligned
1591 and functions with the assume_aligned or alloc_align attribute.
1592 For __builtin_assume_aligned, ATTR is NULL_TREE,
1593 for assume_aligned attribute ATTR is non-NULL and ALLOC_ALIGNED
1594 is false, for the alloc_align attribute ATTR is non-NULL and
1595 ALLOC_ALIGNED is true. */
1596
1597 static ccp_prop_value_t
1598 bit_value_assume_aligned (gimple *stmt, tree attr, ccp_prop_value_t ptrval,
1599 bool alloc_aligned)
1600 {
1601 tree align, misalign = NULL_TREE, type;
1602 unsigned HOST_WIDE_INT aligni, misaligni = 0;
1603 ccp_prop_value_t alignval;
1604 widest_int value, mask;
1605 ccp_prop_value_t val;
1606
1607 if (attr == NULL_TREE)
1608 {
1609 tree ptr = gimple_call_arg (stmt, 0);
1610 type = TREE_TYPE (ptr);
1611 ptrval = get_value_for_expr (ptr, true);
1612 }
1613 else
1614 {
1615 tree lhs = gimple_call_lhs (stmt);
1616 type = TREE_TYPE (lhs);
1617 }
1618
1619 if (ptrval.lattice_val == UNDEFINED)
1620 return ptrval;
1621 gcc_assert ((ptrval.lattice_val == CONSTANT
1622 && TREE_CODE (ptrval.value) == INTEGER_CST)
1623 || wi::sext (ptrval.mask, TYPE_PRECISION (type)) == -1);
1624 if (attr == NULL_TREE)
1625 {
1626 /* Get aligni and misaligni from __builtin_assume_aligned. */
1627 align = gimple_call_arg (stmt, 1);
1628 if (!tree_fits_uhwi_p (align))
1629 return ptrval;
1630 aligni = tree_to_uhwi (align);
1631 if (gimple_call_num_args (stmt) > 2)
1632 {
1633 misalign = gimple_call_arg (stmt, 2);
1634 if (!tree_fits_uhwi_p (misalign))
1635 return ptrval;
1636 misaligni = tree_to_uhwi (misalign);
1637 }
1638 }
1639 else
1640 {
1641 /* Get aligni and misaligni from assume_aligned or
1642 alloc_align attributes. */
1643 if (TREE_VALUE (attr) == NULL_TREE)
1644 return ptrval;
1645 attr = TREE_VALUE (attr);
1646 align = TREE_VALUE (attr);
1647 if (!tree_fits_uhwi_p (align))
1648 return ptrval;
1649 aligni = tree_to_uhwi (align);
1650 if (alloc_aligned)
1651 {
1652 if (aligni == 0 || aligni > gimple_call_num_args (stmt))
1653 return ptrval;
1654 align = gimple_call_arg (stmt, aligni - 1);
1655 if (!tree_fits_uhwi_p (align))
1656 return ptrval;
1657 aligni = tree_to_uhwi (align);
1658 }
1659 else if (TREE_CHAIN (attr) && TREE_VALUE (TREE_CHAIN (attr)))
1660 {
1661 misalign = TREE_VALUE (TREE_CHAIN (attr));
1662 if (!tree_fits_uhwi_p (misalign))
1663 return ptrval;
1664 misaligni = tree_to_uhwi (misalign);
1665 }
1666 }
1667 if (aligni <= 1 || (aligni & (aligni - 1)) != 0 || misaligni >= aligni)
1668 return ptrval;
1669
1670 align = build_int_cst_type (type, -aligni);
1671 alignval = get_value_for_expr (align, true);
1672 bit_value_binop_1 (BIT_AND_EXPR, type, &value, &mask,
1673 type, value_to_wide_int (ptrval), ptrval.mask,
1674 type, value_to_wide_int (alignval), alignval.mask);
1675 if (wi::sext (mask, TYPE_PRECISION (type)) != -1)
1676 {
1677 val.lattice_val = CONSTANT;
1678 val.mask = mask;
1679 gcc_assert ((mask.to_uhwi () & (aligni - 1)) == 0);
1680 gcc_assert ((value.to_uhwi () & (aligni - 1)) == 0);
1681 value |= misaligni;
1682 /* ??? Delay building trees here. */
1683 val.value = wide_int_to_tree (type, value);
1684 }
1685 else
1686 {
1687 val.lattice_val = VARYING;
1688 val.value = NULL_TREE;
1689 val.mask = -1;
1690 }
1691 return val;
1692 }
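
/* Editorial example (not in the original sources): for
   __builtin_assume_aligned (p, 16, 4), ALIGNI == 16 and
   MISALIGNI == 4, so ALIGN is built as -16; the BIT_AND_EXPR above
   clears the low four bits of both the mask and the value, and after
   'value |= misaligni' the result is value 4 with the low four bits
   known: p is known to be 4 mod 16.  */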
1693
1694 /* Evaluate statement STMT.
1695 Valid only for assignments, calls, conditionals, and switches. */
1696
1697 static ccp_prop_value_t
1698 evaluate_stmt (gimple *stmt)
1699 {
1700 ccp_prop_value_t val;
1701 tree simplified = NULL_TREE;
1702 ccp_lattice_t likelyvalue = likely_value (stmt);
1703 bool is_constant = false;
1704 unsigned int align;
1705
1706 if (dump_file && (dump_flags & TDF_DETAILS))
1707 {
1708 fprintf (dump_file, "which is likely ");
1709 switch (likelyvalue)
1710 {
1711 case CONSTANT:
1712 fprintf (dump_file, "CONSTANT");
1713 break;
1714 case UNDEFINED:
1715 fprintf (dump_file, "UNDEFINED");
1716 break;
1717 case VARYING:
1718 fprintf (dump_file, "VARYING");
1719 break;
1720 default:;
1721 }
1722 fprintf (dump_file, "\n");
1723 }
1724
1725 /* If the statement is likely to have a CONSTANT result, then try
1726 to fold the statement to determine the constant value. */
1727 /* FIXME. This is the only place that we call ccp_fold.
1728 Since likely_value never returns CONSTANT for calls, we will
1729 not attempt to fold them, including builtins that might profit from it. */
1730 if (likelyvalue == CONSTANT)
1731 {
1732 fold_defer_overflow_warnings ();
1733 simplified = ccp_fold (stmt);
1734 if (simplified && TREE_CODE (simplified) == SSA_NAME)
1735 {
1736 val = *get_value (simplified);
1737 if (val.lattice_val != VARYING)
1738 {
1739 fold_undefer_overflow_warnings (true, stmt, 0);
1740 return val;
1741 }
1742 }
1743 is_constant = simplified && is_gimple_min_invariant (simplified);
1744 fold_undefer_overflow_warnings (is_constant, stmt, 0);
1745 if (is_constant)
1746 {
1747 /* The statement produced a constant value. */
1748 val.lattice_val = CONSTANT;
1749 val.value = simplified;
1750 val.mask = 0;
1751 return val;
1752 }
1753 }
1754 /* If the statement is likely to have a VARYING result, then do not
1755 bother folding the statement. */
1756 else if (likelyvalue == VARYING)
1757 {
1758 enum gimple_code code = gimple_code (stmt);
1759 if (code == GIMPLE_ASSIGN)
1760 {
1761 enum tree_code subcode = gimple_assign_rhs_code (stmt);
1762
1763 /* Other cases cannot satisfy is_gimple_min_invariant
1764 without folding. */
1765 if (get_gimple_rhs_class (subcode) == GIMPLE_SINGLE_RHS)
1766 simplified = gimple_assign_rhs1 (stmt);
1767 }
1768 else if (code == GIMPLE_SWITCH)
1769 simplified = gimple_switch_index (as_a <gswitch *> (stmt));
1770 else
1771 /* These cannot satisfy is_gimple_min_invariant without folding. */
1772 gcc_assert (code == GIMPLE_CALL || code == GIMPLE_COND);
1773 is_constant = simplified && is_gimple_min_invariant (simplified);
1774 if (is_constant)
1775 {
1776 /* The statement produced a constant value. */
1777 val.lattice_val = CONSTANT;
1778 val.value = simplified;
1779 val.mask = 0;
1780 }
1781 }
1782 /* If the statement result is likely UNDEFINED, make it so. */
1783 else if (likelyvalue == UNDEFINED)
1784 {
1785 val.lattice_val = UNDEFINED;
1786 val.value = NULL_TREE;
1787 val.mask = 0;
1788 return val;
1789 }
1790
1791 /* Resort to simplification for bitwise tracking. */
1792 if (flag_tree_bit_ccp
1793 && (likelyvalue == CONSTANT || is_gimple_call (stmt)
1794 || (gimple_assign_single_p (stmt)
1795 && gimple_assign_rhs_code (stmt) == ADDR_EXPR))
1796 && !is_constant)
1797 {
1798 enum gimple_code code = gimple_code (stmt);
1799 val.lattice_val = VARYING;
1800 val.value = NULL_TREE;
1801 val.mask = -1;
1802 if (code == GIMPLE_ASSIGN)
1803 {
1804 enum tree_code subcode = gimple_assign_rhs_code (stmt);
1805 tree rhs1 = gimple_assign_rhs1 (stmt);
1806 tree lhs = gimple_assign_lhs (stmt);
1807 if ((INTEGRAL_TYPE_P (TREE_TYPE (lhs))
1808 || POINTER_TYPE_P (TREE_TYPE (lhs)))
1809 && (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
1810 || POINTER_TYPE_P (TREE_TYPE (rhs1))))
1811 switch (get_gimple_rhs_class (subcode))
1812 {
1813 case GIMPLE_SINGLE_RHS:
1814 val = get_value_for_expr (rhs1, true);
1815 break;
1816
1817 case GIMPLE_UNARY_RHS:
1818 val = bit_value_unop (subcode, TREE_TYPE (lhs), rhs1);
1819 break;
1820
1821 case GIMPLE_BINARY_RHS:
1822 val = bit_value_binop (subcode, TREE_TYPE (lhs), rhs1,
1823 gimple_assign_rhs2 (stmt));
1824 break;
1825
1826 default:;
1827 }
1828 }
1829 else if (code == GIMPLE_COND)
1830 {
1831 enum tree_code code = gimple_cond_code (stmt);
1832 tree rhs1 = gimple_cond_lhs (stmt);
1833 tree rhs2 = gimple_cond_rhs (stmt);
1834 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
1835 || POINTER_TYPE_P (TREE_TYPE (rhs1)))
1836 val = bit_value_binop (code, TREE_TYPE (rhs1), rhs1, rhs2);
1837 }
1838 else if (gimple_call_builtin_p (stmt, BUILT_IN_NORMAL))
1839 {
1840 tree fndecl = gimple_call_fndecl (stmt);
1841 switch (DECL_FUNCTION_CODE (fndecl))
1842 {
1843 case BUILT_IN_MALLOC:
1844 case BUILT_IN_REALLOC:
1845 case BUILT_IN_CALLOC:
1846 case BUILT_IN_STRDUP:
1847 case BUILT_IN_STRNDUP:
1848 val.lattice_val = CONSTANT;
1849 val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0);
1850 val.mask = ~((HOST_WIDE_INT) MALLOC_ABI_ALIGNMENT
1851 / BITS_PER_UNIT - 1);
1852 break;
1853
1854 case BUILT_IN_ALLOCA:
1855 case BUILT_IN_ALLOCA_WITH_ALIGN:
1856 align = (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_ALLOCA_WITH_ALIGN
1857 ? TREE_INT_CST_LOW (gimple_call_arg (stmt, 1))
1858 : BIGGEST_ALIGNMENT);
1859 val.lattice_val = CONSTANT;
1860 val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0);
1861 val.mask = ~((HOST_WIDE_INT) align / BITS_PER_UNIT - 1);
1862 break;
1863
1864 /* These builtins return their first argument, unmodified. */
1865 case BUILT_IN_MEMCPY:
1866 case BUILT_IN_MEMMOVE:
1867 case BUILT_IN_MEMSET:
1868 case BUILT_IN_STRCPY:
1869 case BUILT_IN_STRNCPY:
1870 case BUILT_IN_MEMCPY_CHK:
1871 case BUILT_IN_MEMMOVE_CHK:
1872 case BUILT_IN_MEMSET_CHK:
1873 case BUILT_IN_STRCPY_CHK:
1874 case BUILT_IN_STRNCPY_CHK:
1875 val = get_value_for_expr (gimple_call_arg (stmt, 0), true);
1876 break;
1877
1878 case BUILT_IN_ASSUME_ALIGNED:
1879 val = bit_value_assume_aligned (stmt, NULL_TREE, val, false);
1880 break;
1881
1882 case BUILT_IN_ALIGNED_ALLOC:
1883 {
1884 tree align = get_constant_value (gimple_call_arg (stmt, 0));
1885 if (align
1886 && tree_fits_uhwi_p (align))
1887 {
1888 unsigned HOST_WIDE_INT aligni = tree_to_uhwi (align);
1889 if (aligni > 1
1890 /* align must be a power of two */
1891 && (aligni & (aligni - 1)) == 0)
1892 {
1893 val.lattice_val = CONSTANT;
1894 val.value = build_int_cst (ptr_type_node, 0);
1895 val.mask = -aligni;
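/* For a power-of-two aligni, -aligni equals ~(aligni - 1), marking the
   low log2 (aligni) bits as known zeros. */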
1896 }
1897 }
1898 break;
1899 }
1900
1901 default:;
1902 }
1903 }
1904 if (is_gimple_call (stmt) && gimple_call_lhs (stmt))
1905 {
1906 tree fntype = gimple_call_fntype (stmt);
1907 if (fntype)
1908 {
1909 tree attrs = lookup_attribute ("assume_aligned",
1910 TYPE_ATTRIBUTES (fntype));
1911 if (attrs)
1912 val = bit_value_assume_aligned (stmt, attrs, val, false);
1913 attrs = lookup_attribute ("alloc_align",
1914 TYPE_ATTRIBUTES (fntype));
1915 if (attrs)
1916 val = bit_value_assume_aligned (stmt, attrs, val, true);
1917 }
1918 }
1919 is_constant = (val.lattice_val == CONSTANT);
1920 }
1921
1922 if (flag_tree_bit_ccp
1923 && ((is_constant && TREE_CODE (val.value) == INTEGER_CST)
1924 || !is_constant)
1925 && gimple_get_lhs (stmt)
1926 && TREE_CODE (gimple_get_lhs (stmt)) == SSA_NAME)
1927 {
1928 tree lhs = gimple_get_lhs (stmt);
1929 wide_int nonzero_bits = get_nonzero_bits (lhs);
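/* NONZERO_BITS has a 1 for each bit of LHS that may be nonzero; bits
   that are 0 in it are known to be zero. E.g. a value of 0xff says
   everything above the low byte is zero. */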
1930 if (nonzero_bits != -1)
1931 {
1932 if (!is_constant)
1933 {
1934 val.lattice_val = CONSTANT;
1935 val.value = build_zero_cst (TREE_TYPE (lhs));
1936 val.mask = extend_mask (nonzero_bits);
1937 is_constant = true;
1938 }
1939 else
1940 {
1941 if (wi::bit_and_not (val.value, nonzero_bits) != 0)
1942 val.value = wide_int_to_tree (TREE_TYPE (lhs),
1943 nonzero_bits & val.value);
1944 if (nonzero_bits == 0)
1945 val.mask = 0;
1946 else
1947 val.mask = val.mask & extend_mask (nonzero_bits);
1948 }
1949 }
1950 }
1951
1952 /* The statement produced a nonconstant value. */
1953 if (!is_constant)
1954 {
1955 /* The statement produced a copy. */
1956 if (simplified && TREE_CODE (simplified) == SSA_NAME
1957 && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (simplified))
1958 {
1959 val.lattice_val = CONSTANT;
1960 val.value = simplified;
1961 val.mask = -1;
1962 }
1963 /* The statement is VARYING. */
1964 else
1965 {
1966 val.lattice_val = VARYING;
1967 val.value = NULL_TREE;
1968 val.mask = -1;
1969 }
1970 }
1971
1972 return val;
1973 }
1974
1975 typedef hash_table<nofree_ptr_hash<gimple> > gimple_htab;
1976
1977 /* Given a BUILT_IN_STACK_SAVE value SAVED_VAL, insert a clobber of VAR before
1978 each matching BUILT_IN_STACK_RESTORE. Mark visited phis in VISITED. */
1979
1980 static void
1981 insert_clobber_before_stack_restore (tree saved_val, tree var,
1982 gimple_htab **visited)
1983 {
1984 gimple *stmt;
1985 gassign *clobber_stmt;
1986 tree clobber;
1987 imm_use_iterator iter;
1988 gimple_stmt_iterator i;
1989 gimple **slot;
1990
1991 FOR_EACH_IMM_USE_STMT (stmt, iter, saved_val)
1992 if (gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
1993 {
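/* Build the clobber: a volatile empty CONSTRUCTOR, printed in dumps
   as VAR ={v} {CLOBBER}. */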
1994 clobber = build_constructor (TREE_TYPE (var),
1995 NULL);
1996 TREE_THIS_VOLATILE (clobber) = 1;
1997 clobber_stmt = gimple_build_assign (var, clobber);
1998
1999 i = gsi_for_stmt (stmt);
2000 gsi_insert_before (&i, clobber_stmt, GSI_SAME_STMT);
2001 }
2002 else if (gimple_code (stmt) == GIMPLE_PHI)
2003 {
2004 if (!*visited)
2005 *visited = new gimple_htab (10);
2006
2007 slot = (*visited)->find_slot (stmt, INSERT);
2008 if (*slot != NULL)
2009 continue;
2010
2011 *slot = stmt;
2012 insert_clobber_before_stack_restore (gimple_phi_result (stmt), var,
2013 visited);
2014 }
2015 else if (gimple_assign_ssa_name_copy_p (stmt))
2016 insert_clobber_before_stack_restore (gimple_assign_lhs (stmt), var,
2017 visited);
2018 else if (chkp_gimple_call_builtin_p (stmt, BUILT_IN_CHKP_BNDRET))
2019 continue;
2020 else
2021 gcc_assert (is_gimple_debug (stmt));
2022 }
2023
2024 /* Advance the iterator to the previous non-debug gimple statement in the same
2025 or dominating basic block. */
2026
2027 static inline void
2028 gsi_prev_dom_bb_nondebug (gimple_stmt_iterator *i)
2029 {
2030 basic_block dom;
2031
2032 gsi_prev_nondebug (i);
2033 while (gsi_end_p (*i))
2034 {
2035 dom = get_immediate_dominator (CDI_DOMINATORS, i->bb);
2036 if (dom == NULL || dom == ENTRY_BLOCK_PTR_FOR_FN (cfun))
2037 return;
2038
2039 *i = gsi_last_bb (dom);
2040 }
2041 }
2042
2043 /* Find a BUILT_IN_STACK_SAVE dominating gsi_stmt (I), and insert
2044 a clobber of VAR before each matching BUILT_IN_STACK_RESTORE.
2045
2046 It is possible that BUILT_IN_STACK_SAVE cannot be found in a dominator when a
2047 previous pass (such as DOM) duplicated it along multiple paths to a BB. In
2048 that case the function gives up without inserting the clobbers. */
2049
2050 static void
2051 insert_clobbers_for_var (gimple_stmt_iterator i, tree var)
2052 {
2053 gimple *stmt;
2054 tree saved_val;
2055 gimple_htab *visited = NULL;
2056
2057 for (; !gsi_end_p (i); gsi_prev_dom_bb_nondebug (&i))
2058 {
2059 stmt = gsi_stmt (i);
2060
2061 if (!gimple_call_builtin_p (stmt, BUILT_IN_STACK_SAVE))
2062 continue;
2063
2064 saved_val = gimple_call_lhs (stmt);
2065 if (saved_val == NULL_TREE)
2066 continue;
2067
2068 insert_clobber_before_stack_restore (saved_val, var, &visited);
2069 break;
2070 }
2071
2072 delete visited;
2073 }
2074
2075 /* Detect a __builtin_alloca_with_align with a constant size argument. If one
2076 is found, declare a fixed-size array to replace it and return the address of
2077 the array; otherwise return NULL_TREE. */
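/* For instance, a call

     p_1 = __builtin_alloca_with_align (16, align);

   whose size passes the heuristic below is replaced by the address of
   a fresh 16-byte array variable whose DECL_ALIGN comes from the
   second argument ("D.1234" is a hypothetical temporary here):

     p_1 = &D.1234; */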
2078
2079 static tree
2080 fold_builtin_alloca_with_align (gimple *stmt)
2081 {
2082 unsigned HOST_WIDE_INT size, threshold, n_elem;
2083 tree lhs, arg, block, var, elem_type, array_type;
2084
2085 /* Get lhs. */
2086 lhs = gimple_call_lhs (stmt);
2087 if (lhs == NULL_TREE)
2088 return NULL_TREE;
2089
2090 /* Detect constant argument. */
2091 arg = get_constant_value (gimple_call_arg (stmt, 0));
2092 if (arg == NULL_TREE
2093 || TREE_CODE (arg) != INTEGER_CST
2094 || !tree_fits_uhwi_p (arg))
2095 return NULL_TREE;
2096
2097 size = tree_to_uhwi (arg);
2098
2099 /* Heuristic: don't fold large allocas. */
2100 threshold = (unsigned HOST_WIDE_INT) PARAM_VALUE (PARAM_LARGE_STACK_FRAME);
2101 /* If the alloca is located at function entry, it has the same lifetime
2102 as a declared array, so we allow a larger size. */
2103 block = gimple_block (stmt);
2104 if (!(cfun->after_inlining
2105 && block
2106 && TREE_CODE (BLOCK_SUPERCONTEXT (block)) == FUNCTION_DECL))
2107 threshold /= 10;
2108 if (size > threshold)
2109 return NULL_TREE;
2110
2111 /* Declare array. */
2112 elem_type = build_nonstandard_integer_type (BITS_PER_UNIT, 1);
2113 n_elem = size * 8 / BITS_PER_UNIT;
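/* SIZE is in bytes, so with the usual 8-bit BITS_PER_UNIT this is
   simply SIZE elements of the byte-wide elem_type. */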
2114 array_type = build_array_type_nelts (elem_type, n_elem);
2115 var = create_tmp_var (array_type);
2116 DECL_ALIGN (var) = TREE_INT_CST_LOW (gimple_call_arg (stmt, 1));
2117 {
2118 struct ptr_info_def *pi = SSA_NAME_PTR_INFO (lhs);
2119 if (pi != NULL && !pi->pt.anything)
2120 {
2121 bool singleton_p;
2122 unsigned uid;
2123 singleton_p = pt_solution_singleton_p (&pi->pt, &uid);
2124 gcc_assert (singleton_p);
2125 SET_DECL_PT_UID (var, uid);
2126 }
2127 }
2128
2129 /* Fold alloca to the address of the array. */
2130 return fold_convert (TREE_TYPE (lhs), build_fold_addr_expr (var));
2131 }
2132
2133 /* Fold the stmt at *GSI with CCP-specific information that propagation
2134 and regular folding do not catch. */
2135
2136 static bool
2137 ccp_fold_stmt (gimple_stmt_iterator *gsi)
2138 {
2139 gimple *stmt = gsi_stmt (*gsi);
2140
2141 switch (gimple_code (stmt))
2142 {
2143 case GIMPLE_COND:
2144 {
2145 gcond *cond_stmt = as_a <gcond *> (stmt);
2146 ccp_prop_value_t val;
2147 /* Statement evaluation will handle type mismatches in constants
2148 more gracefully than the final propagation. This allows us to
2149 fold more conditionals here. */
2150 val = evaluate_stmt (stmt);
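/* A nonzero MASK means at least one bit of VAL is still unknown,
   i.e. the predicate did not evaluate to a full constant. */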
2151 if (val.lattice_val != CONSTANT
2152 || val.mask != 0)
2153 return false;
2154
2155 if (dump_file)
2156 {
2157 fprintf (dump_file, "Folding predicate ");
2158 print_gimple_expr (dump_file, stmt, 0, 0);
2159 fprintf (dump_file, " to ");
2160 print_generic_expr (dump_file, val.value, 0);
2161 fprintf (dump_file, "\n");
2162 }
2163
2164 if (integer_zerop (val.value))
2165 gimple_cond_make_false (cond_stmt);
2166 else
2167 gimple_cond_make_true (cond_stmt);
2168
2169 return true;
2170 }
2171
2172 case GIMPLE_CALL:
2173 {
2174 tree lhs = gimple_call_lhs (stmt);
2175 int flags = gimple_call_flags (stmt);
2176 tree val;
2177 tree argt;
2178 bool changed = false;
2179 unsigned i;
2180
2181 /* If the call was folded into a constant, make sure it goes
2182 away even if we cannot propagate into all uses because of
2183 type issues. */
2184 if (lhs
2185 && TREE_CODE (lhs) == SSA_NAME
2186 && (val = get_constant_value (lhs))
2187 /* Don't optimize away calls that have side-effects. */
2188 && (flags & (ECF_CONST|ECF_PURE)) != 0
2189 && (flags & ECF_LOOPING_CONST_OR_PURE) == 0)
2190 {
2191 tree new_rhs = unshare_expr (val);
2192 bool res;
2193 if (!useless_type_conversion_p (TREE_TYPE (lhs),
2194 TREE_TYPE (new_rhs)))
2195 new_rhs = fold_convert (TREE_TYPE (lhs), new_rhs);
2196 res = update_call_from_tree (gsi, new_rhs);
2197 gcc_assert (res);
2198 return true;
2199 }
2200
2201 /* Internal calls provide no argument types, so the extra laxity
2202 for normal calls does not apply. */
2203 if (gimple_call_internal_p (stmt))
2204 return false;
2205
2206 /* The heuristic of fold_builtin_alloca_with_align differs before and
2207 after inlining, so we don't require the arg to be changed into a
2208 constant for folding, but just to be constant. */
2209 if (gimple_call_builtin_p (stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
2210 {
2211 tree new_rhs = fold_builtin_alloca_with_align (stmt);
2212 if (new_rhs)
2213 {
2214 bool res = update_call_from_tree (gsi, new_rhs);
2215 tree var = TREE_OPERAND (TREE_OPERAND (new_rhs, 0), 0);
2216 gcc_assert (res);
2217 insert_clobbers_for_var (*gsi, var);
2218 return true;
2219 }
2220 }
2221
2222 /* Propagate into the call arguments. Compared to replace_uses_in
2223 this can use the argument slot types for type verification
2224 instead of the current argument type. We also can safely
2225 drop qualifiers here as we are dealing with constants anyway. */
2226 argt = TYPE_ARG_TYPES (gimple_call_fntype (stmt));
2227 for (i = 0; i < gimple_call_num_args (stmt) && argt;
2228 ++i, argt = TREE_CHAIN (argt))
2229 {
2230 tree arg = gimple_call_arg (stmt, i);
2231 if (TREE_CODE (arg) == SSA_NAME
2232 && (val = get_constant_value (arg))
2233 && useless_type_conversion_p
2234 (TYPE_MAIN_VARIANT (TREE_VALUE (argt)),
2235 TYPE_MAIN_VARIANT (TREE_TYPE (val))))
2236 {
2237 gimple_call_set_arg (stmt, i, unshare_expr (val));
2238 changed = true;
2239 }
2240 }
2241
2242 return changed;
2243 }
2244
2245 case GIMPLE_ASSIGN:
2246 {
2247 tree lhs = gimple_assign_lhs (stmt);
2248 tree val;
2249
2250 /* If we have a load that turned out to be constant, replace it,
2251 as we cannot propagate into all uses in all cases. */
2252 if (gimple_assign_single_p (stmt)
2253 && TREE_CODE (lhs) == SSA_NAME
2254 && (val = get_constant_value (lhs)))
2255 {
2256 tree rhs = unshare_expr (val);
2257 if (!useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (rhs)))
2258 rhs = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (lhs), rhs);
2259 gimple_assign_set_rhs_from_tree (gsi, rhs);
2260 return true;
2261 }
2262
2263 return false;
2264 }
2265
2266 default:
2267 return false;
2268 }
2269 }
2270
2271 /* Visit the assignment statement STMT. Set the value of its LHS to the
2272 value computed by the RHS and store LHS in *OUTPUT_P. If STMT
2273 creates virtual definitions, set the value of each new name to that
2274 of the RHS (if we can derive a constant out of the RHS).
2275 Value-returning call statements also perform an assignment, and
2276 are handled here. */
2277
2278 static enum ssa_prop_result
2279 visit_assignment (gimple *stmt, tree *output_p)
2280 {
2281 ccp_prop_value_t val;
2282 enum ssa_prop_result retval = SSA_PROP_NOT_INTERESTING;
2283
2284 tree lhs = gimple_get_lhs (stmt);
2285 if (TREE_CODE (lhs) == SSA_NAME)
2286 {
2287 /* Evaluate the statement, which could be
2288 either a GIMPLE_ASSIGN or a GIMPLE_CALL. */
2289 val = evaluate_stmt (stmt);
2290
2291 /* If STMT is an assignment to an SSA_NAME, we only have one
2292 value to set. */
2293 if (set_lattice_value (lhs, &val))
2294 {
2295 *output_p = lhs;
2296 if (val.lattice_val == VARYING)
2297 retval = SSA_PROP_VARYING;
2298 else
2299 retval = SSA_PROP_INTERESTING;
2300 }
2301 }
2302
2303 return retval;
2304 }
2305
2306
2307 /* Visit the conditional statement STMT. Return SSA_PROP_INTERESTING
2308 if it can determine which edge will be taken. Otherwise, return
2309 SSA_PROP_VARYING. */
2310
2311 static enum ssa_prop_result
2312 visit_cond_stmt (gimple *stmt, edge *taken_edge_p)
2313 {
2314 ccp_prop_value_t val;
2315 basic_block block;
2316
2317 block = gimple_bb (stmt);
2318 val = evaluate_stmt (stmt);
2319 if (val.lattice_val != CONSTANT
2320 || val.mask != 0)
2321 return SSA_PROP_VARYING;
2322
2323 /* Find which edge out of the conditional block will be taken and add it
2324 to the worklist. If no single edge can be determined statically,
2325 return SSA_PROP_VARYING to feed all the outgoing edges to the
2326 propagation engine. */
2327 *taken_edge_p = find_taken_edge (block, val.value);
2328 if (*taken_edge_p)
2329 return SSA_PROP_INTERESTING;
2330 else
2331 return SSA_PROP_VARYING;
2332 }
2333
2334
2335 /* Evaluate statement STMT. If the statement produces an output value and
2336 its evaluation changes the lattice value of its output, return
2337 SSA_PROP_INTERESTING and set *OUTPUT_P to the SSA_NAME holding the
2338 output value.
2339
2340 If STMT is a conditional branch and we can determine its truth
2341 value, set *TAKEN_EDGE_P accordingly. If STMT produces a varying
2342 value, return SSA_PROP_VARYING. */
2343
2344 static enum ssa_prop_result
2345 ccp_visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
2346 {
2347 tree def;
2348 ssa_op_iter iter;
2349
2350 if (dump_file && (dump_flags & TDF_DETAILS))
2351 {
2352 fprintf (dump_file, "\nVisiting statement:\n");
2353 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
2354 }
2355
2356 switch (gimple_code (stmt))
2357 {
2358 case GIMPLE_ASSIGN:
2359 /* If the statement is an assignment that produces a single
2360 output value, evaluate its RHS to see if the lattice value of
2361 its output has changed. */
2362 return visit_assignment (stmt, output_p);
2363
2364 case GIMPLE_CALL:
2365 /* A value-returning call also performs an assignment. */
2366 if (gimple_call_lhs (stmt) != NULL_TREE)
2367 return visit_assignment (stmt, output_p);
2368 break;
2369
2370 case GIMPLE_COND:
2371 case GIMPLE_SWITCH:
2372 /* If STMT is a conditional branch, see if we can determine
2373 which branch will be taken. */
2374 /* FIXME. It appears that we should be able to optimize
2375 computed GOTOs here as well. */
2376 return visit_cond_stmt (stmt, taken_edge_p);
2377
2378 default:
2379 break;
2380 }
2381
2382 /* Any other kind of statement is not interesting for constant
2383 propagation and, therefore, not worth simulating. */
2384 if (dump_file && (dump_flags & TDF_DETAILS))
2385 fprintf (dump_file, "No interesting values produced. Marked VARYING.\n");
2386
2387 /* Definitions made by statements other than assignments to
2388 SSA_NAMEs represent unknown modifications to their outputs.
2389 Mark them VARYING. */
2390 FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
2391 set_value_varying (def);
2392
2393 return SSA_PROP_VARYING;
2394 }
2395
2396
2397 /* Main entry point for SSA Conditional Constant Propagation. If NONZERO_P,
2398 record nonzero bits. */
2399
2400 static unsigned int
2401 do_ssa_ccp (bool nonzero_p)
2402 {
2403 unsigned int todo = 0;
2404 calculate_dominance_info (CDI_DOMINATORS);
2405 ccp_initialize ();
2406 ssa_propagate (ccp_visit_stmt, ccp_visit_phi_node);
2407 if (ccp_finalize (nonzero_p))
2408 todo = (TODO_cleanup_cfg | TODO_update_ssa);
2409 free_dominance_info (CDI_DOMINATORS);
2410 return todo;
2411 }
2412
2413
2414 namespace {
2415
2416 const pass_data pass_data_ccp =
2417 {
2418 GIMPLE_PASS, /* type */
2419 "ccp", /* name */
2420 OPTGROUP_NONE, /* optinfo_flags */
2421 TV_TREE_CCP, /* tv_id */
2422 ( PROP_cfg | PROP_ssa ), /* properties_required */
2423 0, /* properties_provided */
2424 0, /* properties_destroyed */
2425 0, /* todo_flags_start */
2426 TODO_update_address_taken, /* todo_flags_finish */
2427 };
2428
2429 class pass_ccp : public gimple_opt_pass
2430 {
2431 public:
2432 pass_ccp (gcc::context *ctxt)
2433 : gimple_opt_pass (pass_data_ccp, ctxt), nonzero_p (false)
2434 {}
2435
2436 /* opt_pass methods: */
2437 opt_pass * clone () { return new pass_ccp (m_ctxt); }
2438 void set_pass_param (unsigned int n, bool param)
2439 {
2440 gcc_assert (n == 0);
2441 nonzero_p = param;
2442 }
2443 virtual bool gate (function *) { return flag_tree_ccp != 0; }
2444 virtual unsigned int execute (function *) { return do_ssa_ccp (nonzero_p); }
2445
2446 private:
2447 /* Determines whether the pass instance records nonzero bits. */
2448 bool nonzero_p;
2449 }; // class pass_ccp
2450
2451 } // anon namespace
2452
2453 gimple_opt_pass *
2454 make_pass_ccp (gcc::context *ctxt)
2455 {
2456 return new pass_ccp (ctxt);
2457 }
2458
2459
2460
2461 /* Try to optimize out __builtin_stack_restore. Optimize it out
2462 if there is another __builtin_stack_restore in the same basic
2463 block and no calls or ASM_EXPRs are in between, or if this block's
2464 only outgoing edge is to EXIT_BLOCK and there are no calls or
2465 ASM_EXPRs after this __builtin_stack_restore. */
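/* E.g. given

     __builtin_stack_restore (p_1);
     ... no calls or asms ...
     __builtin_stack_restore (p_1);

   the first restore has no observable effect and is deleted. */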
2466
2467 static tree
2468 optimize_stack_restore (gimple_stmt_iterator i)
2469 {
2470 tree callee;
2471 gimple *stmt;
2472
2473 basic_block bb = gsi_bb (i);
2474 gimple *call = gsi_stmt (i);
2475
2476 if (gimple_code (call) != GIMPLE_CALL
2477 || gimple_call_num_args (call) != 1
2478 || TREE_CODE (gimple_call_arg (call, 0)) != SSA_NAME
2479 || !POINTER_TYPE_P (TREE_TYPE (gimple_call_arg (call, 0))))
2480 return NULL_TREE;
2481
2482 for (gsi_next (&i); !gsi_end_p (i); gsi_next (&i))
2483 {
2484 stmt = gsi_stmt (i);
2485 if (gimple_code (stmt) == GIMPLE_ASM)
2486 return NULL_TREE;
2487 if (gimple_code (stmt) != GIMPLE_CALL)
2488 continue;
2489
2490 callee = gimple_call_fndecl (stmt);
2491 if (!callee
2492 || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL
2493 /* All regular builtins are ok, just obviously not alloca. */
2494 || DECL_FUNCTION_CODE (callee) == BUILT_IN_ALLOCA
2495 || DECL_FUNCTION_CODE (callee) == BUILT_IN_ALLOCA_WITH_ALIGN)
2496 return NULL_TREE;
2497
2498 if (DECL_FUNCTION_CODE (callee) == BUILT_IN_STACK_RESTORE)
2499 goto second_stack_restore;
2500 }
2501
2502 if (!gsi_end_p (i))
2503 return NULL_TREE;
2504
2505 /* Allow a single successor that is the exit block, or no successors. */
2506 switch (EDGE_COUNT (bb->succs))
2507 {
2508 case 0:
2509 break;
2510 case 1:
2511 if (single_succ_edge (bb)->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
2512 return NULL_TREE;
2513 break;
2514 default:
2515 return NULL_TREE;
2516 }
2517 second_stack_restore:
2518
2519 /* If there's exactly one use, then zap the call to __builtin_stack_save.
2520 If there are multiple uses, then the last one should remove the call.
2521 In any case, whether the call to __builtin_stack_save can be removed
2522 or not is irrelevant to removing the call to __builtin_stack_restore. */
2523 if (has_single_use (gimple_call_arg (call, 0)))
2524 {
2525 gimple *stack_save = SSA_NAME_DEF_STMT (gimple_call_arg (call, 0));
2526 if (is_gimple_call (stack_save))
2527 {
2528 callee = gimple_call_fndecl (stack_save);
2529 if (callee
2530 && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
2531 && DECL_FUNCTION_CODE (callee) == BUILT_IN_STACK_SAVE)
2532 {
2533 gimple_stmt_iterator stack_save_gsi;
2534 tree rhs;
2535
2536 stack_save_gsi = gsi_for_stmt (stack_save);
2537 rhs = build_int_cst (TREE_TYPE (gimple_call_arg (call, 0)), 0);
2538 update_call_from_tree (&stack_save_gsi, rhs);
2539 }
2540 }
2541 }
2542
2543 /* No effect, so the statement will be deleted. */
2544 return integer_zero_node;
2545 }
2546
2547 /* If the va_list type is a simple pointer and nothing special is needed,
2548 optimize __builtin_va_start (&ap, 0) into ap = __builtin_next_arg (0),
2549 optimize __builtin_va_end (&ap) out as a NOP and __builtin_va_copy into
2550 a simple pointer assignment. */
2551
2552 static tree
2553 optimize_stdarg_builtin (gimple *call)
2554 {
2555 tree callee, lhs, rhs, cfun_va_list;
2556 bool va_list_simple_ptr;
2557 location_t loc = gimple_location (call);
2558
2559 if (gimple_code (call) != GIMPLE_CALL)
2560 return NULL_TREE;
2561
2562 callee = gimple_call_fndecl (call);
2563
2564 cfun_va_list = targetm.fn_abi_va_list (callee);
2565 va_list_simple_ptr = POINTER_TYPE_P (cfun_va_list)
2566 && (TREE_TYPE (cfun_va_list) == void_type_node
2567 || TREE_TYPE (cfun_va_list) == char_type_node);
2568
2569 switch (DECL_FUNCTION_CODE (callee))
2570 {
2571 case BUILT_IN_VA_START:
2572 if (!va_list_simple_ptr
2573 || targetm.expand_builtin_va_start != NULL
2574 || !builtin_decl_explicit_p (BUILT_IN_NEXT_ARG))
2575 return NULL_TREE;
2576
2577 if (gimple_call_num_args (call) != 2)
2578 return NULL_TREE;
2579
2580 lhs = gimple_call_arg (call, 0);
2581 if (!POINTER_TYPE_P (TREE_TYPE (lhs))
2582 || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs)))
2583 != TYPE_MAIN_VARIANT (cfun_va_list))
2584 return NULL_TREE;
2585
2586 lhs = build_fold_indirect_ref_loc (loc, lhs);
2587 rhs = build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_NEXT_ARG),
2588 1, integer_zero_node);
2589 rhs = fold_convert_loc (loc, TREE_TYPE (lhs), rhs);
2590 return build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, rhs);
2591
2592 case BUILT_IN_VA_COPY:
2593 if (!va_list_simple_ptr)
2594 return NULL_TREE;
2595
2596 if (gimple_call_num_args (call) != 2)
2597 return NULL_TREE;
2598
2599 lhs = gimple_call_arg (call, 0);
2600 if (!POINTER_TYPE_P (TREE_TYPE (lhs))
2601 || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs)))
2602 != TYPE_MAIN_VARIANT (cfun_va_list))
2603 return NULL_TREE;
2604
2605 lhs = build_fold_indirect_ref_loc (loc, lhs);
2606 rhs = gimple_call_arg (call, 1);
2607 if (TYPE_MAIN_VARIANT (TREE_TYPE (rhs))
2608 != TYPE_MAIN_VARIANT (cfun_va_list))
2609 return NULL_TREE;
2610
2611 rhs = fold_convert_loc (loc, TREE_TYPE (lhs), rhs);
2612 return build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, rhs);
2613
2614 case BUILT_IN_VA_END:
2615 /* No effect, so the statement will be deleted. */
2616 return integer_zero_node;
2617
2618 default:
2619 gcc_unreachable ();
2620 }
2621 }
2622
2623 /* Attempt to make the block of __builtin_unreachable I unreachable by changing
2624 the incoming jumps. Return true if at least one jump was changed. */
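/* E.g. if the false edge of

     if (x_1 > 0) goto <bb 3>; else goto <bb 4>;

   leads to a block that begins with __builtin_unreachable (), the
   condition is rewritten to if (1), so the edge into the unreachable
   block goes away at the next CFG cleanup. */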
2625
2626 static bool
2627 optimize_unreachable (gimple_stmt_iterator i)
2628 {
2629 basic_block bb = gsi_bb (i);
2630 gimple_stmt_iterator gsi;
2631 gimple *stmt;
2632 edge_iterator ei;
2633 edge e;
2634 bool ret;
2635
2636 if (flag_sanitize & SANITIZE_UNREACHABLE)
2637 return false;
2638
2639 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2640 {
2641 stmt = gsi_stmt (gsi);
2642
2643 if (is_gimple_debug (stmt))
2644 continue;
2645
2646 if (glabel *label_stmt = dyn_cast <glabel *> (stmt))
2647 {
2648 /* Verify we do not need to preserve the label. */
2649 if (FORCED_LABEL (gimple_label_label (label_stmt)))
2650 return false;
2651
2652 continue;
2653 }
2654
2655 /* Only handle the case that __builtin_unreachable is the first statement
2656 in the block. We rely on DCE to remove stmts without side-effects
2657 before __builtin_unreachable. */
2658 if (gsi_stmt (gsi) != gsi_stmt (i))
2659 return false;
2660 }
2661
2662 ret = false;
2663 FOR_EACH_EDGE (e, ei, bb->preds)
2664 {
2665 gsi = gsi_last_bb (e->src);
2666 if (gsi_end_p (gsi))
2667 continue;
2668
2669 stmt = gsi_stmt (gsi);
2670 if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
2671 {
2672 if (e->flags & EDGE_TRUE_VALUE)
2673 gimple_cond_make_false (cond_stmt);
2674 else if (e->flags & EDGE_FALSE_VALUE)
2675 gimple_cond_make_true (cond_stmt);
2676 else
2677 gcc_unreachable ();
2678 update_stmt (cond_stmt);
2679 }
2680 else
2681 {
2682 /* TODO: handle other cases, e.g. a switch statement. */
2683 continue;
2684 }
2685
2686 ret = true;
2687 }
2688
2689 return ret;
2690 }
2691
2692 /* A simple pass that attempts to fold all builtin functions. This pass
2693 is run after we've propagated as many constants as we can. */
2694
2695 namespace {
2696
2697 const pass_data pass_data_fold_builtins =
2698 {
2699 GIMPLE_PASS, /* type */
2700 "fab", /* name */
2701 OPTGROUP_NONE, /* optinfo_flags */
2702 TV_NONE, /* tv_id */
2703 ( PROP_cfg | PROP_ssa ), /* properties_required */
2704 0, /* properties_provided */
2705 0, /* properties_destroyed */
2706 0, /* todo_flags_start */
2707 TODO_update_ssa, /* todo_flags_finish */
2708 };
2709
2710 class pass_fold_builtins : public gimple_opt_pass
2711 {
2712 public:
2713 pass_fold_builtins (gcc::context *ctxt)
2714 : gimple_opt_pass (pass_data_fold_builtins, ctxt)
2715 {}
2716
2717 /* opt_pass methods: */
2718 opt_pass * clone () { return new pass_fold_builtins (m_ctxt); }
2719 virtual unsigned int execute (function *);
2720
2721 }; // class pass_fold_builtins
2722
2723 unsigned int
2724 pass_fold_builtins::execute (function *fun)
2725 {
2726 bool cfg_changed = false;
2727 basic_block bb;
2728 unsigned int todoflags = 0;
2729
2730 FOR_EACH_BB_FN (bb, fun)
2731 {
2732 gimple_stmt_iterator i;
2733 for (i = gsi_start_bb (bb); !gsi_end_p (i); )
2734 {
2735 gimple *stmt, *old_stmt;
2736 tree callee;
2737 enum built_in_function fcode;
2738
2739 stmt = gsi_stmt (i);
2740
2741 if (gimple_code (stmt) != GIMPLE_CALL)
2742 {
2743 /* Remove all *ssaname_N ={v} {CLOBBER}; stmts; after
2744 the last GIMPLE DSE they aren't needed and might
2745 unnecessarily keep the SSA_NAMEs live. */
2746 if (gimple_clobber_p (stmt))
2747 {
2748 tree lhs = gimple_assign_lhs (stmt);
2749 if (TREE_CODE (lhs) == MEM_REF
2750 && TREE_CODE (TREE_OPERAND (lhs, 0)) == SSA_NAME)
2751 {
2752 unlink_stmt_vdef (stmt);
2753 gsi_remove (&i, true);
2754 release_defs (stmt);
2755 continue;
2756 }
2757 }
2758 gsi_next (&i);
2759 continue;
2760 }
2761
2762 callee = gimple_call_fndecl (stmt);
2763 if (!callee || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL)
2764 {
2765 gsi_next (&i);
2766 continue;
2767 }
2768
2769 fcode = DECL_FUNCTION_CODE (callee);
2770 if (fold_stmt (&i))
2771 ;
2772 else
2773 {
2774 tree result = NULL_TREE;
2775 switch (DECL_FUNCTION_CODE (callee))
2776 {
2777 case BUILT_IN_CONSTANT_P:
2778 /* Resolve __builtin_constant_p. If it hasn't been
2779 folded to integer_one_node by now, it's fairly
2780 certain that the value simply isn't constant. */
2781 result = integer_zero_node;
2782 break;
2783
2784 case BUILT_IN_ASSUME_ALIGNED:
2785 /* Remove __builtin_assume_aligned. */
2786 result = gimple_call_arg (stmt, 0);
2787 break;
2788
2789 case BUILT_IN_STACK_RESTORE:
2790 result = optimize_stack_restore (i);
2791 if (result)
2792 break;
2793 gsi_next (&i);
2794 continue;
2795
2796 case BUILT_IN_UNREACHABLE:
2797 if (optimize_unreachable (i))
2798 cfg_changed = true;
2799 break;
2800
2801 case BUILT_IN_VA_START:
2802 case BUILT_IN_VA_END:
2803 case BUILT_IN_VA_COPY:
2804 /* These shouldn't be folded before pass_stdarg. */
2805 result = optimize_stdarg_builtin (stmt);
2806 if (result)
2807 break;
2808 /* FALLTHRU */
2809
2810 default:;
2811 }
2812
2813 if (!result)
2814 {
2815 gsi_next (&i);
2816 continue;
2817 }
2818
2819 if (!update_call_from_tree (&i, result))
2820 gimplify_and_update_call_from_tree (&i, result);
2821 }
2822
2823 todoflags |= TODO_update_address_taken;
2824
2825 if (dump_file && (dump_flags & TDF_DETAILS))
2826 {
2827 fprintf (dump_file, "Simplified\n ");
2828 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
2829 }
2830
2831 old_stmt = stmt;
2832 stmt = gsi_stmt (i);
2833 update_stmt (stmt);
2834
2835 if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt)
2836 && gimple_purge_dead_eh_edges (bb))
2837 cfg_changed = true;
2838
2839 if (dump_file && (dump_flags & TDF_DETAILS))
2840 {
2841 fprintf (dump_file, "to\n ");
2842 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
2843 fprintf (dump_file, "\n");
2844 }
2845
2846 /* Retry the same statement if it changed into another
2847 builtin; there might be new opportunities now. */
2848 if (gimple_code (stmt) != GIMPLE_CALL)
2849 {
2850 gsi_next (&i);
2851 continue;
2852 }
2853 callee = gimple_call_fndecl (stmt);
2854 if (!callee
2855 || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL
2856 || DECL_FUNCTION_CODE (callee) == fcode)
2857 gsi_next (&i);
2858 }
2859 }
2860
2861 /* Delete unreachable blocks. */
2862 if (cfg_changed)
2863 todoflags |= TODO_cleanup_cfg;
2864
2865 return todoflags;
2866 }
2867
2868 } // anon namespace
2869
2870 gimple_opt_pass *
2871 make_pass_fold_builtins (gcc::context *ctxt)
2872 {
2873 return new pass_fold_builtins (ctxt);
2874 }