1 /* Conditional constant propagation pass for the GNU compiler.
2 Copyright (C) 2000-2016 Free Software Foundation, Inc.
3 Adapted from original RTL SSA-CCP by Daniel Berlin <dberlin@dberlin.org>
4 Adapted to GIMPLE trees by Diego Novillo <dnovillo@redhat.com>
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it
9 under the terms of the GNU General Public License as published by the
10 Free Software Foundation; either version 3, or (at your option) any
11 later version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT
14 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 /* Conditional constant propagation (CCP) is based on the SSA
23 propagation engine (tree-ssa-propagate.c). Constant assignments of
24 the form VAR = CST are propagated from the assignments into uses of
25 VAR, which in turn may generate new constants. The simulation uses
26 a four-level lattice to keep track of constant values associated
27 with SSA names. Given an SSA name V_i, it may take one of the
28 following values:
29
30 UNINITIALIZED -> the initial state of the value. This value
31 is replaced with a correct initial value
32 the first time the value is used, so the
33 rest of the pass does not need to care about
34 it. Using this value simplifies initialization
35 of the pass, and prevents us from needlessly
36 scanning statements that are never reached.
37
38 UNDEFINED -> V_i is a local variable whose definition
39 has not been processed yet. Therefore we
40 don't yet know if its value is a constant
41 or not.
42
43 CONSTANT -> V_i has been found to hold a constant
44 value C.
45
46 VARYING -> V_i cannot take a constant value, or if it
47 does, it is not possible to determine it
48 at compile time.
49
50 The core of SSA-CCP is in ccp_visit_stmt and ccp_visit_phi_node:
51
52 1- In ccp_visit_stmt, we are interested in assignments whose RHS
53 evaluates into a constant and conditional jumps whose predicate
54 evaluates into a boolean true or false. When an assignment of
55 the form V_i = CONST is found, V_i's lattice value is set to
56 CONSTANT and CONST is associated with it. This causes the
57 propagation engine to add all the SSA edges coming out of the
58 assignment into the worklists, so that statements that use V_i
59 can be visited.
60
61 If the statement is a conditional with a constant predicate, we
62 mark the outgoing edges as executable or not executable
63 depending on the predicate's value. This is then used when
64 visiting PHI nodes to know when a PHI argument can be ignored.
65
66
67 2- In ccp_visit_phi_node, if all the PHI arguments evaluate to the
68 same constant C, then the LHS of the PHI is set to C. This
69 evaluation is known as the "meet operation". Since one of the
70 goals of this evaluation is to optimistically return constant
71 values as often as possible, it uses two main short cuts:
72
73 - If an argument is flowing in through a non-executable edge, it
74 is ignored. This is useful in cases like this:
75
76 if (PRED)
77 a_9 = 3;
78 else
79 a_10 = 100;
80 a_11 = PHI (a_9, a_10)
81
82 If PRED is known to always evaluate to false, then we can
83 assume that a_11 will always take its value from a_10, meaning
84 that instead of considering it VARYING (a_9 and a_10 have
85 different values), we can consider it CONSTANT 100.
86
87 - If an argument has an UNDEFINED value, then it does not affect
88 the outcome of the meet operation. If a variable V_i has an
89 UNDEFINED value, it means that either its defining statement
90 hasn't been visited yet or V_i has no defining statement, in
91 which case the original symbol 'V' is being used
92 uninitialized. Since 'V' is a local variable, the compiler
93 may assume any initial value for it.
94
95
96 After propagation, every variable V_i that ends up with a lattice
97 value of CONSTANT will have the associated constant value in the
98 array CONST_VAL[i].VALUE. That is fed into substitute_and_fold for
99 final substitution and folding.
100
101 This algorithm uses wide-ints at the max precision of the target.
102 This means that, with one uninteresting exception, variables with
103 UNSIGNED types never go to VARYING because the bits above the
104 precision of the type of the variable are always zero. The
105 uninteresting case is a variable of UNSIGNED type that has the
106 maximum precision of the target. Such variables can go to VARYING,
107 but this causes no loss of information since these variables will
108 never be extended.
109
110 References:
111
112 Constant propagation with conditional branches,
113 Wegman and Zadeck, ACM TOPLAS 13(2):181-210.
114
115 Building an Optimizing Compiler,
116 Robert Morgan, Butterworth-Heinemann, 1998, Section 8.9.
117
118 Advanced Compiler Design and Implementation,
119 Steven Muchnick, Morgan Kaufmann, 1997, Section 12.6 */
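
/* A hedged illustration of the propagation described above; the
   statements and SSA names are made up for the example:

     x_1 = 4;
     y_2 = x_1 + 1;
     if (y_2 > 10)
       z_3 = 1;
     else
       z_4 = 2;
     z_5 = PHI (z_3, z_4)

   Visiting the first two assignments sets x_1 and y_2 to CONSTANT 4
   and CONSTANT 5, the predicate y_2 > 10 then folds to false, so only
   the edge flowing through z_4 is marked executable, and the meet
   over the executable PHI arguments yields z_5 = CONSTANT 2.  */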
120
121 #include "config.h"
122 #include "system.h"
123 #include "coretypes.h"
124 #include "backend.h"
125 #include "target.h"
126 #include "tree.h"
127 #include "gimple.h"
128 #include "tree-pass.h"
129 #include "ssa.h"
130 #include "gimple-pretty-print.h"
131 #include "fold-const.h"
132 #include "gimple-fold.h"
133 #include "tree-eh.h"
134 #include "gimplify.h"
135 #include "gimple-iterator.h"
136 #include "tree-cfg.h"
137 #include "tree-ssa-propagate.h"
138 #include "dbgcnt.h"
139 #include "params.h"
140 #include "builtins.h"
141 #include "tree-chkp.h"
142 #include "cfgloop.h"
143 #include "stor-layout.h"
144 #include "optabs-query.h"
145
146
147 /* Possible lattice values. */
148 typedef enum
149 {
150 UNINITIALIZED,
151 UNDEFINED,
152 CONSTANT,
153 VARYING
154 } ccp_lattice_t;
155
156 struct ccp_prop_value_t {
157 /* Lattice value. */
158 ccp_lattice_t lattice_val;
159
160 /* Propagated value. */
161 tree value;
162
163 /* Mask that applies to the propagated value during CCP. For X
164 with a CONSTANT lattice value X & ~mask == value & ~mask. The
165 zero bits in the mask cover constant values. The ones mean no
166 information. */
167 widest_int mask;
168 };
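
/* An illustrative (not normative) reading of the encoding above:
   with value == 0x8 and mask == 0x3 the bits covered by zero mask
   bits are known, so bit 3 is known to be set, the bits above it are
   known to be clear, and the low two bits are unknown -- i.e. the
   pair describes exactly the set {8, 9, 10, 11}.  A mask of 0 is a
   fully known constant; a mask of all ones carries no bit
   information at all.  */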
169
170 /* Array of propagated constant values. After propagation,
171 CONST_VAL[I].VALUE holds the constant value for SSA_NAME(I). If
172 the constant is held in an SSA name representing a memory store
173 (i.e., a VDEF), CONST_VAL[I].MEM_REF will contain the actual
174 memory reference used to store (i.e., the LHS of the assignment
175 doing the store). */
176 static ccp_prop_value_t *const_val;
177 static unsigned n_const_val;
178
179 static void canonicalize_value (ccp_prop_value_t *);
180 static bool ccp_fold_stmt (gimple_stmt_iterator *);
181 static void ccp_lattice_meet (ccp_prop_value_t *, ccp_prop_value_t *);
182
183 /* Dump constant propagation value VAL to file OUTF prefixed by PREFIX. */
184
185 static void
186 dump_lattice_value (FILE *outf, const char *prefix, ccp_prop_value_t val)
187 {
188 switch (val.lattice_val)
189 {
190 case UNINITIALIZED:
191 fprintf (outf, "%sUNINITIALIZED", prefix);
192 break;
193 case UNDEFINED:
194 fprintf (outf, "%sUNDEFINED", prefix);
195 break;
196 case VARYING:
197 fprintf (outf, "%sVARYING", prefix);
198 break;
199 case CONSTANT:
200 if (TREE_CODE (val.value) != INTEGER_CST
201 || val.mask == 0)
202 {
203 fprintf (outf, "%sCONSTANT ", prefix);
204 print_generic_expr (outf, val.value, dump_flags);
205 }
206 else
207 {
208 widest_int cval = wi::bit_and_not (wi::to_widest (val.value),
209 val.mask);
210 fprintf (outf, "%sCONSTANT ", prefix);
211 print_hex (cval, outf);
212 fprintf (outf, " (");
213 print_hex (val.mask, outf);
214 fprintf (outf, ")");
215 }
216 break;
217 default:
218 gcc_unreachable ();
219 }
220 }
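
/* As a sketch of the resulting dump format: a partially known value
   with value == 0x8 and mask == 0x3 is printed by the function above
   roughly as "CONSTANT 0x8 (0x3)" -- the value with the unknown bits
   cleared, followed by the mask in parentheses.  */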
221
222
223 /* Print lattice value VAL to stderr. */
224
225 void debug_lattice_value (ccp_prop_value_t val);
226
227 DEBUG_FUNCTION void
228 debug_lattice_value (ccp_prop_value_t val)
229 {
230 dump_lattice_value (stderr, "", val);
231 fprintf (stderr, "\n");
232 }
233
234 /* Extend NONZERO_BITS to a full mask, based on SGN. */
235
236 static widest_int
237 extend_mask (const wide_int &nonzero_bits, signop sgn)
238 {
239 return widest_int::from (nonzero_bits, sgn);
240 }
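
/* A small sketch of what the SGN parameter changes, assuming a
   16-bit wide NONZERO_BITS for concreteness: 0x80ff zero-extends to
   0x80ff under UNSIGNED, while under SIGNED the set top bit is
   propagated upward, giving ...ffff80ff, so every bit above the
   original precision stays possibly nonzero in the resulting
   mask.  */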
241
242 /* Compute a default value for variable VAR and store it in the
243 CONST_VAL array. The following rules are used to get default
244 values:
245
246 1- Global and static variables that are declared constant are
247 considered CONSTANT.
248
249 2- Any other value is considered UNDEFINED. This is useful when
250 considering PHI nodes. PHI arguments that are undefined do not
251 change the constant value of the PHI node, which allows for more
252 constants to be propagated.
253
254 3- Variables defined by statements other than assignments and PHI
255 nodes are considered VARYING.
256
257 4- Initial values of variables that are not GIMPLE registers are
258 considered VARYING. */
259
260 static ccp_prop_value_t
261 get_default_value (tree var)
262 {
263 ccp_prop_value_t val = { UNINITIALIZED, NULL_TREE, 0 };
264 gimple *stmt;
265
266 stmt = SSA_NAME_DEF_STMT (var);
267
268 if (gimple_nop_p (stmt))
269 {
270 /* Variables defined by an empty statement are those used
271 before being initialized. If VAR is a local variable, we
272 can assume initially that it is UNDEFINED, otherwise we must
273 consider it VARYING. */
274 if (!virtual_operand_p (var)
275 && SSA_NAME_VAR (var)
276 && TREE_CODE (SSA_NAME_VAR (var)) == VAR_DECL)
277 val.lattice_val = UNDEFINED;
278 else
279 {
280 val.lattice_val = VARYING;
281 val.mask = -1;
282 if (flag_tree_bit_ccp)
283 {
284 wide_int nonzero_bits = get_nonzero_bits (var);
285 if (nonzero_bits != -1)
286 {
287 val.lattice_val = CONSTANT;
288 val.value = build_zero_cst (TREE_TYPE (var));
289 val.mask = extend_mask (nonzero_bits, TYPE_SIGN (TREE_TYPE (var)));
290 }
291 }
292 }
293 }
294 else if (is_gimple_assign (stmt))
295 {
296 tree cst;
297 if (gimple_assign_single_p (stmt)
298 && DECL_P (gimple_assign_rhs1 (stmt))
299 && (cst = get_symbol_constant_value (gimple_assign_rhs1 (stmt))))
300 {
301 val.lattice_val = CONSTANT;
302 val.value = cst;
303 }
304 else
305 {
306 /* Any other variable defined by an assignment is considered
307 UNDEFINED. */
308 val.lattice_val = UNDEFINED;
309 }
310 }
311 else if ((is_gimple_call (stmt)
312 && gimple_call_lhs (stmt) != NULL_TREE)
313 || gimple_code (stmt) == GIMPLE_PHI)
314 {
315 /* A variable defined by a call or a PHI node is considered
316 UNDEFINED. */
317 val.lattice_val = UNDEFINED;
318 }
319 else
320 {
321 /* Otherwise, VAR will never take on a constant value. */
322 val.lattice_val = VARYING;
323 val.mask = -1;
324 }
325
326 return val;
327 }
328
329
330 /* Get the constant value associated with variable VAR. */
331
332 static inline ccp_prop_value_t *
333 get_value (tree var)
334 {
335 ccp_prop_value_t *val;
336
337 if (const_val == NULL
338 || SSA_NAME_VERSION (var) >= n_const_val)
339 return NULL;
340
341 val = &const_val[SSA_NAME_VERSION (var)];
342 if (val->lattice_val == UNINITIALIZED)
343 *val = get_default_value (var);
344
345 canonicalize_value (val);
346
347 return val;
348 }
349
350 /* Return the constant tree value associated with VAR. */
351
352 static inline tree
353 get_constant_value (tree var)
354 {
355 ccp_prop_value_t *val;
356 if (TREE_CODE (var) != SSA_NAME)
357 {
358 if (is_gimple_min_invariant (var))
359 return var;
360 return NULL_TREE;
361 }
362 val = get_value (var);
363 if (val
364 && val->lattice_val == CONSTANT
365 && (TREE_CODE (val->value) != INTEGER_CST
366 || val->mask == 0))
367 return val->value;
368 return NULL_TREE;
369 }
370
371 /* Sets the value associated with VAR to VARYING. */
372
373 static inline void
374 set_value_varying (tree var)
375 {
376 ccp_prop_value_t *val = &const_val[SSA_NAME_VERSION (var)];
377
378 val->lattice_val = VARYING;
379 val->value = NULL_TREE;
380 val->mask = -1;
381 }
382
383 /* For integer constants, make sure to drop TREE_OVERFLOW. */
384
385 static void
386 canonicalize_value (ccp_prop_value_t *val)
387 {
388 if (val->lattice_val != CONSTANT)
389 return;
390
391 if (TREE_OVERFLOW_P (val->value))
392 val->value = drop_tree_overflow (val->value);
393 }
394
395 /* Return whether the lattice transition is valid. */
396
397 static bool
398 valid_lattice_transition (ccp_prop_value_t old_val, ccp_prop_value_t new_val)
399 {
400 /* Lattice transitions must always be monotonically increasing in
401 value. */
402 if (old_val.lattice_val < new_val.lattice_val)
403 return true;
404
405 if (old_val.lattice_val != new_val.lattice_val)
406 return false;
407
408 if (!old_val.value && !new_val.value)
409 return true;
410
411 /* Now both lattice values are CONSTANT. */
412
413 /* Allow arbitrary copy changes as we might look through PHI <a_1, ...>
414 when only a single copy edge is executable. */
415 if (TREE_CODE (old_val.value) == SSA_NAME
416 && TREE_CODE (new_val.value) == SSA_NAME)
417 return true;
418
419 /* Allow transitioning from a constant to a copy. */
420 if (is_gimple_min_invariant (old_val.value)
421 && TREE_CODE (new_val.value) == SSA_NAME)
422 return true;
423
424 /* Allow transitioning from PHI <&x, not executable> == &x
425 to PHI <&x, &y> == common alignment. */
426 if (TREE_CODE (old_val.value) != INTEGER_CST
427 && TREE_CODE (new_val.value) == INTEGER_CST)
428 return true;
429
430 /* Bit-lattices have to agree in the still valid bits. */
431 if (TREE_CODE (old_val.value) == INTEGER_CST
432 && TREE_CODE (new_val.value) == INTEGER_CST)
433 return (wi::bit_and_not (wi::to_widest (old_val.value), new_val.mask)
434 == wi::bit_and_not (wi::to_widest (new_val.value), new_val.mask));
435
436 /* Otherwise constant values have to agree. */
437 if (operand_equal_p (old_val.value, new_val.value, 0))
438 return true;
439
440 /* At least the kinds and types should agree now. */
441 if (TREE_CODE (old_val.value) != TREE_CODE (new_val.value)
442 || !types_compatible_p (TREE_TYPE (old_val.value),
443 TREE_TYPE (new_val.value)))
444 return false;
445
446 /* For floats and !HONOR_NANS allow transitions from (partial) NaN
447 to non-NaN. */
448 tree type = TREE_TYPE (new_val.value);
449 if (SCALAR_FLOAT_TYPE_P (type)
450 && !HONOR_NANS (type))
451 {
452 if (REAL_VALUE_ISNAN (TREE_REAL_CST (old_val.value)))
453 return true;
454 }
455 else if (VECTOR_FLOAT_TYPE_P (type)
456 && !HONOR_NANS (type))
457 {
458 for (unsigned i = 0; i < VECTOR_CST_NELTS (old_val.value); ++i)
459 if (!REAL_VALUE_ISNAN
460 (TREE_REAL_CST (VECTOR_CST_ELT (old_val.value, i)))
461 && !operand_equal_p (VECTOR_CST_ELT (old_val.value, i),
462 VECTOR_CST_ELT (new_val.value, i), 0))
463 return false;
464 return true;
465 }
466 else if (COMPLEX_FLOAT_TYPE_P (type)
467 && !HONOR_NANS (type))
468 {
469 if (!REAL_VALUE_ISNAN (TREE_REAL_CST (TREE_REALPART (old_val.value)))
470 && !operand_equal_p (TREE_REALPART (old_val.value),
471 TREE_REALPART (new_val.value), 0))
472 return false;
473 if (!REAL_VALUE_ISNAN (TREE_REAL_CST (TREE_IMAGPART (old_val.value)))
474 && !operand_equal_p (TREE_IMAGPART (old_val.value),
475 TREE_IMAGPART (new_val.value), 0))
476 return false;
477 return true;
478 }
479 return false;
480 }
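
/* Two hedged examples of the rules above: UNDEFINED -> CONSTANT ->
   VARYING is a valid monotone sequence, while CONSTANT 3 with a zero
   mask changing to CONSTANT 4 with a zero mask is not, because bits
   that are still claimed to be known would have to change.  */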
481
482 /* Set the value for variable VAR to NEW_VAL. Return true if the new
483 value is different from VAR's previous value. */
484
485 static bool
486 set_lattice_value (tree var, ccp_prop_value_t *new_val)
487 {
488 /* We can deal with old UNINITIALIZED values just fine here. */
489 ccp_prop_value_t *old_val = &const_val[SSA_NAME_VERSION (var)];
490
491 canonicalize_value (new_val);
492
493 /* We have to be careful to not go up the bitwise lattice
494 represented by the mask. Instead of dropping to VARYING
495 use the meet operator to retain a conservative value.
496 Missed optimizations like PR65851 make this necessary.
497 It also ensures we converge to a stable lattice solution. */
498 if (new_val->lattice_val == CONSTANT
499 && old_val->lattice_val == CONSTANT
500 && TREE_CODE (new_val->value) != SSA_NAME)
501 ccp_lattice_meet (new_val, old_val);
502
503 gcc_checking_assert (valid_lattice_transition (*old_val, *new_val));
504
505 /* If *OLD_VAL and NEW_VAL are the same, return false to inform the
506 caller that this was a non-transition. */
507 if (old_val->lattice_val != new_val->lattice_val
508 || (new_val->lattice_val == CONSTANT
509 && (TREE_CODE (new_val->value) != TREE_CODE (old_val->value)
510 || (TREE_CODE (new_val->value) == INTEGER_CST
511 && (new_val->mask != old_val->mask
512 || (wi::bit_and_not (wi::to_widest (old_val->value),
513 new_val->mask)
514 != wi::bit_and_not (wi::to_widest (new_val->value),
515 new_val->mask))))
516 || (TREE_CODE (new_val->value) != INTEGER_CST
517 && !operand_equal_p (new_val->value, old_val->value, 0)))))
518 {
519 /* ??? We would like to delay creation of INTEGER_CSTs from
520 partially constant values here. */
521
522 if (dump_file && (dump_flags & TDF_DETAILS))
523 {
524 dump_lattice_value (dump_file, "Lattice value changed to ", *new_val);
525 fprintf (dump_file, ". Adding SSA edges to worklist.\n");
526 }
527
528 *old_val = *new_val;
529
530 gcc_assert (new_val->lattice_val != UNINITIALIZED);
531 return true;
532 }
533
534 return false;
535 }
536
537 static ccp_prop_value_t get_value_for_expr (tree, bool);
538 static ccp_prop_value_t bit_value_binop (enum tree_code, tree, tree, tree);
539 static void bit_value_binop_1 (enum tree_code, tree, widest_int *, widest_int *,
540 tree, const widest_int &, const widest_int &,
541 tree, const widest_int &, const widest_int &);
542
543 /* Return a widest_int that can be used for bitwise simplifications
544 from VAL. */
545
546 static widest_int
547 value_to_wide_int (ccp_prop_value_t val)
548 {
549 if (val.value
550 && TREE_CODE (val.value) == INTEGER_CST)
551 return wi::to_widest (val.value);
552
553 return 0;
554 }
555
556 /* Return the value for the address expression EXPR based on alignment
557 information. */
558
559 static ccp_prop_value_t
560 get_value_from_alignment (tree expr)
561 {
562 tree type = TREE_TYPE (expr);
563 ccp_prop_value_t val;
564 unsigned HOST_WIDE_INT bitpos;
565 unsigned int align;
566
567 gcc_assert (TREE_CODE (expr) == ADDR_EXPR);
568
569 get_pointer_alignment_1 (expr, &align, &bitpos);
570 val.mask = (POINTER_TYPE_P (type) || TYPE_UNSIGNED (type)
571 ? wi::mask <widest_int> (TYPE_PRECISION (type), false)
572 : -1).and_not (align / BITS_PER_UNIT - 1);
573 val.lattice_val
574 = wi::sext (val.mask, TYPE_PRECISION (type)) == -1 ? VARYING : CONSTANT;
575 if (val.lattice_val == CONSTANT)
576 val.value = build_int_cstu (type, bitpos / BITS_PER_UNIT);
577 else
578 val.value = NULL_TREE;
579
580 return val;
581 }
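
/* For instance (an illustrative sketch): if EXPR is known to be
   32-byte aligned (ALIGN is 256 bits) except for a misalignment of
   4 bytes, then align / BITS_PER_UNIT - 1 == 31, so the low five
   mask bits are cleared -- those bits are known -- and the value is
   4, encoding "a multiple of 32, plus 4".  */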
582
583 /* Return the value for the tree operand EXPR. If FOR_BITS_P is true
584 return constant bits extracted from alignment information for
585 invariant addresses. */
586
587 static ccp_prop_value_t
588 get_value_for_expr (tree expr, bool for_bits_p)
589 {
590 ccp_prop_value_t val;
591
592 if (TREE_CODE (expr) == SSA_NAME)
593 {
594 val = *get_value (expr);
595 if (for_bits_p
596 && val.lattice_val == CONSTANT
597 && TREE_CODE (val.value) == ADDR_EXPR)
598 val = get_value_from_alignment (val.value);
599 /* Fall back to a copy value. */
600 if (!for_bits_p
601 && val.lattice_val == VARYING
602 && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (expr))
603 {
604 val.lattice_val = CONSTANT;
605 val.value = expr;
606 val.mask = -1;
607 }
608 }
609 else if (is_gimple_min_invariant (expr)
610 && (!for_bits_p || TREE_CODE (expr) != ADDR_EXPR))
611 {
612 val.lattice_val = CONSTANT;
613 val.value = expr;
614 val.mask = 0;
615 canonicalize_value (&val);
616 }
617 else if (TREE_CODE (expr) == ADDR_EXPR)
618 val = get_value_from_alignment (expr);
619 else
620 {
621 val.lattice_val = VARYING;
622 val.mask = -1;
623 val.value = NULL_TREE;
624 }
625
626 if (val.lattice_val == VARYING
627 && TYPE_UNSIGNED (TREE_TYPE (expr)))
628 val.mask = wi::zext (val.mask, TYPE_PRECISION (TREE_TYPE (expr)));
629
630 return val;
631 }
632
633 /* Return the likely CCP lattice value for STMT.
634
635 If STMT has no operands, then return CONSTANT.
636
637 Else if undefinedness of operands of STMT causes its value to be
638 undefined, then return UNDEFINED.
639
640 Else if any operands of STMT are constants, then return CONSTANT.
641
642 Else return VARYING. */
643
644 static ccp_lattice_t
645 likely_value (gimple *stmt)
646 {
647 bool has_constant_operand, has_undefined_operand, all_undefined_operands;
648 bool has_nsa_operand;
649 tree use;
650 ssa_op_iter iter;
651 unsigned i;
652
653 enum gimple_code code = gimple_code (stmt);
654
655 /* This function appears to be called only for assignments, calls,
656 conditionals, and switches, due to the logic in visit_stmt. */
657 gcc_assert (code == GIMPLE_ASSIGN
658 || code == GIMPLE_CALL
659 || code == GIMPLE_COND
660 || code == GIMPLE_SWITCH);
661
662 /* If the statement has volatile operands, it won't fold to a
663 constant value. */
664 if (gimple_has_volatile_ops (stmt))
665 return VARYING;
666
667 /* Arrive here for more complex cases. */
668 has_constant_operand = false;
669 has_undefined_operand = false;
670 all_undefined_operands = true;
671 has_nsa_operand = false;
672 FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
673 {
674 ccp_prop_value_t *val = get_value (use);
675
676 if (val->lattice_val == UNDEFINED)
677 has_undefined_operand = true;
678 else
679 all_undefined_operands = false;
680
681 if (val->lattice_val == CONSTANT)
682 has_constant_operand = true;
683
684 if (SSA_NAME_IS_DEFAULT_DEF (use)
685 || !prop_simulate_again_p (SSA_NAME_DEF_STMT (use)))
686 has_nsa_operand = true;
687 }
688
689 /* There may be constants in regular rhs operands. For calls we
690 have to ignore the lhs, fndecl and static chain; otherwise only
691 the lhs.
692 for (i = (is_gimple_call (stmt) ? 2 : 0) + gimple_has_lhs (stmt);
693 i < gimple_num_ops (stmt); ++i)
694 {
695 tree op = gimple_op (stmt, i);
696 if (!op || TREE_CODE (op) == SSA_NAME)
697 continue;
698 if (is_gimple_min_invariant (op))
699 has_constant_operand = true;
700 }
701
702 if (has_constant_operand)
703 all_undefined_operands = false;
704
705 if (has_undefined_operand
706 && code == GIMPLE_CALL
707 && gimple_call_internal_p (stmt))
708 switch (gimple_call_internal_fn (stmt))
709 {
710 /* These 3 builtins use the first argument just as a magic
711 way to find out a decl uid. */
712 case IFN_GOMP_SIMD_LANE:
713 case IFN_GOMP_SIMD_VF:
714 case IFN_GOMP_SIMD_LAST_LANE:
715 has_undefined_operand = false;
716 break;
717 default:
718 break;
719 }
720
721 /* If the operation combines operands like COMPLEX_EXPR, make sure to
722 not mark the result UNDEFINED if only one part of the result is
723 undefined. */
724 if (has_undefined_operand && all_undefined_operands)
725 return UNDEFINED;
726 else if (code == GIMPLE_ASSIGN && has_undefined_operand)
727 {
728 switch (gimple_assign_rhs_code (stmt))
729 {
730 /* Unary operators are handled with all_undefined_operands. */
731 case PLUS_EXPR:
732 case MINUS_EXPR:
733 case POINTER_PLUS_EXPR:
734 /* Not MIN_EXPR, MAX_EXPR. One VARYING operand may be selected.
735 Not bitwise operators, one VARYING operand may specify the
736 result completely. Not logical operators for the same reason.
737 Not COMPLEX_EXPR as one VARYING operand makes the result partly
738 not UNDEFINED. Not *DIV_EXPR, comparisons and shifts because
739 the undefined operand may be promoted. */
740 return UNDEFINED;
741
742 case ADDR_EXPR:
743 /* If any part of an address is UNDEFINED, like the index
744 of an ARRAY_EXPR, then treat the result as UNDEFINED. */
745 return UNDEFINED;
746
747 default:
748 ;
749 }
750 }
751 /* If there was an UNDEFINED operand but the result may not be UNDEFINED,
752 fall back to CONSTANT. During iteration UNDEFINED may still drop
753 to CONSTANT. */
754 if (has_undefined_operand)
755 return CONSTANT;
756
757 /* We do not consider virtual operands here -- a load from read-only
758 memory may have only VARYING virtual operands, but still be
759 constant. Also we can combine the stmt with definitions from
760 operands whose definitions are not simulated again. */
761 if (has_constant_operand
762 || has_nsa_operand
763 || gimple_references_memory_p (stmt))
764 return CONSTANT;
765
766 return VARYING;
767 }
768
769 /* Returns true if STMT cannot be constant. */
770
771 static bool
772 surely_varying_stmt_p (gimple *stmt)
773 {
774 /* If the statement has operands that we cannot handle, it cannot be
775 constant. */
776 if (gimple_has_volatile_ops (stmt))
777 return true;
778
779 /* If it is a call and does not return a value or is not a
780 builtin and not an indirect call or a call to a function with
781 an assume_aligned/alloc_align attribute, it is varying. */
782 if (is_gimple_call (stmt))
783 {
784 tree fndecl, fntype = gimple_call_fntype (stmt);
785 if (!gimple_call_lhs (stmt)
786 || ((fndecl = gimple_call_fndecl (stmt)) != NULL_TREE
787 && !DECL_BUILT_IN (fndecl)
788 && !lookup_attribute ("assume_aligned",
789 TYPE_ATTRIBUTES (fntype))
790 && !lookup_attribute ("alloc_align",
791 TYPE_ATTRIBUTES (fntype))))
792 return true;
793 }
794
795 /* Any other store operation is not interesting. */
796 else if (gimple_vdef (stmt))
797 return true;
798
799 /* Anything other than assignments and conditional jumps are not
800 interesting for CCP. */
801 if (gimple_code (stmt) != GIMPLE_ASSIGN
802 && gimple_code (stmt) != GIMPLE_COND
803 && gimple_code (stmt) != GIMPLE_SWITCH
804 && gimple_code (stmt) != GIMPLE_CALL)
805 return true;
806
807 return false;
808 }
809
810 /* Initialize local data structures for CCP. */
811
812 static void
813 ccp_initialize (void)
814 {
815 basic_block bb;
816
817 n_const_val = num_ssa_names;
818 const_val = XCNEWVEC (ccp_prop_value_t, n_const_val);
819
820 /* Initialize simulation flags for PHI nodes and statements. */
821 FOR_EACH_BB_FN (bb, cfun)
822 {
823 gimple_stmt_iterator i;
824
825 for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i))
826 {
827 gimple *stmt = gsi_stmt (i);
828 bool is_varying;
829
830 /* If the statement is a control insn, we must simulate it
831 at least once; otherwise the outgoing edges would never
832 get added. */
833 if (stmt_ends_bb_p (stmt))
834 is_varying = false;
835 else
836 is_varying = surely_varying_stmt_p (stmt);
837
838 if (is_varying)
839 {
840 tree def;
841 ssa_op_iter iter;
842
843 /* If the statement will not produce a constant, mark
844 all its outputs VARYING. */
845 FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
846 set_value_varying (def);
847 }
848 prop_set_simulate_again (stmt, !is_varying);
849 }
850 }
851
852 /* Now process PHI nodes. We never clear the simulate_again flag on
853 phi nodes, since we do not know which edges are executable yet,
854 except for phi nodes for virtual operands when we do not do store ccp. */
855 FOR_EACH_BB_FN (bb, cfun)
856 {
857 gphi_iterator i;
858
859 for (i = gsi_start_phis (bb); !gsi_end_p (i); gsi_next (&i))
860 {
861 gphi *phi = i.phi ();
862
863 if (virtual_operand_p (gimple_phi_result (phi)))
864 prop_set_simulate_again (phi, false);
865 else
866 prop_set_simulate_again (phi, true);
867 }
868 }
869 }
870
871 /* Debug count support. Reset the value of an ssa name to
872 VARYING when the total number of ssa names analyzed exceeds
873 the specified debug count. */
874
875 static void
876 do_dbg_cnt (void)
877 {
878 unsigned i;
879 for (i = 0; i < num_ssa_names; i++)
880 {
881 if (!dbg_cnt (ccp))
882 {
883 const_val[i].lattice_val = VARYING;
884 const_val[i].mask = -1;
885 const_val[i].value = NULL_TREE;
886 }
887 }
888 }
889
890
891 /* Do final substitution of propagated values, clean up the flowgraph and
892 free allocated storage. If NONZERO_P, record nonzero bits.
893
894 Return TRUE when something was optimized. */
895
896 static bool
897 ccp_finalize (bool nonzero_p)
898 {
899 bool something_changed;
900 unsigned i;
901
902 do_dbg_cnt ();
903
904 /* Derive alignment and misalignment information from partially
905 constant pointers in the lattice or nonzero bits from partially
906 constant integers. */
907 for (i = 1; i < num_ssa_names; ++i)
908 {
909 tree name = ssa_name (i);
910 ccp_prop_value_t *val;
911 unsigned int tem, align;
912
913 if (!name
914 || (!POINTER_TYPE_P (TREE_TYPE (name))
915 && (!INTEGRAL_TYPE_P (TREE_TYPE (name))
916 /* Don't record nonzero bits before IPA to avoid
917 using too much memory. */
918 || !nonzero_p)))
919 continue;
920
921 val = get_value (name);
922 if (val->lattice_val != CONSTANT
923 || TREE_CODE (val->value) != INTEGER_CST)
924 continue;
925
926 if (POINTER_TYPE_P (TREE_TYPE (name)))
927 {
928 /* Trailing mask bits specify the alignment, trailing value
929 bits the misalignment. */
930 tem = val->mask.to_uhwi ();
931 align = (tem & -tem);
932 if (align > 1)
933 set_ptr_info_alignment (get_ptr_info (name), align,
934 (TREE_INT_CST_LOW (val->value)
935 & (align - 1)));
936 }
937 else
938 {
939 unsigned int precision = TYPE_PRECISION (TREE_TYPE (val->value));
940 wide_int nonzero_bits = wide_int::from (val->mask, precision,
941 UNSIGNED) | val->value;
942 nonzero_bits &= get_nonzero_bits (name);
943 set_nonzero_bits (name, nonzero_bits);
944 }
945 }
946
947 /* Perform substitutions based on the known constant values. */
948 something_changed = substitute_and_fold (get_constant_value,
949 ccp_fold_stmt, true);
950
951 free (const_val);
952 const_val = NULL;
953 return something_changed;
954 }
955
956
957 /* Compute the meet operator between *VAL1 and *VAL2. Store the result
958 in VAL1.
959
960 any M UNDEFINED = any
961 any M VARYING = VARYING
962 Ci M Cj = Ci if (i == j)
963 Ci M Cj = VARYING if (i != j)
964 */
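
/* A worked sketch of the INTEGER_CST case handled below: meeting
   CONSTANT 4 (binary 100) with CONSTANT 6 (binary 110) ORs both
   masks with the XOR of the values, binary 010, so the result keeps
   value 4 with mask 010 -- bit 1 becomes unknown while bit 2 (set)
   and bit 0 (clear) remain known.  Only when no known bits are left
   does the result drop all the way to VARYING.  */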
965
966 static void
967 ccp_lattice_meet (ccp_prop_value_t *val1, ccp_prop_value_t *val2)
968 {
969 if (val1->lattice_val == UNDEFINED
970 /* For UNDEFINED M SSA_NAME we can't always use the SSA name because
971 its definition may not dominate the PHI node. Doing optimistic copy
972 propagation also causes a lot of gcc.dg/uninit-pred*.c FAILs. */
973 && (val2->lattice_val != CONSTANT
974 || TREE_CODE (val2->value) != SSA_NAME))
975 {
976 /* UNDEFINED M any = any */
977 *val1 = *val2;
978 }
979 else if (val2->lattice_val == UNDEFINED
980 /* See above. */
981 && (val1->lattice_val != CONSTANT
982 || TREE_CODE (val1->value) != SSA_NAME))
983 {
984 /* any M UNDEFINED = any
985 Nothing to do. VAL1 already contains the value we want. */
986 ;
987 }
988 else if (val1->lattice_val == VARYING
989 || val2->lattice_val == VARYING)
990 {
991 /* any M VARYING = VARYING. */
992 val1->lattice_val = VARYING;
993 val1->mask = -1;
994 val1->value = NULL_TREE;
995 }
996 else if (val1->lattice_val == CONSTANT
997 && val2->lattice_val == CONSTANT
998 && TREE_CODE (val1->value) == INTEGER_CST
999 && TREE_CODE (val2->value) == INTEGER_CST)
1000 {
1001 /* Ci M Cj = Ci if (i == j)
1002 Ci M Cj = VARYING if (i != j)
1003
1004 For INTEGER_CSTs mask unequal bits. If no equal bits remain,
1005 drop to varying. */
1006 val1->mask = (val1->mask | val2->mask
1007 | (wi::to_widest (val1->value)
1008 ^ wi::to_widest (val2->value)));
1009 if (wi::sext (val1->mask, TYPE_PRECISION (TREE_TYPE (val1->value))) == -1)
1010 {
1011 val1->lattice_val = VARYING;
1012 val1->value = NULL_TREE;
1013 }
1014 }
1015 else if (val1->lattice_val == CONSTANT
1016 && val2->lattice_val == CONSTANT
1017 && operand_equal_p (val1->value, val2->value, 0))
1018 {
1019 /* Ci M Cj = Ci if (i == j)
1020 Ci M Cj = VARYING if (i != j)
1021
1022 VAL1 already contains the value we want for equivalent values. */
1023 }
1024 else if (val1->lattice_val == CONSTANT
1025 && val2->lattice_val == CONSTANT
1026 && (TREE_CODE (val1->value) == ADDR_EXPR
1027 || TREE_CODE (val2->value) == ADDR_EXPR))
1028 {
1029 /* When unequal addresses are involved, try meeting for
1030 alignment. */
1031 ccp_prop_value_t tem = *val2;
1032 if (TREE_CODE (val1->value) == ADDR_EXPR)
1033 *val1 = get_value_for_expr (val1->value, true);
1034 if (TREE_CODE (val2->value) == ADDR_EXPR)
1035 tem = get_value_for_expr (val2->value, true);
1036 ccp_lattice_meet (val1, &tem);
1037 }
1038 else
1039 {
1040 /* Any other combination is VARYING. */
1041 val1->lattice_val = VARYING;
1042 val1->mask = -1;
1043 val1->value = NULL_TREE;
1044 }
1045 }
1046
1047
1048 /* Loop through the PHI_NODE's parameters for BLOCK and compare their
1049 lattice values to determine PHI_NODE's lattice value. The value of a
1050 PHI node is determined by calling ccp_lattice_meet with all the arguments
1051 of the PHI node that are incoming via executable edges. */
1052
1053 static enum ssa_prop_result
1054 ccp_visit_phi_node (gphi *phi)
1055 {
1056 unsigned i;
1057 ccp_prop_value_t new_val;
1058
1059 if (dump_file && (dump_flags & TDF_DETAILS))
1060 {
1061 fprintf (dump_file, "\nVisiting PHI node: ");
1062 print_gimple_stmt (dump_file, phi, 0, dump_flags);
1063 }
1064
1065 new_val.lattice_val = UNDEFINED;
1066 new_val.value = NULL_TREE;
1067 new_val.mask = 0;
1068
1069 bool first = true;
1070 bool non_exec_edge = false;
1071 for (i = 0; i < gimple_phi_num_args (phi); i++)
1072 {
1073 /* Compute the meet operator over all the PHI arguments flowing
1074 through executable edges. */
1075 edge e = gimple_phi_arg_edge (phi, i);
1076
1077 if (dump_file && (dump_flags & TDF_DETAILS))
1078 {
1079 fprintf (dump_file,
1080 "\n Argument #%d (%d -> %d %sexecutable)\n",
1081 i, e->src->index, e->dest->index,
1082 (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
1083 }
1084
1085 /* If the incoming edge is executable, compute the meet operator for
1086 the existing value of the PHI node and the current PHI argument. */
1087 if (e->flags & EDGE_EXECUTABLE)
1088 {
1089 tree arg = gimple_phi_arg (phi, i)->def;
1090 ccp_prop_value_t arg_val = get_value_for_expr (arg, false);
1091
1092 if (first)
1093 {
1094 new_val = arg_val;
1095 first = false;
1096 }
1097 else
1098 ccp_lattice_meet (&new_val, &arg_val);
1099
1100 if (dump_file && (dump_flags & TDF_DETAILS))
1101 {
1102 fprintf (dump_file, "\t");
1103 print_generic_expr (dump_file, arg, dump_flags);
1104 dump_lattice_value (dump_file, "\tValue: ", arg_val);
1105 fprintf (dump_file, "\n");
1106 }
1107
1108 if (new_val.lattice_val == VARYING)
1109 break;
1110 }
1111 else
1112 non_exec_edge = true;
1113 }
1114
1115 /* In case there were non-executable edges and the value is a copy,
1116 make sure its definition dominates the PHI node. */
1117 if (non_exec_edge
1118 && new_val.lattice_val == CONSTANT
1119 && TREE_CODE (new_val.value) == SSA_NAME
1120 && ! SSA_NAME_IS_DEFAULT_DEF (new_val.value)
1121 && ! dominated_by_p (CDI_DOMINATORS, gimple_bb (phi),
1122 gimple_bb (SSA_NAME_DEF_STMT (new_val.value))))
1123 {
1124 new_val.lattice_val = VARYING;
1125 new_val.value = NULL_TREE;
1126 new_val.mask = -1;
1127 }
1128
1129 if (dump_file && (dump_flags & TDF_DETAILS))
1130 {
1131 dump_lattice_value (dump_file, "\n PHI node value: ", new_val);
1132 fprintf (dump_file, "\n\n");
1133 }
1134
1135 /* Make the transition to the new value. */
1136 if (set_lattice_value (gimple_phi_result (phi), &new_val))
1137 {
1138 if (new_val.lattice_val == VARYING)
1139 return SSA_PROP_VARYING;
1140 else
1141 return SSA_PROP_INTERESTING;
1142 }
1143 else
1144 return SSA_PROP_NOT_INTERESTING;
1145 }
1146
1147 /* Return the constant value for OP, or OP itself otherwise. */
1148
1149 static tree
1150 valueize_op (tree op)
1151 {
1152 if (TREE_CODE (op) == SSA_NAME)
1153 {
1154 tree tem = get_constant_value (op);
1155 if (tem)
1156 return tem;
1157 }
1158 return op;
1159 }
1160
1161 /* Return the constant value for OP, but signal to not follow SSA
1162 edges if the definition may be simulated again. */
1163
1164 static tree
1165 valueize_op_1 (tree op)
1166 {
1167 if (TREE_CODE (op) == SSA_NAME)
1168 {
1169 /* If the definition may be simulated again we cannot follow
1170 this SSA edge as the SSA propagator does not necessarily
1171 re-visit the use. */
1172 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
1173 if (!gimple_nop_p (def_stmt)
1174 && prop_simulate_again_p (def_stmt))
1175 return NULL_TREE;
1176 tree tem = get_constant_value (op);
1177 if (tem)
1178 return tem;
1179 }
1180 return op;
1181 }
1182
1183 /* CCP specific front-end to the non-destructive constant folding
1184 routines.
1185
1186 Attempt to simplify the RHS of STMT knowing that one or more
1187 operands are constants.
1188
1189 If simplification is possible, return the simplified RHS,
1190 otherwise return the original RHS or NULL_TREE. */
1191
1192 static tree
1193 ccp_fold (gimple *stmt)
1194 {
1195 location_t loc = gimple_location (stmt);
1196 switch (gimple_code (stmt))
1197 {
1198 case GIMPLE_COND:
1199 {
1200 /* Handle comparison operators that can appear in GIMPLE form. */
1201 tree op0 = valueize_op (gimple_cond_lhs (stmt));
1202 tree op1 = valueize_op (gimple_cond_rhs (stmt));
1203 enum tree_code code = gimple_cond_code (stmt);
1204 return fold_binary_loc (loc, code, boolean_type_node, op0, op1);
1205 }
1206
1207 case GIMPLE_SWITCH:
1208 {
1209 /* Return the constant switch index. */
1210 return valueize_op (gimple_switch_index (as_a <gswitch *> (stmt)));
1211 }
1212
1213 case GIMPLE_ASSIGN:
1214 case GIMPLE_CALL:
1215 return gimple_fold_stmt_to_constant_1 (stmt,
1216 valueize_op, valueize_op_1);
1217
1218 default:
1219 gcc_unreachable ();
1220 }
1221 }
1222
1223 /* Apply the operation CODE in type TYPE to the value, mask pair
1224 RVAL and RMASK representing a value of type RTYPE and set
1225 the value, mask pair *VAL and *MASK to the result. */
1226
1227 static void
1228 bit_value_unop_1 (enum tree_code code, tree type,
1229 widest_int *val, widest_int *mask,
1230 tree rtype, const widest_int &rval, const widest_int &rmask)
1231 {
1232 switch (code)
1233 {
1234 case BIT_NOT_EXPR:
1235 *mask = rmask;
1236 *val = ~rval;
1237 break;
1238
1239 case NEGATE_EXPR:
1240 {
1241 widest_int temv, temm;
1242 /* Return ~rval + 1. */
1243 bit_value_unop_1 (BIT_NOT_EXPR, type, &temv, &temm, type, rval, rmask);
1244 bit_value_binop_1 (PLUS_EXPR, type, val, mask,
1245 type, temv, temm, type, 1, 0);
1246 break;
1247 }
1248
1249 CASE_CONVERT:
1250 {
1251 signop sgn;
1252
1253 /* First extend mask and value according to the original type. */
1254 sgn = TYPE_SIGN (rtype);
1255 *mask = wi::ext (rmask, TYPE_PRECISION (rtype), sgn);
1256 *val = wi::ext (rval, TYPE_PRECISION (rtype), sgn);
1257
1258 /* Then extend mask and value according to the target type. */
1259 sgn = TYPE_SIGN (type);
1260 *mask = wi::ext (*mask, TYPE_PRECISION (type), sgn);
1261 *val = wi::ext (*val, TYPE_PRECISION (type), sgn);
1262 break;
1263 }
1264
1265 default:
1266 *mask = -1;
1267 break;
1268 }
1269 }
1270
1271 /* Apply the operation CODE in type TYPE to the value, mask pairs
1272 R1VAL, R1MASK and R2VAL, R2MASK representing values of types R1TYPE
1273 and R2TYPE and set the value, mask pair *VAL and *MASK to the result. */
1274
1275 static void
1276 bit_value_binop_1 (enum tree_code code, tree type,
1277 widest_int *val, widest_int *mask,
1278 tree r1type, const widest_int &r1val,
1279 const widest_int &r1mask, tree r2type,
1280 const widest_int &r2val, const widest_int &r2mask)
1281 {
1282 signop sgn = TYPE_SIGN (type);
1283 int width = TYPE_PRECISION (type);
1284 bool swap_p = false;
1285
1286 /* Assume we'll get a constant result. Use an initial non-varying
1287 value; we fall back to varying in the end if necessary. */
1288 *mask = -1;
1289
1290 switch (code)
1291 {
1292 case BIT_AND_EXPR:
1293 /* The mask is constant where there is a known not
1294 set bit, (m1 | m2) & ((v1 | m1) & (v2 | m2)) */
1295 *mask = (r1mask | r2mask) & (r1val | r1mask) & (r2val | r2mask);
1296 *val = r1val & r2val;
1297 break;
1298
1299 case BIT_IOR_EXPR:
1300 /* The mask is constant where there is a known
1301 set bit, (m1 | m2) & ~((v1 & ~m1) | (v2 & ~m2)). */
1302 *mask = (r1mask | r2mask)
1303 .and_not (r1val.and_not (r1mask) | r2val.and_not (r2mask));
1304 *val = r1val | r2val;
1305 break;
1306
1307 case BIT_XOR_EXPR:
1308 /* m1 | m2 */
1309 *mask = r1mask | r2mask;
1310 *val = r1val ^ r2val;
1311 break;
1312
1313 case LROTATE_EXPR:
1314 case RROTATE_EXPR:
1315 if (r2mask == 0)
1316 {
1317 widest_int shift = r2val;
1318 if (shift == 0)
1319 {
1320 *mask = r1mask;
1321 *val = r1val;
1322 }
1323 else
1324 {
1325 if (wi::neg_p (shift))
1326 {
1327 shift = -shift;
1328 if (code == RROTATE_EXPR)
1329 code = LROTATE_EXPR;
1330 else
1331 code = RROTATE_EXPR;
1332 }
1333 if (code == RROTATE_EXPR)
1334 {
1335 *mask = wi::rrotate (r1mask, shift, width);
1336 *val = wi::rrotate (r1val, shift, width);
1337 }
1338 else
1339 {
1340 *mask = wi::lrotate (r1mask, shift, width);
1341 *val = wi::lrotate (r1val, shift, width);
1342 }
1343 }
1344 }
1345 break;
1346
1347 case LSHIFT_EXPR:
1348 case RSHIFT_EXPR:
1349 /* ??? We can handle partially known shift counts if we know
1350 their sign. That way we can tell that (x << (y | 8)) & 255
1351 is zero. */
1352 if (r2mask == 0)
1353 {
1354 widest_int shift = r2val;
1355 if (shift == 0)
1356 {
1357 *mask = r1mask;
1358 *val = r1val;
1359 }
1360 else
1361 {
1362 if (wi::neg_p (shift))
1363 {
1364 shift = -shift;
1365 if (code == RSHIFT_EXPR)
1366 code = LSHIFT_EXPR;
1367 else
1368 code = RSHIFT_EXPR;
1369 }
1370 if (code == RSHIFT_EXPR)
1371 {
1372 *mask = wi::rshift (wi::ext (r1mask, width, sgn), shift, sgn);
1373 *val = wi::rshift (wi::ext (r1val, width, sgn), shift, sgn);
1374 }
1375 else
1376 {
1377 *mask = wi::ext (r1mask << shift, width, sgn);
1378 *val = wi::ext (r1val << shift, width, sgn);
1379 }
1380 }
1381 }
1382 break;
1383
1384 case PLUS_EXPR:
1385 case POINTER_PLUS_EXPR:
1386 {
1387 /* Do the addition with unknown bits set to zero, to give carry-ins of
1388 zero wherever possible. */
1389 widest_int lo = r1val.and_not (r1mask) + r2val.and_not (r2mask);
1390 lo = wi::ext (lo, width, sgn);
1391 /* Do the addition with unknown bits set to one, to give carry-ins of
1392 one wherever possible. */
1393 widest_int hi = (r1val | r1mask) + (r2val | r2mask);
1394 hi = wi::ext (hi, width, sgn);
1395 /* Each bit in the result is known if (a) the corresponding bits in
1396 both inputs are known, and (b) the carry-in to that bit position
1397 is known. We can check condition (b) by seeing if we got the same
1398 result with minimised carries as with maximised carries. */
1399 *mask = r1mask | r2mask | (lo ^ hi);
1400 *mask = wi::ext (*mask, width, sgn);
1401 /* It shouldn't matter whether we choose lo or hi here. */
1402 *val = lo;
1403 break;
1404 }
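
    /* A numeric sketch of the carry trick above: adding value 0b0100
       with mask 0b0001 to the constant 0b0011 gives lo == 0b0111 and
       hi == 0b1000, so lo ^ hi == 0b1111 and all four low bits join
       the result mask; only bit positions that agree in both extreme
       sums (and are known in both inputs) stay known.  */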
1405
1406 case MINUS_EXPR:
1407 {
1408 widest_int temv, temm;
1409 bit_value_unop_1 (NEGATE_EXPR, r2type, &temv, &temm,
1410 r2type, r2val, r2mask);
1411 bit_value_binop_1 (PLUS_EXPR, type, val, mask,
1412 r1type, r1val, r1mask,
1413 r2type, temv, temm);
1414 break;
1415 }
1416
1417 case MULT_EXPR:
1418 {
1419 /* Just track trailing zeros in both operands and transfer
1420 them to the other. */
1421 int r1tz = wi::ctz (r1val | r1mask);
1422 int r2tz = wi::ctz (r2val | r2mask);
1423 if (r1tz + r2tz >= width)
1424 {
1425 *mask = 0;
1426 *val = 0;
1427 }
1428 else if (r1tz + r2tz > 0)
1429 {
1430 *mask = wi::ext (wi::mask <widest_int> (r1tz + r2tz, true),
1431 width, sgn);
1432 *val = 0;
1433 }
1434 break;
1435 }
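
    /* Sketch of the trailing-zero tracking above: if r1 is known to
       end in two zero bits (r1tz == 2) and r2 in one (r2tz == 1),
       the product must end in at least three zero bits, so the low
       three bits become known zeros and everything above them stays
       unknown.  */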
1436
1437 case EQ_EXPR:
1438 case NE_EXPR:
1439 {
1440 widest_int m = r1mask | r2mask;
1441 if (r1val.and_not (m) != r2val.and_not (m))
1442 {
1443 *mask = 0;
1444 *val = ((code == EQ_EXPR) ? 0 : 1);
1445 }
1446 else
1447 {
1448 /* We know the result of a comparison is always one or zero. */
1449 *mask = 1;
1450 *val = 0;
1451 }
1452 break;
1453 }
1454
1455 case GE_EXPR:
1456 case GT_EXPR:
1457 swap_p = true;
1458 code = swap_tree_comparison (code);
1459 /* Fall through. */
1460 case LT_EXPR:
1461 case LE_EXPR:
1462 {
1463 int minmax, maxmin;
1464
1465 const widest_int &o1val = swap_p ? r2val : r1val;
1466 const widest_int &o1mask = swap_p ? r2mask : r1mask;
1467 const widest_int &o2val = swap_p ? r1val : r2val;
1468 const widest_int &o2mask = swap_p ? r1mask : r2mask;
1469
1470 /* If the most significant bits are not known we know nothing. */
1471 if (wi::neg_p (o1mask) || wi::neg_p (o2mask))
1472 break;
1473
1474 /* For comparisons the signedness is in the comparison operands. */
1475 sgn = TYPE_SIGN (r1type);
1476
1477 /* If we know the most significant bits we know the value
1478 ranges by means of treating varying bits as zero
1479 or one. Do a cross comparison of the max/min pairs. */
1480 maxmin = wi::cmp (o1val | o1mask, o2val.and_not (o2mask), sgn);
1481 minmax = wi::cmp (o1val.and_not (o1mask), o2val | o2mask, sgn);
1482 if (maxmin < 0) /* o1 is less than o2. */
1483 {
1484 *mask = 0;
1485 *val = 1;
1486 }
1487 else if (minmax > 0) /* o1 is not less or equal to o2. */
1488 {
1489 *mask = 0;
1490 *val = 0;
1491 }
1492 else if (maxmin == minmax) /* o1 and o2 are equal. */
1493 {
1494 /* This probably should never happen as we'd have
1495 folded the thing during fully constant value folding. */
1496 *mask = 0;
1497 *val = (code == LE_EXPR ? 1 : 0);
1498 }
1499 else
1500 {
1501 /* We know the result of a comparison is always one or zero. */
1502 *mask = 1;
1503 *val = 0;
1504 }
1505 break;
1506 }
1507
1508 default:;
1509 }
1510 }
1511
1512 /* Return the propagation value when applying the operation CODE to
1513 the value RHS yielding type TYPE. */
1514
1515 static ccp_prop_value_t
1516 bit_value_unop (enum tree_code code, tree type, tree rhs)
1517 {
1518 ccp_prop_value_t rval = get_value_for_expr (rhs, true);
1519 widest_int value, mask;
1520 ccp_prop_value_t val;
1521
1522 if (rval.lattice_val == UNDEFINED)
1523 return rval;
1524
1525 gcc_assert ((rval.lattice_val == CONSTANT
1526 && TREE_CODE (rval.value) == INTEGER_CST)
1527 || wi::sext (rval.mask, TYPE_PRECISION (TREE_TYPE (rhs))) == -1);
1528 bit_value_unop_1 (code, type, &value, &mask,
1529 TREE_TYPE (rhs), value_to_wide_int (rval), rval.mask);
1530 if (wi::sext (mask, TYPE_PRECISION (type)) != -1)
1531 {
1532 val.lattice_val = CONSTANT;
1533 val.mask = mask;
1534 /* ??? Delay building trees here. */
1535 val.value = wide_int_to_tree (type, value);
1536 }
1537 else
1538 {
1539 val.lattice_val = VARYING;
1540 val.value = NULL_TREE;
1541 val.mask = -1;
1542 }
1543 return val;
1544 }
1545
1546 /* Return the propagation value when applying the operation CODE to
1547 the values RHS1 and RHS2 yielding type TYPE. */
1548
1549 static ccp_prop_value_t
1550 bit_value_binop (enum tree_code code, tree type, tree rhs1, tree rhs2)
1551 {
1552 ccp_prop_value_t r1val = get_value_for_expr (rhs1, true);
1553 ccp_prop_value_t r2val = get_value_for_expr (rhs2, true);
1554 widest_int value, mask;
1555 ccp_prop_value_t val;
1556
1557 if (r1val.lattice_val == UNDEFINED
1558 || r2val.lattice_val == UNDEFINED)
1559 {
1560 val.lattice_val = VARYING;
1561 val.value = NULL_TREE;
1562 val.mask = -1;
1563 return val;
1564 }
1565
1566 gcc_assert ((r1val.lattice_val == CONSTANT
1567 && TREE_CODE (r1val.value) == INTEGER_CST)
1568 || wi::sext (r1val.mask,
1569 TYPE_PRECISION (TREE_TYPE (rhs1))) == -1);
1570 gcc_assert ((r2val.lattice_val == CONSTANT
1571 && TREE_CODE (r2val.value) == INTEGER_CST)
1572 || wi::sext (r2val.mask,
1573 TYPE_PRECISION (TREE_TYPE (rhs2))) == -1);
1574 bit_value_binop_1 (code, type, &value, &mask,
1575 TREE_TYPE (rhs1), value_to_wide_int (r1val), r1val.mask,
1576 TREE_TYPE (rhs2), value_to_wide_int (r2val), r2val.mask);
1577 if (wi::sext (mask, TYPE_PRECISION (type)) != -1)
1578 {
1579 val.lattice_val = CONSTANT;
1580 val.mask = mask;
1581 /* ??? Delay building trees here. */
1582 val.value = wide_int_to_tree (type, value);
1583 }
1584 else
1585 {
1586 val.lattice_val = VARYING;
1587 val.value = NULL_TREE;
1588 val.mask = -1;
1589 }
1590 return val;
1591 }
1592
1593 /* Return the propagation value for __builtin_assume_aligned
1594 and functions with assume_aligned or alloc_align attributes.
1595 For __builtin_assume_aligned, ATTR is NULL_TREE,
1596 for assume_aligned attribute ATTR is non-NULL and ALLOC_ALIGNED
1597 is false, for alloc_aligned attribute ATTR is non-NULL and
1598 ALLOC_ALIGNED is true. */
1599
1600 static ccp_prop_value_t
1601 bit_value_assume_aligned (gimple *stmt, tree attr, ccp_prop_value_t ptrval,
1602 bool alloc_aligned)
1603 {
1604 tree align, misalign = NULL_TREE, type;
1605 unsigned HOST_WIDE_INT aligni, misaligni = 0;
1606 ccp_prop_value_t alignval;
1607 widest_int value, mask;
1608 ccp_prop_value_t val;
1609
1610 if (attr == NULL_TREE)
1611 {
1612 tree ptr = gimple_call_arg (stmt, 0);
1613 type = TREE_TYPE (ptr);
1614 ptrval = get_value_for_expr (ptr, true);
1615 }
1616 else
1617 {
1618 tree lhs = gimple_call_lhs (stmt);
1619 type = TREE_TYPE (lhs);
1620 }
1621
1622 if (ptrval.lattice_val == UNDEFINED)
1623 return ptrval;
1624 gcc_assert ((ptrval.lattice_val == CONSTANT
1625 && TREE_CODE (ptrval.value) == INTEGER_CST)
1626 || wi::sext (ptrval.mask, TYPE_PRECISION (type)) == -1);
1627 if (attr == NULL_TREE)
1628 {
1629 /* Get aligni and misaligni from __builtin_assume_aligned. */
1630 align = gimple_call_arg (stmt, 1);
1631 if (!tree_fits_uhwi_p (align))
1632 return ptrval;
1633 aligni = tree_to_uhwi (align);
1634 if (gimple_call_num_args (stmt) > 2)
1635 {
1636 misalign = gimple_call_arg (stmt, 2);
1637 if (!tree_fits_uhwi_p (misalign))
1638 return ptrval;
1639 misaligni = tree_to_uhwi (misalign);
1640 }
1641 }
1642 else
1643 {
1644 /* Get aligni and misaligni from assume_aligned or
1645 alloc_align attributes. */
1646 if (TREE_VALUE (attr) == NULL_TREE)
1647 return ptrval;
1648 attr = TREE_VALUE (attr);
1649 align = TREE_VALUE (attr);
1650 if (!tree_fits_uhwi_p (align))
1651 return ptrval;
1652 aligni = tree_to_uhwi (align);
1653 if (alloc_aligned)
1654 {
1655 if (aligni == 0 || aligni > gimple_call_num_args (stmt))
1656 return ptrval;
1657 align = gimple_call_arg (stmt, aligni - 1);
1658 if (!tree_fits_uhwi_p (align))
1659 return ptrval;
1660 aligni = tree_to_uhwi (align);
1661 }
1662 else if (TREE_CHAIN (attr) && TREE_VALUE (TREE_CHAIN (attr)))
1663 {
1664 misalign = TREE_VALUE (TREE_CHAIN (attr));
1665 if (!tree_fits_uhwi_p (misalign))
1666 return ptrval;
1667 misaligni = tree_to_uhwi (misalign);
1668 }
1669 }
1670 if (aligni <= 1 || (aligni & (aligni - 1)) != 0 || misaligni >= aligni)
1671 return ptrval;
1672
1673 align = build_int_cst_type (type, -aligni);
1674 alignval = get_value_for_expr (align, true);
1675 bit_value_binop_1 (BIT_AND_EXPR, type, &value, &mask,
1676 type, value_to_wide_int (ptrval), ptrval.mask,
1677 type, value_to_wide_int (alignval), alignval.mask);
1678 if (wi::sext (mask, TYPE_PRECISION (type)) != -1)
1679 {
1680 val.lattice_val = CONSTANT;
1681 val.mask = mask;
1682 gcc_assert ((mask.to_uhwi () & (aligni - 1)) == 0);
1683 gcc_assert ((value.to_uhwi () & (aligni - 1)) == 0);
1684 value |= misaligni;
1685 /* ??? Delay building trees here. */
1686 val.value = wide_int_to_tree (type, value);
1687 }
1688 else
1689 {
1690 val.lattice_val = VARYING;
1691 val.value = NULL_TREE;
1692 val.mask = -1;
1693 }
1694 return val;
1695 }
1696
1697 /* Evaluate statement STMT.
1698 Valid only for assignments, calls, conditionals, and switches. */
1699
1700 static ccp_prop_value_t
1701 evaluate_stmt (gimple *stmt)
1702 {
1703 ccp_prop_value_t val;
1704 tree simplified = NULL_TREE;
1705 ccp_lattice_t likelyvalue = likely_value (stmt);
1706 bool is_constant = false;
1707 unsigned int align;
1708
1709 if (dump_file && (dump_flags & TDF_DETAILS))
1710 {
1711 fprintf (dump_file, "which is likely ");
1712 switch (likelyvalue)
1713 {
1714 case CONSTANT:
1715 fprintf (dump_file, "CONSTANT");
1716 break;
1717 case UNDEFINED:
1718 fprintf (dump_file, "UNDEFINED");
1719 break;
1720 case VARYING:
1721 fprintf (dump_file, "VARYING");
1722 break;
1723 default:;
1724 }
1725 fprintf (dump_file, "\n");
1726 }
1727
1728 /* If the statement is likely to have a CONSTANT result, then try
1729 to fold the statement to determine the constant value. */
1730 /* FIXME. This is the only place that we call ccp_fold.
1731 Since likely_value never returns CONSTANT for calls, we will
1732 not attempt to fold them, including builtins that may profit. */
1733 if (likelyvalue == CONSTANT)
1734 {
1735 fold_defer_overflow_warnings ();
1736 simplified = ccp_fold (stmt);
1737 if (simplified && TREE_CODE (simplified) == SSA_NAME)
1738 {
1739 val = *get_value (simplified);
1740 if (val.lattice_val != VARYING)
1741 {
1742 fold_undefer_overflow_warnings (true, stmt, 0);
1743 return val;
1744 }
1745 }
1746 is_constant = simplified && is_gimple_min_invariant (simplified);
1747 fold_undefer_overflow_warnings (is_constant, stmt, 0);
1748 if (is_constant)
1749 {
1750 /* The statement produced a constant value. */
1751 val.lattice_val = CONSTANT;
1752 val.value = simplified;
1753 val.mask = 0;
1754 return val;
1755 }
1756 }
1757 /* If the statement is likely to have a VARYING result, then do not
1758 bother folding the statement. */
1759 else if (likelyvalue == VARYING)
1760 {
1761 enum gimple_code code = gimple_code (stmt);
1762 if (code == GIMPLE_ASSIGN)
1763 {
1764 enum tree_code subcode = gimple_assign_rhs_code (stmt);
1765
1766 /* Other cases cannot satisfy is_gimple_min_invariant
1767 without folding. */
1768 if (get_gimple_rhs_class (subcode) == GIMPLE_SINGLE_RHS)
1769 simplified = gimple_assign_rhs1 (stmt);
1770 }
1771 else if (code == GIMPLE_SWITCH)
1772 simplified = gimple_switch_index (as_a <gswitch *> (stmt));
1773 else
1774 /* These cannot satisfy is_gimple_min_invariant without folding. */
1775 gcc_assert (code == GIMPLE_CALL || code == GIMPLE_COND);
1776 is_constant = simplified && is_gimple_min_invariant (simplified);
1777 if (is_constant)
1778 {
1779 /* The statement produced a constant value. */
1780 val.lattice_val = CONSTANT;
1781 val.value = simplified;
1782 val.mask = 0;
1783 }
1784 }
1785 /* If the statement result is likely UNDEFINED, make it so. */
1786 else if (likelyvalue == UNDEFINED)
1787 {
1788 val.lattice_val = UNDEFINED;
1789 val.value = NULL_TREE;
1790 val.mask = 0;
1791 return val;
1792 }
1793
1794 /* Resort to simplification for bitwise tracking. */
1795 if (flag_tree_bit_ccp
1796 && (likelyvalue == CONSTANT || is_gimple_call (stmt)
1797 || (gimple_assign_single_p (stmt)
1798 && gimple_assign_rhs_code (stmt) == ADDR_EXPR))
1799 && !is_constant)
1800 {
1801 enum gimple_code code = gimple_code (stmt);
1802 val.lattice_val = VARYING;
1803 val.value = NULL_TREE;
1804 val.mask = -1;
1805 if (code == GIMPLE_ASSIGN)
1806 {
1807 enum tree_code subcode = gimple_assign_rhs_code (stmt);
1808 tree rhs1 = gimple_assign_rhs1 (stmt);
1809 tree lhs = gimple_assign_lhs (stmt);
1810 if ((INTEGRAL_TYPE_P (TREE_TYPE (lhs))
1811 || POINTER_TYPE_P (TREE_TYPE (lhs)))
1812 && (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
1813 || POINTER_TYPE_P (TREE_TYPE (rhs1))))
1814 switch (get_gimple_rhs_class (subcode))
1815 {
1816 case GIMPLE_SINGLE_RHS:
1817 val = get_value_for_expr (rhs1, true);
1818 break;
1819
1820 case GIMPLE_UNARY_RHS:
1821 val = bit_value_unop (subcode, TREE_TYPE (lhs), rhs1);
1822 break;
1823
1824 case GIMPLE_BINARY_RHS:
1825 val = bit_value_binop (subcode, TREE_TYPE (lhs), rhs1,
1826 gimple_assign_rhs2 (stmt));
1827 break;
1828
1829 default:;
1830 }
1831 }
1832 else if (code == GIMPLE_COND)
1833 {
1834 enum tree_code code = gimple_cond_code (stmt);
1835 tree rhs1 = gimple_cond_lhs (stmt);
1836 tree rhs2 = gimple_cond_rhs (stmt);
1837 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
1838 || POINTER_TYPE_P (TREE_TYPE (rhs1)))
1839 val = bit_value_binop (code, TREE_TYPE (rhs1), rhs1, rhs2);
1840 }
1841 else if (gimple_call_builtin_p (stmt, BUILT_IN_NORMAL))
1842 {
1843 tree fndecl = gimple_call_fndecl (stmt);
1844 switch (DECL_FUNCTION_CODE (fndecl))
1845 {
1846 case BUILT_IN_MALLOC:
1847 case BUILT_IN_REALLOC:
1848 case BUILT_IN_CALLOC:
1849 case BUILT_IN_STRDUP:
1850 case BUILT_IN_STRNDUP:
1851 val.lattice_val = CONSTANT;
1852 val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0);
1853 val.mask = ~((HOST_WIDE_INT) MALLOC_ABI_ALIGNMENT
1854 / BITS_PER_UNIT - 1);
1855 break;
1856
1857 case BUILT_IN_ALLOCA:
1858 case BUILT_IN_ALLOCA_WITH_ALIGN:
1859 align = (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_ALLOCA_WITH_ALIGN
1860 ? TREE_INT_CST_LOW (gimple_call_arg (stmt, 1))
1861 : BIGGEST_ALIGNMENT);
1862 val.lattice_val = CONSTANT;
1863 val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0);
1864 val.mask = ~((HOST_WIDE_INT) align / BITS_PER_UNIT - 1);
1865 break;
1866
1867 /* These builtins return their first argument, unmodified. */
1868 case BUILT_IN_MEMCPY:
1869 case BUILT_IN_MEMMOVE:
1870 case BUILT_IN_MEMSET:
1871 case BUILT_IN_STRCPY:
1872 case BUILT_IN_STRNCPY:
1873 case BUILT_IN_MEMCPY_CHK:
1874 case BUILT_IN_MEMMOVE_CHK:
1875 case BUILT_IN_MEMSET_CHK:
1876 case BUILT_IN_STRCPY_CHK:
1877 case BUILT_IN_STRNCPY_CHK:
1878 val = get_value_for_expr (gimple_call_arg (stmt, 0), true);
1879 break;
1880
1881 case BUILT_IN_ASSUME_ALIGNED:
1882 val = bit_value_assume_aligned (stmt, NULL_TREE, val, false);
1883 break;
1884
1885 case BUILT_IN_ALIGNED_ALLOC:
1886 {
1887 tree align = get_constant_value (gimple_call_arg (stmt, 0));
1888 if (align
1889 && tree_fits_uhwi_p (align))
1890 {
1891 unsigned HOST_WIDE_INT aligni = tree_to_uhwi (align);
1892 if (aligni > 1
1893 					/* align must be a power of two.  */
1894 && (aligni & (aligni - 1)) == 0)
1895 {
1896 val.lattice_val = CONSTANT;
1897 val.value = build_int_cst (ptr_type_node, 0);
1898 val.mask = -aligni;
1899 }
1900 }
1901 break;
1902 }
1903
1904 default:;
1905 }
1906 }
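      /* A worked example (a sketch; the exact constant is target-dependent):
	 on a target where MALLOC_ABI_ALIGNMENT is 128 bits, p_1 = malloc (n_2)
	 above yields value 0 with mask ~(HOST_WIDE_INT) 15, i.e. the low four
	 bits of p_1 are known to be zero and all other bits are unknown.  */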
1907 if (is_gimple_call (stmt) && gimple_call_lhs (stmt))
1908 {
1909 tree fntype = gimple_call_fntype (stmt);
1910 if (fntype)
1911 {
1912 tree attrs = lookup_attribute ("assume_aligned",
1913 TYPE_ATTRIBUTES (fntype));
1914 if (attrs)
1915 val = bit_value_assume_aligned (stmt, attrs, val, false);
1916 attrs = lookup_attribute ("alloc_align",
1917 TYPE_ATTRIBUTES (fntype));
1918 if (attrs)
1919 val = bit_value_assume_aligned (stmt, attrs, val, true);
1920 }
1921 }
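      /* E.g. for a callee declared (an illustrative declaration)
	   void *get_buf (void) __attribute__((assume_aligned (64)));
	 the lattice value of the call's lhs is refined so that the low six
	 bits of the returned pointer are known to be zero.  */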
1922 is_constant = (val.lattice_val == CONSTANT);
1923 }
1924
1925 if (flag_tree_bit_ccp
1926 && ((is_constant && TREE_CODE (val.value) == INTEGER_CST)
1927 || !is_constant)
1928 && gimple_get_lhs (stmt)
1929 && TREE_CODE (gimple_get_lhs (stmt)) == SSA_NAME)
1930 {
1931 tree lhs = gimple_get_lhs (stmt);
1932 wide_int nonzero_bits = get_nonzero_bits (lhs);
1933 if (nonzero_bits != -1)
1934 {
1935 if (!is_constant)
1936 {
1937 val.lattice_val = CONSTANT;
1938 val.value = build_zero_cst (TREE_TYPE (lhs));
1939 val.mask = extend_mask (nonzero_bits, TYPE_SIGN (TREE_TYPE (lhs)));
1940 is_constant = true;
1941 }
1942 else
1943 {
1944 if (wi::bit_and_not (val.value, nonzero_bits) != 0)
1945 val.value = wide_int_to_tree (TREE_TYPE (lhs),
1946 nonzero_bits & val.value);
1947 if (nonzero_bits == 0)
1948 val.mask = 0;
1949 else
1950 val.mask = val.mask & extend_mask (nonzero_bits,
1951 TYPE_SIGN (TREE_TYPE (lhs)));
1952 }
1953 }
1954 }
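  /* E.g. if range information recorded nonzero bits 0xff for unsigned lhs_1
     and nothing else was known, the result is value 0 with mask 0xff: the
     low eight bits are unknown and all higher bits are known to be zero.  */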
1955
1956 /* The statement produced a nonconstant value. */
1957 if (!is_constant)
1958 {
1959 /* The statement produced a copy. */
1960 if (simplified && TREE_CODE (simplified) == SSA_NAME
1961 && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (simplified))
1962 {
1963 val.lattice_val = CONSTANT;
1964 val.value = simplified;
1965 val.mask = -1;
1966 }
1967 /* The statement is VARYING. */
1968 else
1969 {
1970 val.lattice_val = VARYING;
1971 val.value = NULL_TREE;
1972 val.mask = -1;
1973 }
1974 }
1975
1976 return val;
1977 }
1978
1979 typedef hash_table<nofree_ptr_hash<gimple> > gimple_htab;
1980
1981 /* Given a BUILT_IN_STACK_SAVE value SAVED_VAL, insert a clobber of VAR before
1982 each matching BUILT_IN_STACK_RESTORE. Mark visited phis in VISITED. */
1983
1984 static void
1985 insert_clobber_before_stack_restore (tree saved_val, tree var,
1986 gimple_htab **visited)
1987 {
1988 gimple *stmt;
1989 gassign *clobber_stmt;
1990 tree clobber;
1991 imm_use_iterator iter;
1992 gimple_stmt_iterator i;
1993 gimple **slot;
1994
1995 FOR_EACH_IMM_USE_STMT (stmt, iter, saved_val)
1996 if (gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
1997 {
1998 clobber = build_constructor (TREE_TYPE (var),
1999 NULL);
2000 TREE_THIS_VOLATILE (clobber) = 1;
2001 clobber_stmt = gimple_build_assign (var, clobber);
2002
2003 i = gsi_for_stmt (stmt);
2004 gsi_insert_before (&i, clobber_stmt, GSI_SAME_STMT);
2005 }
2006 else if (gimple_code (stmt) == GIMPLE_PHI)
2007 {
2008 if (!*visited)
2009 *visited = new gimple_htab (10);
2010
2011 slot = (*visited)->find_slot (stmt, INSERT);
2012 if (*slot != NULL)
2013 continue;
2014
2015 *slot = stmt;
2016 insert_clobber_before_stack_restore (gimple_phi_result (stmt), var,
2017 visited);
2018 }
2019 else if (gimple_assign_ssa_name_copy_p (stmt))
2020 insert_clobber_before_stack_restore (gimple_assign_lhs (stmt), var,
2021 visited);
2022 else if (chkp_gimple_call_builtin_p (stmt, BUILT_IN_CHKP_BNDRET))
2023 continue;
2024 else
2025 gcc_assert (is_gimple_debug (stmt));
2026 }
2027
2028 /* Advance the iterator to the previous non-debug gimple statement in the same
2029 or a dominating basic block. */
2030
2031 static inline void
2032 gsi_prev_dom_bb_nondebug (gimple_stmt_iterator *i)
2033 {
2034 basic_block dom;
2035
2036 gsi_prev_nondebug (i);
2037 while (gsi_end_p (*i))
2038 {
2039 dom = get_immediate_dominator (CDI_DOMINATORS, i->bb);
2040 if (dom == NULL || dom == ENTRY_BLOCK_PTR_FOR_FN (cfun))
2041 return;
2042
2043 *i = gsi_last_bb (dom);
2044 }
2045 }
2046
2047 /* Find a BUILT_IN_STACK_SAVE dominating gsi_stmt (I), and insert
2048 a clobber of VAR before each matching BUILT_IN_STACK_RESTORE.
2049
2050 It is possible that BUILT_IN_STACK_SAVE cannot be found in a dominator when a
2051 previous pass (such as DOM) duplicated it along multiple paths to a BB. In
2052 that case the function gives up without inserting the clobbers. */
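/* For example (a sketch; the SSA names and the variable are illustrative):

     save_1 = __builtin_stack_save ();
     p_2 = &D.1234;                      <-- folded alloca result
     ...
     D.1234 ={v} {CLOBBER};              <-- clobber inserted by this function
     __builtin_stack_restore (save_1);  */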
2053
2054 static void
2055 insert_clobbers_for_var (gimple_stmt_iterator i, tree var)
2056 {
2057 gimple *stmt;
2058 tree saved_val;
2059 gimple_htab *visited = NULL;
2060
2061 for (; !gsi_end_p (i); gsi_prev_dom_bb_nondebug (&i))
2062 {
2063 stmt = gsi_stmt (i);
2064
2065 if (!gimple_call_builtin_p (stmt, BUILT_IN_STACK_SAVE))
2066 continue;
2067
2068 saved_val = gimple_call_lhs (stmt);
2069 if (saved_val == NULL_TREE)
2070 continue;
2071
2072 insert_clobber_before_stack_restore (saved_val, var, &visited);
2073 break;
2074 }
2075
2076 delete visited;
2077 }
2078
2079 /* Detect a __builtin_alloca_with_align with a constant size argument. If one
2080 is found, declare a fixed-size array in its place and return the address
2081 of the array; otherwise return NULL_TREE. */
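/* For example (a sketch; the variable name is illustrative),

     p_5 = __builtin_alloca_with_align (16, 32);

   folds to &D.1234, where D.1234 is a fresh 16-element array of 8-bit
   unsigned elements whose DECL_ALIGN is 32 bits.  */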
2082
2083 static tree
2084 fold_builtin_alloca_with_align (gimple *stmt)
2085 {
2086 unsigned HOST_WIDE_INT size, threshold, n_elem;
2087 tree lhs, arg, block, var, elem_type, array_type;
2088
2089 /* Get lhs. */
2090 lhs = gimple_call_lhs (stmt);
2091 if (lhs == NULL_TREE)
2092 return NULL_TREE;
2093
2094 /* Detect constant argument. */
2095 arg = get_constant_value (gimple_call_arg (stmt, 0));
2096 if (arg == NULL_TREE
2097 || TREE_CODE (arg) != INTEGER_CST
2098 || !tree_fits_uhwi_p (arg))
2099 return NULL_TREE;
2100
2101 size = tree_to_uhwi (arg);
2102
2103 /* Heuristic: don't fold large allocas. */
2104 threshold = (unsigned HOST_WIDE_INT)PARAM_VALUE (PARAM_LARGE_STACK_FRAME);
2105 /* In case the alloca is located at function entry, it has the same lifetime
2106 as a declared array, so we allow a larger size. */
2107 block = gimple_block (stmt);
2108 if (!(cfun->after_inlining
2109 && block
2110 && TREE_CODE (BLOCK_SUPERCONTEXT (block)) == FUNCTION_DECL))
2111 threshold /= 10;
2112 if (size > threshold)
2113 return NULL_TREE;
2114
2115 /* Declare array. */
2116 elem_type = build_nonstandard_integer_type (BITS_PER_UNIT, 1);
2117 n_elem = size * 8 / BITS_PER_UNIT;
2118 array_type = build_array_type_nelts (elem_type, n_elem);
2119 var = create_tmp_var (array_type);
2120 SET_DECL_ALIGN (var, TREE_INT_CST_LOW (gimple_call_arg (stmt, 1)));
2121 {
2122 struct ptr_info_def *pi = SSA_NAME_PTR_INFO (lhs);
2123 if (pi != NULL && !pi->pt.anything)
2124 {
2125 bool singleton_p;
2126 unsigned uid;
2127 singleton_p = pt_solution_singleton_p (&pi->pt, &uid);
2128 gcc_assert (singleton_p);
2129 SET_DECL_PT_UID (var, uid);
2130 }
2131 }
2132
2133 /* Fold alloca to the address of the array. */
2134 return fold_convert (TREE_TYPE (lhs), build_fold_addr_expr (var));
2135 }
2136
2137 /* Fold the stmt at *GSI with CCP specific information that propagating
2138 and regular folding does not catch. */
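/* For example, if evaluate_stmt computes CONSTANT zero with mask 0 for the
   predicate of

     if (x_1 == y_2) goto <bb 3>; else goto <bb 4>;

   the condition is rewritten with gimple_cond_make_false, even when a type
   mismatch would keep the individual constants from being propagated into
   the comparison itself.  */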
2139
2140 static bool
2141 ccp_fold_stmt (gimple_stmt_iterator *gsi)
2142 {
2143 gimple *stmt = gsi_stmt (*gsi);
2144
2145 switch (gimple_code (stmt))
2146 {
2147 case GIMPLE_COND:
2148 {
2149 gcond *cond_stmt = as_a <gcond *> (stmt);
2150 ccp_prop_value_t val;
2151 /* Statement evaluation will handle type mismatches in constants
2152 more gracefully than the final propagation. This allows us to
2153 fold more conditionals here. */
2154 val = evaluate_stmt (stmt);
2155 if (val.lattice_val != CONSTANT
2156 || val.mask != 0)
2157 return false;
2158
2159 if (dump_file)
2160 {
2161 fprintf (dump_file, "Folding predicate ");
2162 print_gimple_expr (dump_file, stmt, 0, 0);
2163 fprintf (dump_file, " to ");
2164 print_generic_expr (dump_file, val.value, 0);
2165 fprintf (dump_file, "\n");
2166 }
2167
2168 if (integer_zerop (val.value))
2169 gimple_cond_make_false (cond_stmt);
2170 else
2171 gimple_cond_make_true (cond_stmt);
2172
2173 return true;
2174 }
2175
2176 case GIMPLE_CALL:
2177 {
2178 tree lhs = gimple_call_lhs (stmt);
2179 int flags = gimple_call_flags (stmt);
2180 tree val;
2181 tree argt;
2182 bool changed = false;
2183 unsigned i;
2184
2185 /* If the call was folded into a constant, make sure it goes
2186 away even if we cannot propagate into all uses because of
2187 type issues. */
2188 if (lhs
2189 && TREE_CODE (lhs) == SSA_NAME
2190 && (val = get_constant_value (lhs))
2191 /* Don't optimize away calls that have side-effects. */
2192 && (flags & (ECF_CONST|ECF_PURE)) != 0
2193 && (flags & ECF_LOOPING_CONST_OR_PURE) == 0)
2194 {
2195 tree new_rhs = unshare_expr (val);
2196 bool res;
2197 if (!useless_type_conversion_p (TREE_TYPE (lhs),
2198 TREE_TYPE (new_rhs)))
2199 new_rhs = fold_convert (TREE_TYPE (lhs), new_rhs);
2200 res = update_call_from_tree (gsi, new_rhs);
2201 gcc_assert (res);
2202 return true;
2203 }
2204
2205 /* Internal calls provide no argument types, so the extra laxity
2206 for normal calls does not apply. */
2207 if (gimple_call_internal_p (stmt))
2208 return false;
2209
2210 /* The heuristic of fold_builtin_alloca_with_align differs before and
2211 after inlining, so we don't require the arg to be changed into a
2212 constant for folding, but just to be constant. */
2213 if (gimple_call_builtin_p (stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
2214 {
2215 tree new_rhs = fold_builtin_alloca_with_align (stmt);
2216 if (new_rhs)
2217 {
2218 bool res = update_call_from_tree (gsi, new_rhs);
2219 tree var = TREE_OPERAND (TREE_OPERAND (new_rhs, 0), 0);
2220 gcc_assert (res);
2221 insert_clobbers_for_var (*gsi, var);
2222 return true;
2223 }
2224 }
2225
2226 /* Propagate into the call arguments. Compared to replace_uses_in
2227 this can use the argument slot types for type verification
2228 instead of the current argument type. We can also safely
2229 drop qualifiers here as we are dealing with constants anyway. */
2230 argt = TYPE_ARG_TYPES (gimple_call_fntype (stmt));
2231 for (i = 0; i < gimple_call_num_args (stmt) && argt;
2232 ++i, argt = TREE_CHAIN (argt))
2233 {
2234 tree arg = gimple_call_arg (stmt, i);
2235 if (TREE_CODE (arg) == SSA_NAME
2236 && (val = get_constant_value (arg))
2237 && useless_type_conversion_p
2238 (TYPE_MAIN_VARIANT (TREE_VALUE (argt)),
2239 TYPE_MAIN_VARIANT (TREE_TYPE (val))))
2240 {
2241 gimple_call_set_arg (stmt, i, unshare_expr (val));
2242 changed = true;
2243 }
2244 }
2245
2246 return changed;
2247 }
2248
2249 case GIMPLE_ASSIGN:
2250 {
2251 tree lhs = gimple_assign_lhs (stmt);
2252 tree val;
2253
2254 /* If we have a load that turned out to be constant replace it
2255 as we cannot propagate into all uses in all cases. */
2256 if (gimple_assign_single_p (stmt)
2257 && TREE_CODE (lhs) == SSA_NAME
2258 && (val = get_constant_value (lhs)))
2259 {
2260 tree rhs = unshare_expr (val);
2261 if (!useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (rhs)))
2262 rhs = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (lhs), rhs);
2263 gimple_assign_set_rhs_from_tree (gsi, rhs);
2264 return true;
2265 }
2266
2267 return false;
2268 }
2269
2270 default:
2271 return false;
2272 }
2273 }
2274
2275 /* Visit the assignment statement STMT. Set the value of its LHS to the
2276 value computed by the RHS and store LHS in *OUTPUT_P. If STMT
2277 creates virtual definitions, set the value of each new name to that
2278 of the RHS (if we can derive a constant out of the RHS).
2279 Value-returning call statements also perform an assignment, and
2280 are handled here. */
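/* For instance, when b_3 is CONSTANT 4, visiting a_2 = b_3 + 1 evaluates the
   RHS to CONSTANT 5; if that changes a_2's lattice value, *OUTPUT_P is set
   to a_2 and SSA_PROP_INTERESTING is returned so that the uses of a_2 are
   simulated next.  */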
2281
2282 static enum ssa_prop_result
2283 visit_assignment (gimple *stmt, tree *output_p)
2284 {
2285 ccp_prop_value_t val;
2286 enum ssa_prop_result retval = SSA_PROP_NOT_INTERESTING;
2287
2288 tree lhs = gimple_get_lhs (stmt);
2289 if (TREE_CODE (lhs) == SSA_NAME)
2290 {
2291 /* Evaluate the statement, which could be
2292 either a GIMPLE_ASSIGN or a GIMPLE_CALL. */
2293 val = evaluate_stmt (stmt);
2294
2295 /* If STMT is an assignment to an SSA_NAME, we only have one
2296 value to set. */
2297 if (set_lattice_value (lhs, &val))
2298 {
2299 *output_p = lhs;
2300 if (val.lattice_val == VARYING)
2301 retval = SSA_PROP_VARYING;
2302 else
2303 retval = SSA_PROP_INTERESTING;
2304 }
2305 }
2306
2307 return retval;
2308 }
2309
2310
2311 /* Visit the conditional statement STMT. Return SSA_PROP_INTERESTING
2312 if it can determine which edge will be taken. Otherwise, return
2313 SSA_PROP_VARYING. */
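/* E.g. for a GIMPLE_COND block whose predicate evaluates to CONSTANT zero
   with mask 0, the false outgoing edge is stored in *TAKEN_EDGE_P.  */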
2314
2315 static enum ssa_prop_result
2316 visit_cond_stmt (gimple *stmt, edge *taken_edge_p)
2317 {
2318 ccp_prop_value_t val;
2319 basic_block block;
2320
2321 block = gimple_bb (stmt);
2322 val = evaluate_stmt (stmt);
2323 if (val.lattice_val != CONSTANT
2324 || val.mask != 0)
2325 return SSA_PROP_VARYING;
2326
2327 /* Find which edge out of the conditional block will be taken and add it
2328 to the worklist. If no single edge can be determined statically,
2329 return SSA_PROP_VARYING to feed all the outgoing edges to the
2330 propagation engine. */
2331 *taken_edge_p = find_taken_edge (block, val.value);
2332 if (*taken_edge_p)
2333 return SSA_PROP_INTERESTING;
2334 else
2335 return SSA_PROP_VARYING;
2336 }
2337
2338
2339 /* Evaluate statement STMT. If the statement produces an output value and
2340 its evaluation changes the lattice value of its output, return
2341 SSA_PROP_INTERESTING and set *OUTPUT_P to the SSA_NAME holding the
2342 output value.
2343
2344 If STMT is a conditional branch and we can determine its truth
2345 value, set *TAKEN_EDGE_P accordingly. If STMT produces a varying
2346 value, return SSA_PROP_VARYING. */
2347
2348 static enum ssa_prop_result
2349 ccp_visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
2350 {
2351 tree def;
2352 ssa_op_iter iter;
2353
2354 if (dump_file && (dump_flags & TDF_DETAILS))
2355 {
2356 fprintf (dump_file, "\nVisiting statement:\n");
2357 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
2358 }
2359
2360 switch (gimple_code (stmt))
2361 {
2362 case GIMPLE_ASSIGN:
2363 /* If the statement is an assignment that produces a single
2364 output value, evaluate its RHS to see if the lattice value of
2365 its output has changed. */
2366 return visit_assignment (stmt, output_p);
2367
2368 case GIMPLE_CALL:
2369 /* A value-returning call also performs an assignment. */
2370 if (gimple_call_lhs (stmt) != NULL_TREE)
2371 return visit_assignment (stmt, output_p);
2372 break;
2373
2374 case GIMPLE_COND:
2375 case GIMPLE_SWITCH:
2376 /* If STMT is a conditional branch, see if we can determine
2377 which branch will be taken. */
2378 /* FIXME. It appears that we should be able to optimize
2379 computed GOTOs here as well. */
2380 return visit_cond_stmt (stmt, taken_edge_p);
2381
2382 default:
2383 break;
2384 }
2385
2386 /* Any other kind of statement is not interesting for constant
2387 propagation and, therefore, not worth simulating. */
2388 if (dump_file && (dump_flags & TDF_DETAILS))
2389 fprintf (dump_file, "No interesting values produced. Marked VARYING.\n");
2390
2391 /* Definitions made by statements other than assignments to
2392 SSA_NAMEs represent unknown modifications to their outputs.
2393 Mark them VARYING. */
2394 FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
2395 set_value_varying (def);
2396
2397 return SSA_PROP_VARYING;
2398 }
2399
2400
2401 /* Main entry point for SSA Conditional Constant Propagation. If NONZERO_P,
2402 record nonzero bits. */
2403
2404 static unsigned int
2405 do_ssa_ccp (bool nonzero_p)
2406 {
2407 unsigned int todo = 0;
2408 calculate_dominance_info (CDI_DOMINATORS);
2409
2410 ccp_initialize ();
2411 ssa_propagate (ccp_visit_stmt, ccp_visit_phi_node);
2412 if (ccp_finalize (nonzero_p))
2413 {
2414 todo = (TODO_cleanup_cfg | TODO_update_ssa);
2415
2416 /* ccp_finalize does not preserve loop-closed ssa. */
2417 loops_state_clear (LOOP_CLOSED_SSA);
2418 }
2419
2420 free_dominance_info (CDI_DOMINATORS);
2421 return todo;
2422 }
2423
2424
2425 namespace {
2426
2427 const pass_data pass_data_ccp =
2428 {
2429 GIMPLE_PASS, /* type */
2430 "ccp", /* name */
2431 OPTGROUP_NONE, /* optinfo_flags */
2432 TV_TREE_CCP, /* tv_id */
2433 ( PROP_cfg | PROP_ssa ), /* properties_required */
2434 0, /* properties_provided */
2435 0, /* properties_destroyed */
2436 0, /* todo_flags_start */
2437 TODO_update_address_taken, /* todo_flags_finish */
2438 };
2439
2440 class pass_ccp : public gimple_opt_pass
2441 {
2442 public:
2443 pass_ccp (gcc::context *ctxt)
2444 : gimple_opt_pass (pass_data_ccp, ctxt), nonzero_p (false)
2445 {}
2446
2447 /* opt_pass methods: */
2448 opt_pass * clone () { return new pass_ccp (m_ctxt); }
2449 void set_pass_param (unsigned int n, bool param)
2450 {
2451 gcc_assert (n == 0);
2452 nonzero_p = param;
2453 }
2454 virtual bool gate (function *) { return flag_tree_ccp != 0; }
2455 virtual unsigned int execute (function *) { return do_ssa_ccp (nonzero_p); }
2456
2457 private:
2458 /* Determines whether the pass instance records nonzero bits. */
2459 bool nonzero_p;
2460 }; // class pass_ccp
2461
2462 } // anon namespace
2463
2464 gimple_opt_pass *
2465 make_pass_ccp (gcc::context *ctxt)
2466 {
2467 return new pass_ccp (ctxt);
2468 }
2469
2470
2471
2472 /* Try to optimize out __builtin_stack_restore. Optimize it out
2473 if there is another __builtin_stack_restore in the same basic
2474 block and no calls or ASM_EXPRs are in between, or if this block's
2475 only outgoing edge is to EXIT_BLOCK and there are no calls or
2476 ASM_EXPRs after this __builtin_stack_restore. */
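/* For example, in

     __builtin_stack_restore (save_1);   <-- redundant, optimized out
     x_2 = y_3 + 1;
     __builtin_stack_restore (save_4);

   the first restore is removable because another one follows in the same
   block with no calls or ASM_EXPRs in between.  */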
2477
2478 static tree
2479 optimize_stack_restore (gimple_stmt_iterator i)
2480 {
2481 tree callee;
2482 gimple *stmt;
2483
2484 basic_block bb = gsi_bb (i);
2485 gimple *call = gsi_stmt (i);
2486
2487 if (gimple_code (call) != GIMPLE_CALL
2488 || gimple_call_num_args (call) != 1
2489 || TREE_CODE (gimple_call_arg (call, 0)) != SSA_NAME
2490 || !POINTER_TYPE_P (TREE_TYPE (gimple_call_arg (call, 0))))
2491 return NULL_TREE;
2492
2493 for (gsi_next (&i); !gsi_end_p (i); gsi_next (&i))
2494 {
2495 stmt = gsi_stmt (i);
2496 if (gimple_code (stmt) == GIMPLE_ASM)
2497 return NULL_TREE;
2498 if (gimple_code (stmt) != GIMPLE_CALL)
2499 continue;
2500
2501 callee = gimple_call_fndecl (stmt);
2502 if (!callee
2503 || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL
2504 /* All regular builtins are ok, just obviously not alloca. */
2505 || DECL_FUNCTION_CODE (callee) == BUILT_IN_ALLOCA
2506 || DECL_FUNCTION_CODE (callee) == BUILT_IN_ALLOCA_WITH_ALIGN)
2507 return NULL_TREE;
2508
2509 if (DECL_FUNCTION_CODE (callee) == BUILT_IN_STACK_RESTORE)
2510 goto second_stack_restore;
2511 }
2512
2513 if (!gsi_end_p (i))
2514 return NULL_TREE;
2515
2516 /* Allow a single successor edge to the exit block, or no successors. */
2517 switch (EDGE_COUNT (bb->succs))
2518 {
2519 case 0:
2520 break;
2521 case 1:
2522 if (single_succ_edge (bb)->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
2523 return NULL_TREE;
2524 break;
2525 default:
2526 return NULL_TREE;
2527 }
2528 second_stack_restore:
2529
2530 /* If there's exactly one use, then zap the call to __builtin_stack_save.
2531 If there are multiple uses, then the last one should remove the call.
2532 In any case, whether the call to __builtin_stack_save can be removed
2533 or not is irrelevant to removing the call to __builtin_stack_restore. */
2534 if (has_single_use (gimple_call_arg (call, 0)))
2535 {
2536 gimple *stack_save = SSA_NAME_DEF_STMT (gimple_call_arg (call, 0));
2537 if (is_gimple_call (stack_save))
2538 {
2539 callee = gimple_call_fndecl (stack_save);
2540 if (callee
2541 && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
2542 && DECL_FUNCTION_CODE (callee) == BUILT_IN_STACK_SAVE)
2543 {
2544 gimple_stmt_iterator stack_save_gsi;
2545 tree rhs;
2546
2547 stack_save_gsi = gsi_for_stmt (stack_save);
2548 rhs = build_int_cst (TREE_TYPE (gimple_call_arg (call, 0)), 0);
2549 update_call_from_tree (&stack_save_gsi, rhs);
2550 }
2551 }
2552 }
2553
2554 /* No effect, so the statement will be deleted. */
2555 return integer_zero_node;
2556 }
2557
2558 /* If the va_list type is a simple pointer and nothing special is needed,
2559 optimize __builtin_va_start (&ap, 0) into ap = __builtin_next_arg (0),
2560 optimize __builtin_va_end (&ap) out as a NOP, and optimize
2561 __builtin_va_copy into a simple pointer assignment. */
2562
2563 static tree
2564 optimize_stdarg_builtin (gimple *call)
2565 {
2566 tree callee, lhs, rhs, cfun_va_list;
2567 bool va_list_simple_ptr;
2568 location_t loc = gimple_location (call);
2569
2570 if (gimple_code (call) != GIMPLE_CALL)
2571 return NULL_TREE;
2572
2573 callee = gimple_call_fndecl (call);
2574
2575 cfun_va_list = targetm.fn_abi_va_list (callee);
2576 va_list_simple_ptr = POINTER_TYPE_P (cfun_va_list)
2577 && (TREE_TYPE (cfun_va_list) == void_type_node
2578 || TREE_TYPE (cfun_va_list) == char_type_node);
2579
2580 switch (DECL_FUNCTION_CODE (callee))
2581 {
2582 case BUILT_IN_VA_START:
2583 if (!va_list_simple_ptr
2584 || targetm.expand_builtin_va_start != NULL
2585 || !builtin_decl_explicit_p (BUILT_IN_NEXT_ARG))
2586 return NULL_TREE;
2587
2588 if (gimple_call_num_args (call) != 2)
2589 return NULL_TREE;
2590
2591 lhs = gimple_call_arg (call, 0);
2592 if (!POINTER_TYPE_P (TREE_TYPE (lhs))
2593 || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs)))
2594 != TYPE_MAIN_VARIANT (cfun_va_list))
2595 return NULL_TREE;
2596
2597 lhs = build_fold_indirect_ref_loc (loc, lhs);
2598 rhs = build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_NEXT_ARG),
2599 1, integer_zero_node);
2600 rhs = fold_convert_loc (loc, TREE_TYPE (lhs), rhs);
2601 return build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, rhs);
2602
2603 case BUILT_IN_VA_COPY:
2604 if (!va_list_simple_ptr)
2605 return NULL_TREE;
2606
2607 if (gimple_call_num_args (call) != 2)
2608 return NULL_TREE;
2609
2610 lhs = gimple_call_arg (call, 0);
2611 if (!POINTER_TYPE_P (TREE_TYPE (lhs))
2612 || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs)))
2613 != TYPE_MAIN_VARIANT (cfun_va_list))
2614 return NULL_TREE;
2615
2616 lhs = build_fold_indirect_ref_loc (loc, lhs);
2617 rhs = gimple_call_arg (call, 1);
2618 if (TYPE_MAIN_VARIANT (TREE_TYPE (rhs))
2619 != TYPE_MAIN_VARIANT (cfun_va_list))
2620 return NULL_TREE;
2621
2622 rhs = fold_convert_loc (loc, TREE_TYPE (lhs), rhs);
2623 return build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, rhs);
2624
2625 case BUILT_IN_VA_END:
2626 /* No effect, so the statement will be deleted. */
2627 return integer_zero_node;
2628
2629 default:
2630 gcc_unreachable ();
2631 }
2632 }
2633
2634 /* Attempt to make the block containing the __builtin_unreachable call at I
2635 unreachable by changing incoming jumps. Return true if any jump changed. */
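/* For example, if a block containing only __builtin_unreachable () is
   reached through the true edge of

     if (x_1 > 10) goto <bb 5>; else goto <bb 6>;

   the condition is rewritten with gimple_cond_make_false, making <bb 5>
   unreachable.  */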
2636
2637 static bool
2638 optimize_unreachable (gimple_stmt_iterator i)
2639 {
2640 basic_block bb = gsi_bb (i);
2641 gimple_stmt_iterator gsi;
2642 gimple *stmt;
2643 edge_iterator ei;
2644 edge e;
2645 bool ret;
2646
2647 if (flag_sanitize & SANITIZE_UNREACHABLE)
2648 return false;
2649
2650 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2651 {
2652 stmt = gsi_stmt (gsi);
2653
2654 if (is_gimple_debug (stmt))
2655 continue;
2656
2657 if (glabel *label_stmt = dyn_cast <glabel *> (stmt))
2658 {
2659 /* Verify we do not need to preserve the label. */
2660 if (FORCED_LABEL (gimple_label_label (label_stmt)))
2661 return false;
2662
2663 continue;
2664 }
2665
2666 /* Only handle the case where __builtin_unreachable is the first non-debug,
2667 non-label statement in the block. We rely on DCE to remove stmts without
2668 side-effects before __builtin_unreachable. */
2669 if (gsi_stmt (gsi) != gsi_stmt (i))
2670 return false;
2671 }
2672
2673 ret = false;
2674 FOR_EACH_EDGE (e, ei, bb->preds)
2675 {
2676 gsi = gsi_last_bb (e->src);
2677 if (gsi_end_p (gsi))
2678 continue;
2679
2680 stmt = gsi_stmt (gsi);
2681 if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
2682 {
2683 if (e->flags & EDGE_TRUE_VALUE)
2684 gimple_cond_make_false (cond_stmt);
2685 else if (e->flags & EDGE_FALSE_VALUE)
2686 gimple_cond_make_true (cond_stmt);
2687 else
2688 gcc_unreachable ();
2689 update_stmt (cond_stmt);
2690 }
2691 else
2692 {
2693 /* TODO: handle other cases, e.g. a switch statement. */
2694 continue;
2695 }
2696
2697 ret = true;
2698 }
2699
2700 return ret;
2701 }
2702
2703 /* Optimize
2704 mask_2 = 1 << cnt_1;
2705 _4 = __atomic_fetch_or_* (ptr_6, mask_2, _3);
2706 _5 = _4 & mask_2;
2707 to
2708 _4 = ATOMIC_BIT_TEST_AND_SET (ptr_6, cnt_1, 0, _3);
2709 _5 = _4;
2710 If _5 is only used in _5 != 0 or _5 == 0 comparisons, 1
2711 is passed instead of 0, and the builtin just returns a zero
2712 or 1 value instead of the actual bit.
2713 Similarly for __sync_fetch_and_or_* (without the ", _3" part
2714 in there), and/or if mask_2 is a power of 2 constant.
2715 Similarly for xor instead of or, use ATOMIC_BIT_TEST_AND_COMPLEMENT
2716 in that case. And similarly for and instead of or, except that
2717 the second argument to the builtin needs to be one's complement
2718 of the mask instead of the mask itself. */
2719
2720 static void
2721 optimize_atomic_bit_test_and (gimple_stmt_iterator *gsip,
2722 enum internal_fn fn, bool has_model_arg,
2723 bool after)
2724 {
2725 gimple *call = gsi_stmt (*gsip);
2726 tree lhs = gimple_call_lhs (call);
2727 use_operand_p use_p;
2728 gimple *use_stmt;
2729 tree mask, bit;
2730 optab optab;
2731
2732 if (!flag_inline_atomics
2733 || optimize_debug
2734 || !gimple_call_builtin_p (call, BUILT_IN_NORMAL)
2735 || !lhs
2736 || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs)
2737 || !single_imm_use (lhs, &use_p, &use_stmt)
2738 || !is_gimple_assign (use_stmt)
2739 || gimple_assign_rhs_code (use_stmt) != BIT_AND_EXPR
2740 || !gimple_vdef (call))
2741 return;
2742
2743 switch (fn)
2744 {
2745 case IFN_ATOMIC_BIT_TEST_AND_SET:
2746 optab = atomic_bit_test_and_set_optab;
2747 break;
2748 case IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT:
2749 optab = atomic_bit_test_and_complement_optab;
2750 break;
2751 case IFN_ATOMIC_BIT_TEST_AND_RESET:
2752 optab = atomic_bit_test_and_reset_optab;
2753 break;
2754 default:
2755 return;
2756 }
2757
2758 if (optab_handler (optab, TYPE_MODE (TREE_TYPE (lhs))) == CODE_FOR_nothing)
2759 return;
2760
2761 mask = gimple_call_arg (call, 1);
2762 tree use_lhs = gimple_assign_lhs (use_stmt);
2763 if (!use_lhs)
2764 return;
2765
2766 if (TREE_CODE (mask) == INTEGER_CST)
2767 {
2768 if (fn == IFN_ATOMIC_BIT_TEST_AND_RESET)
2769 mask = const_unop (BIT_NOT_EXPR, TREE_TYPE (mask), mask);
2770 mask = fold_convert (TREE_TYPE (lhs), mask);
2771 int ibit = tree_log2 (mask);
2772 if (ibit < 0)
2773 return;
2774 bit = build_int_cst (TREE_TYPE (lhs), ibit);
2775 }
2776 else if (TREE_CODE (mask) == SSA_NAME)
2777 {
2778 gimple *g = SSA_NAME_DEF_STMT (mask);
2779 if (fn == IFN_ATOMIC_BIT_TEST_AND_RESET)
2780 {
2781 if (!is_gimple_assign (g)
2782 || gimple_assign_rhs_code (g) != BIT_NOT_EXPR)
2783 return;
2784 mask = gimple_assign_rhs1 (g);
2785 if (TREE_CODE (mask) != SSA_NAME)
2786 return;
2787 g = SSA_NAME_DEF_STMT (mask);
2788 }
2789 if (!is_gimple_assign (g)
2790 || gimple_assign_rhs_code (g) != LSHIFT_EXPR
2791 || !integer_onep (gimple_assign_rhs1 (g)))
2792 return;
2793 bit = gimple_assign_rhs2 (g);
2794 }
2795 else
2796 return;
2797
2798 if (gimple_assign_rhs1 (use_stmt) == lhs)
2799 {
2800 if (!operand_equal_p (gimple_assign_rhs2 (use_stmt), mask, 0))
2801 return;
2802 }
2803 else if (gimple_assign_rhs2 (use_stmt) != lhs
2804 || !operand_equal_p (gimple_assign_rhs1 (use_stmt), mask, 0))
2805 return;
2806
2807 bool use_bool = true;
2808 bool has_debug_uses = false;
2809 imm_use_iterator iter;
2810 gimple *g;
2811
2812 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use_lhs))
2813 use_bool = false;
2814 FOR_EACH_IMM_USE_STMT (g, iter, use_lhs)
2815 {
2816 enum tree_code code = ERROR_MARK;
2817 tree op0, op1;
2818 if (is_gimple_debug (g))
2819 {
2820 has_debug_uses = true;
2821 continue;
2822 }
2823 else if (is_gimple_assign (g))
2824 switch (gimple_assign_rhs_code (g))
2825 {
2826 case COND_EXPR:
2827 op1 = gimple_assign_rhs1 (g);
2828 code = TREE_CODE (op1);
2829 op0 = TREE_OPERAND (op1, 0);
2830 op1 = TREE_OPERAND (op1, 1);
2831 break;
2832 case EQ_EXPR:
2833 case NE_EXPR:
2834 code = gimple_assign_rhs_code (g);
2835 op0 = gimple_assign_rhs1 (g);
2836 op1 = gimple_assign_rhs2 (g);
2837 break;
2838 default:
2839 break;
2840 }
2841 else if (gimple_code (g) == GIMPLE_COND)
2842 {
2843 code = gimple_cond_code (g);
2844 op0 = gimple_cond_lhs (g);
2845 op1 = gimple_cond_rhs (g);
2846 }
2847
2848 if ((code == EQ_EXPR || code == NE_EXPR)
2849 && op0 == use_lhs
2850 && integer_zerop (op1))
2851 {
2852 use_operand_p use_p;
2853 int n = 0;
2854 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2855 n++;
2856 if (n == 1)
2857 continue;
2858 }
2859
2860 use_bool = false;
2861 BREAK_FROM_IMM_USE_STMT (iter);
2862 }
2863
2864 tree new_lhs = make_ssa_name (TREE_TYPE (lhs));
2865 tree flag = build_int_cst (TREE_TYPE (lhs), use_bool);
2866 if (has_model_arg)
2867 g = gimple_build_call_internal (fn, 4, gimple_call_arg (call, 0),
2868 bit, flag, gimple_call_arg (call, 2));
2869 else
2870 g = gimple_build_call_internal (fn, 3, gimple_call_arg (call, 0),
2871 bit, flag);
2872 gimple_call_set_lhs (g, new_lhs);
2873 gimple_set_location (g, gimple_location (call));
2874 gimple_set_vuse (g, gimple_vuse (call));
2875 gimple_set_vdef (g, gimple_vdef (call));
2876 SSA_NAME_DEF_STMT (gimple_vdef (call)) = g;
2877 gimple_stmt_iterator gsi = *gsip;
2878 gsi_insert_after (&gsi, g, GSI_NEW_STMT);
2879 if (after)
2880 {
2881 /* The internal function returns the value of the specified bit
2882 before the atomic operation. If we are interested in the value
2883 of the specified bit after the atomic operation (which only makes
2884 sense for xor; otherwise the bit's content is known at compile time),
2885 we need to invert the bit. */
2886 g = gimple_build_assign (make_ssa_name (TREE_TYPE (lhs)),
2887 BIT_XOR_EXPR, new_lhs,
2888 use_bool ? build_int_cst (TREE_TYPE (lhs), 1)
2889 : mask);
2890 new_lhs = gimple_assign_lhs (g);
2891 gsi_insert_after (&gsi, g, GSI_NEW_STMT);
2892 }
2893 if (use_bool && has_debug_uses)
2894 {
2895 tree temp = make_node (DEBUG_EXPR_DECL);
2896 DECL_ARTIFICIAL (temp) = 1;
2897 TREE_TYPE (temp) = TREE_TYPE (lhs);
2898 DECL_MODE (temp) = TYPE_MODE (TREE_TYPE (lhs));
2899 tree t = build2 (LSHIFT_EXPR, TREE_TYPE (lhs), new_lhs, bit);
2900 g = gimple_build_debug_bind (temp, t, g);
2901 gsi_insert_after (&gsi, g, GSI_NEW_STMT);
2902 FOR_EACH_IMM_USE_STMT (g, iter, use_lhs)
2903 if (is_gimple_debug (g))
2904 {
2905 use_operand_p use_p;
2906 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2907 SET_USE (use_p, temp);
2908 update_stmt (g);
2909 }
2910 }
2911 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_lhs)
2912 = SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use_lhs);
2913 replace_uses_by (use_lhs, new_lhs);
2914 gsi = gsi_for_stmt (use_stmt);
2915 gsi_remove (&gsi, true);
2916 release_defs (use_stmt);
2917 gsi_remove (gsip, true);
2918 release_ssa_name (lhs);
2919 }
2920
2921 /* A simple pass that attempts to fold all builtin functions. This pass
2922 is run after we've propagated as many constants as we can. */
2923
2924 namespace {
2925
2926 const pass_data pass_data_fold_builtins =
2927 {
2928 GIMPLE_PASS, /* type */
2929 "fab", /* name */
2930 OPTGROUP_NONE, /* optinfo_flags */
2931 TV_NONE, /* tv_id */
2932 ( PROP_cfg | PROP_ssa ), /* properties_required */
2933 0, /* properties_provided */
2934 0, /* properties_destroyed */
2935 0, /* todo_flags_start */
2936 TODO_update_ssa, /* todo_flags_finish */
2937 };
2938
2939 class pass_fold_builtins : public gimple_opt_pass
2940 {
2941 public:
2942 pass_fold_builtins (gcc::context *ctxt)
2943 : gimple_opt_pass (pass_data_fold_builtins, ctxt)
2944 {}
2945
2946 /* opt_pass methods: */
2947 opt_pass * clone () { return new pass_fold_builtins (m_ctxt); }
2948 virtual unsigned int execute (function *);
2949
2950 }; // class pass_fold_builtins
2951
2952 unsigned int
2953 pass_fold_builtins::execute (function *fun)
2954 {
2955 bool cfg_changed = false;
2956 basic_block bb;
2957 unsigned int todoflags = 0;
2958
2959 FOR_EACH_BB_FN (bb, fun)
2960 {
2961 gimple_stmt_iterator i;
2962 for (i = gsi_start_bb (bb); !gsi_end_p (i); )
2963 {
2964 gimple *stmt, *old_stmt;
2965 tree callee;
2966 enum built_in_function fcode;
2967
2968 stmt = gsi_stmt (i);
2969
2970 if (gimple_code (stmt) != GIMPLE_CALL)
2971 {
2972 /* Remove all *ssaname_N ={v} {CLOBBER}; stmts;
2973 after the last GIMPLE DSE they aren't needed and might
2974 unnecessarily keep the SSA_NAMEs live. */
2975 if (gimple_clobber_p (stmt))
2976 {
2977 tree lhs = gimple_assign_lhs (stmt);
2978 if (TREE_CODE (lhs) == MEM_REF
2979 && TREE_CODE (TREE_OPERAND (lhs, 0)) == SSA_NAME)
2980 {
2981 unlink_stmt_vdef (stmt);
2982 gsi_remove (&i, true);
2983 release_defs (stmt);
2984 continue;
2985 }
2986 }
2987 gsi_next (&i);
2988 continue;
2989 }
2990
2991 callee = gimple_call_fndecl (stmt);
2992 if (!callee || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL)
2993 {
2994 gsi_next (&i);
2995 continue;
2996 }
2997
2998 fcode = DECL_FUNCTION_CODE (callee);
2999 if (fold_stmt (&i))
3000 ;
3001 else
3002 {
3003 tree result = NULL_TREE;
3004 switch (DECL_FUNCTION_CODE (callee))
3005 {
3006 case BUILT_IN_CONSTANT_P:
3007 /* Resolve __builtin_constant_p. If it hasn't been
3008 folded to integer_one_node by now, it's fairly
3009 certain that the value simply isn't constant. */
3010 result = integer_zero_node;
3011 break;
3012
3013 case BUILT_IN_ASSUME_ALIGNED:
3014 /* Remove __builtin_assume_aligned. */
3015 result = gimple_call_arg (stmt, 0);
3016 break;
3017
3018 case BUILT_IN_STACK_RESTORE:
3019 result = optimize_stack_restore (i);
3020 if (result)
3021 break;
3022 gsi_next (&i);
3023 continue;
3024
3025 case BUILT_IN_UNREACHABLE:
3026 if (optimize_unreachable (i))
3027 cfg_changed = true;
3028 break;
3029
3030 case BUILT_IN_ATOMIC_FETCH_OR_1:
3031 case BUILT_IN_ATOMIC_FETCH_OR_2:
3032 case BUILT_IN_ATOMIC_FETCH_OR_4:
3033 case BUILT_IN_ATOMIC_FETCH_OR_8:
3034 case BUILT_IN_ATOMIC_FETCH_OR_16:
3035 optimize_atomic_bit_test_and (&i,
3036 IFN_ATOMIC_BIT_TEST_AND_SET,
3037 true, false);
3038 break;
3039 case BUILT_IN_SYNC_FETCH_AND_OR_1:
3040 case BUILT_IN_SYNC_FETCH_AND_OR_2:
3041 case BUILT_IN_SYNC_FETCH_AND_OR_4:
3042 case BUILT_IN_SYNC_FETCH_AND_OR_8:
3043 case BUILT_IN_SYNC_FETCH_AND_OR_16:
3044 optimize_atomic_bit_test_and (&i,
3045 IFN_ATOMIC_BIT_TEST_AND_SET,
3046 false, false);
3047 break;
3048
3049 case BUILT_IN_ATOMIC_FETCH_XOR_1:
3050 case BUILT_IN_ATOMIC_FETCH_XOR_2:
3051 case BUILT_IN_ATOMIC_FETCH_XOR_4:
3052 case BUILT_IN_ATOMIC_FETCH_XOR_8:
3053 case BUILT_IN_ATOMIC_FETCH_XOR_16:
3054 optimize_atomic_bit_test_and
3055 (&i, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, true, false);
3056 break;
3057 case BUILT_IN_SYNC_FETCH_AND_XOR_1:
3058 case BUILT_IN_SYNC_FETCH_AND_XOR_2:
3059 case BUILT_IN_SYNC_FETCH_AND_XOR_4:
3060 case BUILT_IN_SYNC_FETCH_AND_XOR_8:
3061 case BUILT_IN_SYNC_FETCH_AND_XOR_16:
3062 optimize_atomic_bit_test_and
3063 (&i, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, false, false);
3064 break;
3065
3066 case BUILT_IN_ATOMIC_XOR_FETCH_1:
3067 case BUILT_IN_ATOMIC_XOR_FETCH_2:
3068 case BUILT_IN_ATOMIC_XOR_FETCH_4:
3069 case BUILT_IN_ATOMIC_XOR_FETCH_8:
3070 case BUILT_IN_ATOMIC_XOR_FETCH_16:
3071 optimize_atomic_bit_test_and
3072 (&i, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, true, true);
3073 break;
3074 case BUILT_IN_SYNC_XOR_AND_FETCH_1:
3075 case BUILT_IN_SYNC_XOR_AND_FETCH_2:
3076 case BUILT_IN_SYNC_XOR_AND_FETCH_4:
3077 case BUILT_IN_SYNC_XOR_AND_FETCH_8:
3078 case BUILT_IN_SYNC_XOR_AND_FETCH_16:
3079 optimize_atomic_bit_test_and
3080 (&i, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, false, true);
3081 break;
3082
3083 case BUILT_IN_ATOMIC_FETCH_AND_1:
3084 case BUILT_IN_ATOMIC_FETCH_AND_2:
3085 case BUILT_IN_ATOMIC_FETCH_AND_4:
3086 case BUILT_IN_ATOMIC_FETCH_AND_8:
3087 case BUILT_IN_ATOMIC_FETCH_AND_16:
3088 optimize_atomic_bit_test_and (&i,
3089 IFN_ATOMIC_BIT_TEST_AND_RESET,
3090 true, false);
3091 break;
3092 case BUILT_IN_SYNC_FETCH_AND_AND_1:
3093 case BUILT_IN_SYNC_FETCH_AND_AND_2:
3094 case BUILT_IN_SYNC_FETCH_AND_AND_4:
3095 case BUILT_IN_SYNC_FETCH_AND_AND_8:
3096 case BUILT_IN_SYNC_FETCH_AND_AND_16:
3097 optimize_atomic_bit_test_and (&i,
3098 IFN_ATOMIC_BIT_TEST_AND_RESET,
3099 false, false);
3100 break;
3101
3102 case BUILT_IN_VA_START:
3103 case BUILT_IN_VA_END:
3104 case BUILT_IN_VA_COPY:
3105 /* These shouldn't be folded before pass_stdarg. */
3106 result = optimize_stdarg_builtin (stmt);
3107 if (result)
3108 break;
3109 /* FALLTHRU */
3110
3111 default:;
3112 }
3113
3114 if (!result)
3115 {
3116 gsi_next (&i);
3117 continue;
3118 }
3119
3120 if (!update_call_from_tree (&i, result))
3121 gimplify_and_update_call_from_tree (&i, result);
3122 }
3123
3124 todoflags |= TODO_update_address_taken;
3125
3126 if (dump_file && (dump_flags & TDF_DETAILS))
3127 {
3128 fprintf (dump_file, "Simplified\n ");
3129 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
3130 }
3131
3132 old_stmt = stmt;
3133 stmt = gsi_stmt (i);
3134 update_stmt (stmt);
3135
3136 if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt)
3137 && gimple_purge_dead_eh_edges (bb))
3138 cfg_changed = true;
3139
3140 if (dump_file && (dump_flags & TDF_DETAILS))
3141 {
3142 fprintf (dump_file, "to\n ");
3143 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
3144 fprintf (dump_file, "\n");
3145 }
3146
3147 /* Retry the same statement if it changed into another
3148 builtin; there might be new opportunities now. */
3149 if (gimple_code (stmt) != GIMPLE_CALL)
3150 {
3151 gsi_next (&i);
3152 continue;
3153 }
3154 callee = gimple_call_fndecl (stmt);
3155 if (!callee
3156 || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL
3157 || DECL_FUNCTION_CODE (callee) == fcode)
3158 gsi_next (&i);
3159 }
3160 }
3161
3162 /* Delete unreachable blocks. */
3163 if (cfg_changed)
3164 todoflags |= TODO_cleanup_cfg;
3165
3166 return todoflags;
3167 }
3168
3169 } // anon namespace
3170
3171 gimple_opt_pass *
3172 make_pass_fold_builtins (gcc::context *ctxt)
3173 {
3174 return new pass_fold_builtins (ctxt);
3175 }