re PR tree-optimization/80181 (ICE in set_lattice_value, at tree-ssa-ccp.c:505)
1 /* Conditional constant propagation pass for the GNU compiler.
2 Copyright (C) 2000-2017 Free Software Foundation, Inc.
3 Adapted from original RTL SSA-CCP by Daniel Berlin <dberlin@dberlin.org>
4 Adapted to GIMPLE trees by Diego Novillo <dnovillo@redhat.com>
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it
9 under the terms of the GNU General Public License as published by the
10 Free Software Foundation; either version 3, or (at your option) any
11 later version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT
14 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 /* Conditional constant propagation (CCP) is based on the SSA
23 propagation engine (tree-ssa-propagate.c). Constant assignments of
24 the form VAR = CST are propagated from the assignments into uses of
25 VAR, which in turn may generate new constants. The simulation uses
26 a four level lattice to keep track of constant values associated
27 with SSA names. Given an SSA name V_i, it may take one of the
28 following values:
29
30 UNINITIALIZED -> the initial state of the value. This value
31 is replaced with a correct initial value
32 the first time the value is used, so the
33 rest of the pass does not need to care about
34 it. Using this value simplifies initialization
35 of the pass, and prevents us from needlessly
36 scanning statements that are never reached.
37
38 UNDEFINED -> V_i is a local variable whose definition
39 has not been processed yet. Therefore we
40 don't yet know if its value is a constant
41 or not.
42
43 CONSTANT -> V_i has been found to hold a constant
44 value C.
45
46 VARYING -> V_i cannot take a constant value, or if it
47 does, it is not possible to determine it
48 at compile time.
49
50 The core of SSA-CCP is in ccp_visit_stmt and ccp_visit_phi_node:
51
52 1- In ccp_visit_stmt, we are interested in assignments whose RHS
53 evaluates into a constant and conditional jumps whose predicate
54 evaluates into a boolean true or false. When an assignment of
55 the form V_i = CONST is found, V_i's lattice value is set to
56 CONSTANT and CONST is associated with it. This causes the
57 propagation engine to add all the SSA edges coming out of the
58 assignment into the worklists, so that statements that use V_i
59 can be visited.
60
61 If the statement is a conditional with a constant predicate, we
62 mark the outgoing edges as executable or not executable
63 depending on the predicate's value. This is then used when
64 visiting PHI nodes to know when a PHI argument can be ignored.
65
66
67 2- In ccp_visit_phi_node, if all the PHI arguments evaluate to the
68 same constant C, then the LHS of the PHI is set to C. This
69 evaluation is known as the "meet operation". Since one of the
70 goals of this evaluation is to optimistically return constant
71 values as often as possible, it uses two main shortcuts:
72
73 - If an argument is flowing in through a non-executable edge, it
74 is ignored. This is useful in cases like this:
75
76 if (PRED)
77 a_9 = 3;
78 else
79 a_10 = 100;
80 a_11 = PHI (a_9, a_10)
81
82 If PRED is known to always evaluate to false, then we can
83 assume that a_11 will always take its value from a_10, meaning
84 that instead of considering it VARYING (a_9 and a_10 have
85 different values), we can consider it CONSTANT 100.
86
87 - If an argument has an UNDEFINED value, then it does not affect
88 the outcome of the meet operation. If a variable V_i has an
89 UNDEFINED value, it means that either its defining statement
90 hasn't been visited yet or V_i has no defining statement, in
91 which case the original symbol 'V' is being used
92 uninitialized. Since 'V' is a local variable, the compiler
93 may assume any initial value for it.
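        As an illustrative sketch (the SSA names below are made up),
        given

          if (PRED)
            b_9 = 3;
          b_10 = PHI (b_9, b_8(D))

        where b_8(D) is the default definition of an uninitialized
        local, the b_8(D) argument is UNDEFINED, so the meet simply
        takes the value of b_9 and b_10 becomes CONSTANT 3.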
94
95
96 After propagation, every variable V_i that ends up with a lattice
97 value of CONSTANT will have the associated constant value in the
98 array CONST_VAL[i].VALUE. That is fed into substitute_and_fold for
99 final substitution and folding.
100
101 This algorithm uses wide-ints at the max precision of the target.
102 This means that, with one uninteresting exception, variables with
103 UNSIGNED types never go to VARYING because the bits above the
104 precision of the type of the variable are always zero. The
105 uninteresting case is a variable of UNSIGNED type that has the
106 maximum precision of the target. Such variables can go to VARYING,
107 but this causes no loss of information since these variables will
108 never be extended.
109
110 References:
111
112 Constant propagation with conditional branches,
113 Wegman and Zadeck, ACM TOPLAS 13(2):181-210.
114
115 Building an Optimizing Compiler,
116 Robert Morgan, Butterworth-Heinemann, 1998, Section 8.9.
117
118 Advanced Compiler Design and Implementation,
119 Steven Muchnick, Morgan Kaufmann, 1997, Section 12.6 */
120
121 #include "config.h"
122 #include "system.h"
123 #include "coretypes.h"
124 #include "backend.h"
125 #include "target.h"
126 #include "tree.h"
127 #include "gimple.h"
128 #include "tree-pass.h"
129 #include "ssa.h"
130 #include "gimple-pretty-print.h"
131 #include "fold-const.h"
132 #include "gimple-fold.h"
133 #include "tree-eh.h"
134 #include "gimplify.h"
135 #include "gimple-iterator.h"
136 #include "tree-cfg.h"
137 #include "tree-ssa-propagate.h"
138 #include "dbgcnt.h"
139 #include "params.h"
140 #include "builtins.h"
141 #include "tree-chkp.h"
142 #include "cfgloop.h"
143 #include "stor-layout.h"
144 #include "optabs-query.h"
145 #include "tree-ssa-ccp.h"
146 #include "tree-dfa.h"
147 #include "diagnostic-core.h"
148
149 /* Possible lattice values. */
150 typedef enum
151 {
152 UNINITIALIZED,
153 UNDEFINED,
154 CONSTANT,
155 VARYING
156 } ccp_lattice_t;
157
158 struct ccp_prop_value_t {
159 /* Lattice value. */
160 ccp_lattice_t lattice_val;
161
162 /* Propagated value. */
163 tree value;
164
165 /* Mask that applies to the propagated value during CCP. For X
166 with a CONSTANT lattice value, X & ~mask == value & ~mask. The
167 zero bits in the mask cover constant values. The ones mean no
168 information. */
169 widest_int mask;
170 };
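/* As a worked illustration of the mask encoding (values chosen
   arbitrarily): value == 4 with mask == 3 means the low two bits are
   unknown and every higher bit matches VALUE, i.e. the SSA name is
   known to be one of 4, 5, 6 or 7 (X & ~3 == 4 & ~3 for all of them).  */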
171
172 /* Array of propagated constant values. After propagation,
173 CONST_VAL[I].VALUE holds the constant value for SSA_NAME(I). If
174 the constant is held in an SSA name representing a memory store
175 (i.e., a VDEF), CONST_VAL[I].MEM_REF will contain the actual
176 memory reference used to store (i.e., the LHS of the assignment
177 doing the store). */
178 static ccp_prop_value_t *const_val;
179 static unsigned n_const_val;
180
181 static void canonicalize_value (ccp_prop_value_t *);
182 static bool ccp_fold_stmt (gimple_stmt_iterator *);
183 static void ccp_lattice_meet (ccp_prop_value_t *, ccp_prop_value_t *);
184
185 /* Dump constant propagation value VAL to file OUTF prefixed by PREFIX. */
186
187 static void
188 dump_lattice_value (FILE *outf, const char *prefix, ccp_prop_value_t val)
189 {
190 switch (val.lattice_val)
191 {
192 case UNINITIALIZED:
193 fprintf (outf, "%sUNINITIALIZED", prefix);
194 break;
195 case UNDEFINED:
196 fprintf (outf, "%sUNDEFINED", prefix);
197 break;
198 case VARYING:
199 fprintf (outf, "%sVARYING", prefix);
200 break;
201 case CONSTANT:
202 if (TREE_CODE (val.value) != INTEGER_CST
203 || val.mask == 0)
204 {
205 fprintf (outf, "%sCONSTANT ", prefix);
206 print_generic_expr (outf, val.value, dump_flags);
207 }
208 else
209 {
210 widest_int cval = wi::bit_and_not (wi::to_widest (val.value),
211 val.mask);
212 fprintf (outf, "%sCONSTANT ", prefix);
213 print_hex (cval, outf);
214 fprintf (outf, " (");
215 print_hex (val.mask, outf);
216 fprintf (outf, ")");
217 }
218 break;
219 default:
220 gcc_unreachable ();
221 }
222 }
223
224
225 /* Print lattice value VAL to stderr. */
226
227 void debug_lattice_value (ccp_prop_value_t val);
228
229 DEBUG_FUNCTION void
230 debug_lattice_value (ccp_prop_value_t val)
231 {
232 dump_lattice_value (stderr, "", val);
233 fprintf (stderr, "\n");
234 }
235
236 /* Extend NONZERO_BITS to a full mask, based on SGN. */
237
238 static widest_int
239 extend_mask (const wide_int &nonzero_bits, signop sgn)
240 {
241 return widest_int::from (nonzero_bits, sgn);
242 }
243
244 /* Compute a default value for variable VAR and store it in the
245 CONST_VAL array. The following rules are used to get default
246 values:
247
248 1- Global and static variables that are declared constant are
249 considered CONSTANT.
250
251 2- Any other value is considered UNDEFINED. This is useful when
252 considering PHI nodes. PHI arguments that are undefined do not
253 change the constant value of the PHI node, which allows for more
254 constants to be propagated.
255
256 3- Variables defined by statements other than assignments and PHI
257 nodes are considered VARYING.
258
259 4- Initial values of variables that are not GIMPLE registers are
260 considered VARYING. */
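/* For example (an illustrative sketch, not an exhaustive list): a use
   of an uninitialized local "int i" reaches CCP as the default
   definition i_1(D) and starts out UNDEFINED, while a load from a
   declaration such as "static const int c = 3" starts out CONSTANT 3
   via get_symbol_constant_value.  */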
261
262 static ccp_prop_value_t
263 get_default_value (tree var)
264 {
265 ccp_prop_value_t val = { UNINITIALIZED, NULL_TREE, 0 };
266 gimple *stmt;
267
268 stmt = SSA_NAME_DEF_STMT (var);
269
270 if (gimple_nop_p (stmt))
271 {
272 /* Variables defined by an empty statement are those used
273 before being initialized. If VAR is a local variable, we
274 can assume initially that it is UNDEFINED, otherwise we must
275 consider it VARYING. */
276 if (!virtual_operand_p (var)
277 && SSA_NAME_VAR (var)
278 && TREE_CODE (SSA_NAME_VAR (var)) == VAR_DECL)
279 val.lattice_val = UNDEFINED;
280 else
281 {
282 val.lattice_val = VARYING;
283 val.mask = -1;
284 if (flag_tree_bit_ccp)
285 {
286 wide_int nonzero_bits = get_nonzero_bits (var);
287 if (nonzero_bits != -1)
288 {
289 val.lattice_val = CONSTANT;
290 val.value = build_zero_cst (TREE_TYPE (var));
291 val.mask = extend_mask (nonzero_bits, TYPE_SIGN (TREE_TYPE (var)));
292 }
293 }
294 }
295 }
296 else if (is_gimple_assign (stmt))
297 {
298 tree cst;
299 if (gimple_assign_single_p (stmt)
300 && DECL_P (gimple_assign_rhs1 (stmt))
301 && (cst = get_symbol_constant_value (gimple_assign_rhs1 (stmt))))
302 {
303 val.lattice_val = CONSTANT;
304 val.value = cst;
305 }
306 else
307 {
308 /* Any other variable defined by an assignment is considered
309 UNDEFINED. */
310 val.lattice_val = UNDEFINED;
311 }
312 }
313 else if ((is_gimple_call (stmt)
314 && gimple_call_lhs (stmt) != NULL_TREE)
315 || gimple_code (stmt) == GIMPLE_PHI)
316 {
317 /* A variable defined by a call or a PHI node is considered
318 UNDEFINED. */
319 val.lattice_val = UNDEFINED;
320 }
321 else
322 {
323 /* Otherwise, VAR will never take on a constant value. */
324 val.lattice_val = VARYING;
325 val.mask = -1;
326 }
327
328 return val;
329 }
330
331
332 /* Get the constant value associated with variable VAR. */
333
334 static inline ccp_prop_value_t *
335 get_value (tree var)
336 {
337 ccp_prop_value_t *val;
338
339 if (const_val == NULL
340 || SSA_NAME_VERSION (var) >= n_const_val)
341 return NULL;
342
343 val = &const_val[SSA_NAME_VERSION (var)];
344 if (val->lattice_val == UNINITIALIZED)
345 *val = get_default_value (var);
346
347 canonicalize_value (val);
348
349 return val;
350 }
351
352 /* Return the constant tree value associated with VAR. */
353
354 static inline tree
355 get_constant_value (tree var)
356 {
357 ccp_prop_value_t *val;
358 if (TREE_CODE (var) != SSA_NAME)
359 {
360 if (is_gimple_min_invariant (var))
361 return var;
362 return NULL_TREE;
363 }
364 val = get_value (var);
365 if (val
366 && val->lattice_val == CONSTANT
367 && (TREE_CODE (val->value) != INTEGER_CST
368 || val->mask == 0))
369 return val->value;
370 return NULL_TREE;
371 }
372
373 /* Sets the value associated with VAR to VARYING. */
374
375 static inline void
376 set_value_varying (tree var)
377 {
378 ccp_prop_value_t *val = &const_val[SSA_NAME_VERSION (var)];
379
380 val->lattice_val = VARYING;
381 val->value = NULL_TREE;
382 val->mask = -1;
383 }
384
385 /* For integer constants, make sure to drop TREE_OVERFLOW. */
386
387 static void
388 canonicalize_value (ccp_prop_value_t *val)
389 {
390 if (val->lattice_val != CONSTANT)
391 return;
392
393 if (TREE_OVERFLOW_P (val->value))
394 val->value = drop_tree_overflow (val->value);
395 }
396
397 /* Return whether the lattice transition is valid. */
398
399 static bool
400 valid_lattice_transition (ccp_prop_value_t old_val, ccp_prop_value_t new_val)
401 {
402 /* Lattice transitions must always be monotonically increasing in
403 value. */
404 if (old_val.lattice_val < new_val.lattice_val)
405 return true;
406
407 if (old_val.lattice_val != new_val.lattice_val)
408 return false;
409
410 if (!old_val.value && !new_val.value)
411 return true;
412
413 /* Now both lattice values are CONSTANT. */
414
415 /* Allow arbitrary copy changes as we might look through PHI <a_1, ...>
416 when only a single copy edge is executable. */
417 if (TREE_CODE (old_val.value) == SSA_NAME
418 && TREE_CODE (new_val.value) == SSA_NAME)
419 return true;
420
421 /* Allow transitioning from a constant to a copy. */
422 if (is_gimple_min_invariant (old_val.value)
423 && TREE_CODE (new_val.value) == SSA_NAME)
424 return true;
425
426 /* Allow transitioning from PHI <&x, not executable> == &x
427 to PHI <&x, &y> == common alignment. */
428 if (TREE_CODE (old_val.value) != INTEGER_CST
429 && TREE_CODE (new_val.value) == INTEGER_CST)
430 return true;
431
432 /* Bit-lattices have to agree in the still valid bits. */
433 if (TREE_CODE (old_val.value) == INTEGER_CST
434 && TREE_CODE (new_val.value) == INTEGER_CST)
435 return (wi::bit_and_not (wi::to_widest (old_val.value), new_val.mask)
436 == wi::bit_and_not (wi::to_widest (new_val.value), new_val.mask));
437
438 /* Otherwise constant values have to agree. */
439 if (operand_equal_p (old_val.value, new_val.value, 0))
440 return true;
441
442 /* At least the kinds and types should agree now. */
443 if (TREE_CODE (old_val.value) != TREE_CODE (new_val.value)
444 || !types_compatible_p (TREE_TYPE (old_val.value),
445 TREE_TYPE (new_val.value)))
446 return false;
447
448 /* For floats and !HONOR_NANS allow transitions from (partial) NaN
449 to non-NaN. */
450 tree type = TREE_TYPE (new_val.value);
451 if (SCALAR_FLOAT_TYPE_P (type)
452 && !HONOR_NANS (type))
453 {
454 if (REAL_VALUE_ISNAN (TREE_REAL_CST (old_val.value)))
455 return true;
456 }
457 else if (VECTOR_FLOAT_TYPE_P (type)
458 && !HONOR_NANS (type))
459 {
460 for (unsigned i = 0; i < VECTOR_CST_NELTS (old_val.value); ++i)
461 if (!REAL_VALUE_ISNAN
462 (TREE_REAL_CST (VECTOR_CST_ELT (old_val.value, i)))
463 && !operand_equal_p (VECTOR_CST_ELT (old_val.value, i),
464 VECTOR_CST_ELT (new_val.value, i), 0))
465 return false;
466 return true;
467 }
468 else if (COMPLEX_FLOAT_TYPE_P (type)
469 && !HONOR_NANS (type))
470 {
471 if (!REAL_VALUE_ISNAN (TREE_REAL_CST (TREE_REALPART (old_val.value)))
472 && !operand_equal_p (TREE_REALPART (old_val.value),
473 TREE_REALPART (new_val.value), 0))
474 return false;
475 if (!REAL_VALUE_ISNAN (TREE_REAL_CST (TREE_IMAGPART (old_val.value)))
476 && !operand_equal_p (TREE_IMAGPART (old_val.value),
477 TREE_IMAGPART (new_val.value), 0))
478 return false;
479 return true;
480 }
481 return false;
482 }
483
484 /* Set the value for variable VAR to NEW_VAL. Return true if the new
485 value is different from VAR's previous value. */
486
487 static bool
488 set_lattice_value (tree var, ccp_prop_value_t *new_val)
489 {
490 /* We can deal with old UNINITIALIZED values just fine here. */
491 ccp_prop_value_t *old_val = &const_val[SSA_NAME_VERSION (var)];
492
493 canonicalize_value (new_val);
494
495 /* We have to be careful to not go up the bitwise lattice
496 represented by the mask. Instead of dropping to VARYING
497 use the meet operator to retain a conservative value.
498 Missed optimizations like PR65851 make this necessary.
499 It also ensures we converge to a stable lattice solution. */
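  /* A worked sketch (values made up): if the old value is CONSTANT 4
     with mask 2 (i.e. 4 or 6) and the new value is CONSTANT 5 with
     mask 0, adopting the new value directly would contradict the old
     value on bit 0 and trip the transition check below.  Meeting first
     yields mask 0 | 2 | (5 ^ 4) == 3 with value 5, a conservative
     value for which the transition from the old value is valid.  */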
500 if (new_val->lattice_val == CONSTANT
501 && old_val->lattice_val == CONSTANT
502 && TREE_CODE (new_val->value) != SSA_NAME)
503 ccp_lattice_meet (new_val, old_val);
504
505 gcc_checking_assert (valid_lattice_transition (*old_val, *new_val));
506
507 /* If *OLD_VAL and NEW_VAL are the same, return false to inform the
508 caller that this was a non-transition. */
509 if (old_val->lattice_val != new_val->lattice_val
510 || (new_val->lattice_val == CONSTANT
511 && (TREE_CODE (new_val->value) != TREE_CODE (old_val->value)
512 || (TREE_CODE (new_val->value) == INTEGER_CST
513 && (new_val->mask != old_val->mask
514 || (wi::bit_and_not (wi::to_widest (old_val->value),
515 new_val->mask)
516 != wi::bit_and_not (wi::to_widest (new_val->value),
517 new_val->mask))))
518 || (TREE_CODE (new_val->value) != INTEGER_CST
519 && !operand_equal_p (new_val->value, old_val->value, 0)))))
520 {
521 /* ??? We would like to delay creation of INTEGER_CSTs from
522 partially constant values here. */
523
524 if (dump_file && (dump_flags & TDF_DETAILS))
525 {
526 dump_lattice_value (dump_file, "Lattice value changed to ", *new_val);
527 fprintf (dump_file, ". Adding SSA edges to worklist.\n");
528 }
529
530 *old_val = *new_val;
531
532 gcc_assert (new_val->lattice_val != UNINITIALIZED);
533 return true;
534 }
535
536 return false;
537 }
538
539 static ccp_prop_value_t get_value_for_expr (tree, bool);
540 static ccp_prop_value_t bit_value_binop (enum tree_code, tree, tree, tree);
541 void bit_value_binop (enum tree_code, signop, int, widest_int *, widest_int *,
542 signop, int, const widest_int &, const widest_int &,
543 signop, int, const widest_int &, const widest_int &);
544
545 /* Return a widest_int that can be used for bitwise simplifications
546 from VAL. */
547
548 static widest_int
549 value_to_wide_int (ccp_prop_value_t val)
550 {
551 if (val.value
552 && TREE_CODE (val.value) == INTEGER_CST)
553 return wi::to_widest (val.value);
554
555 return 0;
556 }
557
558 /* Return the value for the address expression EXPR based on alignment
559 information. */
560
561 static ccp_prop_value_t
562 get_value_from_alignment (tree expr)
563 {
564 tree type = TREE_TYPE (expr);
565 ccp_prop_value_t val;
566 unsigned HOST_WIDE_INT bitpos;
567 unsigned int align;
568
569 gcc_assert (TREE_CODE (expr) == ADDR_EXPR);
570
571 get_pointer_alignment_1 (expr, &align, &bitpos);
572 val.mask = (POINTER_TYPE_P (type) || TYPE_UNSIGNED (type)
573 ? wi::mask <widest_int> (TYPE_PRECISION (type), false)
574 : -1).and_not (align / BITS_PER_UNIT - 1);
575 val.lattice_val
576 = wi::sext (val.mask, TYPE_PRECISION (type)) == -1 ? VARYING : CONSTANT;
577 if (val.lattice_val == CONSTANT)
578 val.value = build_int_cstu (type, bitpos / BITS_PER_UNIT);
579 else
580 val.value = NULL_TREE;
581
582 return val;
583 }
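/* A worked example of the above (assuming BITS_PER_UNIT == 8): for an
   ADDR_EXPR known to be 16-byte aligned at a byte offset of 4,
   get_pointer_alignment_1 reports align == 128 and bitpos == 32, so the
   mask gets its low four bits cleared (those bits are known) and the
   value becomes 4, i.e. the address is known to be congruent to 4
   modulo 16.  */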
584
585 /* Return the value for the tree operand EXPR. If FOR_BITS_P is true
586 return constant bits extracted from alignment information for
587 invariant addresses. */
588
589 static ccp_prop_value_t
590 get_value_for_expr (tree expr, bool for_bits_p)
591 {
592 ccp_prop_value_t val;
593
594 if (TREE_CODE (expr) == SSA_NAME)
595 {
596 ccp_prop_value_t *val_ = get_value (expr);
597 if (val_)
598 val = *val_;
599 else
600 {
601 val.lattice_val = VARYING;
602 val.value = NULL_TREE;
603 val.mask = -1;
604 }
605 if (for_bits_p
606 && val.lattice_val == CONSTANT
607 && TREE_CODE (val.value) == ADDR_EXPR)
608 val = get_value_from_alignment (val.value);
609 /* Fall back to a copy value. */
610 if (!for_bits_p
611 && val.lattice_val == VARYING
612 && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (expr))
613 {
614 val.lattice_val = CONSTANT;
615 val.value = expr;
616 val.mask = -1;
617 }
618 }
619 else if (is_gimple_min_invariant (expr)
620 && (!for_bits_p || TREE_CODE (expr) != ADDR_EXPR))
621 {
622 val.lattice_val = CONSTANT;
623 val.value = expr;
624 val.mask = 0;
625 canonicalize_value (&val);
626 }
627 else if (TREE_CODE (expr) == ADDR_EXPR)
628 val = get_value_from_alignment (expr);
629 else
630 {
631 val.lattice_val = VARYING;
632 val.mask = -1;
633 val.value = NULL_TREE;
634 }
635
636 if (val.lattice_val == VARYING
637 && TYPE_UNSIGNED (TREE_TYPE (expr)))
638 val.mask = wi::zext (val.mask, TYPE_PRECISION (TREE_TYPE (expr)));
639
640 return val;
641 }
642
643 /* Return the likely CCP lattice value for STMT.
644
645 If STMT has no operands, then return CONSTANT.
646
647 Else if undefinedness of operands of STMT causes its value to be
648 undefined, then return UNDEFINED.
649
650 Else if any operands of STMT are constants, then return CONSTANT.
651
652 Else return VARYING. */
653
654 static ccp_lattice_t
655 likely_value (gimple *stmt)
656 {
657 bool has_constant_operand, has_undefined_operand, all_undefined_operands;
658 bool has_nsa_operand;
659 tree use;
660 ssa_op_iter iter;
661 unsigned i;
662
663 enum gimple_code code = gimple_code (stmt);
664
665 /* This function appears to be called only for assignments, calls,
666 conditionals, and switches, due to the logic in visit_stmt. */
667 gcc_assert (code == GIMPLE_ASSIGN
668 || code == GIMPLE_CALL
669 || code == GIMPLE_COND
670 || code == GIMPLE_SWITCH);
671
672 /* If the statement has volatile operands, it won't fold to a
673 constant value. */
674 if (gimple_has_volatile_ops (stmt))
675 return VARYING;
676
677 /* Arrive here for more complex cases. */
678 has_constant_operand = false;
679 has_undefined_operand = false;
680 all_undefined_operands = true;
681 has_nsa_operand = false;
682 FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
683 {
684 ccp_prop_value_t *val = get_value (use);
685
686 if (val && val->lattice_val == UNDEFINED)
687 has_undefined_operand = true;
688 else
689 all_undefined_operands = false;
690
691 if (val && val->lattice_val == CONSTANT)
692 has_constant_operand = true;
693
694 if (SSA_NAME_IS_DEFAULT_DEF (use)
695 || !prop_simulate_again_p (SSA_NAME_DEF_STMT (use)))
696 has_nsa_operand = true;
697 }
698
699 /* There may be constants in regular rhs operands. For calls we
700 have to ignore the lhs, fndecl and static chain; otherwise only
701 the lhs. */
702 for (i = (is_gimple_call (stmt) ? 2 : 0) + gimple_has_lhs (stmt);
703 i < gimple_num_ops (stmt); ++i)
704 {
705 tree op = gimple_op (stmt, i);
706 if (!op || TREE_CODE (op) == SSA_NAME)
707 continue;
708 if (is_gimple_min_invariant (op))
709 has_constant_operand = true;
710 }
711
712 if (has_constant_operand)
713 all_undefined_operands = false;
714
715 if (has_undefined_operand
716 && code == GIMPLE_CALL
717 && gimple_call_internal_p (stmt))
718 switch (gimple_call_internal_fn (stmt))
719 {
720 /* These 3 builtins use the first argument just as a magic
721 way to find out a decl uid. */
722 case IFN_GOMP_SIMD_LANE:
723 case IFN_GOMP_SIMD_VF:
724 case IFN_GOMP_SIMD_LAST_LANE:
725 has_undefined_operand = false;
726 break;
727 default:
728 break;
729 }
730
731 /* If the operation combines operands like COMPLEX_EXPR make sure to
732 not mark the result UNDEFINED if only one part of the result is
733 undefined. */
734 if (has_undefined_operand && all_undefined_operands)
735 return UNDEFINED;
736 else if (code == GIMPLE_ASSIGN && has_undefined_operand)
737 {
738 switch (gimple_assign_rhs_code (stmt))
739 {
740 /* Unary operators are handled with all_undefined_operands. */
741 case PLUS_EXPR:
742 case MINUS_EXPR:
743 case POINTER_PLUS_EXPR:
744 case BIT_XOR_EXPR:
745 /* Not MIN_EXPR, MAX_EXPR. One VARYING operand may be selected.
746 Not bitwise operators, one VARYING operand may specify the
747 result completely.
748 Not logical operators for the same reason, apart from XOR.
749 Not COMPLEX_EXPR as one VARYING operand makes the result partly
750 not UNDEFINED. Not *DIV_EXPR, comparisons and shifts because
751 the undefined operand may be promoted. */
752 return UNDEFINED;
753
754 case ADDR_EXPR:
755 /* If any part of an address is UNDEFINED, like the index
756 of an ARRAY_EXPR, then treat the result as UNDEFINED. */
757 return UNDEFINED;
758
759 default:
760 ;
761 }
762 }
763 /* If there was an UNDEFINED operand but the result may not be UNDEFINED,
764 fall back to CONSTANT. During iteration UNDEFINED may still drop
765 to CONSTANT. */
766 if (has_undefined_operand)
767 return CONSTANT;
768
769 /* We do not consider virtual operands here -- a load from read-only
770 memory may have only VARYING virtual operands, but still be
771 constant. Also we can combine the stmt with definitions from
772 operands whose definitions are not simulated again. */
773 if (has_constant_operand
774 || has_nsa_operand
775 || gimple_references_memory_p (stmt))
776 return CONSTANT;
777
778 return VARYING;
779 }
780
781 /* Returns true if STMT cannot be constant. */
782
783 static bool
784 surely_varying_stmt_p (gimple *stmt)
785 {
786 /* If the statement has operands that we cannot handle, it cannot be
787 constant. */
788 if (gimple_has_volatile_ops (stmt))
789 return true;
790
791 /* If it is a call that does not return a value, or a direct call
792 to a function that is neither a builtin nor declared with the
793 assume_aligned/alloc_align attribute, it is varying. */
794 if (is_gimple_call (stmt))
795 {
796 tree fndecl, fntype = gimple_call_fntype (stmt);
797 if (!gimple_call_lhs (stmt)
798 || ((fndecl = gimple_call_fndecl (stmt)) != NULL_TREE
799 && !DECL_BUILT_IN (fndecl)
800 && !lookup_attribute ("assume_aligned",
801 TYPE_ATTRIBUTES (fntype))
802 && !lookup_attribute ("alloc_align",
803 TYPE_ATTRIBUTES (fntype))))
804 return true;
805 }
806
807 /* Any other store operation is not interesting. */
808 else if (gimple_vdef (stmt))
809 return true;
810
811 /* Anything other than assignments and conditional jumps is not
812 interesting for CCP. */
813 if (gimple_code (stmt) != GIMPLE_ASSIGN
814 && gimple_code (stmt) != GIMPLE_COND
815 && gimple_code (stmt) != GIMPLE_SWITCH
816 && gimple_code (stmt) != GIMPLE_CALL)
817 return true;
818
819 return false;
820 }
821
822 /* Initialize local data structures for CCP. */
823
824 static void
825 ccp_initialize (void)
826 {
827 basic_block bb;
828
829 n_const_val = num_ssa_names;
830 const_val = XCNEWVEC (ccp_prop_value_t, n_const_val);
831
832 /* Initialize simulation flags for PHI nodes and statements. */
833 FOR_EACH_BB_FN (bb, cfun)
834 {
835 gimple_stmt_iterator i;
836
837 for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i))
838 {
839 gimple *stmt = gsi_stmt (i);
840 bool is_varying;
841
842 /* If the statement is a control insn, then we always want
843 to simulate it at least once. Failing to do so means
844 that its outgoing edges will never get added. */
845 if (stmt_ends_bb_p (stmt))
846 is_varying = false;
847 else
848 is_varying = surely_varying_stmt_p (stmt);
849
850 if (is_varying)
851 {
852 tree def;
853 ssa_op_iter iter;
854
855 /* If the statement will not produce a constant, mark
856 all its outputs VARYING. */
857 FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
858 set_value_varying (def);
859 }
860 prop_set_simulate_again (stmt, !is_varying);
861 }
862 }
863
864 /* Now process PHI nodes. We never clear the simulate_again flag on
865 phi nodes, since we do not know which edges are executable yet,
866 except for phi nodes for virtual operands when we do not do store ccp. */
867 FOR_EACH_BB_FN (bb, cfun)
868 {
869 gphi_iterator i;
870
871 for (i = gsi_start_phis (bb); !gsi_end_p (i); gsi_next (&i))
872 {
873 gphi *phi = i.phi ();
874
875 if (virtual_operand_p (gimple_phi_result (phi)))
876 prop_set_simulate_again (phi, false);
877 else
878 prop_set_simulate_again (phi, true);
879 }
880 }
881 }
882
883 /* Debug count support. Reset the values of ssa names to
884 VARYING when the total number of ssa names analyzed is
885 beyond the specified debug count. */
886
887 static void
888 do_dbg_cnt (void)
889 {
890 unsigned i;
891 for (i = 0; i < num_ssa_names; i++)
892 {
893 if (!dbg_cnt (ccp))
894 {
895 const_val[i].lattice_val = VARYING;
896 const_val[i].mask = -1;
897 const_val[i].value = NULL_TREE;
898 }
899 }
900 }
901
902
903 /* Do final substitution of propagated values, clean up the flowgraph and
904 free allocated storage. If NONZERO_P, record nonzero bits.
905
906 Return TRUE when something was optimized. */
907
908 static bool
909 ccp_finalize (bool nonzero_p)
910 {
911 bool something_changed;
912 unsigned i;
913 tree name;
914
915 do_dbg_cnt ();
916
917 /* Derive alignment and misalignment information from partially
918 constant pointers in the lattice or nonzero bits from partially
919 constant integers. */
920 FOR_EACH_SSA_NAME (i, name, cfun)
921 {
922 ccp_prop_value_t *val;
923 unsigned int tem, align;
924
925 if (!POINTER_TYPE_P (TREE_TYPE (name))
926 && (!INTEGRAL_TYPE_P (TREE_TYPE (name))
927 /* Don't record nonzero bits before IPA to avoid
928 using too much memory. */
929 || !nonzero_p))
930 continue;
931
932 val = get_value (name);
933 if (val->lattice_val != CONSTANT
934 || TREE_CODE (val->value) != INTEGER_CST
935 || val->mask == 0)
936 continue;
937
938 if (POINTER_TYPE_P (TREE_TYPE (name)))
939 {
940 /* Trailing mask bits specify the alignment, trailing value
941 bits the misalignment. */
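	  /* E.g. (illustrative): a mask of ~7 with value 4 leaves the low
	     three bits known as 100, so the pointer is recorded as 8-byte
	     aligned with misalignment 4.  */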
942 tem = val->mask.to_uhwi ();
943 align = least_bit_hwi (tem);
944 if (align > 1)
945 set_ptr_info_alignment (get_ptr_info (name), align,
946 (TREE_INT_CST_LOW (val->value)
947 & (align - 1)));
948 }
949 else
950 {
951 unsigned int precision = TYPE_PRECISION (TREE_TYPE (val->value));
952 wide_int nonzero_bits = wide_int::from (val->mask, precision,
953 UNSIGNED) | val->value;
954 nonzero_bits &= get_nonzero_bits (name);
955 set_nonzero_bits (name, nonzero_bits);
956 }
957 }
958
959 /* Perform substitutions based on the known constant values. */
960 something_changed = substitute_and_fold (get_constant_value, ccp_fold_stmt);
961
962 free (const_val);
963 const_val = NULL;
964 return something_changed;
965 }
966
967
968 /* Compute the meet operator between *VAL1 and *VAL2. Store the result
969 in VAL1.
970
971 any M UNDEFINED = any
972 any M VARYING = VARYING
973 Ci M Cj = Ci if (i == j)
974 Ci M Cj = VARYING if (i != j)
975 */
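/* As a worked illustration of the INTEGER_CST case handled below
   (values made up): meeting CONSTANT 4 (mask 0) with CONSTANT 6
   (mask 0) sets the mask to 0 | 0 | (4 ^ 6) == 2, so the result stays
   CONSTANT with value 4 and mask 2, standing for "either 4 or 6".  */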
976
977 static void
978 ccp_lattice_meet (ccp_prop_value_t *val1, ccp_prop_value_t *val2)
979 {
980 if (val1->lattice_val == UNDEFINED
981 /* For UNDEFINED M SSA we can't always take the SSA name because its definition
982 may not dominate the PHI node. Doing optimistic copy propagation
983 also causes a lot of gcc.dg/uninit-pred*.c FAILs. */
984 && (val2->lattice_val != CONSTANT
985 || TREE_CODE (val2->value) != SSA_NAME))
986 {
987 /* UNDEFINED M any = any */
988 *val1 = *val2;
989 }
990 else if (val2->lattice_val == UNDEFINED
991 /* See above. */
992 && (val1->lattice_val != CONSTANT
993 || TREE_CODE (val1->value) != SSA_NAME))
994 {
995 /* any M UNDEFINED = any
996 Nothing to do. VAL1 already contains the value we want. */
997 ;
998 }
999 else if (val1->lattice_val == VARYING
1000 || val2->lattice_val == VARYING)
1001 {
1002 /* any M VARYING = VARYING. */
1003 val1->lattice_val = VARYING;
1004 val1->mask = -1;
1005 val1->value = NULL_TREE;
1006 }
1007 else if (val1->lattice_val == CONSTANT
1008 && val2->lattice_val == CONSTANT
1009 && TREE_CODE (val1->value) == INTEGER_CST
1010 && TREE_CODE (val2->value) == INTEGER_CST)
1011 {
1012 /* Ci M Cj = Ci if (i == j)
1013 Ci M Cj = VARYING if (i != j)
1014
1015 For INTEGER_CSTs mask unequal bits. If no equal bits remain,
1016 drop to varying. */
1017 val1->mask = (val1->mask | val2->mask
1018 | (wi::to_widest (val1->value)
1019 ^ wi::to_widest (val2->value)));
1020 if (wi::sext (val1->mask, TYPE_PRECISION (TREE_TYPE (val1->value))) == -1)
1021 {
1022 val1->lattice_val = VARYING;
1023 val1->value = NULL_TREE;
1024 }
1025 }
1026 else if (val1->lattice_val == CONSTANT
1027 && val2->lattice_val == CONSTANT
1028 && operand_equal_p (val1->value, val2->value, 0))
1029 {
1030 /* Ci M Cj = Ci if (i == j)
1031 Ci M Cj = VARYING if (i != j)
1032
1033 VAL1 already contains the value we want for equivalent values. */
1034 }
1035 else if (val1->lattice_val == CONSTANT
1036 && val2->lattice_val == CONSTANT
1037 && (TREE_CODE (val1->value) == ADDR_EXPR
1038 || TREE_CODE (val2->value) == ADDR_EXPR))
1039 {
1040 /* When unequal addresses are involved, try meeting for
1041 alignment. */
1042 ccp_prop_value_t tem = *val2;
1043 if (TREE_CODE (val1->value) == ADDR_EXPR)
1044 *val1 = get_value_for_expr (val1->value, true);
1045 if (TREE_CODE (val2->value) == ADDR_EXPR)
1046 tem = get_value_for_expr (val2->value, true);
1047 ccp_lattice_meet (val1, &tem);
1048 }
1049 else
1050 {
1051 /* Any other combination is VARYING. */
1052 val1->lattice_val = VARYING;
1053 val1->mask = -1;
1054 val1->value = NULL_TREE;
1055 }
1056 }
1057
1058
1059 /* Loop through the PHI_NODE's parameters for BLOCK and compare their
1060 lattice values to determine PHI_NODE's lattice value. The value of a
1061 PHI node is determined by calling ccp_lattice_meet with all the arguments
1062 of the PHI node that are incoming via executable edges. */
1063
1064 static enum ssa_prop_result
1065 ccp_visit_phi_node (gphi *phi)
1066 {
1067 unsigned i;
1068 ccp_prop_value_t new_val;
1069
1070 if (dump_file && (dump_flags & TDF_DETAILS))
1071 {
1072 fprintf (dump_file, "\nVisiting PHI node: ");
1073 print_gimple_stmt (dump_file, phi, 0, dump_flags);
1074 }
1075
1076 new_val.lattice_val = UNDEFINED;
1077 new_val.value = NULL_TREE;
1078 new_val.mask = 0;
1079
1080 bool first = true;
1081 bool non_exec_edge = false;
1082 for (i = 0; i < gimple_phi_num_args (phi); i++)
1083 {
1084 /* Compute the meet operator over all the PHI arguments flowing
1085 through executable edges. */
1086 edge e = gimple_phi_arg_edge (phi, i);
1087
1088 if (dump_file && (dump_flags & TDF_DETAILS))
1089 {
1090 fprintf (dump_file,
1091 "\n Argument #%d (%d -> %d %sexecutable)\n",
1092 i, e->src->index, e->dest->index,
1093 (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
1094 }
1095
1096 /* If the incoming edge is executable, compute the meet operator for
1097 the existing value of the PHI node and the current PHI argument. */
1098 if (e->flags & EDGE_EXECUTABLE)
1099 {
1100 tree arg = gimple_phi_arg (phi, i)->def;
1101 ccp_prop_value_t arg_val = get_value_for_expr (arg, false);
1102
1103 if (first)
1104 {
1105 new_val = arg_val;
1106 first = false;
1107 }
1108 else
1109 ccp_lattice_meet (&new_val, &arg_val);
1110
1111 if (dump_file && (dump_flags & TDF_DETAILS))
1112 {
1113 fprintf (dump_file, "\t");
1114 print_generic_expr (dump_file, arg, dump_flags);
1115 dump_lattice_value (dump_file, "\tValue: ", arg_val);
1116 fprintf (dump_file, "\n");
1117 }
1118
1119 if (new_val.lattice_val == VARYING)
1120 break;
1121 }
1122 else
1123 non_exec_edge = true;
1124 }
1125
1126 /* In case there were non-executable edges and the value is a copy,
1127 make sure its definition dominates the PHI node. */
1128 if (non_exec_edge
1129 && new_val.lattice_val == CONSTANT
1130 && TREE_CODE (new_val.value) == SSA_NAME
1131 && ! SSA_NAME_IS_DEFAULT_DEF (new_val.value)
1132 && ! dominated_by_p (CDI_DOMINATORS, gimple_bb (phi),
1133 gimple_bb (SSA_NAME_DEF_STMT (new_val.value))))
1134 {
1135 new_val.lattice_val = VARYING;
1136 new_val.value = NULL_TREE;
1137 new_val.mask = -1;
1138 }
1139
1140 if (dump_file && (dump_flags & TDF_DETAILS))
1141 {
1142 dump_lattice_value (dump_file, "\n PHI node value: ", new_val);
1143 fprintf (dump_file, "\n\n");
1144 }
1145
1146 /* Make the transition to the new value. */
1147 if (set_lattice_value (gimple_phi_result (phi), &new_val))
1148 {
1149 if (new_val.lattice_val == VARYING)
1150 return SSA_PROP_VARYING;
1151 else
1152 return SSA_PROP_INTERESTING;
1153 }
1154 else
1155 return SSA_PROP_NOT_INTERESTING;
1156 }
1157
1158 /* Return the constant value for OP, or OP itself otherwise. */
1159
1160 static tree
1161 valueize_op (tree op)
1162 {
1163 if (TREE_CODE (op) == SSA_NAME)
1164 {
1165 tree tem = get_constant_value (op);
1166 if (tem)
1167 return tem;
1168 }
1169 return op;
1170 }
1171
1172 /* Return the constant value for OP, but signal to not follow SSA
1173 edges if the definition may be simulated again. */
1174
1175 static tree
1176 valueize_op_1 (tree op)
1177 {
1178 if (TREE_CODE (op) == SSA_NAME)
1179 {
1180 /* If the definition may be simulated again we cannot follow
1181 this SSA edge as the SSA propagator does not necessarily
1182 re-visit the use. */
1183 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
1184 if (!gimple_nop_p (def_stmt)
1185 && prop_simulate_again_p (def_stmt))
1186 return NULL_TREE;
1187 tree tem = get_constant_value (op);
1188 if (tem)
1189 return tem;
1190 }
1191 return op;
1192 }
1193
1194 /* CCP specific front-end to the non-destructive constant folding
1195 routines.
1196
1197 Attempt to simplify the RHS of STMT knowing that one or more
1198 operands are constants.
1199
1200 If simplification is possible, return the simplified RHS,
1201 otherwise return the original RHS or NULL_TREE. */
1202
1203 static tree
1204 ccp_fold (gimple *stmt)
1205 {
1206 location_t loc = gimple_location (stmt);
1207 switch (gimple_code (stmt))
1208 {
1209 case GIMPLE_COND:
1210 {
1211 /* Handle comparison operators that can appear in GIMPLE form. */
1212 tree op0 = valueize_op (gimple_cond_lhs (stmt));
1213 tree op1 = valueize_op (gimple_cond_rhs (stmt));
1214 enum tree_code code = gimple_cond_code (stmt);
1215 return fold_binary_loc (loc, code, boolean_type_node, op0, op1);
1216 }
1217
1218 case GIMPLE_SWITCH:
1219 {
1220 /* Return the constant switch index. */
1221 return valueize_op (gimple_switch_index (as_a <gswitch *> (stmt)));
1222 }
1223
1224 case GIMPLE_ASSIGN:
1225 case GIMPLE_CALL:
1226 return gimple_fold_stmt_to_constant_1 (stmt,
1227 valueize_op, valueize_op_1);
1228
1229 default:
1230 gcc_unreachable ();
1231 }
1232 }
1233
1234 /* Apply the operation CODE in type TYPE to the value, mask pair
1235 RVAL and RMASK representing a value of type RTYPE and set
1236 the value, mask pair *VAL and *MASK to the result. */
1237
1238 void
1239 bit_value_unop (enum tree_code code, signop type_sgn, int type_precision,
1240 widest_int *val, widest_int *mask,
1241 signop rtype_sgn, int rtype_precision,
1242 const widest_int &rval, const widest_int &rmask)
1243 {
1244 switch (code)
1245 {
1246 case BIT_NOT_EXPR:
1247 *mask = rmask;
1248 *val = ~rval;
1249 break;
1250
1251 case NEGATE_EXPR:
1252 {
1253 widest_int temv, temm;
1254 /* Return ~rval + 1. */
1255 bit_value_unop (BIT_NOT_EXPR, type_sgn, type_precision, &temv, &temm,
1256 type_sgn, type_precision, rval, rmask);
1257 bit_value_binop (PLUS_EXPR, type_sgn, type_precision, val, mask,
1258 type_sgn, type_precision, temv, temm,
1259 type_sgn, type_precision, 1, 0);
1260 break;
1261 }
1262
1263 CASE_CONVERT:
1264 {
1265 /* First extend mask and value according to the original type. */
1266 *mask = wi::ext (rmask, rtype_precision, rtype_sgn);
1267 *val = wi::ext (rval, rtype_precision, rtype_sgn);
1268
1269 /* Then extend mask and value according to the target type. */
1270 *mask = wi::ext (*mask, type_precision, type_sgn);
1271 *val = wi::ext (*val, type_precision, type_sgn);
1272 break;
1273 }
1274
1275 default:
1276 *mask = -1;
1277 break;
1278 }
1279 }
1280
1281 /* Apply the operation CODE in type TYPE to the value, mask pairs
1282 R1VAL, R1MASK and R2VAL, R2MASK representing values of type R1TYPE
1283 and R2TYPE and set the value, mask pair *VAL and *MASK to the result. */
1284
1285 void
1286 bit_value_binop (enum tree_code code, signop sgn, int width,
1287 widest_int *val, widest_int *mask,
1288 signop r1type_sgn, int r1type_precision,
1289 const widest_int &r1val, const widest_int &r1mask,
1290 signop r2type_sgn, int r2type_precision,
1291 const widest_int &r2val, const widest_int &r2mask)
1292 {
1293 bool swap_p = false;
1294
1295 /* Assume we'll get a constant result. Use an initial non-varying
1296 value; we fall back to varying in the end if necessary. */
1297 *mask = -1;
1298
1299 switch (code)
1300 {
1301 case BIT_AND_EXPR:
1302 /* The mask is constant where there is a known not
1303 set bit, (m1 | m2) & ((v1 | m1) & (v2 | m2)) */
1304 *mask = (r1mask | r2mask) & (r1val | r1mask) & (r2val | r2mask);
1305 *val = r1val & r2val;
1306 break;
1307
1308 case BIT_IOR_EXPR:
1309 /* The mask is constant where there is a known
1310 set bit, (m1 | m2) & ~((v1 & ~m1) | (v2 & ~m2)). */
1311 *mask = (r1mask | r2mask)
1312 .and_not (r1val.and_not (r1mask) | r2val.and_not (r2mask));
1313 *val = r1val | r2val;
1314 break;
1315
1316 case BIT_XOR_EXPR:
1317 /* m1 | m2 */
1318 *mask = r1mask | r2mask;
1319 *val = r1val ^ r2val;
1320 break;
1321
1322 case LROTATE_EXPR:
1323 case RROTATE_EXPR:
1324 if (r2mask == 0)
1325 {
1326 widest_int shift = r2val;
1327 if (shift == 0)
1328 {
1329 *mask = r1mask;
1330 *val = r1val;
1331 }
1332 else
1333 {
1334 if (wi::neg_p (shift))
1335 {
1336 shift = -shift;
1337 if (code == RROTATE_EXPR)
1338 code = LROTATE_EXPR;
1339 else
1340 code = RROTATE_EXPR;
1341 }
1342 if (code == RROTATE_EXPR)
1343 {
1344 *mask = wi::rrotate (r1mask, shift, width);
1345 *val = wi::rrotate (r1val, shift, width);
1346 }
1347 else
1348 {
1349 *mask = wi::lrotate (r1mask, shift, width);
1350 *val = wi::lrotate (r1val, shift, width);
1351 }
1352 }
1353 }
1354 break;
1355
1356 case LSHIFT_EXPR:
1357 case RSHIFT_EXPR:
1358 /* ??? We can handle partially known shift counts if we know
1359 their sign. That way we can tell that (x << (y | 8)) & 255
1360 is zero. */
1361 if (r2mask == 0)
1362 {
1363 widest_int shift = r2val;
1364 if (shift == 0)
1365 {
1366 *mask = r1mask;
1367 *val = r1val;
1368 }
1369 else
1370 {
1371 if (wi::neg_p (shift))
1372 {
1373 shift = -shift;
1374 if (code == RSHIFT_EXPR)
1375 code = LSHIFT_EXPR;
1376 else
1377 code = RSHIFT_EXPR;
1378 }
1379 if (code == RSHIFT_EXPR)
1380 {
1381 *mask = wi::rshift (wi::ext (r1mask, width, sgn), shift, sgn);
1382 *val = wi::rshift (wi::ext (r1val, width, sgn), shift, sgn);
1383 }
1384 else
1385 {
1386 *mask = wi::ext (r1mask << shift, width, sgn);
1387 *val = wi::ext (r1val << shift, width, sgn);
1388 }
1389 }
1390 }
1391 break;
1392
1393 case PLUS_EXPR:
1394 case POINTER_PLUS_EXPR:
1395 {
1396 /* Do the addition with unknown bits set to zero, to give carry-ins of
1397 zero wherever possible. */
1398 widest_int lo = r1val.and_not (r1mask) + r2val.and_not (r2mask);
1399 lo = wi::ext (lo, width, sgn);
1400 /* Do the addition with unknown bits set to one, to give carry-ins of
1401 one wherever possible. */
1402 widest_int hi = (r1val | r1mask) + (r2val | r2mask);
1403 hi = wi::ext (hi, width, sgn);
1404 /* Each bit in the result is known if (a) the corresponding bits in
1405 both inputs are known, and (b) the carry-in to that bit position
1406 is known. We can check condition (b) by seeing if we got the same
1407 result with minimised carries as with maximised carries. */
1408 *mask = r1mask | r2mask | (lo ^ hi);
1409 *mask = wi::ext (*mask, width, sgn);
1410 /* It shouldn't matter whether we choose lo or hi here. */
1411 *val = lo;
1412 break;
1413 }
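      /* A worked example of the PLUS_EXPR handling above (illustrative):
	 r1 = {val 4, mask 3} (i.e. 4..7) plus r2 = {val 8, mask 0} gives
	 lo = 4 + 8 = 12 and hi = 7 + 8 = 15, so the result mask is
	 3 | 0 | (12 ^ 15) == 3 with val = 12, correctly describing the
	 range 12..15.  */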
1414
1415 case MINUS_EXPR:
1416 {
1417 widest_int temv, temm;
1418 bit_value_unop (NEGATE_EXPR, r2type_sgn, r2type_precision, &temv, &temm,
1419 r2type_sgn, r2type_precision, r2val, r2mask);
1420 bit_value_binop (PLUS_EXPR, sgn, width, val, mask,
1421 r1type_sgn, r1type_precision, r1val, r1mask,
1422 r2type_sgn, r2type_precision, temv, temm);
1423 break;
1424 }
1425
1426 case MULT_EXPR:
1427 {
1428 /* Just track trailing zeros in both operands and transfer
1429 them to the other. */
1430 int r1tz = wi::ctz (r1val | r1mask);
1431 int r2tz = wi::ctz (r2val | r2mask);
1432 if (r1tz + r2tz >= width)
1433 {
1434 *mask = 0;
1435 *val = 0;
1436 }
1437 else if (r1tz + r2tz > 0)
1438 {
1439 *mask = wi::ext (wi::mask <widest_int> (r1tz + r2tz, true),
1440 width, sgn);
1441 *val = 0;
1442 }
1443 break;
1444 }
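      /* E.g. (illustrative) for the MULT_EXPR handling above: a factor
	 with two known trailing zero bits times a factor with three known
	 trailing zero bits yields a product known to be a multiple of 32,
	 so the low five bits of the result become known zeros.  */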
1445
1446 case EQ_EXPR:
1447 case NE_EXPR:
1448 {
1449 widest_int m = r1mask | r2mask;
1450 if (r1val.and_not (m) != r2val.and_not (m))
1451 {
1452 *mask = 0;
1453 *val = ((code == EQ_EXPR) ? 0 : 1);
1454 }
1455 else
1456 {
1457 /* We know the result of a comparison is always one or zero. */
1458 *mask = 1;
1459 *val = 0;
1460 }
1461 break;
1462 }
1463
1464 case GE_EXPR:
1465 case GT_EXPR:
1466 swap_p = true;
1467 code = swap_tree_comparison (code);
1468 /* Fall through. */
1469 case LT_EXPR:
1470 case LE_EXPR:
1471 {
1472 int minmax, maxmin;
1473
1474 const widest_int &o1val = swap_p ? r2val : r1val;
1475 const widest_int &o1mask = swap_p ? r2mask : r1mask;
1476 const widest_int &o2val = swap_p ? r1val : r2val;
1477 const widest_int &o2mask = swap_p ? r1mask : r2mask;
1478
1479 /* If the most significant bits are not known we know nothing. */
1480 if (wi::neg_p (o1mask) || wi::neg_p (o2mask))
1481 break;
1482
1483 /* For comparisons the signedness is in the comparison operands. */
1484 sgn = r1type_sgn;
1485
1486 /* If we know the most significant bits we know the
1487 value ranges by means of treating varying bits as zero
1488 or one. Do a cross comparison of the max/min pairs. */
1489 maxmin = wi::cmp (o1val | o1mask, o2val.and_not (o2mask), sgn);
1490 minmax = wi::cmp (o1val.and_not (o1mask), o2val | o2mask, sgn);
1491 if (maxmin < 0) /* o1 is less than o2. */
1492 {
1493 *mask = 0;
1494 *val = 1;
1495 }
1496 else if (minmax > 0) /* o1 is not less or equal to o2. */
1497 {
1498 *mask = 0;
1499 *val = 0;
1500 }
1501 else if (maxmin == minmax) /* o1 and o2 are equal. */
1502 {
1503 /* This probably should never happen as we'd have
1504 folded the thing during fully constant value folding. */
1505 *mask = 0;
1506 *val = (code == LE_EXPR ? 1 : 0);
1507 }
1508 else
1509 {
1510 /* We know the result of a comparison is always one or zero. */
1511 *mask = 1;
1512 *val = 0;
1513 }
1514 break;
1515 }
1516
1517 default:;
1518 }
1519 }
1520
1521 /* Return the propagation value when applying the operation CODE to
1522 the value RHS yielding type TYPE. */
1523
1524 static ccp_prop_value_t
1525 bit_value_unop (enum tree_code code, tree type, tree rhs)
1526 {
1527 ccp_prop_value_t rval = get_value_for_expr (rhs, true);
1528 widest_int value, mask;
1529 ccp_prop_value_t val;
1530
1531 if (rval.lattice_val == UNDEFINED)
1532 return rval;
1533
1534 gcc_assert ((rval.lattice_val == CONSTANT
1535 && TREE_CODE (rval.value) == INTEGER_CST)
1536 || wi::sext (rval.mask, TYPE_PRECISION (TREE_TYPE (rhs))) == -1);
1537 bit_value_unop (code, TYPE_SIGN (type), TYPE_PRECISION (type), &value, &mask,
1538 TYPE_SIGN (TREE_TYPE (rhs)), TYPE_PRECISION (TREE_TYPE (rhs)),
1539 value_to_wide_int (rval), rval.mask);
1540 if (wi::sext (mask, TYPE_PRECISION (type)) != -1)
1541 {
1542 val.lattice_val = CONSTANT;
1543 val.mask = mask;
1544 /* ??? Delay building trees here. */
1545 val.value = wide_int_to_tree (type, value);
1546 }
1547 else
1548 {
1549 val.lattice_val = VARYING;
1550 val.value = NULL_TREE;
1551 val.mask = -1;
1552 }
1553 return val;
1554 }
1555
1556 /* Return the propagation value when applying the operation CODE to
1557 the values RHS1 and RHS2 yielding type TYPE. */
1558
1559 static ccp_prop_value_t
1560 bit_value_binop (enum tree_code code, tree type, tree rhs1, tree rhs2)
1561 {
1562 ccp_prop_value_t r1val = get_value_for_expr (rhs1, true);
1563 ccp_prop_value_t r2val = get_value_for_expr (rhs2, true);
1564 widest_int value, mask;
1565 ccp_prop_value_t val;
1566
1567 if (r1val.lattice_val == UNDEFINED
1568 || r2val.lattice_val == UNDEFINED)
1569 {
1570 val.lattice_val = VARYING;
1571 val.value = NULL_TREE;
1572 val.mask = -1;
1573 return val;
1574 }
1575
1576 gcc_assert ((r1val.lattice_val == CONSTANT
1577 && TREE_CODE (r1val.value) == INTEGER_CST)
1578 || wi::sext (r1val.mask,
1579 TYPE_PRECISION (TREE_TYPE (rhs1))) == -1);
1580 gcc_assert ((r2val.lattice_val == CONSTANT
1581 && TREE_CODE (r2val.value) == INTEGER_CST)
1582 || wi::sext (r2val.mask,
1583 TYPE_PRECISION (TREE_TYPE (rhs2))) == -1);
1584 bit_value_binop (code, TYPE_SIGN (type), TYPE_PRECISION (type), &value, &mask,
1585 TYPE_SIGN (TREE_TYPE (rhs1)), TYPE_PRECISION (TREE_TYPE (rhs1)),
1586 value_to_wide_int (r1val), r1val.mask,
1587 TYPE_SIGN (TREE_TYPE (rhs2)), TYPE_PRECISION (TREE_TYPE (rhs2)),
1588 value_to_wide_int (r2val), r2val.mask);
1589
1590 if (wi::sext (mask, TYPE_PRECISION (type)) != -1)
1591 {
1592 val.lattice_val = CONSTANT;
1593 val.mask = mask;
1594 /* ??? Delay building trees here. */
1595 val.value = wide_int_to_tree (type, value);
1596 }
1597 else
1598 {
1599 val.lattice_val = VARYING;
1600 val.value = NULL_TREE;
1601 val.mask = -1;
1602 }
1603 return val;
1604 }
1605
1606 /* Return the propagation value for __builtin_assume_aligned
1607 and functions with assume_aligned or alloc_aligned attribute.
1608 For __builtin_assume_aligned, ATTR is NULL_TREE,
1609 for assume_aligned attribute ATTR is non-NULL and ALLOC_ALIGNED
1610 is false, for alloc_aligned attribute ATTR is non-NULL and
1611 ALLOC_ALIGNED is true. */
1612
1613 static ccp_prop_value_t
1614 bit_value_assume_aligned (gimple *stmt, tree attr, ccp_prop_value_t ptrval,
1615 bool alloc_aligned)
1616 {
1617 tree align, misalign = NULL_TREE, type;
1618 unsigned HOST_WIDE_INT aligni, misaligni = 0;
1619 ccp_prop_value_t alignval;
1620 widest_int value, mask;
1621 ccp_prop_value_t val;
1622
1623 if (attr == NULL_TREE)
1624 {
1625 tree ptr = gimple_call_arg (stmt, 0);
1626 type = TREE_TYPE (ptr);
1627 ptrval = get_value_for_expr (ptr, true);
1628 }
1629 else
1630 {
1631 tree lhs = gimple_call_lhs (stmt);
1632 type = TREE_TYPE (lhs);
1633 }
1634
1635 if (ptrval.lattice_val == UNDEFINED)
1636 return ptrval;
1637 gcc_assert ((ptrval.lattice_val == CONSTANT
1638 && TREE_CODE (ptrval.value) == INTEGER_CST)
1639 || wi::sext (ptrval.mask, TYPE_PRECISION (type)) == -1);
1640 if (attr == NULL_TREE)
1641 {
1642 /* Get aligni and misaligni from __builtin_assume_aligned. */
1643 align = gimple_call_arg (stmt, 1);
1644 if (!tree_fits_uhwi_p (align))
1645 return ptrval;
1646 aligni = tree_to_uhwi (align);
1647 if (gimple_call_num_args (stmt) > 2)
1648 {
1649 misalign = gimple_call_arg (stmt, 2);
1650 if (!tree_fits_uhwi_p (misalign))
1651 return ptrval;
1652 misaligni = tree_to_uhwi (misalign);
1653 }
1654 }
1655 else
1656 {
1657 /* Get aligni and misaligni from assume_aligned or
1658 alloc_align attributes. */
1659 if (TREE_VALUE (attr) == NULL_TREE)
1660 return ptrval;
1661 attr = TREE_VALUE (attr);
1662 align = TREE_VALUE (attr);
1663 if (!tree_fits_uhwi_p (align))
1664 return ptrval;
1665 aligni = tree_to_uhwi (align);
1666 if (alloc_aligned)
1667 {
1668 if (aligni == 0 || aligni > gimple_call_num_args (stmt))
1669 return ptrval;
1670 align = gimple_call_arg (stmt, aligni - 1);
1671 if (!tree_fits_uhwi_p (align))
1672 return ptrval;
1673 aligni = tree_to_uhwi (align);
1674 }
1675 else if (TREE_CHAIN (attr) && TREE_VALUE (TREE_CHAIN (attr)))
1676 {
1677 misalign = TREE_VALUE (TREE_CHAIN (attr));
1678 if (!tree_fits_uhwi_p (misalign))
1679 return ptrval;
1680 misaligni = tree_to_uhwi (misalign);
1681 }
1682 }
1683 if (aligni <= 1 || (aligni & (aligni - 1)) != 0 || misaligni >= aligni)
1684 return ptrval;
1685
1686 align = build_int_cst_type (type, -aligni);
1687 alignval = get_value_for_expr (align, true);
1688 bit_value_binop (BIT_AND_EXPR, TYPE_SIGN (type), TYPE_PRECISION (type), &value, &mask,
1689 TYPE_SIGN (type), TYPE_PRECISION (type), value_to_wide_int (ptrval), ptrval.mask,
1690 TYPE_SIGN (type), TYPE_PRECISION (type), value_to_wide_int (alignval), alignval.mask);
1691
1692 if (wi::sext (mask, TYPE_PRECISION (type)) != -1)
1693 {
1694 val.lattice_val = CONSTANT;
1695 val.mask = mask;
1696 gcc_assert ((mask.to_uhwi () & (aligni - 1)) == 0);
1697 gcc_assert ((value.to_uhwi () & (aligni - 1)) == 0);
1698 value |= misaligni;
1699 /* ??? Delay building trees here. */
1700 val.value = wide_int_to_tree (type, value);
1701 }
1702 else
1703 {
1704 val.lattice_val = VARYING;
1705 val.value = NULL_TREE;
1706 val.mask = -1;
1707 }
1708 return val;
1709 }
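/* For example (an illustrative sketch): for
   p2 = __builtin_assume_aligned (p, 16, 4) the code above ANDs the bit
   lattice value of p with -16, making the low four bits known zeros,
   and then ORs in the misalignment 4, so p2 is known to be congruent
   to 4 modulo 16.  */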
1710
1711 /* Evaluate statement STMT.
1712 Valid only for assignments, calls, conditionals, and switches. */
1713
1714 static ccp_prop_value_t
1715 evaluate_stmt (gimple *stmt)
1716 {
1717 ccp_prop_value_t val;
1718 tree simplified = NULL_TREE;
1719 ccp_lattice_t likelyvalue = likely_value (stmt);
1720 bool is_constant = false;
1721 unsigned int align;
1722
1723 if (dump_file && (dump_flags & TDF_DETAILS))
1724 {
1725 fprintf (dump_file, "which is likely ");
1726 switch (likelyvalue)
1727 {
1728 case CONSTANT:
1729 fprintf (dump_file, "CONSTANT");
1730 break;
1731 case UNDEFINED:
1732 fprintf (dump_file, "UNDEFINED");
1733 break;
1734 case VARYING:
1735 fprintf (dump_file, "VARYING");
1736 break;
1737 default:;
1738 }
1739 fprintf (dump_file, "\n");
1740 }
1741
1742 /* If the statement is likely to have a CONSTANT result, then try
1743 to fold the statement to determine the constant value. */
1744 /* FIXME. This is the only place that we call ccp_fold.
1745 Since likely_value never returns CONSTANT for calls, we will
1746 not attempt to fold them, including builtins that may profit. */
1747 if (likelyvalue == CONSTANT)
1748 {
1749 fold_defer_overflow_warnings ();
1750 simplified = ccp_fold (stmt);
1751 if (simplified
1752 && TREE_CODE (simplified) == SSA_NAME
1753 /* We may not use values of something that may be simulated again,
1754 see valueize_op_1. */
1755 && (SSA_NAME_IS_DEFAULT_DEF (simplified)
1756 || ! prop_simulate_again_p (SSA_NAME_DEF_STMT (simplified))))
1757 {
1758 ccp_prop_value_t *val = get_value (simplified);
1759 if (val && val->lattice_val != VARYING)
1760 {
1761 fold_undefer_overflow_warnings (true, stmt, 0);
1762 return *val;
1763 }
1764 }
1765 is_constant = simplified && is_gimple_min_invariant (simplified);
1766 fold_undefer_overflow_warnings (is_constant, stmt, 0);
1767 if (is_constant)
1768 {
1769 /* The statement produced a constant value. */
1770 val.lattice_val = CONSTANT;
1771 val.value = simplified;
1772 val.mask = 0;
1773 return val;
1774 }
1775 }
1776 /* If the statement is likely to have a VARYING result, then do not
1777 bother folding the statement. */
1778 else if (likelyvalue == VARYING)
1779 {
1780 enum gimple_code code = gimple_code (stmt);
1781 if (code == GIMPLE_ASSIGN)
1782 {
1783 enum tree_code subcode = gimple_assign_rhs_code (stmt);
1784
1785 /* Other cases cannot satisfy is_gimple_min_invariant
1786 without folding. */
1787 if (get_gimple_rhs_class (subcode) == GIMPLE_SINGLE_RHS)
1788 simplified = gimple_assign_rhs1 (stmt);
1789 }
1790 else if (code == GIMPLE_SWITCH)
1791 simplified = gimple_switch_index (as_a <gswitch *> (stmt));
1792 else
1793 /* These cannot satisfy is_gimple_min_invariant without folding. */
1794 gcc_assert (code == GIMPLE_CALL || code == GIMPLE_COND);
1795 is_constant = simplified && is_gimple_min_invariant (simplified);
1796 if (is_constant)
1797 {
1798 /* The statement produced a constant value. */
1799 val.lattice_val = CONSTANT;
1800 val.value = simplified;
1801 val.mask = 0;
1802 }
1803 }
1804 /* If the statement result is likely UNDEFINED, make it so. */
1805 else if (likelyvalue == UNDEFINED)
1806 {
1807 val.lattice_val = UNDEFINED;
1808 val.value = NULL_TREE;
1809 val.mask = 0;
1810 return val;
1811 }
1812
1813 /* Resort to simplification for bitwise tracking. */
1814 if (flag_tree_bit_ccp
1815 && (likelyvalue == CONSTANT || is_gimple_call (stmt)
1816 || (gimple_assign_single_p (stmt)
1817 && gimple_assign_rhs_code (stmt) == ADDR_EXPR))
1818 && !is_constant)
1819 {
1820 enum gimple_code code = gimple_code (stmt);
1821 val.lattice_val = VARYING;
1822 val.value = NULL_TREE;
1823 val.mask = -1;
1824 if (code == GIMPLE_ASSIGN)
1825 {
1826 enum tree_code subcode = gimple_assign_rhs_code (stmt);
1827 tree rhs1 = gimple_assign_rhs1 (stmt);
1828 tree lhs = gimple_assign_lhs (stmt);
1829 if ((INTEGRAL_TYPE_P (TREE_TYPE (lhs))
1830 || POINTER_TYPE_P (TREE_TYPE (lhs)))
1831 && (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
1832 || POINTER_TYPE_P (TREE_TYPE (rhs1))))
1833 switch (get_gimple_rhs_class (subcode))
1834 {
1835 case GIMPLE_SINGLE_RHS:
1836 val = get_value_for_expr (rhs1, true);
1837 break;
1838
1839 case GIMPLE_UNARY_RHS:
1840 val = bit_value_unop (subcode, TREE_TYPE (lhs), rhs1);
1841 break;
1842
1843 case GIMPLE_BINARY_RHS:
1844 val = bit_value_binop (subcode, TREE_TYPE (lhs), rhs1,
1845 gimple_assign_rhs2 (stmt));
1846 break;
1847
1848 default:;
1849 }
1850 }
1851 else if (code == GIMPLE_COND)
1852 {
1853 enum tree_code code = gimple_cond_code (stmt);
1854 tree rhs1 = gimple_cond_lhs (stmt);
1855 tree rhs2 = gimple_cond_rhs (stmt);
1856 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
1857 || POINTER_TYPE_P (TREE_TYPE (rhs1)))
1858 val = bit_value_binop (code, TREE_TYPE (rhs1), rhs1, rhs2);
1859 }
1860 else if (gimple_call_builtin_p (stmt, BUILT_IN_NORMAL))
1861 {
1862 tree fndecl = gimple_call_fndecl (stmt);
1863 switch (DECL_FUNCTION_CODE (fndecl))
1864 {
1865 case BUILT_IN_MALLOC:
1866 case BUILT_IN_REALLOC:
1867 case BUILT_IN_CALLOC:
1868 case BUILT_IN_STRDUP:
1869 case BUILT_IN_STRNDUP:
1870 val.lattice_val = CONSTANT;
1871 val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0);
1872 val.mask = ~((HOST_WIDE_INT) MALLOC_ABI_ALIGNMENT
1873 / BITS_PER_UNIT - 1);
1874 break;
1875
1876 case BUILT_IN_ALLOCA:
1877 case BUILT_IN_ALLOCA_WITH_ALIGN:
1878 align = (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_ALLOCA_WITH_ALIGN
1879 ? TREE_INT_CST_LOW (gimple_call_arg (stmt, 1))
1880 : BIGGEST_ALIGNMENT);
1881 val.lattice_val = CONSTANT;
1882 val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0);
1883 val.mask = ~((HOST_WIDE_INT) align / BITS_PER_UNIT - 1);
1884 break;
1885
1886 /* These builtins return their first argument, unmodified. */
1887 case BUILT_IN_MEMCPY:
1888 case BUILT_IN_MEMMOVE:
1889 case BUILT_IN_MEMSET:
1890 case BUILT_IN_STRCPY:
1891 case BUILT_IN_STRNCPY:
1892 case BUILT_IN_MEMCPY_CHK:
1893 case BUILT_IN_MEMMOVE_CHK:
1894 case BUILT_IN_MEMSET_CHK:
1895 case BUILT_IN_STRCPY_CHK:
1896 case BUILT_IN_STRNCPY_CHK:
1897 val = get_value_for_expr (gimple_call_arg (stmt, 0), true);
1898 break;
1899
1900 case BUILT_IN_ASSUME_ALIGNED:
1901 val = bit_value_assume_aligned (stmt, NULL_TREE, val, false);
1902 break;
1903
1904 case BUILT_IN_ALIGNED_ALLOC:
1905 {
1906 tree align = get_constant_value (gimple_call_arg (stmt, 0));
1907 if (align
1908 && tree_fits_uhwi_p (align))
1909 {
1910 unsigned HOST_WIDE_INT aligni = tree_to_uhwi (align);
1911 if (aligni > 1
1912 			  /* align must be a power of two */
1913 && (aligni & (aligni - 1)) == 0)
1914 {
1915 val.lattice_val = CONSTANT;
1916 val.value = build_int_cst (ptr_type_node, 0);
1917 val.mask = -aligni;
1918 }
1919 }
1920 break;
1921 }
1922
1923 default:;
1924 }
1925 }
1926 if (is_gimple_call (stmt) && gimple_call_lhs (stmt))
1927 {
1928 tree fntype = gimple_call_fntype (stmt);
1929 if (fntype)
1930 {
1931 tree attrs = lookup_attribute ("assume_aligned",
1932 TYPE_ATTRIBUTES (fntype));
1933 if (attrs)
1934 val = bit_value_assume_aligned (stmt, attrs, val, false);
1935 attrs = lookup_attribute ("alloc_align",
1936 TYPE_ATTRIBUTES (fntype));
1937 if (attrs)
1938 val = bit_value_assume_aligned (stmt, attrs, val, true);
1939 }
1940 }
1941 is_constant = (val.lattice_val == CONSTANT);
1942 }
1943
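  /* If range information has recorded known nonzero bits for the LHS
     (see get_nonzero_bits), use them to seed a bit-lattice value when we
     have none, or to further constrain the one computed above.  */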
1944 if (flag_tree_bit_ccp
1945 && ((is_constant && TREE_CODE (val.value) == INTEGER_CST)
1946 || !is_constant)
1947 && gimple_get_lhs (stmt)
1948 && TREE_CODE (gimple_get_lhs (stmt)) == SSA_NAME)
1949 {
1950 tree lhs = gimple_get_lhs (stmt);
1951 wide_int nonzero_bits = get_nonzero_bits (lhs);
1952 if (nonzero_bits != -1)
1953 {
1954 if (!is_constant)
1955 {
1956 val.lattice_val = CONSTANT;
1957 val.value = build_zero_cst (TREE_TYPE (lhs));
1958 val.mask = extend_mask (nonzero_bits, TYPE_SIGN (TREE_TYPE (lhs)));
1959 is_constant = true;
1960 }
1961 else
1962 {
1963 if (wi::bit_and_not (val.value, nonzero_bits) != 0)
1964 val.value = wide_int_to_tree (TREE_TYPE (lhs),
1965 nonzero_bits & val.value);
1966 if (nonzero_bits == 0)
1967 val.mask = 0;
1968 else
1969 val.mask = val.mask & extend_mask (nonzero_bits,
1970 TYPE_SIGN (TREE_TYPE (lhs)));
1971 }
1972 }
1973 }
1974
1975 /* The statement produced a nonconstant value. */
1976 if (!is_constant)
1977 {
1978 /* The statement produced a copy. */
1979 if (simplified && TREE_CODE (simplified) == SSA_NAME
1980 && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (simplified))
1981 {
1982 val.lattice_val = CONSTANT;
1983 val.value = simplified;
1984 val.mask = -1;
1985 }
1986 /* The statement is VARYING. */
1987 else
1988 {
1989 val.lattice_val = VARYING;
1990 val.value = NULL_TREE;
1991 val.mask = -1;
1992 }
1993 }
1994
1995 return val;
1996 }
1997
1998 typedef hash_table<nofree_ptr_hash<gimple> > gimple_htab;
1999
2000 /* Given a BUILT_IN_STACK_SAVE value SAVED_VAL, insert a clobber of VAR before
2001 each matching BUILT_IN_STACK_RESTORE. Mark visited phis in VISITED. */
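/* For illustration (SSA names are invented; the exact GIMPLE may differ):
   given

     saved_1 = __builtin_stack_save ();
     ...
     __builtin_stack_restore (saved_1);

   a clobber of VAR is inserted immediately before the restore:

     var ={v} {CLOBBER};
     __builtin_stack_restore (saved_1);

   Copies and PHIs of SAVED_VAL are followed, so restores reached through
   them are handled as well.  */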
2002
2003 static void
2004 insert_clobber_before_stack_restore (tree saved_val, tree var,
2005 gimple_htab **visited)
2006 {
2007 gimple *stmt;
2008 gassign *clobber_stmt;
2009 tree clobber;
2010 imm_use_iterator iter;
2011 gimple_stmt_iterator i;
2012 gimple **slot;
2013
2014 FOR_EACH_IMM_USE_STMT (stmt, iter, saved_val)
2015 if (gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
2016 {
2017 clobber = build_constructor (TREE_TYPE (var),
2018 NULL);
2019 TREE_THIS_VOLATILE (clobber) = 1;
2020 clobber_stmt = gimple_build_assign (var, clobber);
2021
2022 i = gsi_for_stmt (stmt);
2023 gsi_insert_before (&i, clobber_stmt, GSI_SAME_STMT);
2024 }
2025 else if (gimple_code (stmt) == GIMPLE_PHI)
2026 {
2027 if (!*visited)
2028 *visited = new gimple_htab (10);
2029
2030 slot = (*visited)->find_slot (stmt, INSERT);
2031 if (*slot != NULL)
2032 continue;
2033
2034 *slot = stmt;
2035 insert_clobber_before_stack_restore (gimple_phi_result (stmt), var,
2036 visited);
2037 }
2038 else if (gimple_assign_ssa_name_copy_p (stmt))
2039 insert_clobber_before_stack_restore (gimple_assign_lhs (stmt), var,
2040 visited);
2041 else if (chkp_gimple_call_builtin_p (stmt, BUILT_IN_CHKP_BNDRET))
2042 continue;
2043 else
2044 gcc_assert (is_gimple_debug (stmt));
2045 }
2046
2047 /* Advance the iterator to the previous non-debug gimple statement in the same
2048 or dominating basic block. */
2049
2050 static inline void
2051 gsi_prev_dom_bb_nondebug (gimple_stmt_iterator *i)
2052 {
2053 basic_block dom;
2054
2055 gsi_prev_nondebug (i);
2056 while (gsi_end_p (*i))
2057 {
2058 dom = get_immediate_dominator (CDI_DOMINATORS, i->bb);
2059 if (dom == NULL || dom == ENTRY_BLOCK_PTR_FOR_FN (cfun))
2060 return;
2061
2062 *i = gsi_last_bb (dom);
2063 }
2064 }
2065
2066 /* Find a BUILT_IN_STACK_SAVE dominating gsi_stmt (I), and insert
2067 a clobber of VAR before each matching BUILT_IN_STACK_RESTORE.
2068
2069    It is possible that BUILT_IN_STACK_SAVE cannot be found in a dominator when a
2070 previous pass (such as DOM) duplicated it along multiple paths to a BB. In
2071 that case the function gives up without inserting the clobbers. */
2072
2073 static void
2074 insert_clobbers_for_var (gimple_stmt_iterator i, tree var)
2075 {
2076 gimple *stmt;
2077 tree saved_val;
2078 gimple_htab *visited = NULL;
2079
2080 for (; !gsi_end_p (i); gsi_prev_dom_bb_nondebug (&i))
2081 {
2082 stmt = gsi_stmt (i);
2083
2084 if (!gimple_call_builtin_p (stmt, BUILT_IN_STACK_SAVE))
2085 continue;
2086
2087 saved_val = gimple_call_lhs (stmt);
2088 if (saved_val == NULL_TREE)
2089 continue;
2090
2091 insert_clobber_before_stack_restore (saved_val, var, &visited);
2092 break;
2093 }
2094
2095 delete visited;
2096 }
2097
2098 /* Detect a __builtin_alloca_with_align call with a constant size argument.  If
2099    one is found, declare a fixed-size array in its place and return the address
2100    of that array; otherwise return NULL_TREE.  */
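/* For illustration (declaration names are invented; the alignment argument
   is taken to be in bits, as passed to __builtin_alloca_with_align):

     p_1 = __builtin_alloca_with_align (16, 64);

   can be folded to the address of a new fixed-size local array,

     unsigned char D.2345[16];
     p_1 = (void *) &D.2345;

   provided the 16-byte size is below the large-stack-frame threshold.  */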
2101
2102 static tree
2103 fold_builtin_alloca_with_align (gimple *stmt)
2104 {
2105 unsigned HOST_WIDE_INT size, threshold, n_elem;
2106 tree lhs, arg, block, var, elem_type, array_type;
2107
2108 /* Get lhs. */
2109 lhs = gimple_call_lhs (stmt);
2110 if (lhs == NULL_TREE)
2111 return NULL_TREE;
2112
2113 /* Detect constant argument. */
2114 arg = get_constant_value (gimple_call_arg (stmt, 0));
2115 if (arg == NULL_TREE
2116 || TREE_CODE (arg) != INTEGER_CST
2117 || !tree_fits_uhwi_p (arg))
2118 return NULL_TREE;
2119
2120 size = tree_to_uhwi (arg);
2121
2122 /* Heuristic: don't fold large allocas. */
2123 threshold = (unsigned HOST_WIDE_INT)PARAM_VALUE (PARAM_LARGE_STACK_FRAME);
2124 /* In case the alloca is located at function entry, it has the same lifetime
2125 as a declared array, so we allow a larger size. */
2126 block = gimple_block (stmt);
2127 if (!(cfun->after_inlining
2128 && block
2129 && TREE_CODE (BLOCK_SUPERCONTEXT (block)) == FUNCTION_DECL))
2130 threshold /= 10;
2131 if (size > threshold)
2132 return NULL_TREE;
2133
2134 /* Declare array. */
2135 elem_type = build_nonstandard_integer_type (BITS_PER_UNIT, 1);
2136 n_elem = size * 8 / BITS_PER_UNIT;
2137 array_type = build_array_type_nelts (elem_type, n_elem);
2138 var = create_tmp_var (array_type);
2139 SET_DECL_ALIGN (var, TREE_INT_CST_LOW (gimple_call_arg (stmt, 1)));
2140 {
2141 struct ptr_info_def *pi = SSA_NAME_PTR_INFO (lhs);
2142 if (pi != NULL && !pi->pt.anything)
2143 {
2144 bool singleton_p;
2145 unsigned uid;
2146 singleton_p = pt_solution_singleton_or_null_p (&pi->pt, &uid);
2147 gcc_assert (singleton_p);
2148 SET_DECL_PT_UID (var, uid);
2149 }
2150 }
2151
2152 /* Fold alloca to the address of the array. */
2153 return fold_convert (TREE_TYPE (lhs), build_fold_addr_expr (var));
2154 }
2155
2156 /* Fold the stmt at *GSI with CCP specific information that propagating
2157 and regular folding does not catch. */
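/* For illustration (SSA names are invented): if bit-CCP has proved that the
   low bit of _2 is always set, then the predicate

     if (_2 != 0) ...

   evaluates to CONSTANT 1 with mask 0 even though _2 itself is not a
   constant, and the condition can be folded to if (1) here.  */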
2158
2159 static bool
2160 ccp_fold_stmt (gimple_stmt_iterator *gsi)
2161 {
2162 gimple *stmt = gsi_stmt (*gsi);
2163
2164 switch (gimple_code (stmt))
2165 {
2166 case GIMPLE_COND:
2167 {
2168 gcond *cond_stmt = as_a <gcond *> (stmt);
2169 ccp_prop_value_t val;
2170 /* Statement evaluation will handle type mismatches in constants
2171 more gracefully than the final propagation. This allows us to
2172 fold more conditionals here. */
2173 val = evaluate_stmt (stmt);
2174 if (val.lattice_val != CONSTANT
2175 || val.mask != 0)
2176 return false;
2177
2178 if (dump_file)
2179 {
2180 fprintf (dump_file, "Folding predicate ");
2181 print_gimple_expr (dump_file, stmt, 0, 0);
2182 fprintf (dump_file, " to ");
2183 print_generic_expr (dump_file, val.value, 0);
2184 fprintf (dump_file, "\n");
2185 }
2186
2187 if (integer_zerop (val.value))
2188 gimple_cond_make_false (cond_stmt);
2189 else
2190 gimple_cond_make_true (cond_stmt);
2191
2192 return true;
2193 }
2194
2195 case GIMPLE_CALL:
2196 {
2197 tree lhs = gimple_call_lhs (stmt);
2198 int flags = gimple_call_flags (stmt);
2199 tree val;
2200 tree argt;
2201 bool changed = false;
2202 unsigned i;
2203
2204 	/* If the call was folded into a constant, make sure it goes
2205 away even if we cannot propagate into all uses because of
2206 type issues. */
2207 if (lhs
2208 && TREE_CODE (lhs) == SSA_NAME
2209 && (val = get_constant_value (lhs))
2210 /* Don't optimize away calls that have side-effects. */
2211 && (flags & (ECF_CONST|ECF_PURE)) != 0
2212 && (flags & ECF_LOOPING_CONST_OR_PURE) == 0)
2213 {
2214 tree new_rhs = unshare_expr (val);
2215 bool res;
2216 if (!useless_type_conversion_p (TREE_TYPE (lhs),
2217 TREE_TYPE (new_rhs)))
2218 new_rhs = fold_convert (TREE_TYPE (lhs), new_rhs);
2219 res = update_call_from_tree (gsi, new_rhs);
2220 gcc_assert (res);
2221 return true;
2222 }
2223
2224 /* Internal calls provide no argument types, so the extra laxity
2225 for normal calls does not apply. */
2226 if (gimple_call_internal_p (stmt))
2227 return false;
2228
2229 /* The heuristic of fold_builtin_alloca_with_align differs before and
2230 	   after inlining, so we don't require that the argument has just been
2231 	   turned into a constant by this propagation; it only needs to be constant.  */
2232 if (gimple_call_builtin_p (stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
2233 {
2234 tree new_rhs = fold_builtin_alloca_with_align (stmt);
2235 if (new_rhs)
2236 {
2237 bool res = update_call_from_tree (gsi, new_rhs);
2238 tree var = TREE_OPERAND (TREE_OPERAND (new_rhs, 0),0);
2239 gcc_assert (res);
2240 insert_clobbers_for_var (*gsi, var);
2241 return true;
2242 }
2243 }
2244
2245 /* Propagate into the call arguments. Compared to replace_uses_in
2246 this can use the argument slot types for type verification
2247 instead of the current argument type. We also can safely
2248 drop qualifiers here as we are dealing with constants anyway. */
2249 argt = TYPE_ARG_TYPES (gimple_call_fntype (stmt));
2250 for (i = 0; i < gimple_call_num_args (stmt) && argt;
2251 ++i, argt = TREE_CHAIN (argt))
2252 {
2253 tree arg = gimple_call_arg (stmt, i);
2254 if (TREE_CODE (arg) == SSA_NAME
2255 && (val = get_constant_value (arg))
2256 && useless_type_conversion_p
2257 (TYPE_MAIN_VARIANT (TREE_VALUE (argt)),
2258 TYPE_MAIN_VARIANT (TREE_TYPE (val))))
2259 {
2260 gimple_call_set_arg (stmt, i, unshare_expr (val));
2261 changed = true;
2262 }
2263 }
2264
2265 return changed;
2266 }
2267
2268 case GIMPLE_ASSIGN:
2269 {
2270 tree lhs = gimple_assign_lhs (stmt);
2271 tree val;
2272
2273 	/* If we have a load that turned out to be constant, replace it,
2274 as we cannot propagate into all uses in all cases. */
2275 if (gimple_assign_single_p (stmt)
2276 && TREE_CODE (lhs) == SSA_NAME
2277 && (val = get_constant_value (lhs)))
2278 {
2279 tree rhs = unshare_expr (val);
2280 if (!useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (rhs)))
2281 rhs = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (lhs), rhs);
2282 gimple_assign_set_rhs_from_tree (gsi, rhs);
2283 return true;
2284 }
2285
2286 return false;
2287 }
2288
2289 default:
2290 return false;
2291 }
2292 }
2293
2294 /* Visit the assignment statement STMT. Set the value of its LHS to the
2295 value computed by the RHS and store LHS in *OUTPUT_P. If STMT
2296 creates virtual definitions, set the value of each new name to that
2297 of the RHS (if we can derive a constant out of the RHS).
2298 Value-returning call statements also perform an assignment, and
2299 are handled here. */
2300
2301 static enum ssa_prop_result
2302 visit_assignment (gimple *stmt, tree *output_p)
2303 {
2304 ccp_prop_value_t val;
2305 enum ssa_prop_result retval = SSA_PROP_NOT_INTERESTING;
2306
2307 tree lhs = gimple_get_lhs (stmt);
2308 if (TREE_CODE (lhs) == SSA_NAME)
2309 {
2310 /* Evaluate the statement, which could be
2311 either a GIMPLE_ASSIGN or a GIMPLE_CALL. */
2312 val = evaluate_stmt (stmt);
2313
2314 /* If STMT is an assignment to an SSA_NAME, we only have one
2315 value to set. */
2316 if (set_lattice_value (lhs, &val))
2317 {
2318 *output_p = lhs;
2319 if (val.lattice_val == VARYING)
2320 retval = SSA_PROP_VARYING;
2321 else
2322 retval = SSA_PROP_INTERESTING;
2323 }
2324 }
2325
2326 return retval;
2327 }
2328
2329
2330 /* Visit the conditional statement STMT. Return SSA_PROP_INTERESTING
2331 if it can determine which edge will be taken. Otherwise, return
2332 SSA_PROP_VARYING. */
2333
2334 static enum ssa_prop_result
2335 visit_cond_stmt (gimple *stmt, edge *taken_edge_p)
2336 {
2337 ccp_prop_value_t val;
2338 basic_block block;
2339
2340 block = gimple_bb (stmt);
2341 val = evaluate_stmt (stmt);
2342 if (val.lattice_val != CONSTANT
2343 || val.mask != 0)
2344 return SSA_PROP_VARYING;
2345
2346 /* Find which edge out of the conditional block will be taken and add it
2347 to the worklist. If no single edge can be determined statically,
2348 return SSA_PROP_VARYING to feed all the outgoing edges to the
2349 propagation engine. */
2350 *taken_edge_p = find_taken_edge (block, val.value);
2351 if (*taken_edge_p)
2352 return SSA_PROP_INTERESTING;
2353 else
2354 return SSA_PROP_VARYING;
2355 }
2356
2357
2358 /* Evaluate statement STMT. If the statement produces an output value and
2359 its evaluation changes the lattice value of its output, return
2360 SSA_PROP_INTERESTING and set *OUTPUT_P to the SSA_NAME holding the
2361 output value.
2362
2363 If STMT is a conditional branch and we can determine its truth
2364 value, set *TAKEN_EDGE_P accordingly. If STMT produces a varying
2365 value, return SSA_PROP_VARYING. */
2366
2367 static enum ssa_prop_result
2368 ccp_visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
2369 {
2370 tree def;
2371 ssa_op_iter iter;
2372
2373 if (dump_file && (dump_flags & TDF_DETAILS))
2374 {
2375 fprintf (dump_file, "\nVisiting statement:\n");
2376 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
2377 }
2378
2379 switch (gimple_code (stmt))
2380 {
2381 case GIMPLE_ASSIGN:
2382 /* If the statement is an assignment that produces a single
2383 output value, evaluate its RHS to see if the lattice value of
2384 its output has changed. */
2385 return visit_assignment (stmt, output_p);
2386
2387 case GIMPLE_CALL:
2388 /* A value-returning call also performs an assignment. */
2389 if (gimple_call_lhs (stmt) != NULL_TREE)
2390 return visit_assignment (stmt, output_p);
2391 break;
2392
2393 case GIMPLE_COND:
2394 case GIMPLE_SWITCH:
2395 /* If STMT is a conditional branch, see if we can determine
2396 which branch will be taken. */
2397 /* FIXME. It appears that we should be able to optimize
2398 computed GOTOs here as well. */
2399 return visit_cond_stmt (stmt, taken_edge_p);
2400
2401 default:
2402 break;
2403 }
2404
2405 /* Any other kind of statement is not interesting for constant
2406 propagation and, therefore, not worth simulating. */
2407 if (dump_file && (dump_flags & TDF_DETAILS))
2408 fprintf (dump_file, "No interesting values produced. Marked VARYING.\n");
2409
2410 /* Definitions made by statements other than assignments to
2411 SSA_NAMEs represent unknown modifications to their outputs.
2412 Mark them VARYING. */
2413 FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
2414 set_value_varying (def);
2415
2416 return SSA_PROP_VARYING;
2417 }
2418
2419
2420 /* Main entry point for SSA Conditional Constant Propagation. If NONZERO_P,
2421 record nonzero bits. */
2422
2423 static unsigned int
2424 do_ssa_ccp (bool nonzero_p)
2425 {
2426 unsigned int todo = 0;
2427 calculate_dominance_info (CDI_DOMINATORS);
2428
2429 ccp_initialize ();
2430 ssa_propagate (ccp_visit_stmt, ccp_visit_phi_node);
2431 if (ccp_finalize (nonzero_p || flag_ipa_bit_cp))
2432 {
2433 todo = (TODO_cleanup_cfg | TODO_update_ssa);
2434
2435 /* ccp_finalize does not preserve loop-closed ssa. */
2436 loops_state_clear (LOOP_CLOSED_SSA);
2437 }
2438
2439 free_dominance_info (CDI_DOMINATORS);
2440 return todo;
2441 }
2442
2443
2444 namespace {
2445
2446 const pass_data pass_data_ccp =
2447 {
2448 GIMPLE_PASS, /* type */
2449 "ccp", /* name */
2450 OPTGROUP_NONE, /* optinfo_flags */
2451 TV_TREE_CCP, /* tv_id */
2452 ( PROP_cfg | PROP_ssa ), /* properties_required */
2453 0, /* properties_provided */
2454 0, /* properties_destroyed */
2455 0, /* todo_flags_start */
2456 TODO_update_address_taken, /* todo_flags_finish */
2457 };
2458
2459 class pass_ccp : public gimple_opt_pass
2460 {
2461 public:
2462 pass_ccp (gcc::context *ctxt)
2463 : gimple_opt_pass (pass_data_ccp, ctxt), nonzero_p (false)
2464 {}
2465
2466 /* opt_pass methods: */
2467 opt_pass * clone () { return new pass_ccp (m_ctxt); }
2468 void set_pass_param (unsigned int n, bool param)
2469 {
2470 gcc_assert (n == 0);
2471 nonzero_p = param;
2472 }
2473 virtual bool gate (function *) { return flag_tree_ccp != 0; }
2474 virtual unsigned int execute (function *) { return do_ssa_ccp (nonzero_p); }
2475
2476 private:
2477 /* Determines whether the pass instance records nonzero bits. */
2478 bool nonzero_p;
2479 }; // class pass_ccp
2480
2481 } // anon namespace
2482
2483 gimple_opt_pass *
2484 make_pass_ccp (gcc::context *ctxt)
2485 {
2486 return new pass_ccp (ctxt);
2487 }
2488
2489
2490
2491 /* Try to optimize out __builtin_stack_restore. Optimize it out
2492 if there is another __builtin_stack_restore in the same basic
2493 block and no calls or ASM_EXPRs are in between, or if this block's
2494 only outgoing edge is to EXIT_BLOCK and there are no calls or
2495 ASM_EXPRs after this __builtin_stack_restore. */
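/* For illustration (SSA names are invented): in a block such as

     __builtin_stack_restore (saved_1);
     x_2 = y_3 + 1;
     __builtin_stack_restore (saved_1);

   the first restore can be dropped, because no call or ASM_EXPR between
   the two restores can observe the stack pointer.  */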
2496
2497 static tree
2498 optimize_stack_restore (gimple_stmt_iterator i)
2499 {
2500 tree callee;
2501 gimple *stmt;
2502
2503 basic_block bb = gsi_bb (i);
2504 gimple *call = gsi_stmt (i);
2505
2506 if (gimple_code (call) != GIMPLE_CALL
2507 || gimple_call_num_args (call) != 1
2508 || TREE_CODE (gimple_call_arg (call, 0)) != SSA_NAME
2509 || !POINTER_TYPE_P (TREE_TYPE (gimple_call_arg (call, 0))))
2510 return NULL_TREE;
2511
2512 for (gsi_next (&i); !gsi_end_p (i); gsi_next (&i))
2513 {
2514 stmt = gsi_stmt (i);
2515 if (gimple_code (stmt) == GIMPLE_ASM)
2516 return NULL_TREE;
2517 if (gimple_code (stmt) != GIMPLE_CALL)
2518 continue;
2519
2520 callee = gimple_call_fndecl (stmt);
2521 if (!callee
2522 || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL
2523 /* All regular builtins are ok, just obviously not alloca. */
2524 || DECL_FUNCTION_CODE (callee) == BUILT_IN_ALLOCA
2525 || DECL_FUNCTION_CODE (callee) == BUILT_IN_ALLOCA_WITH_ALIGN)
2526 return NULL_TREE;
2527
2528 if (DECL_FUNCTION_CODE (callee) == BUILT_IN_STACK_RESTORE)
2529 goto second_stack_restore;
2530 }
2531
2532 if (!gsi_end_p (i))
2533 return NULL_TREE;
2534
2535   /* Allow either no successors, or a single successor leading to the exit block.  */
2536 switch (EDGE_COUNT (bb->succs))
2537 {
2538 case 0:
2539 break;
2540 case 1:
2541 if (single_succ_edge (bb)->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
2542 return NULL_TREE;
2543 break;
2544 default:
2545 return NULL_TREE;
2546 }
2547 second_stack_restore:
2548
2549 /* If there's exactly one use, then zap the call to __builtin_stack_save.
2550 If there are multiple uses, then the last one should remove the call.
2551 In any case, whether the call to __builtin_stack_save can be removed
2552 or not is irrelevant to removing the call to __builtin_stack_restore. */
2553 if (has_single_use (gimple_call_arg (call, 0)))
2554 {
2555 gimple *stack_save = SSA_NAME_DEF_STMT (gimple_call_arg (call, 0));
2556 if (is_gimple_call (stack_save))
2557 {
2558 callee = gimple_call_fndecl (stack_save);
2559 if (callee
2560 && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
2561 && DECL_FUNCTION_CODE (callee) == BUILT_IN_STACK_SAVE)
2562 {
2563 gimple_stmt_iterator stack_save_gsi;
2564 tree rhs;
2565
2566 stack_save_gsi = gsi_for_stmt (stack_save);
2567 rhs = build_int_cst (TREE_TYPE (gimple_call_arg (call, 0)), 0);
2568 update_call_from_tree (&stack_save_gsi, rhs);
2569 }
2570 }
2571 }
2572
2573 /* No effect, so the statement will be deleted. */
2574 return integer_zero_node;
2575 }
2576
2577 /* If va_list type is a simple pointer and nothing special is needed,
2578 optimize __builtin_va_start (&ap, 0) into ap = __builtin_next_arg (0),
2579    optimize __builtin_va_end (&ap) away as a no-op, and turn __builtin_va_copy
2580    into a simple pointer assignment.  */
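/* For illustration (assuming a target where the ABI va_list really is a
   plain char * / void * and expand_builtin_va_start is not overridden):

     __builtin_va_start (&ap, 0)   becomes   ap = __builtin_next_arg (0);
     __builtin_va_copy (&d, s)     becomes   d = s;
     __builtin_va_end (&ap)        is simply deleted.  */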
2581
2582 static tree
2583 optimize_stdarg_builtin (gimple *call)
2584 {
2585 tree callee, lhs, rhs, cfun_va_list;
2586 bool va_list_simple_ptr;
2587 location_t loc = gimple_location (call);
2588
2589 if (gimple_code (call) != GIMPLE_CALL)
2590 return NULL_TREE;
2591
2592 callee = gimple_call_fndecl (call);
2593
2594 cfun_va_list = targetm.fn_abi_va_list (callee);
2595 va_list_simple_ptr = POINTER_TYPE_P (cfun_va_list)
2596 && (TREE_TYPE (cfun_va_list) == void_type_node
2597 || TREE_TYPE (cfun_va_list) == char_type_node);
2598
2599 switch (DECL_FUNCTION_CODE (callee))
2600 {
2601 case BUILT_IN_VA_START:
2602 if (!va_list_simple_ptr
2603 || targetm.expand_builtin_va_start != NULL
2604 || !builtin_decl_explicit_p (BUILT_IN_NEXT_ARG))
2605 return NULL_TREE;
2606
2607 if (gimple_call_num_args (call) != 2)
2608 return NULL_TREE;
2609
2610 lhs = gimple_call_arg (call, 0);
2611 if (!POINTER_TYPE_P (TREE_TYPE (lhs))
2612 || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs)))
2613 != TYPE_MAIN_VARIANT (cfun_va_list))
2614 return NULL_TREE;
2615
2616 lhs = build_fold_indirect_ref_loc (loc, lhs);
2617 rhs = build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_NEXT_ARG),
2618 1, integer_zero_node);
2619 rhs = fold_convert_loc (loc, TREE_TYPE (lhs), rhs);
2620 return build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, rhs);
2621
2622 case BUILT_IN_VA_COPY:
2623 if (!va_list_simple_ptr)
2624 return NULL_TREE;
2625
2626 if (gimple_call_num_args (call) != 2)
2627 return NULL_TREE;
2628
2629 lhs = gimple_call_arg (call, 0);
2630 if (!POINTER_TYPE_P (TREE_TYPE (lhs))
2631 || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs)))
2632 != TYPE_MAIN_VARIANT (cfun_va_list))
2633 return NULL_TREE;
2634
2635 lhs = build_fold_indirect_ref_loc (loc, lhs);
2636 rhs = gimple_call_arg (call, 1);
2637 if (TYPE_MAIN_VARIANT (TREE_TYPE (rhs))
2638 != TYPE_MAIN_VARIANT (cfun_va_list))
2639 return NULL_TREE;
2640
2641 rhs = fold_convert_loc (loc, TREE_TYPE (lhs), rhs);
2642 return build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, rhs);
2643
2644 case BUILT_IN_VA_END:
2645 /* No effect, so the statement will be deleted. */
2646 return integer_zero_node;
2647
2648 default:
2649 gcc_unreachable ();
2650 }
2651 }
2652
2653 /* Attempt to make the block containing the __builtin_unreachable at I unreachable by changing
2654 the incoming jumps. Return true if at least one jump was changed. */
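/* For illustration (block numbers and SSA names are invented): given

     <bb 2>:
     if (x_1 > 0)
       goto <bb 3>;
     else
       goto <bb 4>;

     <bb 4>:
     __builtin_unreachable ();

   the condition in <bb 2> is rewritten with gimple_cond_make_true, so the
   false edge into <bb 4> is never taken and later CFG cleanup can remove
   <bb 4> as unreachable.  */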
2655
2656 static bool
2657 optimize_unreachable (gimple_stmt_iterator i)
2658 {
2659 basic_block bb = gsi_bb (i);
2660 gimple_stmt_iterator gsi;
2661 gimple *stmt;
2662 edge_iterator ei;
2663 edge e;
2664 bool ret;
2665
2666 if (flag_sanitize & SANITIZE_UNREACHABLE)
2667 return false;
2668
2669 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2670 {
2671 stmt = gsi_stmt (gsi);
2672
2673 if (is_gimple_debug (stmt))
2674 continue;
2675
2676 if (glabel *label_stmt = dyn_cast <glabel *> (stmt))
2677 {
2678 /* Verify we do not need to preserve the label. */
2679 if (FORCED_LABEL (gimple_label_label (label_stmt)))
2680 return false;
2681
2682 continue;
2683 }
2684
2685 /* Only handle the case that __builtin_unreachable is the first statement
2686 in the block. We rely on DCE to remove stmts without side-effects
2687 before __builtin_unreachable. */
2688 if (gsi_stmt (gsi) != gsi_stmt (i))
2689 return false;
2690 }
2691
2692 ret = false;
2693 FOR_EACH_EDGE (e, ei, bb->preds)
2694 {
2695 gsi = gsi_last_bb (e->src);
2696 if (gsi_end_p (gsi))
2697 continue;
2698
2699 stmt = gsi_stmt (gsi);
2700 if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
2701 {
2702 if (e->flags & EDGE_TRUE_VALUE)
2703 gimple_cond_make_false (cond_stmt);
2704 else if (e->flags & EDGE_FALSE_VALUE)
2705 gimple_cond_make_true (cond_stmt);
2706 else
2707 gcc_unreachable ();
2708 update_stmt (cond_stmt);
2709 }
2710 else
2711 {
2712 	  /* TODO: handle other cases, e.g. switch statements.  */
2713 continue;
2714 }
2715
2716 ret = true;
2717 }
2718
2719 return ret;
2720 }
2721
2722 /* Optimize
2723 mask_2 = 1 << cnt_1;
2724 _4 = __atomic_fetch_or_* (ptr_6, mask_2, _3);
2725 _5 = _4 & mask_2;
2726 to
2727 _4 = ATOMIC_BIT_TEST_AND_SET (ptr_6, cnt_1, 0, _3);
2728 _5 = _4;
2729 If _5 is only used in _5 != 0 or _5 == 0 comparisons, 1
2730 is passed instead of 0, and the builtin just returns a zero
2731 or 1 value instead of the actual bit.
2732 Similarly for __sync_fetch_and_or_* (without the ", _3" part
2733 in there), and/or if mask_2 is a power of 2 constant.
2734 Similarly for xor instead of or, use ATOMIC_BIT_TEST_AND_COMPLEMENT
2735 in that case. And similarly for and instead of or, except that
2736    the second argument to the builtin needs to be the one's complement
2737    of the mask rather than the mask itself.  */
2738
2739 static void
2740 optimize_atomic_bit_test_and (gimple_stmt_iterator *gsip,
2741 enum internal_fn fn, bool has_model_arg,
2742 bool after)
2743 {
2744 gimple *call = gsi_stmt (*gsip);
2745 tree lhs = gimple_call_lhs (call);
2746 use_operand_p use_p;
2747 gimple *use_stmt;
2748 tree mask, bit;
2749 optab optab;
2750
2751 if (!flag_inline_atomics
2752 || optimize_debug
2753 || !gimple_call_builtin_p (call, BUILT_IN_NORMAL)
2754 || !lhs
2755 || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs)
2756 || !single_imm_use (lhs, &use_p, &use_stmt)
2757 || !is_gimple_assign (use_stmt)
2758 || gimple_assign_rhs_code (use_stmt) != BIT_AND_EXPR
2759 || !gimple_vdef (call))
2760 return;
2761
2762 switch (fn)
2763 {
2764 case IFN_ATOMIC_BIT_TEST_AND_SET:
2765 optab = atomic_bit_test_and_set_optab;
2766 break;
2767 case IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT:
2768 optab = atomic_bit_test_and_complement_optab;
2769 break;
2770 case IFN_ATOMIC_BIT_TEST_AND_RESET:
2771 optab = atomic_bit_test_and_reset_optab;
2772 break;
2773 default:
2774 return;
2775 }
2776
2777 if (optab_handler (optab, TYPE_MODE (TREE_TYPE (lhs))) == CODE_FOR_nothing)
2778 return;
2779
2780 mask = gimple_call_arg (call, 1);
2781 tree use_lhs = gimple_assign_lhs (use_stmt);
2782 if (!use_lhs)
2783 return;
2784
2785 if (TREE_CODE (mask) == INTEGER_CST)
2786 {
2787 if (fn == IFN_ATOMIC_BIT_TEST_AND_RESET)
2788 mask = const_unop (BIT_NOT_EXPR, TREE_TYPE (mask), mask);
2789 mask = fold_convert (TREE_TYPE (lhs), mask);
2790 int ibit = tree_log2 (mask);
2791 if (ibit < 0)
2792 return;
2793 bit = build_int_cst (TREE_TYPE (lhs), ibit);
2794 }
2795 else if (TREE_CODE (mask) == SSA_NAME)
2796 {
2797 gimple *g = SSA_NAME_DEF_STMT (mask);
2798 if (fn == IFN_ATOMIC_BIT_TEST_AND_RESET)
2799 {
2800 if (!is_gimple_assign (g)
2801 || gimple_assign_rhs_code (g) != BIT_NOT_EXPR)
2802 return;
2803 mask = gimple_assign_rhs1 (g);
2804 if (TREE_CODE (mask) != SSA_NAME)
2805 return;
2806 g = SSA_NAME_DEF_STMT (mask);
2807 }
2808 if (!is_gimple_assign (g)
2809 || gimple_assign_rhs_code (g) != LSHIFT_EXPR
2810 || !integer_onep (gimple_assign_rhs1 (g)))
2811 return;
2812 bit = gimple_assign_rhs2 (g);
2813 }
2814 else
2815 return;
2816
2817 if (gimple_assign_rhs1 (use_stmt) == lhs)
2818 {
2819 if (!operand_equal_p (gimple_assign_rhs2 (use_stmt), mask, 0))
2820 return;
2821 }
2822 else if (gimple_assign_rhs2 (use_stmt) != lhs
2823 || !operand_equal_p (gimple_assign_rhs1 (use_stmt), mask, 0))
2824 return;
2825
2826 bool use_bool = true;
2827 bool has_debug_uses = false;
2828 imm_use_iterator iter;
2829 gimple *g;
2830
2831 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use_lhs))
2832 use_bool = false;
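  /* Check whether every use of the masked result merely tests it against
     zero; if so, the internal function may return a simple 0/1 flag
     (use_bool) instead of the shifted bit value.  */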
2833 FOR_EACH_IMM_USE_STMT (g, iter, use_lhs)
2834 {
2835 enum tree_code code = ERROR_MARK;
2836 tree op0 = NULL_TREE, op1 = NULL_TREE;
2837 if (is_gimple_debug (g))
2838 {
2839 has_debug_uses = true;
2840 continue;
2841 }
2842 else if (is_gimple_assign (g))
2843 switch (gimple_assign_rhs_code (g))
2844 {
2845 case COND_EXPR:
2846 op1 = gimple_assign_rhs1 (g);
2847 code = TREE_CODE (op1);
2848 op0 = TREE_OPERAND (op1, 0);
2849 op1 = TREE_OPERAND (op1, 1);
2850 break;
2851 case EQ_EXPR:
2852 case NE_EXPR:
2853 code = gimple_assign_rhs_code (g);
2854 op0 = gimple_assign_rhs1 (g);
2855 op1 = gimple_assign_rhs2 (g);
2856 break;
2857 default:
2858 break;
2859 }
2860 else if (gimple_code (g) == GIMPLE_COND)
2861 {
2862 code = gimple_cond_code (g);
2863 op0 = gimple_cond_lhs (g);
2864 op1 = gimple_cond_rhs (g);
2865 }
2866
2867 if ((code == EQ_EXPR || code == NE_EXPR)
2868 && op0 == use_lhs
2869 && integer_zerop (op1))
2870 {
2871 use_operand_p use_p;
2872 int n = 0;
2873 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2874 n++;
2875 if (n == 1)
2876 continue;
2877 }
2878
2879 use_bool = false;
2880 BREAK_FROM_IMM_USE_STMT (iter);
2881 }
2882
2883 tree new_lhs = make_ssa_name (TREE_TYPE (lhs));
2884 tree flag = build_int_cst (TREE_TYPE (lhs), use_bool);
2885 if (has_model_arg)
2886 g = gimple_build_call_internal (fn, 4, gimple_call_arg (call, 0),
2887 bit, flag, gimple_call_arg (call, 2));
2888 else
2889 g = gimple_build_call_internal (fn, 3, gimple_call_arg (call, 0),
2890 bit, flag);
2891 gimple_call_set_lhs (g, new_lhs);
2892 gimple_set_location (g, gimple_location (call));
2893 gimple_set_vuse (g, gimple_vuse (call));
2894 gimple_set_vdef (g, gimple_vdef (call));
2895 bool throws = stmt_can_throw_internal (call);
2896 gimple_call_set_nothrow (as_a <gcall *> (g),
2897 gimple_call_nothrow_p (as_a <gcall *> (call)));
2898 SSA_NAME_DEF_STMT (gimple_vdef (call)) = g;
2899 gimple_stmt_iterator gsi = *gsip;
2900 gsi_insert_after (&gsi, g, GSI_NEW_STMT);
2901 edge e = NULL;
2902 if (throws)
2903 {
2904 maybe_clean_or_replace_eh_stmt (call, g);
2905 if (after || (use_bool && has_debug_uses))
2906 e = find_fallthru_edge (gsi_bb (gsi)->succs);
2907 }
2908 if (after)
2909 {
2910 /* The internal function returns the value of the specified bit
2911 before the atomic operation. If we are interested in the value
2912 	 of the specified bit after the atomic operation (this only makes sense
2913 	 for xor, otherwise the bit's content is known at compile time),
2914 we need to invert the bit. */
2915 g = gimple_build_assign (make_ssa_name (TREE_TYPE (lhs)),
2916 BIT_XOR_EXPR, new_lhs,
2917 use_bool ? build_int_cst (TREE_TYPE (lhs), 1)
2918 : mask);
2919 new_lhs = gimple_assign_lhs (g);
2920 if (throws)
2921 {
2922 gsi_insert_on_edge_immediate (e, g);
2923 gsi = gsi_for_stmt (g);
2924 }
2925 else
2926 gsi_insert_after (&gsi, g, GSI_NEW_STMT);
2927 }
2928 if (use_bool && has_debug_uses)
2929 {
2930 tree temp = NULL_TREE;
2931 if (!throws || after || single_pred_p (e->dest))
2932 {
2933 temp = make_node (DEBUG_EXPR_DECL);
2934 DECL_ARTIFICIAL (temp) = 1;
2935 TREE_TYPE (temp) = TREE_TYPE (lhs);
2936 SET_DECL_MODE (temp, TYPE_MODE (TREE_TYPE (lhs)));
2937 tree t = build2 (LSHIFT_EXPR, TREE_TYPE (lhs), new_lhs, bit);
2938 g = gimple_build_debug_bind (temp, t, g);
2939 if (throws && !after)
2940 {
2941 gsi = gsi_after_labels (e->dest);
2942 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
2943 }
2944 else
2945 gsi_insert_after (&gsi, g, GSI_NEW_STMT);
2946 }
2947 FOR_EACH_IMM_USE_STMT (g, iter, use_lhs)
2948 if (is_gimple_debug (g))
2949 {
2950 use_operand_p use_p;
2951 if (temp == NULL_TREE)
2952 gimple_debug_bind_reset_value (g);
2953 else
2954 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2955 SET_USE (use_p, temp);
2956 update_stmt (g);
2957 }
2958 }
2959 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_lhs)
2960 = SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use_lhs);
2961 replace_uses_by (use_lhs, new_lhs);
2962 gsi = gsi_for_stmt (use_stmt);
2963 gsi_remove (&gsi, true);
2964 release_defs (use_stmt);
2965 gsi_remove (gsip, true);
2966 release_ssa_name (lhs);
2967 }
2968
2969 /* Optimize
2970 a = {};
2971 b = a;
2972 into
2973 a = {};
2974 b = {};
2975 Similarly for memset (&a, ..., sizeof (a)); instead of a = {};
2976 and/or memcpy (&b, &a, sizeof (a)); instead of b = a; */
2977
2978 static void
2979 optimize_memcpy (gimple_stmt_iterator *gsip, tree dest, tree src, tree len)
2980 {
2981 gimple *stmt = gsi_stmt (*gsip);
2982 if (gimple_has_volatile_ops (stmt))
2983 return;
2984
2985 tree vuse = gimple_vuse (stmt);
2986 if (vuse == NULL)
2987 return;
2988
2989 gimple *defstmt = SSA_NAME_DEF_STMT (vuse);
2990 tree src2 = NULL_TREE, len2 = NULL_TREE;
2991 HOST_WIDE_INT offset, offset2;
2992 tree val = integer_zero_node;
2993 if (gimple_store_p (defstmt)
2994 && gimple_assign_single_p (defstmt)
2995 && TREE_CODE (gimple_assign_rhs1 (defstmt)) == CONSTRUCTOR
2996 && !gimple_clobber_p (defstmt))
2997 src2 = gimple_assign_lhs (defstmt);
2998 else if (gimple_call_builtin_p (defstmt, BUILT_IN_MEMSET)
2999 && TREE_CODE (gimple_call_arg (defstmt, 0)) == ADDR_EXPR
3000 && TREE_CODE (gimple_call_arg (defstmt, 1)) == INTEGER_CST)
3001 {
3002 src2 = TREE_OPERAND (gimple_call_arg (defstmt, 0), 0);
3003 len2 = gimple_call_arg (defstmt, 2);
3004 val = gimple_call_arg (defstmt, 1);
3005 /* For non-0 val, we'd have to transform stmt from assignment
3006 into memset (only if dest is addressable). */
3007 if (!integer_zerop (val) && is_gimple_assign (stmt))
3008 src2 = NULL_TREE;
3009 }
3010
3011 if (src2 == NULL_TREE)
3012 return;
3013
3014 if (len == NULL_TREE)
3015 len = (TREE_CODE (src) == COMPONENT_REF
3016 ? DECL_SIZE_UNIT (TREE_OPERAND (src, 1))
3017 : TYPE_SIZE_UNIT (TREE_TYPE (src)));
3018 if (len2 == NULL_TREE)
3019 len2 = (TREE_CODE (src2) == COMPONENT_REF
3020 ? DECL_SIZE_UNIT (TREE_OPERAND (src2, 1))
3021 : TYPE_SIZE_UNIT (TREE_TYPE (src2)));
3022 if (len == NULL_TREE
3023 || TREE_CODE (len) != INTEGER_CST
3024 || len2 == NULL_TREE
3025 || TREE_CODE (len2) != INTEGER_CST)
3026 return;
3027
3028 src = get_addr_base_and_unit_offset (src, &offset);
3029 src2 = get_addr_base_and_unit_offset (src2, &offset2);
3030 if (src == NULL_TREE
3031 || src2 == NULL_TREE
3032 || offset < offset2)
3033 return;
3034
3035 if (!operand_equal_p (src, src2, 0))
3036 return;
3037
3038 /* [ src + offset2, src + offset2 + len2 - 1 ] is set to val.
3039 Make sure that
3040 [ src + offset, src + offset + len - 1 ] is a subset of that. */
3041 if (wi::to_offset (len) + (offset - offset2) > wi::to_offset (len2))
3042 return;
3043
3044 if (dump_file && (dump_flags & TDF_DETAILS))
3045 {
3046 fprintf (dump_file, "Simplified\n ");
3047 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
3048 fprintf (dump_file, "after previous\n ");
3049 print_gimple_stmt (dump_file, defstmt, 0, dump_flags);
3050 }
3051
3052 /* For simplicity, don't change the kind of the stmt,
3053 turn dest = src; into dest = {}; and memcpy (&dest, &src, len);
3054 into memset (&dest, val, len);
3055 In theory we could change dest = src into memset if dest
3056 is addressable (maybe beneficial if val is not 0), or
3057 memcpy (&dest, &src, len) into dest = {} if len is the size
3058      of dest and dest isn't volatile.  */
3059 if (is_gimple_assign (stmt))
3060 {
3061 tree ctor = build_constructor (TREE_TYPE (dest), NULL);
3062 gimple_assign_set_rhs_from_tree (gsip, ctor);
3063 update_stmt (stmt);
3064 }
3065 else /* If stmt is memcpy, transform it into memset. */
3066 {
3067 gcall *call = as_a <gcall *> (stmt);
3068 tree fndecl = builtin_decl_implicit (BUILT_IN_MEMSET);
3069 gimple_call_set_fndecl (call, fndecl);
3070 gimple_call_set_fntype (call, TREE_TYPE (fndecl));
3071 gimple_call_set_arg (call, 1, val);
3072 update_stmt (stmt);
3073 }
3074
3075 if (dump_file && (dump_flags & TDF_DETAILS))
3076 {
3077 fprintf (dump_file, "into\n ");
3078 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
3079 }
3080 }
3081
3082 /* A simple pass that attempts to fold all builtin functions. This pass
3083 is run after we've propagated as many constants as we can. */
3084
3085 namespace {
3086
3087 const pass_data pass_data_fold_builtins =
3088 {
3089 GIMPLE_PASS, /* type */
3090 "fab", /* name */
3091 OPTGROUP_NONE, /* optinfo_flags */
3092 TV_NONE, /* tv_id */
3093 ( PROP_cfg | PROP_ssa ), /* properties_required */
3094 0, /* properties_provided */
3095 0, /* properties_destroyed */
3096 0, /* todo_flags_start */
3097 TODO_update_ssa, /* todo_flags_finish */
3098 };
3099
3100 class pass_fold_builtins : public gimple_opt_pass
3101 {
3102 public:
3103 pass_fold_builtins (gcc::context *ctxt)
3104 : gimple_opt_pass (pass_data_fold_builtins, ctxt)
3105 {}
3106
3107 /* opt_pass methods: */
3108 opt_pass * clone () { return new pass_fold_builtins (m_ctxt); }
3109 virtual unsigned int execute (function *);
3110
3111 }; // class pass_fold_builtins
3112
3113 unsigned int
3114 pass_fold_builtins::execute (function *fun)
3115 {
3116 bool cfg_changed = false;
3117 basic_block bb;
3118 unsigned int todoflags = 0;
3119
3120 FOR_EACH_BB_FN (bb, fun)
3121 {
3122 gimple_stmt_iterator i;
3123 for (i = gsi_start_bb (bb); !gsi_end_p (i); )
3124 {
3125 gimple *stmt, *old_stmt;
3126 tree callee;
3127 enum built_in_function fcode;
3128
3129 stmt = gsi_stmt (i);
3130
3131 if (gimple_code (stmt) != GIMPLE_CALL)
3132 {
3133 	      /* Remove all *ssaname_N ={v} {CLOBBER}; stmts;
3134 after the last GIMPLE DSE they aren't needed and might
3135 unnecessarily keep the SSA_NAMEs live. */
3136 if (gimple_clobber_p (stmt))
3137 {
3138 tree lhs = gimple_assign_lhs (stmt);
3139 if (TREE_CODE (lhs) == MEM_REF
3140 && TREE_CODE (TREE_OPERAND (lhs, 0)) == SSA_NAME)
3141 {
3142 unlink_stmt_vdef (stmt);
3143 gsi_remove (&i, true);
3144 release_defs (stmt);
3145 continue;
3146 }
3147 }
3148 else if (gimple_assign_load_p (stmt) && gimple_store_p (stmt))
3149 optimize_memcpy (&i, gimple_assign_lhs (stmt),
3150 gimple_assign_rhs1 (stmt), NULL_TREE);
3151 gsi_next (&i);
3152 continue;
3153 }
3154
3155 callee = gimple_call_fndecl (stmt);
3156 if (!callee || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL)
3157 {
3158 gsi_next (&i);
3159 continue;
3160 }
3161
3162 fcode = DECL_FUNCTION_CODE (callee);
3163 if (fold_stmt (&i))
3164 ;
3165 else
3166 {
3167 tree result = NULL_TREE;
3168 switch (DECL_FUNCTION_CODE (callee))
3169 {
3170 case BUILT_IN_CONSTANT_P:
3171 /* Resolve __builtin_constant_p. If it hasn't been
3172 folded to integer_one_node by now, it's fairly
3173 certain that the value simply isn't constant. */
3174 result = integer_zero_node;
3175 break;
3176
3177 case BUILT_IN_ASSUME_ALIGNED:
3178 /* Remove __builtin_assume_aligned. */
3179 result = gimple_call_arg (stmt, 0);
3180 break;
3181
3182 case BUILT_IN_STACK_RESTORE:
3183 result = optimize_stack_restore (i);
3184 if (result)
3185 break;
3186 gsi_next (&i);
3187 continue;
3188
3189 case BUILT_IN_UNREACHABLE:
3190 if (optimize_unreachable (i))
3191 cfg_changed = true;
3192 break;
3193
3194 case BUILT_IN_ATOMIC_FETCH_OR_1:
3195 case BUILT_IN_ATOMIC_FETCH_OR_2:
3196 case BUILT_IN_ATOMIC_FETCH_OR_4:
3197 case BUILT_IN_ATOMIC_FETCH_OR_8:
3198 case BUILT_IN_ATOMIC_FETCH_OR_16:
3199 optimize_atomic_bit_test_and (&i,
3200 IFN_ATOMIC_BIT_TEST_AND_SET,
3201 true, false);
3202 break;
3203 case BUILT_IN_SYNC_FETCH_AND_OR_1:
3204 case BUILT_IN_SYNC_FETCH_AND_OR_2:
3205 case BUILT_IN_SYNC_FETCH_AND_OR_4:
3206 case BUILT_IN_SYNC_FETCH_AND_OR_8:
3207 case BUILT_IN_SYNC_FETCH_AND_OR_16:
3208 optimize_atomic_bit_test_and (&i,
3209 IFN_ATOMIC_BIT_TEST_AND_SET,
3210 false, false);
3211 break;
3212
3213 case BUILT_IN_ATOMIC_FETCH_XOR_1:
3214 case BUILT_IN_ATOMIC_FETCH_XOR_2:
3215 case BUILT_IN_ATOMIC_FETCH_XOR_4:
3216 case BUILT_IN_ATOMIC_FETCH_XOR_8:
3217 case BUILT_IN_ATOMIC_FETCH_XOR_16:
3218 optimize_atomic_bit_test_and
3219 (&i, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, true, false);
3220 break;
3221 case BUILT_IN_SYNC_FETCH_AND_XOR_1:
3222 case BUILT_IN_SYNC_FETCH_AND_XOR_2:
3223 case BUILT_IN_SYNC_FETCH_AND_XOR_4:
3224 case BUILT_IN_SYNC_FETCH_AND_XOR_8:
3225 case BUILT_IN_SYNC_FETCH_AND_XOR_16:
3226 optimize_atomic_bit_test_and
3227 (&i, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, false, false);
3228 break;
3229
3230 case BUILT_IN_ATOMIC_XOR_FETCH_1:
3231 case BUILT_IN_ATOMIC_XOR_FETCH_2:
3232 case BUILT_IN_ATOMIC_XOR_FETCH_4:
3233 case BUILT_IN_ATOMIC_XOR_FETCH_8:
3234 case BUILT_IN_ATOMIC_XOR_FETCH_16:
3235 optimize_atomic_bit_test_and
3236 (&i, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, true, true);
3237 break;
3238 case BUILT_IN_SYNC_XOR_AND_FETCH_1:
3239 case BUILT_IN_SYNC_XOR_AND_FETCH_2:
3240 case BUILT_IN_SYNC_XOR_AND_FETCH_4:
3241 case BUILT_IN_SYNC_XOR_AND_FETCH_8:
3242 case BUILT_IN_SYNC_XOR_AND_FETCH_16:
3243 optimize_atomic_bit_test_and
3244 (&i, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, false, true);
3245 break;
3246
3247 case BUILT_IN_ATOMIC_FETCH_AND_1:
3248 case BUILT_IN_ATOMIC_FETCH_AND_2:
3249 case BUILT_IN_ATOMIC_FETCH_AND_4:
3250 case BUILT_IN_ATOMIC_FETCH_AND_8:
3251 case BUILT_IN_ATOMIC_FETCH_AND_16:
3252 optimize_atomic_bit_test_and (&i,
3253 IFN_ATOMIC_BIT_TEST_AND_RESET,
3254 true, false);
3255 break;
3256 case BUILT_IN_SYNC_FETCH_AND_AND_1:
3257 case BUILT_IN_SYNC_FETCH_AND_AND_2:
3258 case BUILT_IN_SYNC_FETCH_AND_AND_4:
3259 case BUILT_IN_SYNC_FETCH_AND_AND_8:
3260 case BUILT_IN_SYNC_FETCH_AND_AND_16:
3261 optimize_atomic_bit_test_and (&i,
3262 IFN_ATOMIC_BIT_TEST_AND_RESET,
3263 false, false);
3264 break;
3265
3266 case BUILT_IN_MEMCPY:
3267 if (gimple_call_builtin_p (stmt, BUILT_IN_NORMAL)
3268 && TREE_CODE (gimple_call_arg (stmt, 0)) == ADDR_EXPR
3269 && TREE_CODE (gimple_call_arg (stmt, 1)) == ADDR_EXPR
3270 && TREE_CODE (gimple_call_arg (stmt, 2)) == INTEGER_CST)
3271 {
3272 tree dest = TREE_OPERAND (gimple_call_arg (stmt, 0), 0);
3273 tree src = TREE_OPERAND (gimple_call_arg (stmt, 1), 0);
3274 tree len = gimple_call_arg (stmt, 2);
3275 optimize_memcpy (&i, dest, src, len);
3276 }
3277 break;
3278
3279 case BUILT_IN_VA_START:
3280 case BUILT_IN_VA_END:
3281 case BUILT_IN_VA_COPY:
3282 /* These shouldn't be folded before pass_stdarg. */
3283 result = optimize_stdarg_builtin (stmt);
3284 break;
3285
3286 default:;
3287 }
3288
3289 if (!result)
3290 {
3291 gsi_next (&i);
3292 continue;
3293 }
3294
3295 if (!update_call_from_tree (&i, result))
3296 gimplify_and_update_call_from_tree (&i, result);
3297 }
3298
3299 todoflags |= TODO_update_address_taken;
3300
3301 if (dump_file && (dump_flags & TDF_DETAILS))
3302 {
3303 fprintf (dump_file, "Simplified\n ");
3304 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
3305 }
3306
3307 old_stmt = stmt;
3308 stmt = gsi_stmt (i);
3309 update_stmt (stmt);
3310
3311 if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt)
3312 && gimple_purge_dead_eh_edges (bb))
3313 cfg_changed = true;
3314
3315 if (dump_file && (dump_flags & TDF_DETAILS))
3316 {
3317 fprintf (dump_file, "to\n ");
3318 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
3319 fprintf (dump_file, "\n");
3320 }
3321
3322 /* Retry the same statement if it changed into another
3323 	     builtin; there might be new opportunities now.  */
3324 if (gimple_code (stmt) != GIMPLE_CALL)
3325 {
3326 gsi_next (&i);
3327 continue;
3328 }
3329 callee = gimple_call_fndecl (stmt);
3330 if (!callee
3331 || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL
3332 || DECL_FUNCTION_CODE (callee) == fcode)
3333 gsi_next (&i);
3334 }
3335 }
3336
3337 /* Delete unreachable blocks. */
3338 if (cfg_changed)
3339 todoflags |= TODO_cleanup_cfg;
3340
3341 return todoflags;
3342 }
3343
3344 } // anon namespace
3345
3346 gimple_opt_pass *
3347 make_pass_fold_builtins (gcc::context *ctxt)
3348 {
3349 return new pass_fold_builtins (ctxt);
3350 }
3351
3352 /* A simple pass that emits some warnings post IPA. */
3353
3354 namespace {
3355
3356 const pass_data pass_data_post_ipa_warn =
3357 {
3358 GIMPLE_PASS, /* type */
3359 "post_ipa_warn", /* name */
3360 OPTGROUP_NONE, /* optinfo_flags */
3361 TV_NONE, /* tv_id */
3362 ( PROP_cfg | PROP_ssa ), /* properties_required */
3363 0, /* properties_provided */
3364 0, /* properties_destroyed */
3365 0, /* todo_flags_start */
3366 0, /* todo_flags_finish */
3367 };
3368
3369 class pass_post_ipa_warn : public gimple_opt_pass
3370 {
3371 public:
3372 pass_post_ipa_warn (gcc::context *ctxt)
3373 : gimple_opt_pass (pass_data_post_ipa_warn, ctxt)
3374 {}
3375
3376 /* opt_pass methods: */
3377 opt_pass * clone () { return new pass_post_ipa_warn (m_ctxt); }
3378 virtual bool gate (function *) { return warn_nonnull != 0; }
3379 virtual unsigned int execute (function *);
3380
3381 }; // class pass_post_ipa_warn
3382
3383 unsigned int
3384 pass_post_ipa_warn::execute (function *fun)
3385 {
3386 basic_block bb;
3387
3388 FOR_EACH_BB_FN (bb, fun)
3389 {
3390 gimple_stmt_iterator gsi;
3391 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3392 {
3393 gimple *stmt = gsi_stmt (gsi);
3394 if (!is_gimple_call (stmt) || gimple_no_warning_p (stmt))
3395 continue;
3396
3397 if (warn_nonnull)
3398 {
3399 bitmap nonnullargs
3400 = get_nonnull_args (gimple_call_fntype (stmt));
3401 if (nonnullargs)
3402 {
3403 for (unsigned i = 0; i < gimple_call_num_args (stmt); i++)
3404 {
3405 tree arg = gimple_call_arg (stmt, i);
3406 if (TREE_CODE (TREE_TYPE (arg)) != POINTER_TYPE)
3407 continue;
3408 if (!integer_zerop (arg))
3409 continue;
3410 if (!bitmap_empty_p (nonnullargs)
3411 && !bitmap_bit_p (nonnullargs, i))
3412 continue;
3413
3414 location_t loc = gimple_location (stmt);
3415 if (warning_at (loc, OPT_Wnonnull,
3416 "argument %u null where non-null "
3417 "expected", i + 1))
3418 {
3419 tree fndecl = gimple_call_fndecl (stmt);
3420 if (fndecl && DECL_IS_BUILTIN (fndecl))
3421 inform (loc, "in a call to built-in function %qD",
3422 fndecl);
3423 else if (fndecl)
3424 inform (DECL_SOURCE_LOCATION (fndecl),
3425 "in a call to function %qD declared here",
3426 fndecl);
3427
3428 }
3429 }
3430 BITMAP_FREE (nonnullargs);
3431 }
3432 }
3433 }
3434 }
3435 return 0;
3436 }
3437
3438 } // anon namespace
3439
3440 gimple_opt_pass *
3441 make_pass_post_ipa_warn (gcc::context *ctxt)
3442 {
3443 return new pass_post_ipa_warn (ctxt);
3444 }