1 /* Conditional constant propagation pass for the GNU compiler.
2 Copyright (C) 2000-2017 Free Software Foundation, Inc.
3 Adapted from original RTL SSA-CCP by Daniel Berlin <dberlin@dberlin.org>
4 Adapted to GIMPLE trees by Diego Novillo <dnovillo@redhat.com>
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it
9 under the terms of the GNU General Public License as published by the
10 Free Software Foundation; either version 3, or (at your option) any
11 later version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT
14 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 /* Conditional constant propagation (CCP) is based on the SSA
23 propagation engine (tree-ssa-propagate.c). Constant assignments of
24 the form VAR = CST are propagated from the assignments into uses of
25 VAR, which in turn may generate new constants. The simulation uses
26 a four level lattice to keep track of constant values associated
27 with SSA names. Given an SSA name V_i, it may take one of the
28 following values:
29
30 UNINITIALIZED -> the initial state of the value. This value
31 is replaced with a correct initial value
32 the first time the value is used, so the
33 rest of the pass does not need to care about
34 it. Using this value simplifies initialization
35 of the pass, and prevents us from needlessly
36 scanning statements that are never reached.
37
38 UNDEFINED -> V_i is a local variable whose definition
39 has not been processed yet. Therefore we
40 don't yet know if its value is a constant
41 or not.
42
43 CONSTANT -> V_i has been found to hold a constant
44 value C.
45
46 VARYING -> V_i cannot take a constant value, or if it
47 does, it is not possible to determine it
48 at compile time.
49
 50 The core of SSA-CCP is in ccp_propagate::visit_stmt and ccp_propagate::visit_phi:
51
 52 1- In ccp_propagate::visit_stmt, we are interested in assignments whose RHS
53 evaluates into a constant and conditional jumps whose predicate
54 evaluates into a boolean true or false. When an assignment of
55 the form V_i = CONST is found, V_i's lattice value is set to
56 CONSTANT and CONST is associated with it. This causes the
 57 propagation engine to add all the SSA edges coming out of the
58 assignment into the worklists, so that statements that use V_i
59 can be visited.
60
61 If the statement is a conditional with a constant predicate, we
62 mark the outgoing edges as executable or not executable
63 depending on the predicate's value. This is then used when
64 visiting PHI nodes to know when a PHI argument can be ignored.
65
66
 67 2- In ccp_propagate::visit_phi, if all the PHI arguments evaluate to the
68 same constant C, then the LHS of the PHI is set to C. This
69 evaluation is known as the "meet operation". Since one of the
70 goals of this evaluation is to optimistically return constant
71 values as often as possible, it uses two main short cuts:
72
73 - If an argument is flowing in through a non-executable edge, it
74 is ignored. This is useful in cases like this:
75
76 if (PRED)
77 a_9 = 3;
78 else
79 a_10 = 100;
80 a_11 = PHI (a_9, a_10)
81
82 If PRED is known to always evaluate to false, then we can
83 assume that a_11 will always take its value from a_10, meaning
 84 that instead of considering it VARYING (a_9 and a_10 have
85 different values), we can consider it CONSTANT 100.
86
87 - If an argument has an UNDEFINED value, then it does not affect
88 the outcome of the meet operation. If a variable V_i has an
89 UNDEFINED value, it means that either its defining statement
90 hasn't been visited yet or V_i has no defining statement, in
91 which case the original symbol 'V' is being used
92 uninitialized. Since 'V' is a local variable, the compiler
93 may assume any initial value for it.
94
95
96 After propagation, every variable V_i that ends up with a lattice
97 value of CONSTANT will have the associated constant value in the
98 array CONST_VAL[i].VALUE. That is fed into substitute_and_fold for
99 final substitution and folding.
100
101 This algorithm uses wide-ints at the max precision of the target.
102 This means that, with one uninteresting exception, variables with
103 UNSIGNED types never go to VARYING because the bits above the
104 precision of the type of the variable are always zero. The
105 uninteresting case is a variable of UNSIGNED type that has the
106 maximum precision of the target. Such variables can go to VARYING,
 107 but this causes no loss of information since these variables will
108 never be extended.
109
110 References:
111
112 Constant propagation with conditional branches,
113 Wegman and Zadeck, ACM TOPLAS 13(2):181-210.
114
115 Building an Optimizing Compiler,
116 Robert Morgan, Butterworth-Heinemann, 1998, Section 8.9.
117
118 Advanced Compiler Design and Implementation,
119 Steven Muchnick, Morgan Kaufmann, 1997, Section 12.6 */
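
/* As a small end-to-end illustration of the scheme described above
   (hypothetical GIMPLE, in the same style as the PHI example):

       x_1 = 4;
       y_2 = x_1 + 1;
       if (y_2 > 10)
         z_3 = 1;
       else
         z_4 = 2;
       z_5 = PHI (z_3, z_4)

   Visiting the assignments gives x_1 the lattice value CONSTANT 4 and
   y_2 the value CONSTANT 5.  The predicate 5 > 10 folds to false, so
   only the else edge is marked executable; the PHI node therefore only
   meets z_4, z_5 becomes CONSTANT 2, and substitute_and_fold can
   substitute 2 into the uses of z_5.  */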
120
121 #include "config.h"
122 #include "system.h"
123 #include "coretypes.h"
124 #include "backend.h"
125 #include "target.h"
126 #include "tree.h"
127 #include "gimple.h"
128 #include "tree-pass.h"
129 #include "ssa.h"
130 #include "gimple-pretty-print.h"
131 #include "fold-const.h"
132 #include "gimple-fold.h"
133 #include "tree-eh.h"
134 #include "gimplify.h"
135 #include "gimple-iterator.h"
136 #include "tree-cfg.h"
137 #include "tree-ssa-propagate.h"
138 #include "dbgcnt.h"
139 #include "params.h"
140 #include "builtins.h"
141 #include "tree-chkp.h"
142 #include "cfgloop.h"
143 #include "stor-layout.h"
144 #include "optabs-query.h"
145 #include "tree-ssa-ccp.h"
146 #include "tree-dfa.h"
147 #include "diagnostic-core.h"
148 #include "stringpool.h"
149 #include "attribs.h"
150
151 /* Possible lattice values. */
152 typedef enum
153 {
154 UNINITIALIZED,
155 UNDEFINED,
156 CONSTANT,
157 VARYING
158 } ccp_lattice_t;
159
160 struct ccp_prop_value_t {
161 /* Lattice value. */
162 ccp_lattice_t lattice_val;
163
164 /* Propagated value. */
165 tree value;
166
167 /* Mask that applies to the propagated value during CCP. For X
168 with a CONSTANT lattice value X & ~mask == value & ~mask. The
169 zero bits in the mask cover constant values. The ones mean no
170 information. */
171 widest_int mask;
172 };
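
/* For example (an illustrative encoding): a CONSTANT lattice value with
   value == 0x8 and mask == 0x3 describes an X whose bits above the low
   two are known to match 0b1000 while the low two bits are unknown,
   i.e. X is one of 8, 9, 10 or 11.  A fully known constant has
   mask == 0; a mask covering every bit of the type's precision means
   nothing is known and corresponds to VARYING.  */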
173
174 class ccp_propagate : public ssa_propagation_engine
175 {
176 public:
177 enum ssa_prop_result visit_stmt (gimple *, edge *, tree *) FINAL OVERRIDE;
178 enum ssa_prop_result visit_phi (gphi *) FINAL OVERRIDE;
179 };
180
 181 /* Array of propagated constant values. After propagation,
 182 CONST_VAL[I].VALUE holds the constant value for SSA_NAME(I). */
187 static ccp_prop_value_t *const_val;
188 static unsigned n_const_val;
189
190 static void canonicalize_value (ccp_prop_value_t *);
191 static bool ccp_fold_stmt (gimple_stmt_iterator *);
192 static void ccp_lattice_meet (ccp_prop_value_t *, ccp_prop_value_t *);
193
194 /* Dump constant propagation value VAL to file OUTF prefixed by PREFIX. */
195
196 static void
197 dump_lattice_value (FILE *outf, const char *prefix, ccp_prop_value_t val)
198 {
199 switch (val.lattice_val)
200 {
201 case UNINITIALIZED:
202 fprintf (outf, "%sUNINITIALIZED", prefix);
203 break;
204 case UNDEFINED:
205 fprintf (outf, "%sUNDEFINED", prefix);
206 break;
207 case VARYING:
208 fprintf (outf, "%sVARYING", prefix);
209 break;
210 case CONSTANT:
211 if (TREE_CODE (val.value) != INTEGER_CST
212 || val.mask == 0)
213 {
214 fprintf (outf, "%sCONSTANT ", prefix);
215 print_generic_expr (outf, val.value, dump_flags);
216 }
217 else
218 {
219 widest_int cval = wi::bit_and_not (wi::to_widest (val.value),
220 val.mask);
221 fprintf (outf, "%sCONSTANT ", prefix);
222 print_hex (cval, outf);
223 fprintf (outf, " (");
224 print_hex (val.mask, outf);
225 fprintf (outf, ")");
226 }
227 break;
228 default:
229 gcc_unreachable ();
230 }
231 }
232
233
234 /* Print lattice value VAL to stderr. */
235
236 void debug_lattice_value (ccp_prop_value_t val);
237
238 DEBUG_FUNCTION void
239 debug_lattice_value (ccp_prop_value_t val)
240 {
241 dump_lattice_value (stderr, "", val);
242 fprintf (stderr, "\n");
243 }
244
 245 /* Extend NONZERO_BITS to a full mask, based on SGN. */
246
247 static widest_int
248 extend_mask (const wide_int &nonzero_bits, signop sgn)
249 {
250 return widest_int::from (nonzero_bits, sgn);
251 }
252
253 /* Compute a default value for variable VAR and store it in the
254 CONST_VAL array. The following rules are used to get default
255 values:
256
257 1- Global and static variables that are declared constant are
258 considered CONSTANT.
259
260 2- Any other value is considered UNDEFINED. This is useful when
261 considering PHI nodes. PHI arguments that are undefined do not
262 change the constant value of the PHI node, which allows for more
263 constants to be propagated.
264
265 3- Variables defined by statements other than assignments and PHI
266 nodes are considered VARYING.
267
268 4- Initial values of variables that are not GIMPLE registers are
269 considered VARYING. */
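
/* For example (hypothetical declarations, only to illustrate the rules
   above): given

       static const int answer = 42;
       int x;    (a local that is never initialized)

   an SSA name defined by the load tmp_1 = answer starts out CONSTANT 42
   (rule 1), the default definition of x starts out UNDEFINED (rule 2),
   and an SSA name defined by a GIMPLE_ASM starts out VARYING (rule 3).  */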
270
271 static ccp_prop_value_t
272 get_default_value (tree var)
273 {
274 ccp_prop_value_t val = { UNINITIALIZED, NULL_TREE, 0 };
275 gimple *stmt;
276
277 stmt = SSA_NAME_DEF_STMT (var);
278
279 if (gimple_nop_p (stmt))
280 {
281 /* Variables defined by an empty statement are those used
282 before being initialized. If VAR is a local variable, we
283 can assume initially that it is UNDEFINED, otherwise we must
284 consider it VARYING. */
285 if (!virtual_operand_p (var)
286 && SSA_NAME_VAR (var)
287 && TREE_CODE (SSA_NAME_VAR (var)) == VAR_DECL)
288 val.lattice_val = UNDEFINED;
289 else
290 {
291 val.lattice_val = VARYING;
292 val.mask = -1;
293 if (flag_tree_bit_ccp)
294 {
295 wide_int nonzero_bits = get_nonzero_bits (var);
296 if (nonzero_bits != -1)
297 {
298 val.lattice_val = CONSTANT;
299 val.value = build_zero_cst (TREE_TYPE (var));
300 val.mask = extend_mask (nonzero_bits, TYPE_SIGN (TREE_TYPE (var)));
301 }
302 }
303 }
304 }
305 else if (is_gimple_assign (stmt))
306 {
307 tree cst;
308 if (gimple_assign_single_p (stmt)
309 && DECL_P (gimple_assign_rhs1 (stmt))
310 && (cst = get_symbol_constant_value (gimple_assign_rhs1 (stmt))))
311 {
312 val.lattice_val = CONSTANT;
313 val.value = cst;
314 }
315 else
316 {
317 /* Any other variable defined by an assignment is considered
318 UNDEFINED. */
319 val.lattice_val = UNDEFINED;
320 }
321 }
322 else if ((is_gimple_call (stmt)
323 && gimple_call_lhs (stmt) != NULL_TREE)
324 || gimple_code (stmt) == GIMPLE_PHI)
325 {
326 /* A variable defined by a call or a PHI node is considered
327 UNDEFINED. */
328 val.lattice_val = UNDEFINED;
329 }
330 else
331 {
332 /* Otherwise, VAR will never take on a constant value. */
333 val.lattice_val = VARYING;
334 val.mask = -1;
335 }
336
337 return val;
338 }
339
340
341 /* Get the constant value associated with variable VAR. */
342
343 static inline ccp_prop_value_t *
344 get_value (tree var)
345 {
346 ccp_prop_value_t *val;
347
348 if (const_val == NULL
349 || SSA_NAME_VERSION (var) >= n_const_val)
350 return NULL;
351
352 val = &const_val[SSA_NAME_VERSION (var)];
353 if (val->lattice_val == UNINITIALIZED)
354 *val = get_default_value (var);
355
356 canonicalize_value (val);
357
358 return val;
359 }
360
361 /* Return the constant tree value associated with VAR. */
362
363 static inline tree
364 get_constant_value (tree var)
365 {
366 ccp_prop_value_t *val;
367 if (TREE_CODE (var) != SSA_NAME)
368 {
369 if (is_gimple_min_invariant (var))
370 return var;
371 return NULL_TREE;
372 }
373 val = get_value (var);
374 if (val
375 && val->lattice_val == CONSTANT
376 && (TREE_CODE (val->value) != INTEGER_CST
377 || val->mask == 0))
378 return val->value;
379 return NULL_TREE;
380 }
381
382 /* Sets the value associated with VAR to VARYING. */
383
384 static inline void
385 set_value_varying (tree var)
386 {
387 ccp_prop_value_t *val = &const_val[SSA_NAME_VERSION (var)];
388
389 val->lattice_val = VARYING;
390 val->value = NULL_TREE;
391 val->mask = -1;
392 }
393
394 /* For integer constants, make sure to drop TREE_OVERFLOW. */
395
396 static void
397 canonicalize_value (ccp_prop_value_t *val)
398 {
399 if (val->lattice_val != CONSTANT)
400 return;
401
402 if (TREE_OVERFLOW_P (val->value))
403 val->value = drop_tree_overflow (val->value);
404 }
405
406 /* Return whether the lattice transition is valid. */
407
408 static bool
409 valid_lattice_transition (ccp_prop_value_t old_val, ccp_prop_value_t new_val)
410 {
411 /* Lattice transitions must always be monotonically increasing in
412 value. */
413 if (old_val.lattice_val < new_val.lattice_val)
414 return true;
415
416 if (old_val.lattice_val != new_val.lattice_val)
417 return false;
418
419 if (!old_val.value && !new_val.value)
420 return true;
421
422 /* Now both lattice values are CONSTANT. */
423
424 /* Allow arbitrary copy changes as we might look through PHI <a_1, ...>
425 when only a single copy edge is executable. */
426 if (TREE_CODE (old_val.value) == SSA_NAME
427 && TREE_CODE (new_val.value) == SSA_NAME)
428 return true;
429
430 /* Allow transitioning from a constant to a copy. */
431 if (is_gimple_min_invariant (old_val.value)
432 && TREE_CODE (new_val.value) == SSA_NAME)
433 return true;
434
435 /* Allow transitioning from PHI <&x, not executable> == &x
436 to PHI <&x, &y> == common alignment. */
437 if (TREE_CODE (old_val.value) != INTEGER_CST
438 && TREE_CODE (new_val.value) == INTEGER_CST)
439 return true;
440
441 /* Bit-lattices have to agree in the still valid bits. */
442 if (TREE_CODE (old_val.value) == INTEGER_CST
443 && TREE_CODE (new_val.value) == INTEGER_CST)
444 return (wi::bit_and_not (wi::to_widest (old_val.value), new_val.mask)
445 == wi::bit_and_not (wi::to_widest (new_val.value), new_val.mask));
446
447 /* Otherwise constant values have to agree. */
448 if (operand_equal_p (old_val.value, new_val.value, 0))
449 return true;
450
451 /* At least the kinds and types should agree now. */
452 if (TREE_CODE (old_val.value) != TREE_CODE (new_val.value)
453 || !types_compatible_p (TREE_TYPE (old_val.value),
454 TREE_TYPE (new_val.value)))
455 return false;
456
457 /* For floats and !HONOR_NANS allow transitions from (partial) NaN
458 to non-NaN. */
459 tree type = TREE_TYPE (new_val.value);
460 if (SCALAR_FLOAT_TYPE_P (type)
461 && !HONOR_NANS (type))
462 {
463 if (REAL_VALUE_ISNAN (TREE_REAL_CST (old_val.value)))
464 return true;
465 }
466 else if (VECTOR_FLOAT_TYPE_P (type)
467 && !HONOR_NANS (type))
468 {
469 for (unsigned i = 0; i < VECTOR_CST_NELTS (old_val.value); ++i)
470 if (!REAL_VALUE_ISNAN
471 (TREE_REAL_CST (VECTOR_CST_ELT (old_val.value, i)))
472 && !operand_equal_p (VECTOR_CST_ELT (old_val.value, i),
473 VECTOR_CST_ELT (new_val.value, i), 0))
474 return false;
475 return true;
476 }
477 else if (COMPLEX_FLOAT_TYPE_P (type)
478 && !HONOR_NANS (type))
479 {
480 if (!REAL_VALUE_ISNAN (TREE_REAL_CST (TREE_REALPART (old_val.value)))
481 && !operand_equal_p (TREE_REALPART (old_val.value),
482 TREE_REALPART (new_val.value), 0))
483 return false;
484 if (!REAL_VALUE_ISNAN (TREE_REAL_CST (TREE_IMAGPART (old_val.value)))
485 && !operand_equal_p (TREE_IMAGPART (old_val.value),
486 TREE_IMAGPART (new_val.value), 0))
487 return false;
488 return true;
489 }
490 return false;
491 }
492
493 /* Set the value for variable VAR to NEW_VAL. Return true if the new
494 value is different from VAR's previous value. */
495
496 static bool
497 set_lattice_value (tree var, ccp_prop_value_t *new_val)
498 {
499 /* We can deal with old UNINITIALIZED values just fine here. */
500 ccp_prop_value_t *old_val = &const_val[SSA_NAME_VERSION (var)];
501
502 canonicalize_value (new_val);
503
504 /* We have to be careful to not go up the bitwise lattice
505 represented by the mask. Instead of dropping to VARYING
506 use the meet operator to retain a conservative value.
 507 Missed optimizations like PR65851 make this necessary.
508 It also ensures we converge to a stable lattice solution. */
509 if (old_val->lattice_val != UNINITIALIZED)
510 ccp_lattice_meet (new_val, old_val);
511
512 gcc_checking_assert (valid_lattice_transition (*old_val, *new_val));
513
514 /* If *OLD_VAL and NEW_VAL are the same, return false to inform the
515 caller that this was a non-transition. */
516 if (old_val->lattice_val != new_val->lattice_val
517 || (new_val->lattice_val == CONSTANT
518 && (TREE_CODE (new_val->value) != TREE_CODE (old_val->value)
519 || (TREE_CODE (new_val->value) == INTEGER_CST
520 && (new_val->mask != old_val->mask
521 || (wi::bit_and_not (wi::to_widest (old_val->value),
522 new_val->mask)
523 != wi::bit_and_not (wi::to_widest (new_val->value),
524 new_val->mask))))
525 || (TREE_CODE (new_val->value) != INTEGER_CST
526 && !operand_equal_p (new_val->value, old_val->value, 0)))))
527 {
528 /* ??? We would like to delay creation of INTEGER_CSTs from
 529 partial constants here. */
530
531 if (dump_file && (dump_flags & TDF_DETAILS))
532 {
533 dump_lattice_value (dump_file, "Lattice value changed to ", *new_val);
534 fprintf (dump_file, ". Adding SSA edges to worklist.\n");
535 }
536
537 *old_val = *new_val;
538
539 gcc_assert (new_val->lattice_val != UNINITIALIZED);
540 return true;
541 }
542
543 return false;
544 }
545
546 static ccp_prop_value_t get_value_for_expr (tree, bool);
547 static ccp_prop_value_t bit_value_binop (enum tree_code, tree, tree, tree);
548 void bit_value_binop (enum tree_code, signop, int, widest_int *, widest_int *,
549 signop, int, const widest_int &, const widest_int &,
550 signop, int, const widest_int &, const widest_int &);
551
552 /* Return a widest_int that can be used for bitwise simplifications
553 from VAL. */
554
555 static widest_int
556 value_to_wide_int (ccp_prop_value_t val)
557 {
558 if (val.value
559 && TREE_CODE (val.value) == INTEGER_CST)
560 return wi::to_widest (val.value);
561
562 return 0;
563 }
564
565 /* Return the value for the address expression EXPR based on alignment
566 information. */
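
/* For example, if EXPR is &a for a decl 'a' known to be 16-byte aligned
   with no misalignment, get_pointer_alignment_1 reports an alignment of
   128 bits at bit position 0.  The result is then CONSTANT with value 0
   and a mask whose low four bits are clear and whose remaining
   pointer-precision bits are set: the low four bits of the address are
   known to be zero, everything else is unknown.  (Illustrative numbers;
   the actual alignment depends on the decl and the target.)  */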
567
568 static ccp_prop_value_t
569 get_value_from_alignment (tree expr)
570 {
571 tree type = TREE_TYPE (expr);
572 ccp_prop_value_t val;
573 unsigned HOST_WIDE_INT bitpos;
574 unsigned int align;
575
576 gcc_assert (TREE_CODE (expr) == ADDR_EXPR);
577
578 get_pointer_alignment_1 (expr, &align, &bitpos);
579 val.mask = wi::bit_and_not
580 (POINTER_TYPE_P (type) || TYPE_UNSIGNED (type)
581 ? wi::mask <widest_int> (TYPE_PRECISION (type), false)
582 : -1,
583 align / BITS_PER_UNIT - 1);
584 val.lattice_val
585 = wi::sext (val.mask, TYPE_PRECISION (type)) == -1 ? VARYING : CONSTANT;
586 if (val.lattice_val == CONSTANT)
587 val.value = build_int_cstu (type, bitpos / BITS_PER_UNIT);
588 else
589 val.value = NULL_TREE;
590
591 return val;
592 }
593
594 /* Return the value for the tree operand EXPR. If FOR_BITS_P is true
595 return constant bits extracted from alignment information for
596 invariant addresses. */
597
598 static ccp_prop_value_t
599 get_value_for_expr (tree expr, bool for_bits_p)
600 {
601 ccp_prop_value_t val;
602
603 if (TREE_CODE (expr) == SSA_NAME)
604 {
605 ccp_prop_value_t *val_ = get_value (expr);
606 if (val_)
607 val = *val_;
608 else
609 {
610 val.lattice_val = VARYING;
611 val.value = NULL_TREE;
612 val.mask = -1;
613 }
614 if (for_bits_p
615 && val.lattice_val == CONSTANT
616 && TREE_CODE (val.value) == ADDR_EXPR)
617 val = get_value_from_alignment (val.value);
618 /* Fall back to a copy value. */
619 if (!for_bits_p
620 && val.lattice_val == VARYING
621 && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (expr))
622 {
623 val.lattice_val = CONSTANT;
624 val.value = expr;
625 val.mask = -1;
626 }
627 }
628 else if (is_gimple_min_invariant (expr)
629 && (!for_bits_p || TREE_CODE (expr) == INTEGER_CST))
630 {
631 val.lattice_val = CONSTANT;
632 val.value = expr;
633 val.mask = 0;
634 canonicalize_value (&val);
635 }
636 else if (TREE_CODE (expr) == ADDR_EXPR)
637 val = get_value_from_alignment (expr);
638 else
639 {
640 val.lattice_val = VARYING;
641 val.mask = -1;
642 val.value = NULL_TREE;
643 }
644
645 if (val.lattice_val == VARYING
646 && TYPE_UNSIGNED (TREE_TYPE (expr)))
647 val.mask = wi::zext (val.mask, TYPE_PRECISION (TREE_TYPE (expr)));
648
649 return val;
650 }
651
652 /* Return the likely CCP lattice value for STMT.
653
654 If STMT has no operands, then return CONSTANT.
655
 656 Else if undefinedness of operands of STMT causes its value to be
657 undefined, then return UNDEFINED.
658
659 Else if any operands of STMT are constants, then return CONSTANT.
660
661 Else return VARYING. */
662
663 static ccp_lattice_t
664 likely_value (gimple *stmt)
665 {
666 bool has_constant_operand, has_undefined_operand, all_undefined_operands;
667 bool has_nsa_operand;
668 tree use;
669 ssa_op_iter iter;
670 unsigned i;
671
672 enum gimple_code code = gimple_code (stmt);
673
674 /* This function appears to be called only for assignments, calls,
675 conditionals, and switches, due to the logic in visit_stmt. */
676 gcc_assert (code == GIMPLE_ASSIGN
677 || code == GIMPLE_CALL
678 || code == GIMPLE_COND
679 || code == GIMPLE_SWITCH);
680
681 /* If the statement has volatile operands, it won't fold to a
682 constant value. */
683 if (gimple_has_volatile_ops (stmt))
684 return VARYING;
685
686 /* Arrive here for more complex cases. */
687 has_constant_operand = false;
688 has_undefined_operand = false;
689 all_undefined_operands = true;
690 has_nsa_operand = false;
691 FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
692 {
693 ccp_prop_value_t *val = get_value (use);
694
695 if (val && val->lattice_val == UNDEFINED)
696 has_undefined_operand = true;
697 else
698 all_undefined_operands = false;
699
700 if (val && val->lattice_val == CONSTANT)
701 has_constant_operand = true;
702
703 if (SSA_NAME_IS_DEFAULT_DEF (use)
704 || !prop_simulate_again_p (SSA_NAME_DEF_STMT (use)))
705 has_nsa_operand = true;
706 }
707
708 /* There may be constants in regular rhs operands. For calls we
709 have to ignore lhs, fndecl and static chain, otherwise only
710 the lhs. */
711 for (i = (is_gimple_call (stmt) ? 2 : 0) + gimple_has_lhs (stmt);
712 i < gimple_num_ops (stmt); ++i)
713 {
714 tree op = gimple_op (stmt, i);
715 if (!op || TREE_CODE (op) == SSA_NAME)
716 continue;
717 if (is_gimple_min_invariant (op))
718 has_constant_operand = true;
719 }
720
721 if (has_constant_operand)
722 all_undefined_operands = false;
723
724 if (has_undefined_operand
725 && code == GIMPLE_CALL
726 && gimple_call_internal_p (stmt))
727 switch (gimple_call_internal_fn (stmt))
728 {
729 /* These 3 builtins use the first argument just as a magic
 730 way to find out a decl uid. */
731 case IFN_GOMP_SIMD_LANE:
732 case IFN_GOMP_SIMD_VF:
733 case IFN_GOMP_SIMD_LAST_LANE:
734 has_undefined_operand = false;
735 break;
736 default:
737 break;
738 }
739
740 /* If the operation combines operands like COMPLEX_EXPR make sure to
741 not mark the result UNDEFINED if only one part of the result is
742 undefined. */
743 if (has_undefined_operand && all_undefined_operands)
744 return UNDEFINED;
745 else if (code == GIMPLE_ASSIGN && has_undefined_operand)
746 {
747 switch (gimple_assign_rhs_code (stmt))
748 {
749 /* Unary operators are handled with all_undefined_operands. */
750 case PLUS_EXPR:
751 case MINUS_EXPR:
752 case POINTER_PLUS_EXPR:
753 case BIT_XOR_EXPR:
754 /* Not MIN_EXPR, MAX_EXPR. One VARYING operand may be selected.
755 Not bitwise operators, one VARYING operand may specify the
756 result completely.
757 Not logical operators for the same reason, apart from XOR.
758 Not COMPLEX_EXPR as one VARYING operand makes the result partly
759 not UNDEFINED. Not *DIV_EXPR, comparisons and shifts because
760 the undefined operand may be promoted. */
761 return UNDEFINED;
762
763 case ADDR_EXPR:
764 /* If any part of an address is UNDEFINED, like the index
765 of an ARRAY_EXPR, then treat the result as UNDEFINED. */
766 return UNDEFINED;
767
768 default:
769 ;
770 }
771 }
 772 /* If there was an UNDEFINED operand but the result may not be UNDEFINED
773 fall back to CONSTANT. During iteration UNDEFINED may still drop
774 to CONSTANT. */
775 if (has_undefined_operand)
776 return CONSTANT;
777
778 /* We do not consider virtual operands here -- load from read-only
779 memory may have only VARYING virtual operands, but still be
780 constant. Also we can combine the stmt with definitions from
781 operands whose definitions are not simulated again. */
782 if (has_constant_operand
783 || has_nsa_operand
784 || gimple_references_memory_p (stmt))
785 return CONSTANT;
786
787 return VARYING;
788 }
789
790 /* Returns true if STMT cannot be constant. */
791
792 static bool
793 surely_varying_stmt_p (gimple *stmt)
794 {
795 /* If the statement has operands that we cannot handle, it cannot be
796 constant. */
797 if (gimple_has_volatile_ops (stmt))
798 return true;
799
800 /* If it is a call and does not return a value or is not a
 801 builtin and not an indirect call or a call to a function with
802 assume_aligned/alloc_align attribute, it is varying. */
803 if (is_gimple_call (stmt))
804 {
805 tree fndecl, fntype = gimple_call_fntype (stmt);
806 if (!gimple_call_lhs (stmt)
807 || ((fndecl = gimple_call_fndecl (stmt)) != NULL_TREE
808 && !DECL_BUILT_IN (fndecl)
809 && !lookup_attribute ("assume_aligned",
810 TYPE_ATTRIBUTES (fntype))
811 && !lookup_attribute ("alloc_align",
812 TYPE_ATTRIBUTES (fntype))))
813 return true;
814 }
815
816 /* Any other store operation is not interesting. */
817 else if (gimple_vdef (stmt))
818 return true;
819
 820 /* Anything other than assignments and conditional jumps is not
821 interesting for CCP. */
822 if (gimple_code (stmt) != GIMPLE_ASSIGN
823 && gimple_code (stmt) != GIMPLE_COND
824 && gimple_code (stmt) != GIMPLE_SWITCH
825 && gimple_code (stmt) != GIMPLE_CALL)
826 return true;
827
828 return false;
829 }
830
831 /* Initialize local data structures for CCP. */
832
833 static void
834 ccp_initialize (void)
835 {
836 basic_block bb;
837
838 n_const_val = num_ssa_names;
839 const_val = XCNEWVEC (ccp_prop_value_t, n_const_val);
840
841 /* Initialize simulation flags for PHI nodes and statements. */
842 FOR_EACH_BB_FN (bb, cfun)
843 {
844 gimple_stmt_iterator i;
845
846 for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i))
847 {
848 gimple *stmt = gsi_stmt (i);
849 bool is_varying;
850
 851 /* If the statement is a control insn, we always want to
 852 simulate it at least once; failing to do so means that
 853 its outgoing edges will never get added. */
854 if (stmt_ends_bb_p (stmt))
855 is_varying = false;
856 else
857 is_varying = surely_varying_stmt_p (stmt);
858
859 if (is_varying)
860 {
861 tree def;
862 ssa_op_iter iter;
863
864 /* If the statement will not produce a constant, mark
865 all its outputs VARYING. */
866 FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
867 set_value_varying (def);
868 }
869 prop_set_simulate_again (stmt, !is_varying);
870 }
871 }
872
873 /* Now process PHI nodes. We never clear the simulate_again flag on
874 phi nodes, since we do not know which edges are executable yet,
875 except for phi nodes for virtual operands when we do not do store ccp. */
876 FOR_EACH_BB_FN (bb, cfun)
877 {
878 gphi_iterator i;
879
880 for (i = gsi_start_phis (bb); !gsi_end_p (i); gsi_next (&i))
881 {
882 gphi *phi = i.phi ();
883
884 if (virtual_operand_p (gimple_phi_result (phi)))
885 prop_set_simulate_again (phi, false);
886 else
887 prop_set_simulate_again (phi, true);
888 }
889 }
890 }
891
 892 /* Debug count support. Reset the values of ssa names to
 893 VARYING when the total number of ssa names analyzed is
 894 beyond the specified debug count. */
895
896 static void
897 do_dbg_cnt (void)
898 {
899 unsigned i;
900 for (i = 0; i < num_ssa_names; i++)
901 {
902 if (!dbg_cnt (ccp))
903 {
904 const_val[i].lattice_val = VARYING;
905 const_val[i].mask = -1;
906 const_val[i].value = NULL_TREE;
907 }
908 }
909 }
910
911
912 /* Do final substitution of propagated values, cleanup the flowgraph and
913 free allocated storage. If NONZERO_P, record nonzero bits.
914
915 Return TRUE when something was optimized. */
916
917 static bool
918 ccp_finalize (bool nonzero_p)
919 {
920 bool something_changed;
921 unsigned i;
922 tree name;
923
924 do_dbg_cnt ();
925
926 /* Derive alignment and misalignment information from partially
927 constant pointers in the lattice or nonzero bits from partially
928 constant integers. */
929 FOR_EACH_SSA_NAME (i, name, cfun)
930 {
931 ccp_prop_value_t *val;
932 unsigned int tem, align;
933
934 if (!POINTER_TYPE_P (TREE_TYPE (name))
935 && (!INTEGRAL_TYPE_P (TREE_TYPE (name))
936 /* Don't record nonzero bits before IPA to avoid
937 using too much memory. */
938 || !nonzero_p))
939 continue;
940
941 val = get_value (name);
942 if (val->lattice_val != CONSTANT
943 || TREE_CODE (val->value) != INTEGER_CST
944 || val->mask == 0)
945 continue;
946
947 if (POINTER_TYPE_P (TREE_TYPE (name)))
948 {
949 /* Trailing mask bits specify the alignment, trailing value
950 bits the misalignment. */
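	  /* For instance, a lattice value with mask 0x...fff0 and value 4
	     has least_bit_hwi (mask) == 16, so the pointer is recorded as
	     16-byte aligned with misalignment 4.  (Illustrative numbers.)  */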
951 tem = val->mask.to_uhwi ();
952 align = least_bit_hwi (tem);
953 if (align > 1)
954 set_ptr_info_alignment (get_ptr_info (name), align,
955 (TREE_INT_CST_LOW (val->value)
956 & (align - 1)));
957 }
958 else
959 {
960 unsigned int precision = TYPE_PRECISION (TREE_TYPE (val->value));
961 wide_int nonzero_bits
962 = (wide_int::from (val->mask, precision, UNSIGNED)
963 | wi::to_wide (val->value));
964 nonzero_bits &= get_nonzero_bits (name);
965 set_nonzero_bits (name, nonzero_bits);
966 }
967 }
968
969 /* Perform substitutions based on the known constant values. */
970 something_changed = substitute_and_fold (get_constant_value, ccp_fold_stmt);
971
972 free (const_val);
973 const_val = NULL;
 974 return something_changed;
975 }
976
977
978 /* Compute the meet operator between *VAL1 and *VAL2. Store the result
979 in VAL1.
980
981 any M UNDEFINED = any
982 any M VARYING = VARYING
983 Ci M Cj = Ci if (i == j)
984 Ci M Cj = VARYING if (i != j)
985 */
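
/* As a worked example of the INTEGER_CST case below: meeting the
   constants 4 (0b0100) and 6 (0b0110), both with mask 0, ORs the masks
   with the XOR of the values, giving value 4 with mask 2 (0b0010).
   Only bit 1 is unknown, so the result describes exactly {4, 6}; only
   when no known bits remain does the result drop to VARYING.  */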
986
987 static void
988 ccp_lattice_meet (ccp_prop_value_t *val1, ccp_prop_value_t *val2)
989 {
990 if (val1->lattice_val == UNDEFINED
 991 /* For UNDEFINED M SSA we can't always use the SSA name because its
 992 definition may not dominate the PHI node. Doing optimistic copy propagation
993 also causes a lot of gcc.dg/uninit-pred*.c FAILs. */
994 && (val2->lattice_val != CONSTANT
995 || TREE_CODE (val2->value) != SSA_NAME))
996 {
997 /* UNDEFINED M any = any */
998 *val1 = *val2;
999 }
1000 else if (val2->lattice_val == UNDEFINED
1001 /* See above. */
1002 && (val1->lattice_val != CONSTANT
1003 || TREE_CODE (val1->value) != SSA_NAME))
1004 {
1005 /* any M UNDEFINED = any
1006 Nothing to do. VAL1 already contains the value we want. */
1007 ;
1008 }
1009 else if (val1->lattice_val == VARYING
1010 || val2->lattice_val == VARYING)
1011 {
1012 /* any M VARYING = VARYING. */
1013 val1->lattice_val = VARYING;
1014 val1->mask = -1;
1015 val1->value = NULL_TREE;
1016 }
1017 else if (val1->lattice_val == CONSTANT
1018 && val2->lattice_val == CONSTANT
1019 && TREE_CODE (val1->value) == INTEGER_CST
1020 && TREE_CODE (val2->value) == INTEGER_CST)
1021 {
1022 /* Ci M Cj = Ci if (i == j)
1023 Ci M Cj = VARYING if (i != j)
1024
1025 For INTEGER_CSTs mask unequal bits. If no equal bits remain,
1026 drop to varying. */
1027 val1->mask = (val1->mask | val2->mask
1028 | (wi::to_widest (val1->value)
1029 ^ wi::to_widest (val2->value)));
1030 if (wi::sext (val1->mask, TYPE_PRECISION (TREE_TYPE (val1->value))) == -1)
1031 {
1032 val1->lattice_val = VARYING;
1033 val1->value = NULL_TREE;
1034 }
1035 }
1036 else if (val1->lattice_val == CONSTANT
1037 && val2->lattice_val == CONSTANT
1038 && operand_equal_p (val1->value, val2->value, 0))
1039 {
1040 /* Ci M Cj = Ci if (i == j)
1041 Ci M Cj = VARYING if (i != j)
1042
1043 VAL1 already contains the value we want for equivalent values. */
1044 }
1045 else if (val1->lattice_val == CONSTANT
1046 && val2->lattice_val == CONSTANT
1047 && (TREE_CODE (val1->value) == ADDR_EXPR
1048 || TREE_CODE (val2->value) == ADDR_EXPR))
1049 {
 1050 /* When unequal addresses are involved, try meeting for
 1051 alignment. */
1052 ccp_prop_value_t tem = *val2;
1053 if (TREE_CODE (val1->value) == ADDR_EXPR)
1054 *val1 = get_value_for_expr (val1->value, true);
1055 if (TREE_CODE (val2->value) == ADDR_EXPR)
1056 tem = get_value_for_expr (val2->value, true);
1057 ccp_lattice_meet (val1, &tem);
1058 }
1059 else
1060 {
1061 /* Any other combination is VARYING. */
1062 val1->lattice_val = VARYING;
1063 val1->mask = -1;
1064 val1->value = NULL_TREE;
1065 }
1066 }
1067
1068
1069 /* Loop through the PHI_NODE's parameters for BLOCK and compare their
1070 lattice values to determine PHI_NODE's lattice value. The value of a
 1071 PHI node is determined by calling ccp_lattice_meet with all the arguments
1072 of the PHI node that are incoming via executable edges. */
1073
1074 enum ssa_prop_result
1075 ccp_propagate::visit_phi (gphi *phi)
1076 {
1077 unsigned i;
1078 ccp_prop_value_t new_val;
1079
1080 if (dump_file && (dump_flags & TDF_DETAILS))
1081 {
1082 fprintf (dump_file, "\nVisiting PHI node: ");
1083 print_gimple_stmt (dump_file, phi, 0, dump_flags);
1084 }
1085
1086 new_val.lattice_val = UNDEFINED;
1087 new_val.value = NULL_TREE;
1088 new_val.mask = 0;
1089
1090 bool first = true;
1091 bool non_exec_edge = false;
1092 for (i = 0; i < gimple_phi_num_args (phi); i++)
1093 {
1094 /* Compute the meet operator over all the PHI arguments flowing
1095 through executable edges. */
1096 edge e = gimple_phi_arg_edge (phi, i);
1097
1098 if (dump_file && (dump_flags & TDF_DETAILS))
1099 {
1100 fprintf (dump_file,
1101 "\n Argument #%d (%d -> %d %sexecutable)\n",
1102 i, e->src->index, e->dest->index,
1103 (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
1104 }
1105
 1106 /* If the incoming edge is executable, compute the meet operator for
1107 the existing value of the PHI node and the current PHI argument. */
1108 if (e->flags & EDGE_EXECUTABLE)
1109 {
1110 tree arg = gimple_phi_arg (phi, i)->def;
1111 ccp_prop_value_t arg_val = get_value_for_expr (arg, false);
1112
1113 if (first)
1114 {
1115 new_val = arg_val;
1116 first = false;
1117 }
1118 else
1119 ccp_lattice_meet (&new_val, &arg_val);
1120
1121 if (dump_file && (dump_flags & TDF_DETAILS))
1122 {
1123 fprintf (dump_file, "\t");
1124 print_generic_expr (dump_file, arg, dump_flags);
1125 dump_lattice_value (dump_file, "\tValue: ", arg_val);
1126 fprintf (dump_file, "\n");
1127 }
1128
1129 if (new_val.lattice_val == VARYING)
1130 break;
1131 }
1132 else
1133 non_exec_edge = true;
1134 }
1135
1136 /* In case there were non-executable edges and the value is a copy
1137 make sure its definition dominates the PHI node. */
1138 if (non_exec_edge
1139 && new_val.lattice_val == CONSTANT
1140 && TREE_CODE (new_val.value) == SSA_NAME
1141 && ! SSA_NAME_IS_DEFAULT_DEF (new_val.value)
1142 && ! dominated_by_p (CDI_DOMINATORS, gimple_bb (phi),
1143 gimple_bb (SSA_NAME_DEF_STMT (new_val.value))))
1144 {
1145 new_val.lattice_val = VARYING;
1146 new_val.value = NULL_TREE;
1147 new_val.mask = -1;
1148 }
1149
1150 if (dump_file && (dump_flags & TDF_DETAILS))
1151 {
1152 dump_lattice_value (dump_file, "\n PHI node value: ", new_val);
1153 fprintf (dump_file, "\n\n");
1154 }
1155
1156 /* Make the transition to the new value. */
1157 if (set_lattice_value (gimple_phi_result (phi), &new_val))
1158 {
1159 if (new_val.lattice_val == VARYING)
1160 return SSA_PROP_VARYING;
1161 else
1162 return SSA_PROP_INTERESTING;
1163 }
1164 else
1165 return SSA_PROP_NOT_INTERESTING;
1166 }
1167
 1168 /* Return the constant value for OP, or OP itself otherwise. */
1169
1170 static tree
1171 valueize_op (tree op)
1172 {
1173 if (TREE_CODE (op) == SSA_NAME)
1174 {
1175 tree tem = get_constant_value (op);
1176 if (tem)
1177 return tem;
1178 }
1179 return op;
1180 }
1181
1182 /* Return the constant value for OP, but signal to not follow SSA
1183 edges if the definition may be simulated again. */
1184
1185 static tree
1186 valueize_op_1 (tree op)
1187 {
1188 if (TREE_CODE (op) == SSA_NAME)
1189 {
1190 /* If the definition may be simulated again we cannot follow
1191 this SSA edge as the SSA propagator does not necessarily
1192 re-visit the use. */
1193 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
1194 if (!gimple_nop_p (def_stmt)
1195 && prop_simulate_again_p (def_stmt))
1196 return NULL_TREE;
1197 tree tem = get_constant_value (op);
1198 if (tem)
1199 return tem;
1200 }
1201 return op;
1202 }
1203
1204 /* CCP specific front-end to the non-destructive constant folding
1205 routines.
1206
1207 Attempt to simplify the RHS of STMT knowing that one or more
1208 operands are constants.
1209
1210 If simplification is possible, return the simplified RHS,
1211 otherwise return the original RHS or NULL_TREE. */
1212
1213 static tree
1214 ccp_fold (gimple *stmt)
1215 {
1216 location_t loc = gimple_location (stmt);
1217 switch (gimple_code (stmt))
1218 {
1219 case GIMPLE_COND:
1220 {
1221 /* Handle comparison operators that can appear in GIMPLE form. */
1222 tree op0 = valueize_op (gimple_cond_lhs (stmt));
1223 tree op1 = valueize_op (gimple_cond_rhs (stmt));
1224 enum tree_code code = gimple_cond_code (stmt);
1225 return fold_binary_loc (loc, code, boolean_type_node, op0, op1);
1226 }
1227
1228 case GIMPLE_SWITCH:
1229 {
1230 /* Return the constant switch index. */
1231 return valueize_op (gimple_switch_index (as_a <gswitch *> (stmt)));
1232 }
1233
1234 case GIMPLE_ASSIGN:
1235 case GIMPLE_CALL:
1236 return gimple_fold_stmt_to_constant_1 (stmt,
1237 valueize_op, valueize_op_1);
1238
1239 default:
1240 gcc_unreachable ();
1241 }
1242 }
1243
1244 /* Apply the operation CODE in type TYPE to the value, mask pair
1245 RVAL and RMASK representing a value of type RTYPE and set
1246 the value, mask pair *VAL and *MASK to the result. */
1247
1248 void
1249 bit_value_unop (enum tree_code code, signop type_sgn, int type_precision,
1250 widest_int *val, widest_int *mask,
1251 signop rtype_sgn, int rtype_precision,
1252 const widest_int &rval, const widest_int &rmask)
1253 {
1254 switch (code)
1255 {
1256 case BIT_NOT_EXPR:
1257 *mask = rmask;
1258 *val = ~rval;
1259 break;
1260
1261 case NEGATE_EXPR:
1262 {
1263 widest_int temv, temm;
1264 /* Return ~rval + 1. */
1265 bit_value_unop (BIT_NOT_EXPR, type_sgn, type_precision, &temv, &temm,
1266 type_sgn, type_precision, rval, rmask);
1267 bit_value_binop (PLUS_EXPR, type_sgn, type_precision, val, mask,
1268 type_sgn, type_precision, temv, temm,
1269 type_sgn, type_precision, 1, 0);
1270 break;
1271 }
1272
1273 CASE_CONVERT:
1274 {
1275 /* First extend mask and value according to the original type. */
1276 *mask = wi::ext (rmask, rtype_precision, rtype_sgn);
1277 *val = wi::ext (rval, rtype_precision, rtype_sgn);
1278
1279 /* Then extend mask and value according to the target type. */
1280 *mask = wi::ext (*mask, type_precision, type_sgn);
1281 *val = wi::ext (*val, type_precision, type_sgn);
1282 break;
1283 }
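      /* As an example of the conversion case above: converting an
	 unsigned 8-bit value with mask 0x0f to a 32-bit type keeps only
	 the low four bits unknown, while converting a signed 8-bit value
	 whose sign bit is unknown (mask 0x80) makes every bit from bit 7
	 upward unknown in the wider type, because the unknown sign bit
	 is sign-extended.  */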
1284
1285 default:
1286 *mask = -1;
1287 break;
1288 }
1289 }
1290
1291 /* Apply the operation CODE in type TYPE to the value, mask pairs
 1292 R1VAL, R1MASK and R2VAL, R2MASK representing values of type R1TYPE
1293 and R2TYPE and set the value, mask pair *VAL and *MASK to the result. */
1294
1295 void
1296 bit_value_binop (enum tree_code code, signop sgn, int width,
1297 widest_int *val, widest_int *mask,
1298 signop r1type_sgn, int r1type_precision,
1299 const widest_int &r1val, const widest_int &r1mask,
1300 signop r2type_sgn, int r2type_precision,
1301 const widest_int &r2val, const widest_int &r2mask)
1302 {
1303 bool swap_p = false;
1304
1305 /* Assume we'll get a constant result. Use an initial non varying
 1306 value; we fall back to varying in the end if necessary. */
1307 *mask = -1;
1308
1309 switch (code)
1310 {
1311 case BIT_AND_EXPR:
1312 /* The mask is constant where there is a known not
1313 set bit, (m1 | m2) & ((v1 | m1) & (v2 | m2)) */
1314 *mask = (r1mask | r2mask) & (r1val | r1mask) & (r2val | r2mask);
1315 *val = r1val & r2val;
1316 break;
1317
1318 case BIT_IOR_EXPR:
1319 /* The mask is constant where there is a known
1320 set bit, (m1 | m2) & ~((v1 & ~m1) | (v2 & ~m2)). */
1321 *mask = wi::bit_and_not (r1mask | r2mask,
1322 wi::bit_and_not (r1val, r1mask)
1323 | wi::bit_and_not (r2val, r2mask));
1324 *val = r1val | r2val;
1325 break;
1326
1327 case BIT_XOR_EXPR:
1328 /* m1 | m2 */
1329 *mask = r1mask | r2mask;
1330 *val = r1val ^ r2val;
1331 break;
1332
1333 case LROTATE_EXPR:
1334 case RROTATE_EXPR:
1335 if (r2mask == 0)
1336 {
1337 widest_int shift = r2val;
1338 if (shift == 0)
1339 {
1340 *mask = r1mask;
1341 *val = r1val;
1342 }
1343 else
1344 {
1345 if (wi::neg_p (shift))
1346 {
1347 shift = -shift;
1348 if (code == RROTATE_EXPR)
1349 code = LROTATE_EXPR;
1350 else
1351 code = RROTATE_EXPR;
1352 }
1353 if (code == RROTATE_EXPR)
1354 {
1355 *mask = wi::rrotate (r1mask, shift, width);
1356 *val = wi::rrotate (r1val, shift, width);
1357 }
1358 else
1359 {
1360 *mask = wi::lrotate (r1mask, shift, width);
1361 *val = wi::lrotate (r1val, shift, width);
1362 }
1363 }
1364 }
1365 break;
1366
1367 case LSHIFT_EXPR:
1368 case RSHIFT_EXPR:
1369 /* ??? We can handle partially known shift counts if we know
 1370 their sign. That way we can tell that (x << (y | 8)) & 255
1371 is zero. */
1372 if (r2mask == 0)
1373 {
1374 widest_int shift = r2val;
1375 if (shift == 0)
1376 {
1377 *mask = r1mask;
1378 *val = r1val;
1379 }
1380 else
1381 {
1382 if (wi::neg_p (shift))
1383 {
1384 shift = -shift;
1385 if (code == RSHIFT_EXPR)
1386 code = LSHIFT_EXPR;
1387 else
1388 code = RSHIFT_EXPR;
1389 }
1390 if (code == RSHIFT_EXPR)
1391 {
1392 *mask = wi::rshift (wi::ext (r1mask, width, sgn), shift, sgn);
1393 *val = wi::rshift (wi::ext (r1val, width, sgn), shift, sgn);
1394 }
1395 else
1396 {
1397 *mask = wi::ext (r1mask << shift, width, sgn);
1398 *val = wi::ext (r1val << shift, width, sgn);
1399 }
1400 }
1401 }
1402 break;
1403
1404 case PLUS_EXPR:
1405 case POINTER_PLUS_EXPR:
1406 {
1407 /* Do the addition with unknown bits set to zero, to give carry-ins of
1408 zero wherever possible. */
1409 widest_int lo = (wi::bit_and_not (r1val, r1mask)
1410 + wi::bit_and_not (r2val, r2mask));
1411 lo = wi::ext (lo, width, sgn);
1412 /* Do the addition with unknown bits set to one, to give carry-ins of
1413 one wherever possible. */
1414 widest_int hi = (r1val | r1mask) + (r2val | r2mask);
1415 hi = wi::ext (hi, width, sgn);
1416 /* Each bit in the result is known if (a) the corresponding bits in
1417 both inputs are known, and (b) the carry-in to that bit position
1418 is known. We can check condition (b) by seeing if we got the same
1419 result with minimised carries as with maximised carries. */
1420 *mask = r1mask | r2mask | (lo ^ hi);
1421 *mask = wi::ext (*mask, width, sgn);
1422 /* It shouldn't matter whether we choose lo or hi here. */
1423 *val = lo;
1424 break;
1425 }
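      /* A small worked example of the PLUS_EXPR handling above: adding
	 a value known to be 4 or 5 (value 4, mask 1) to the constant 1
	 gives lo == 5 and hi == 6, so lo ^ hi == 3 and the result is
	 value 5 with mask 3.  Bit 1 becomes unknown even though it is
	 known in both inputs, because the carry into it is unknown.  */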
1426
1427 case MINUS_EXPR:
1428 {
1429 widest_int temv, temm;
1430 bit_value_unop (NEGATE_EXPR, r2type_sgn, r2type_precision, &temv, &temm,
1431 r2type_sgn, r2type_precision, r2val, r2mask);
1432 bit_value_binop (PLUS_EXPR, sgn, width, val, mask,
1433 r1type_sgn, r1type_precision, r1val, r1mask,
1434 r2type_sgn, r2type_precision, temv, temm);
1435 break;
1436 }
1437
1438 case MULT_EXPR:
1439 {
 1440 /* Just track trailing zeros in both operands: the product has
 1441 at least as many trailing zeros as the two operands combined. */
1442 int r1tz = wi::ctz (r1val | r1mask);
1443 int r2tz = wi::ctz (r2val | r2mask);
1444 if (r1tz + r2tz >= width)
1445 {
1446 *mask = 0;
1447 *val = 0;
1448 }
1449 else if (r1tz + r2tz > 0)
1450 {
1451 *mask = wi::ext (wi::mask <widest_int> (r1tz + r2tz, true),
1452 width, sgn);
1453 *val = 0;
1454 }
1455 break;
1456 }
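      /* For example, if one operand is known to be a multiple of 4 (two
	 known trailing zero bits) and the other a multiple of 8 (three
	 known trailing zero bits), the product is a multiple of 32: the
	 low five bits of the result are known zero and all higher bits
	 are unknown.  */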
1457
1458 case EQ_EXPR:
1459 case NE_EXPR:
1460 {
1461 widest_int m = r1mask | r2mask;
1462 if (wi::bit_and_not (r1val, m) != wi::bit_and_not (r2val, m))
1463 {
1464 *mask = 0;
1465 *val = ((code == EQ_EXPR) ? 0 : 1);
1466 }
1467 else
1468 {
1469 /* We know the result of a comparison is always one or zero. */
1470 *mask = 1;
1471 *val = 0;
1472 }
1473 break;
1474 }
1475
1476 case GE_EXPR:
1477 case GT_EXPR:
1478 swap_p = true;
1479 code = swap_tree_comparison (code);
1480 /* Fall through. */
1481 case LT_EXPR:
1482 case LE_EXPR:
1483 {
1484 int minmax, maxmin;
1485
1486 const widest_int &o1val = swap_p ? r2val : r1val;
1487 const widest_int &o1mask = swap_p ? r2mask : r1mask;
1488 const widest_int &o2val = swap_p ? r1val : r2val;
1489 const widest_int &o2mask = swap_p ? r1mask : r2mask;
1490
1491 /* If the most significant bits are not known we know nothing. */
1492 if (wi::neg_p (o1mask) || wi::neg_p (o2mask))
1493 break;
1494
1495 /* For comparisons the signedness is in the comparison operands. */
1496 sgn = r1type_sgn;
1497
 1498 /* If we know the most significant bits we know the value
 1499 ranges by means of treating varying bits as zero
1500 or one. Do a cross comparison of the max/min pairs. */
1501 maxmin = wi::cmp (o1val | o1mask,
1502 wi::bit_and_not (o2val, o2mask), sgn);
1503 minmax = wi::cmp (wi::bit_and_not (o1val, o1mask),
1504 o2val | o2mask, sgn);
1505 if (maxmin < 0) /* o1 is less than o2. */
1506 {
1507 *mask = 0;
1508 *val = 1;
1509 }
1510 else if (minmax > 0) /* o1 is not less or equal to o2. */
1511 {
1512 *mask = 0;
1513 *val = 0;
1514 }
1515 else if (maxmin == minmax) /* o1 and o2 are equal. */
1516 {
1517 /* This probably should never happen as we'd have
1518 folded the thing during fully constant value folding. */
1519 *mask = 0;
1520 *val = (code == LE_EXPR ? 1 : 0);
1521 }
1522 else
1523 {
1524 /* We know the result of a comparison is always one or zero. */
1525 *mask = 1;
1526 *val = 0;
1527 }
1528 break;
1529 }
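      /* For example, for o1 with value 4 and mask 3 (so o1 is somewhere
	 in [4, 7]) compared against a fully known o2 of 8, the cross
	 comparison above sees that o1's maximum (7) is below o2's
	 minimum (8), so o1 < o2 (and o1 <= o2) is known true:
	 mask 0, value 1.  */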
1530
1531 default:;
1532 }
1533 }
1534
1535 /* Return the propagation value when applying the operation CODE to
1536 the value RHS yielding type TYPE. */
1537
1538 static ccp_prop_value_t
1539 bit_value_unop (enum tree_code code, tree type, tree rhs)
1540 {
1541 ccp_prop_value_t rval = get_value_for_expr (rhs, true);
1542 widest_int value, mask;
1543 ccp_prop_value_t val;
1544
1545 if (rval.lattice_val == UNDEFINED)
1546 return rval;
1547
1548 gcc_assert ((rval.lattice_val == CONSTANT
1549 && TREE_CODE (rval.value) == INTEGER_CST)
1550 || wi::sext (rval.mask, TYPE_PRECISION (TREE_TYPE (rhs))) == -1);
1551 bit_value_unop (code, TYPE_SIGN (type), TYPE_PRECISION (type), &value, &mask,
1552 TYPE_SIGN (TREE_TYPE (rhs)), TYPE_PRECISION (TREE_TYPE (rhs)),
1553 value_to_wide_int (rval), rval.mask);
1554 if (wi::sext (mask, TYPE_PRECISION (type)) != -1)
1555 {
1556 val.lattice_val = CONSTANT;
1557 val.mask = mask;
1558 /* ??? Delay building trees here. */
1559 val.value = wide_int_to_tree (type, value);
1560 }
1561 else
1562 {
1563 val.lattice_val = VARYING;
1564 val.value = NULL_TREE;
1565 val.mask = -1;
1566 }
1567 return val;
1568 }
1569
1570 /* Return the propagation value when applying the operation CODE to
1571 the values RHS1 and RHS2 yielding type TYPE. */
1572
1573 static ccp_prop_value_t
1574 bit_value_binop (enum tree_code code, tree type, tree rhs1, tree rhs2)
1575 {
1576 ccp_prop_value_t r1val = get_value_for_expr (rhs1, true);
1577 ccp_prop_value_t r2val = get_value_for_expr (rhs2, true);
1578 widest_int value, mask;
1579 ccp_prop_value_t val;
1580
1581 if (r1val.lattice_val == UNDEFINED
1582 || r2val.lattice_val == UNDEFINED)
1583 {
1584 val.lattice_val = VARYING;
1585 val.value = NULL_TREE;
1586 val.mask = -1;
1587 return val;
1588 }
1589
1590 gcc_assert ((r1val.lattice_val == CONSTANT
1591 && TREE_CODE (r1val.value) == INTEGER_CST)
1592 || wi::sext (r1val.mask,
1593 TYPE_PRECISION (TREE_TYPE (rhs1))) == -1);
1594 gcc_assert ((r2val.lattice_val == CONSTANT
1595 && TREE_CODE (r2val.value) == INTEGER_CST)
1596 || wi::sext (r2val.mask,
1597 TYPE_PRECISION (TREE_TYPE (rhs2))) == -1);
1598 bit_value_binop (code, TYPE_SIGN (type), TYPE_PRECISION (type), &value, &mask,
1599 TYPE_SIGN (TREE_TYPE (rhs1)), TYPE_PRECISION (TREE_TYPE (rhs1)),
1600 value_to_wide_int (r1val), r1val.mask,
1601 TYPE_SIGN (TREE_TYPE (rhs2)), TYPE_PRECISION (TREE_TYPE (rhs2)),
1602 value_to_wide_int (r2val), r2val.mask);
1603
1604 if (wi::sext (mask, TYPE_PRECISION (type)) != -1)
1605 {
1606 val.lattice_val = CONSTANT;
1607 val.mask = mask;
1608 /* ??? Delay building trees here. */
1609 val.value = wide_int_to_tree (type, value);
1610 }
1611 else
1612 {
1613 val.lattice_val = VARYING;
1614 val.value = NULL_TREE;
1615 val.mask = -1;
1616 }
1617 return val;
1618 }
1619
1620 /* Return the propagation value for __builtin_assume_aligned
1621 and functions with assume_aligned or alloc_aligned attribute.
1622 For __builtin_assume_aligned, ATTR is NULL_TREE,
1623 for assume_aligned attribute ATTR is non-NULL and ALLOC_ALIGNED
1624 is false, for alloc_aligned attribute ATTR is non-NULL and
1625 ALLOC_ALIGNED is true. */
1626
1627 static ccp_prop_value_t
1628 bit_value_assume_aligned (gimple *stmt, tree attr, ccp_prop_value_t ptrval,
1629 bool alloc_aligned)
1630 {
1631 tree align, misalign = NULL_TREE, type;
1632 unsigned HOST_WIDE_INT aligni, misaligni = 0;
1633 ccp_prop_value_t alignval;
1634 widest_int value, mask;
1635 ccp_prop_value_t val;
1636
1637 if (attr == NULL_TREE)
1638 {
1639 tree ptr = gimple_call_arg (stmt, 0);
1640 type = TREE_TYPE (ptr);
1641 ptrval = get_value_for_expr (ptr, true);
1642 }
1643 else
1644 {
1645 tree lhs = gimple_call_lhs (stmt);
1646 type = TREE_TYPE (lhs);
1647 }
1648
1649 if (ptrval.lattice_val == UNDEFINED)
1650 return ptrval;
1651 gcc_assert ((ptrval.lattice_val == CONSTANT
1652 && TREE_CODE (ptrval.value) == INTEGER_CST)
1653 || wi::sext (ptrval.mask, TYPE_PRECISION (type)) == -1);
1654 if (attr == NULL_TREE)
1655 {
1656 /* Get aligni and misaligni from __builtin_assume_aligned. */
1657 align = gimple_call_arg (stmt, 1);
1658 if (!tree_fits_uhwi_p (align))
1659 return ptrval;
1660 aligni = tree_to_uhwi (align);
1661 if (gimple_call_num_args (stmt) > 2)
1662 {
1663 misalign = gimple_call_arg (stmt, 2);
1664 if (!tree_fits_uhwi_p (misalign))
1665 return ptrval;
1666 misaligni = tree_to_uhwi (misalign);
1667 }
1668 }
1669 else
1670 {
1671 /* Get aligni and misaligni from assume_aligned or
1672 alloc_align attributes. */
1673 if (TREE_VALUE (attr) == NULL_TREE)
1674 return ptrval;
1675 attr = TREE_VALUE (attr);
1676 align = TREE_VALUE (attr);
1677 if (!tree_fits_uhwi_p (align))
1678 return ptrval;
1679 aligni = tree_to_uhwi (align);
1680 if (alloc_aligned)
1681 {
1682 if (aligni == 0 || aligni > gimple_call_num_args (stmt))
1683 return ptrval;
1684 align = gimple_call_arg (stmt, aligni - 1);
1685 if (!tree_fits_uhwi_p (align))
1686 return ptrval;
1687 aligni = tree_to_uhwi (align);
1688 }
1689 else if (TREE_CHAIN (attr) && TREE_VALUE (TREE_CHAIN (attr)))
1690 {
1691 misalign = TREE_VALUE (TREE_CHAIN (attr));
1692 if (!tree_fits_uhwi_p (misalign))
1693 return ptrval;
1694 misaligni = tree_to_uhwi (misalign);
1695 }
1696 }
1697 if (aligni <= 1 || (aligni & (aligni - 1)) != 0 || misaligni >= aligni)
1698 return ptrval;
1699
1700 align = build_int_cst_type (type, -aligni);
1701 alignval = get_value_for_expr (align, true);
1702 bit_value_binop (BIT_AND_EXPR, TYPE_SIGN (type), TYPE_PRECISION (type), &value, &mask,
1703 TYPE_SIGN (type), TYPE_PRECISION (type), value_to_wide_int (ptrval), ptrval.mask,
1704 TYPE_SIGN (type), TYPE_PRECISION (type), value_to_wide_int (alignval), alignval.mask);
1705
1706 if (wi::sext (mask, TYPE_PRECISION (type)) != -1)
1707 {
1708 val.lattice_val = CONSTANT;
1709 val.mask = mask;
1710 gcc_assert ((mask.to_uhwi () & (aligni - 1)) == 0);
1711 gcc_assert ((value.to_uhwi () & (aligni - 1)) == 0);
1712 value |= misaligni;
1713 /* ??? Delay building trees here. */
1714 val.value = wide_int_to_tree (type, value);
1715 }
1716 else
1717 {
1718 val.lattice_val = VARYING;
1719 val.value = NULL_TREE;
1720 val.mask = -1;
1721 }
1722 return val;
1723 }
1724
1725 /* Evaluate statement STMT.
1726 Valid only for assignments, calls, conditionals, and switches. */
1727
1728 static ccp_prop_value_t
1729 evaluate_stmt (gimple *stmt)
1730 {
1731 ccp_prop_value_t val;
1732 tree simplified = NULL_TREE;
1733 ccp_lattice_t likelyvalue = likely_value (stmt);
1734 bool is_constant = false;
1735 unsigned int align;
1736
1737 if (dump_file && (dump_flags & TDF_DETAILS))
1738 {
1739 fprintf (dump_file, "which is likely ");
1740 switch (likelyvalue)
1741 {
1742 case CONSTANT:
1743 fprintf (dump_file, "CONSTANT");
1744 break;
1745 case UNDEFINED:
1746 fprintf (dump_file, "UNDEFINED");
1747 break;
1748 case VARYING:
1749 fprintf (dump_file, "VARYING");
1750 break;
1751 default:;
1752 }
1753 fprintf (dump_file, "\n");
1754 }
1755
1756 /* If the statement is likely to have a CONSTANT result, then try
1757 to fold the statement to determine the constant value. */
1758 /* FIXME. This is the only place that we call ccp_fold.
1759 Since likely_value never returns CONSTANT for calls, we will
1760 not attempt to fold them, including builtins that may profit. */
1761 if (likelyvalue == CONSTANT)
1762 {
1763 fold_defer_overflow_warnings ();
1764 simplified = ccp_fold (stmt);
1765 if (simplified
1766 && TREE_CODE (simplified) == SSA_NAME)
1767 {
1768 /* We may not use values of something that may be simulated again,
1769 see valueize_op_1. */
1770 if (SSA_NAME_IS_DEFAULT_DEF (simplified)
1771 || ! prop_simulate_again_p (SSA_NAME_DEF_STMT (simplified)))
1772 {
1773 ccp_prop_value_t *val = get_value (simplified);
1774 if (val && val->lattice_val != VARYING)
1775 {
1776 fold_undefer_overflow_warnings (true, stmt, 0);
1777 return *val;
1778 }
1779 }
1780 else
1781 /* We may also not place a non-valueized copy in the lattice
1782 as that might become stale if we never re-visit this stmt. */
1783 simplified = NULL_TREE;
1784 }
1785 is_constant = simplified && is_gimple_min_invariant (simplified);
1786 fold_undefer_overflow_warnings (is_constant, stmt, 0);
1787 if (is_constant)
1788 {
1789 /* The statement produced a constant value. */
1790 val.lattice_val = CONSTANT;
1791 val.value = simplified;
1792 val.mask = 0;
1793 return val;
1794 }
1795 }
1796 /* If the statement is likely to have a VARYING result, then do not
1797 bother folding the statement. */
1798 else if (likelyvalue == VARYING)
1799 {
1800 enum gimple_code code = gimple_code (stmt);
1801 if (code == GIMPLE_ASSIGN)
1802 {
1803 enum tree_code subcode = gimple_assign_rhs_code (stmt);
1804
1805 /* Other cases cannot satisfy is_gimple_min_invariant
1806 without folding. */
1807 if (get_gimple_rhs_class (subcode) == GIMPLE_SINGLE_RHS)
1808 simplified = gimple_assign_rhs1 (stmt);
1809 }
1810 else if (code == GIMPLE_SWITCH)
1811 simplified = gimple_switch_index (as_a <gswitch *> (stmt));
1812 else
1813 /* These cannot satisfy is_gimple_min_invariant without folding. */
1814 gcc_assert (code == GIMPLE_CALL || code == GIMPLE_COND);
1815 is_constant = simplified && is_gimple_min_invariant (simplified);
1816 if (is_constant)
1817 {
1818 /* The statement produced a constant value. */
1819 val.lattice_val = CONSTANT;
1820 val.value = simplified;
1821 val.mask = 0;
1822 }
1823 }
1824 /* If the statement result is likely UNDEFINED, make it so. */
1825 else if (likelyvalue == UNDEFINED)
1826 {
1827 val.lattice_val = UNDEFINED;
1828 val.value = NULL_TREE;
1829 val.mask = 0;
1830 return val;
1831 }
1832
1833 /* Resort to simplification for bitwise tracking. */
1834 if (flag_tree_bit_ccp
1835 && (likelyvalue == CONSTANT || is_gimple_call (stmt)
1836 || (gimple_assign_single_p (stmt)
1837 && gimple_assign_rhs_code (stmt) == ADDR_EXPR))
1838 && !is_constant)
1839 {
1840 enum gimple_code code = gimple_code (stmt);
1841 val.lattice_val = VARYING;
1842 val.value = NULL_TREE;
1843 val.mask = -1;
1844 if (code == GIMPLE_ASSIGN)
1845 {
1846 enum tree_code subcode = gimple_assign_rhs_code (stmt);
1847 tree rhs1 = gimple_assign_rhs1 (stmt);
1848 tree lhs = gimple_assign_lhs (stmt);
1849 if ((INTEGRAL_TYPE_P (TREE_TYPE (lhs))
1850 || POINTER_TYPE_P (TREE_TYPE (lhs)))
1851 && (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
1852 || POINTER_TYPE_P (TREE_TYPE (rhs1))))
1853 switch (get_gimple_rhs_class (subcode))
1854 {
1855 case GIMPLE_SINGLE_RHS:
1856 val = get_value_for_expr (rhs1, true);
1857 break;
1858
1859 case GIMPLE_UNARY_RHS:
1860 val = bit_value_unop (subcode, TREE_TYPE (lhs), rhs1);
1861 break;
1862
1863 case GIMPLE_BINARY_RHS:
1864 val = bit_value_binop (subcode, TREE_TYPE (lhs), rhs1,
1865 gimple_assign_rhs2 (stmt));
1866 break;
1867
1868 default:;
1869 }
1870 }
1871 else if (code == GIMPLE_COND)
1872 {
1873 enum tree_code code = gimple_cond_code (stmt);
1874 tree rhs1 = gimple_cond_lhs (stmt);
1875 tree rhs2 = gimple_cond_rhs (stmt);
1876 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
1877 || POINTER_TYPE_P (TREE_TYPE (rhs1)))
1878 val = bit_value_binop (code, TREE_TYPE (rhs1), rhs1, rhs2);
1879 }
1880 else if (gimple_call_builtin_p (stmt, BUILT_IN_NORMAL))
1881 {
1882 tree fndecl = gimple_call_fndecl (stmt);
1883 switch (DECL_FUNCTION_CODE (fndecl))
1884 {
1885 case BUILT_IN_MALLOC:
1886 case BUILT_IN_REALLOC:
1887 case BUILT_IN_CALLOC:
1888 case BUILT_IN_STRDUP:
1889 case BUILT_IN_STRNDUP:
1890 val.lattice_val = CONSTANT;
1891 val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0);
1892 val.mask = ~((HOST_WIDE_INT) MALLOC_ABI_ALIGNMENT
1893 / BITS_PER_UNIT - 1);
1894 break;
1895
1896 CASE_BUILT_IN_ALLOCA:
1897 align = (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_ALLOCA
1898 ? BIGGEST_ALIGNMENT
1899 : TREE_INT_CST_LOW (gimple_call_arg (stmt, 1)));
1900 val.lattice_val = CONSTANT;
1901 val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0);
1902 val.mask = ~((HOST_WIDE_INT) align / BITS_PER_UNIT - 1);
1903 break;
1904
1905 /* These builtins return their first argument, unmodified. */
1906 case BUILT_IN_MEMCPY:
1907 case BUILT_IN_MEMMOVE:
1908 case BUILT_IN_MEMSET:
1909 case BUILT_IN_STRCPY:
1910 case BUILT_IN_STRNCPY:
1911 case BUILT_IN_MEMCPY_CHK:
1912 case BUILT_IN_MEMMOVE_CHK:
1913 case BUILT_IN_MEMSET_CHK:
1914 case BUILT_IN_STRCPY_CHK:
1915 case BUILT_IN_STRNCPY_CHK:
1916 val = get_value_for_expr (gimple_call_arg (stmt, 0), true);
1917 break;
1918
1919 case BUILT_IN_ASSUME_ALIGNED:
1920 val = bit_value_assume_aligned (stmt, NULL_TREE, val, false);
1921 break;
1922
1923 case BUILT_IN_ALIGNED_ALLOC:
1924 {
1925 tree align = get_constant_value (gimple_call_arg (stmt, 0));
1926 if (align
1927 && tree_fits_uhwi_p (align))
1928 {
1929 unsigned HOST_WIDE_INT aligni = tree_to_uhwi (align);
1930 if (aligni > 1
1931 /* align must be power-of-two */
1932 && (aligni & (aligni - 1)) == 0)
1933 {
1934 val.lattice_val = CONSTANT;
1935 val.value = build_int_cst (ptr_type_node, 0);
1936 val.mask = -aligni;
1937 }
1938 }
1939 break;
1940 }
1941
1942 default:;
1943 }
1944 }
1945 if (is_gimple_call (stmt) && gimple_call_lhs (stmt))
1946 {
1947 tree fntype = gimple_call_fntype (stmt);
1948 if (fntype)
1949 {
1950 tree attrs = lookup_attribute ("assume_aligned",
1951 TYPE_ATTRIBUTES (fntype));
1952 if (attrs)
1953 val = bit_value_assume_aligned (stmt, attrs, val, false);
1954 attrs = lookup_attribute ("alloc_align",
1955 TYPE_ATTRIBUTES (fntype));
1956 if (attrs)
1957 val = bit_value_assume_aligned (stmt, attrs, val, true);
1958 }
1959 }
1960 is_constant = (val.lattice_val == CONSTANT);
1961 }
1962
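  /* Below we additionally intersect the computed value with any nonzero-bits
     information recorded for the LHS (see get_nonzero_bits).  Illustrative
     example with made-up numbers: if the recorded nonzero bits are 0xff and
     the statement otherwise evaluated to VARYING, we can still record
     CONSTANT 0 with mask 0xff, i.e. only the low eight bits remain unknown.  */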
1963 if (flag_tree_bit_ccp
1964 && ((is_constant && TREE_CODE (val.value) == INTEGER_CST)
1965 || !is_constant)
1966 && gimple_get_lhs (stmt)
1967 && TREE_CODE (gimple_get_lhs (stmt)) == SSA_NAME)
1968 {
1969 tree lhs = gimple_get_lhs (stmt);
1970 wide_int nonzero_bits = get_nonzero_bits (lhs);
1971 if (nonzero_bits != -1)
1972 {
1973 if (!is_constant)
1974 {
1975 val.lattice_val = CONSTANT;
1976 val.value = build_zero_cst (TREE_TYPE (lhs));
1977 val.mask = extend_mask (nonzero_bits, TYPE_SIGN (TREE_TYPE (lhs)));
1978 is_constant = true;
1979 }
1980 else
1981 {
1982 if (wi::bit_and_not (wi::to_wide (val.value), nonzero_bits) != 0)
1983 val.value = wide_int_to_tree (TREE_TYPE (lhs),
1984 nonzero_bits
1985 & wi::to_wide (val.value));
1986 if (nonzero_bits == 0)
1987 val.mask = 0;
1988 else
1989 val.mask = val.mask & extend_mask (nonzero_bits,
1990 TYPE_SIGN (TREE_TYPE (lhs)));
1991 }
1992 }
1993 }
1994
1995 /* The statement produced a nonconstant value. */
1996 if (!is_constant)
1997 {
1998 /* The statement produced a copy. */
1999 if (simplified && TREE_CODE (simplified) == SSA_NAME
2000 && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (simplified))
2001 {
2002 val.lattice_val = CONSTANT;
2003 val.value = simplified;
2004 val.mask = -1;
2005 }
2006 /* The statement is VARYING. */
2007 else
2008 {
2009 val.lattice_val = VARYING;
2010 val.value = NULL_TREE;
2011 val.mask = -1;
2012 }
2013 }
2014
2015 return val;
2016 }
2017
2018 typedef hash_table<nofree_ptr_hash<gimple> > gimple_htab;
2019
2020 /* Given a BUILT_IN_STACK_SAVE value SAVED_VAL, insert a clobber of VAR before
2021 each matching BUILT_IN_STACK_RESTORE. Mark visited phis in VISITED. */
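/* For illustration, the IL this produces looks roughly like the following
   (the SSA name and variable name below are made up):

     saved_1 = __builtin_stack_save ();
     ...
     var ={v} {CLOBBER};
     __builtin_stack_restore (saved_1);  */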
2022
2023 static void
2024 insert_clobber_before_stack_restore (tree saved_val, tree var,
2025 gimple_htab **visited)
2026 {
2027 gimple *stmt;
2028 gassign *clobber_stmt;
2029 tree clobber;
2030 imm_use_iterator iter;
2031 gimple_stmt_iterator i;
2032 gimple **slot;
2033
2034 FOR_EACH_IMM_USE_STMT (stmt, iter, saved_val)
2035 if (gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
2036 {
2037 clobber = build_constructor (TREE_TYPE (var),
2038 NULL);
2039 TREE_THIS_VOLATILE (clobber) = 1;
2040 clobber_stmt = gimple_build_assign (var, clobber);
2041
2042 i = gsi_for_stmt (stmt);
2043 gsi_insert_before (&i, clobber_stmt, GSI_SAME_STMT);
2044 }
2045 else if (gimple_code (stmt) == GIMPLE_PHI)
2046 {
2047 if (!*visited)
2048 *visited = new gimple_htab (10);
2049
2050 slot = (*visited)->find_slot (stmt, INSERT);
2051 if (*slot != NULL)
2052 continue;
2053
2054 *slot = stmt;
2055 insert_clobber_before_stack_restore (gimple_phi_result (stmt), var,
2056 visited);
2057 }
2058 else if (gimple_assign_ssa_name_copy_p (stmt))
2059 insert_clobber_before_stack_restore (gimple_assign_lhs (stmt), var,
2060 visited);
2061 else if (chkp_gimple_call_builtin_p (stmt, BUILT_IN_CHKP_BNDRET))
2062 continue;
2063 else
2064 gcc_assert (is_gimple_debug (stmt));
2065 }
2066
2067 /* Advance the iterator to the previous non-debug gimple statement in the same
2068 or dominating basic block. */
2069
2070 static inline void
2071 gsi_prev_dom_bb_nondebug (gimple_stmt_iterator *i)
2072 {
2073 basic_block dom;
2074
2075 gsi_prev_nondebug (i);
2076 while (gsi_end_p (*i))
2077 {
2078 dom = get_immediate_dominator (CDI_DOMINATORS, i->bb);
2079 if (dom == NULL || dom == ENTRY_BLOCK_PTR_FOR_FN (cfun))
2080 return;
2081
2082 *i = gsi_last_bb (dom);
2083 }
2084 }
2085
2086 /* Find a BUILT_IN_STACK_SAVE dominating gsi_stmt (I), and insert
2087 a clobber of VAR before each matching BUILT_IN_STACK_RESTORE.
2088
2089 It is possible that BUILT_IN_STACK_SAVE cannot be found in a dominator when a
2090 previous pass (such as DOM) duplicated it along multiple paths to a BB. In
2091 that case the function gives up without inserting the clobbers. */
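/* The clobbers matter once fold_builtin_alloca_with_align has turned an
   alloca into a fixed-size local array: the array's stack slot can only be
   reused if later passes can see where its lifetime ends.  A rough
   source-level sketch (helper names and sizes are made up):

     for (int i = 0; i < n; i++)
       {
	 void *p = __builtin_alloca_with_align (16, 128);
	 use (p);
       }    // the stack_restore ending each iteration gets a clobber of
	    //   the replacement array inserted before it.  */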
2092
2093 static void
2094 insert_clobbers_for_var (gimple_stmt_iterator i, tree var)
2095 {
2096 gimple *stmt;
2097 tree saved_val;
2098 gimple_htab *visited = NULL;
2099
2100 for (; !gsi_end_p (i); gsi_prev_dom_bb_nondebug (&i))
2101 {
2102 stmt = gsi_stmt (i);
2103
2104 if (!gimple_call_builtin_p (stmt, BUILT_IN_STACK_SAVE))
2105 continue;
2106
2107 saved_val = gimple_call_lhs (stmt);
2108 if (saved_val == NULL_TREE)
2109 continue;
2110
2111 insert_clobber_before_stack_restore (saved_val, var, &visited);
2112 break;
2113 }
2114
2115 delete visited;
2116 }
2117
2118 /* Detect a __builtin_alloca_with_align with a constant size argument. If
2119 found, declare a fixed-size array to replace it and return the address of
2120 that array; otherwise return NULL_TREE. */
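/* A minimal sketch of the transformation at the source level (D.1234 stands
   for the compiler-generated array and is made up):

     p = __builtin_alloca_with_align (16, 64);
   becomes
     p = &D.1234;

   where D.1234 is an array of 16 byte-sized elements whose DECL_ALIGN is set
   to the requested 64 bits.  */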
2121
2122 static tree
2123 fold_builtin_alloca_with_align (gimple *stmt)
2124 {
2125 unsigned HOST_WIDE_INT size, threshold, n_elem;
2126 tree lhs, arg, block, var, elem_type, array_type;
2127
2128 /* Get lhs. */
2129 lhs = gimple_call_lhs (stmt);
2130 if (lhs == NULL_TREE)
2131 return NULL_TREE;
2132
2133 /* Detect constant argument. */
2134 arg = get_constant_value (gimple_call_arg (stmt, 0));
2135 if (arg == NULL_TREE
2136 || TREE_CODE (arg) != INTEGER_CST
2137 || !tree_fits_uhwi_p (arg))
2138 return NULL_TREE;
2139
2140 size = tree_to_uhwi (arg);
2141
2142 /* Heuristic: don't fold large allocas. */
2143 threshold = (unsigned HOST_WIDE_INT)PARAM_VALUE (PARAM_LARGE_STACK_FRAME);
2144 /* If the alloca is located at function entry, it has the same lifetime
2145 as a declared array, so we allow a larger size. */
2146 block = gimple_block (stmt);
2147 if (!(cfun->after_inlining
2148 && block
2149 && TREE_CODE (BLOCK_SUPERCONTEXT (block)) == FUNCTION_DECL))
2150 threshold /= 10;
2151 if (size > threshold)
2152 return NULL_TREE;
2153
2154 /* Declare array. */
2155 elem_type = build_nonstandard_integer_type (BITS_PER_UNIT, 1);
2156 n_elem = size * 8 / BITS_PER_UNIT;
2157 array_type = build_array_type_nelts (elem_type, n_elem);
2158 var = create_tmp_var (array_type);
2159 SET_DECL_ALIGN (var, TREE_INT_CST_LOW (gimple_call_arg (stmt, 1)));
2160 {
2161 struct ptr_info_def *pi = SSA_NAME_PTR_INFO (lhs);
2162 if (pi != NULL && !pi->pt.anything)
2163 {
2164 bool singleton_p;
2165 unsigned uid;
2166 singleton_p = pt_solution_singleton_or_null_p (&pi->pt, &uid);
2167 gcc_assert (singleton_p);
2168 SET_DECL_PT_UID (var, uid);
2169 }
2170 }
2171
2172 /* Fold alloca to the address of the array. */
2173 return fold_convert (TREE_TYPE (lhs), build_fold_addr_expr (var));
2174 }
2175
2176 /* Fold the stmt at *GSI with CCP-specific information that propagation
2177 and regular folding do not catch. */
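/* For example (illustrative SSA names): if the lattice knows x_2 is the
   constant 0 with no unknown bits, the conditional

     if (x_2 != 0)

   is folded to an always-false condition via gimple_cond_make_false, even
   when type mismatches would otherwise prevent propagating the constant
   into the condition itself.  */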
2178
2179 static bool
2180 ccp_fold_stmt (gimple_stmt_iterator *gsi)
2181 {
2182 gimple *stmt = gsi_stmt (*gsi);
2183
2184 switch (gimple_code (stmt))
2185 {
2186 case GIMPLE_COND:
2187 {
2188 gcond *cond_stmt = as_a <gcond *> (stmt);
2189 ccp_prop_value_t val;
2190 /* Statement evaluation will handle type mismatches in constants
2191 more gracefully than the final propagation. This allows us to
2192 fold more conditionals here. */
2193 val = evaluate_stmt (stmt);
2194 if (val.lattice_val != CONSTANT
2195 || val.mask != 0)
2196 return false;
2197
2198 if (dump_file)
2199 {
2200 fprintf (dump_file, "Folding predicate ");
2201 print_gimple_expr (dump_file, stmt, 0);
2202 fprintf (dump_file, " to ");
2203 print_generic_expr (dump_file, val.value);
2204 fprintf (dump_file, "\n");
2205 }
2206
2207 if (integer_zerop (val.value))
2208 gimple_cond_make_false (cond_stmt);
2209 else
2210 gimple_cond_make_true (cond_stmt);
2211
2212 return true;
2213 }
2214
2215 case GIMPLE_CALL:
2216 {
2217 tree lhs = gimple_call_lhs (stmt);
2218 int flags = gimple_call_flags (stmt);
2219 tree val;
2220 tree argt;
2221 bool changed = false;
2222 unsigned i;
2223
2224 /* If the call was folded into a constant, make sure it goes
2225 away even if we cannot propagate into all uses because of
2226 type issues. */
2227 if (lhs
2228 && TREE_CODE (lhs) == SSA_NAME
2229 && (val = get_constant_value (lhs))
2230 /* Don't optimize away calls that have side-effects. */
2231 && (flags & (ECF_CONST|ECF_PURE)) != 0
2232 && (flags & ECF_LOOPING_CONST_OR_PURE) == 0)
2233 {
2234 tree new_rhs = unshare_expr (val);
2235 bool res;
2236 if (!useless_type_conversion_p (TREE_TYPE (lhs),
2237 TREE_TYPE (new_rhs)))
2238 new_rhs = fold_convert (TREE_TYPE (lhs), new_rhs);
2239 res = update_call_from_tree (gsi, new_rhs);
2240 gcc_assert (res);
2241 return true;
2242 }
2243
2244 /* Internal calls provide no argument types, so the extra laxity
2245 for normal calls does not apply. */
2246 if (gimple_call_internal_p (stmt))
2247 return false;
2248
2249 /* The heuristic of fold_builtin_alloca_with_align differs before and
2250 after inlining, so we don't require the arg to have been folded into
2251 a constant in the IL, only that its value is known to be constant. */
2252 if (gimple_call_builtin_p (stmt, BUILT_IN_ALLOCA_WITH_ALIGN)
2253 || gimple_call_builtin_p (stmt, BUILT_IN_ALLOCA_WITH_ALIGN_AND_MAX))
2254 {
2255 tree new_rhs = fold_builtin_alloca_with_align (stmt);
2256 if (new_rhs)
2257 {
2258 bool res = update_call_from_tree (gsi, new_rhs);
2259 tree var = TREE_OPERAND (TREE_OPERAND (new_rhs, 0), 0);
2260 gcc_assert (res);
2261 insert_clobbers_for_var (*gsi, var);
2262 return true;
2263 }
2264 }
2265
2266 /* Propagate into the call arguments. Compared to replace_uses_in
2267 this can use the argument slot types for type verification
2268 instead of the current argument type. We also can safely
2269 drop qualifiers here as we are dealing with constants anyway. */
2270 argt = TYPE_ARG_TYPES (gimple_call_fntype (stmt));
2271 for (i = 0; i < gimple_call_num_args (stmt) && argt;
2272 ++i, argt = TREE_CHAIN (argt))
2273 {
2274 tree arg = gimple_call_arg (stmt, i);
2275 if (TREE_CODE (arg) == SSA_NAME
2276 && (val = get_constant_value (arg))
2277 && useless_type_conversion_p
2278 (TYPE_MAIN_VARIANT (TREE_VALUE (argt)),
2279 TYPE_MAIN_VARIANT (TREE_TYPE (val))))
2280 {
2281 gimple_call_set_arg (stmt, i, unshare_expr (val));
2282 changed = true;
2283 }
2284 }
2285
2286 return changed;
2287 }
2288
2289 case GIMPLE_ASSIGN:
2290 {
2291 tree lhs = gimple_assign_lhs (stmt);
2292 tree val;
2293
2294 /* If we have a load that turned out to be constant, replace it,
2295 as we cannot propagate into all uses in all cases. */
2296 if (gimple_assign_single_p (stmt)
2297 && TREE_CODE (lhs) == SSA_NAME
2298 && (val = get_constant_value (lhs)))
2299 {
2300 tree rhs = unshare_expr (val);
2301 if (!useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (rhs)))
2302 rhs = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (lhs), rhs);
2303 gimple_assign_set_rhs_from_tree (gsi, rhs);
2304 return true;
2305 }
2306
2307 return false;
2308 }
2309
2310 default:
2311 return false;
2312 }
2313 }
2314
2315 /* Visit the assignment statement STMT. Set the value of its LHS to the
2316 value computed by the RHS and store LHS in *OUTPUT_P. If STMT
2317 creates virtual definitions, set the value of each new name to that
2318 of the RHS (if we can derive a constant out of the RHS).
2319 Value-returning call statements also perform an assignment, and
2320 are handled here. */
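/* A small example of the lattice transition this performs (SSA names are
   made up): for the statement

     x_3 = y_4 + 1;

   where y_4 currently has lattice value CONSTANT 5, evaluate_stmt yields
   CONSTANT 6; if that changes x_3's lattice value, *OUTPUT_P is set to x_3
   and SSA_PROP_INTERESTING is returned so the uses of x_3 get simulated.  */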
2321
2322 static enum ssa_prop_result
2323 visit_assignment (gimple *stmt, tree *output_p)
2324 {
2325 ccp_prop_value_t val;
2326 enum ssa_prop_result retval = SSA_PROP_NOT_INTERESTING;
2327
2328 tree lhs = gimple_get_lhs (stmt);
2329 if (TREE_CODE (lhs) == SSA_NAME)
2330 {
2331 /* Evaluate the statement, which could be
2332 either a GIMPLE_ASSIGN or a GIMPLE_CALL. */
2333 val = evaluate_stmt (stmt);
2334
2335 /* If STMT is an assignment to an SSA_NAME, we only have one
2336 value to set. */
2337 if (set_lattice_value (lhs, &val))
2338 {
2339 *output_p = lhs;
2340 if (val.lattice_val == VARYING)
2341 retval = SSA_PROP_VARYING;
2342 else
2343 retval = SSA_PROP_INTERESTING;
2344 }
2345 }
2346
2347 return retval;
2348 }
2349
2350
2351 /* Visit the conditional statement STMT. Return SSA_PROP_INTERESTING
2352 if it can determine which edge will be taken. Otherwise, return
2353 SSA_PROP_VARYING. */
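/* For instance (made-up SSA names), for

     if (i_2 > 10)

   with i_2 known to be CONSTANT 3, evaluate_stmt gives a constant false
   predicate, find_taken_edge returns the false edge, and the result is
   SSA_PROP_INTERESTING so only that edge is added to the worklist.  */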
2354
2355 static enum ssa_prop_result
2356 visit_cond_stmt (gimple *stmt, edge *taken_edge_p)
2357 {
2358 ccp_prop_value_t val;
2359 basic_block block;
2360
2361 block = gimple_bb (stmt);
2362 val = evaluate_stmt (stmt);
2363 if (val.lattice_val != CONSTANT
2364 || val.mask != 0)
2365 return SSA_PROP_VARYING;
2366
2367 /* Find which edge out of the conditional block will be taken and add it
2368 to the worklist. If no single edge can be determined statically,
2369 return SSA_PROP_VARYING to feed all the outgoing edges to the
2370 propagation engine. */
2371 *taken_edge_p = find_taken_edge (block, val.value);
2372 if (*taken_edge_p)
2373 return SSA_PROP_INTERESTING;
2374 else
2375 return SSA_PROP_VARYING;
2376 }
2377
2378
2379 /* Evaluate statement STMT. If the statement produces an output value and
2380 its evaluation changes the lattice value of its output, return
2381 SSA_PROP_INTERESTING and set *OUTPUT_P to the SSA_NAME holding the
2382 output value.
2383
2384 If STMT is a conditional branch and we can determine its truth
2385 value, set *TAKEN_EDGE_P accordingly. If STMT produces a varying
2386 value, return SSA_PROP_VARYING. */
2387
2388 enum ssa_prop_result
2389 ccp_propagate::visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
2390 {
2391 tree def;
2392 ssa_op_iter iter;
2393
2394 if (dump_file && (dump_flags & TDF_DETAILS))
2395 {
2396 fprintf (dump_file, "\nVisiting statement:\n");
2397 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
2398 }
2399
2400 switch (gimple_code (stmt))
2401 {
2402 case GIMPLE_ASSIGN:
2403 /* If the statement is an assignment that produces a single
2404 output value, evaluate its RHS to see if the lattice value of
2405 its output has changed. */
2406 return visit_assignment (stmt, output_p);
2407
2408 case GIMPLE_CALL:
2409 /* A value-returning call also performs an assignment. */
2410 if (gimple_call_lhs (stmt) != NULL_TREE)
2411 return visit_assignment (stmt, output_p);
2412 break;
2413
2414 case GIMPLE_COND:
2415 case GIMPLE_SWITCH:
2416 /* If STMT is a conditional branch, see if we can determine
2417 which branch will be taken. */
2418 /* FIXME. It appears that we should be able to optimize
2419 computed GOTOs here as well. */
2420 return visit_cond_stmt (stmt, taken_edge_p);
2421
2422 default:
2423 break;
2424 }
2425
2426 /* Any other kind of statement is not interesting for constant
2427 propagation and, therefore, not worth simulating. */
2428 if (dump_file && (dump_flags & TDF_DETAILS))
2429 fprintf (dump_file, "No interesting values produced. Marked VARYING.\n");
2430
2431 /* Definitions made by statements other than assignments to
2432 SSA_NAMEs represent unknown modifications to their outputs.
2433 Mark them VARYING. */
2434 FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
2435 set_value_varying (def);
2436
2437 return SSA_PROP_VARYING;
2438 }
2439
2440
2441 /* Main entry point for SSA Conditional Constant Propagation. If NONZERO_P,
2442 record nonzero bits. */
2443
2444 static unsigned int
2445 do_ssa_ccp (bool nonzero_p)
2446 {
2447 unsigned int todo = 0;
2448 calculate_dominance_info (CDI_DOMINATORS);
2449
2450 ccp_initialize ();
2451 class ccp_propagate ccp_propagate;
2452 ccp_propagate.ssa_propagate ();
2453 if (ccp_finalize (nonzero_p || flag_ipa_bit_cp))
2454 {
2455 todo = (TODO_cleanup_cfg | TODO_update_ssa);
2456
2457 /* ccp_finalize does not preserve loop-closed ssa. */
2458 loops_state_clear (LOOP_CLOSED_SSA);
2459 }
2460
2461 free_dominance_info (CDI_DOMINATORS);
2462 return todo;
2463 }
2464
2465
2466 namespace {
2467
2468 const pass_data pass_data_ccp =
2469 {
2470 GIMPLE_PASS, /* type */
2471 "ccp", /* name */
2472 OPTGROUP_NONE, /* optinfo_flags */
2473 TV_TREE_CCP, /* tv_id */
2474 ( PROP_cfg | PROP_ssa ), /* properties_required */
2475 0, /* properties_provided */
2476 0, /* properties_destroyed */
2477 0, /* todo_flags_start */
2478 TODO_update_address_taken, /* todo_flags_finish */
2479 };
2480
2481 class pass_ccp : public gimple_opt_pass
2482 {
2483 public:
2484 pass_ccp (gcc::context *ctxt)
2485 : gimple_opt_pass (pass_data_ccp, ctxt), nonzero_p (false)
2486 {}
2487
2488 /* opt_pass methods: */
2489 opt_pass * clone () { return new pass_ccp (m_ctxt); }
2490 void set_pass_param (unsigned int n, bool param)
2491 {
2492 gcc_assert (n == 0);
2493 nonzero_p = param;
2494 }
2495 virtual bool gate (function *) { return flag_tree_ccp != 0; }
2496 virtual unsigned int execute (function *) { return do_ssa_ccp (nonzero_p); }
2497
2498 private:
2499 /* Determines whether the pass instance records nonzero bits. */
2500 bool nonzero_p;
2501 }; // class pass_ccp
2502
2503 } // anon namespace
2504
2505 gimple_opt_pass *
2506 make_pass_ccp (gcc::context *ctxt)
2507 {
2508 return new pass_ccp (ctxt);
2509 }
2510
2511
2512
2513 /* Try to optimize out __builtin_stack_restore. Optimize it out
2514 if there is another __builtin_stack_restore in the same basic
2515 block and no calls or ASM_EXPRs are in between, or if this block's
2516 only outgoing edge is to EXIT_BLOCK and there are no calls or
2517 ASM_EXPRs after this __builtin_stack_restore. */
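/* Sketch of the first case (made-up SSA names): in

     __builtin_stack_restore (saved_1);
     x_3 = x_2 + 1;
     __builtin_stack_restore (saved_1);

   the first restore is redundant because nothing between the two calls can
   allocate stack, so it is replaced by a no-op (integer_zero_node below).  */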
2518
2519 static tree
2520 optimize_stack_restore (gimple_stmt_iterator i)
2521 {
2522 tree callee;
2523 gimple *stmt;
2524
2525 basic_block bb = gsi_bb (i);
2526 gimple *call = gsi_stmt (i);
2527
2528 if (gimple_code (call) != GIMPLE_CALL
2529 || gimple_call_num_args (call) != 1
2530 || TREE_CODE (gimple_call_arg (call, 0)) != SSA_NAME
2531 || !POINTER_TYPE_P (TREE_TYPE (gimple_call_arg (call, 0))))
2532 return NULL_TREE;
2533
2534 for (gsi_next (&i); !gsi_end_p (i); gsi_next (&i))
2535 {
2536 stmt = gsi_stmt (i);
2537 if (gimple_code (stmt) == GIMPLE_ASM)
2538 return NULL_TREE;
2539 if (gimple_code (stmt) != GIMPLE_CALL)
2540 continue;
2541
2542 callee = gimple_call_fndecl (stmt);
2543 if (!callee
2544 || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL
2545 /* All regular builtins are ok, just obviously not alloca. */
2546 || ALLOCA_FUNCTION_CODE_P (DECL_FUNCTION_CODE (callee)))
2547 return NULL_TREE;
2548
2549 if (DECL_FUNCTION_CODE (callee) == BUILT_IN_STACK_RESTORE)
2550 goto second_stack_restore;
2551 }
2552
2553 if (!gsi_end_p (i))
2554 return NULL_TREE;
2555
2556 /* Allow a single successor edge to the exit block, or no successors at all. */
2557 switch (EDGE_COUNT (bb->succs))
2558 {
2559 case 0:
2560 break;
2561 case 1:
2562 if (single_succ_edge (bb)->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
2563 return NULL_TREE;
2564 break;
2565 default:
2566 return NULL_TREE;
2567 }
2568 second_stack_restore:
2569
2570 /* If there's exactly one use, then zap the call to __builtin_stack_save.
2571 If there are multiple uses, then the last one should remove the call.
2572 In any case, whether the call to __builtin_stack_save can be removed
2573 or not is irrelevant to removing the call to __builtin_stack_restore. */
2574 if (has_single_use (gimple_call_arg (call, 0)))
2575 {
2576 gimple *stack_save = SSA_NAME_DEF_STMT (gimple_call_arg (call, 0));
2577 if (is_gimple_call (stack_save))
2578 {
2579 callee = gimple_call_fndecl (stack_save);
2580 if (callee
2581 && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
2582 && DECL_FUNCTION_CODE (callee) == BUILT_IN_STACK_SAVE)
2583 {
2584 gimple_stmt_iterator stack_save_gsi;
2585 tree rhs;
2586
2587 stack_save_gsi = gsi_for_stmt (stack_save);
2588 rhs = build_int_cst (TREE_TYPE (gimple_call_arg (call, 0)), 0);
2589 update_call_from_tree (&stack_save_gsi, rhs);
2590 }
2591 }
2592 }
2593
2594 /* No effect, so the statement will be deleted. */
2595 return integer_zero_node;
2596 }
2597
2598 /* If the va_list type is a simple pointer and nothing special is needed,
2599 optimize __builtin_va_start (&ap, 0) into ap = __builtin_next_arg (0),
2600 optimize __builtin_va_end (&ap) out as a no-op, and turn __builtin_va_copy
2601 into a simple pointer assignment. */
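/* On such targets the transformation amounts to the following, as seen at
   the GIMPLE level (ap, d and s are illustrative va_list objects):

     __builtin_va_start (&ap, 0);   ->   ap = __builtin_next_arg (0);
     __builtin_va_copy (&d, s);     ->   d = s;
     __builtin_va_end (&ap);        ->   (removed)  */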
2602
2603 static tree
2604 optimize_stdarg_builtin (gimple *call)
2605 {
2606 tree callee, lhs, rhs, cfun_va_list;
2607 bool va_list_simple_ptr;
2608 location_t loc = gimple_location (call);
2609
2610 if (gimple_code (call) != GIMPLE_CALL)
2611 return NULL_TREE;
2612
2613 callee = gimple_call_fndecl (call);
2614
2615 cfun_va_list = targetm.fn_abi_va_list (callee);
2616 va_list_simple_ptr = POINTER_TYPE_P (cfun_va_list)
2617 && (TREE_TYPE (cfun_va_list) == void_type_node
2618 || TREE_TYPE (cfun_va_list) == char_type_node);
2619
2620 switch (DECL_FUNCTION_CODE (callee))
2621 {
2622 case BUILT_IN_VA_START:
2623 if (!va_list_simple_ptr
2624 || targetm.expand_builtin_va_start != NULL
2625 || !builtin_decl_explicit_p (BUILT_IN_NEXT_ARG))
2626 return NULL_TREE;
2627
2628 if (gimple_call_num_args (call) != 2)
2629 return NULL_TREE;
2630
2631 lhs = gimple_call_arg (call, 0);
2632 if (!POINTER_TYPE_P (TREE_TYPE (lhs))
2633 || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs)))
2634 != TYPE_MAIN_VARIANT (cfun_va_list))
2635 return NULL_TREE;
2636
2637 lhs = build_fold_indirect_ref_loc (loc, lhs);
2638 rhs = build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_NEXT_ARG),
2639 1, integer_zero_node);
2640 rhs = fold_convert_loc (loc, TREE_TYPE (lhs), rhs);
2641 return build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, rhs);
2642
2643 case BUILT_IN_VA_COPY:
2644 if (!va_list_simple_ptr)
2645 return NULL_TREE;
2646
2647 if (gimple_call_num_args (call) != 2)
2648 return NULL_TREE;
2649
2650 lhs = gimple_call_arg (call, 0);
2651 if (!POINTER_TYPE_P (TREE_TYPE (lhs))
2652 || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs)))
2653 != TYPE_MAIN_VARIANT (cfun_va_list))
2654 return NULL_TREE;
2655
2656 lhs = build_fold_indirect_ref_loc (loc, lhs);
2657 rhs = gimple_call_arg (call, 1);
2658 if (TYPE_MAIN_VARIANT (TREE_TYPE (rhs))
2659 != TYPE_MAIN_VARIANT (cfun_va_list))
2660 return NULL_TREE;
2661
2662 rhs = fold_convert_loc (loc, TREE_TYPE (lhs), rhs);
2663 return build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, rhs);
2664
2665 case BUILT_IN_VA_END:
2666 /* No effect, so the statement will be deleted. */
2667 return integer_zero_node;
2668
2669 default:
2670 gcc_unreachable ();
2671 }
2672 }
2673
2674 /* Attempt to make the block of the __builtin_unreachable at I unreachable by
2675 changing the incoming jumps. Return true if at least one jump was changed. */
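/* Illustrative shape of the CFG change (block and SSA names are made up):

     if (x_1 != 0)
       goto bb_3;	// bb_3 starts with __builtin_unreachable ()
     else
       goto bb_4;

   The edge into bb_3 is cut by rewriting the condition with
   gimple_cond_make_false (or _true for the opposite edge), after which bb_3
   becomes unreachable and is cleaned up later.  */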
2676
2677 static bool
2678 optimize_unreachable (gimple_stmt_iterator i)
2679 {
2680 basic_block bb = gsi_bb (i);
2681 gimple_stmt_iterator gsi;
2682 gimple *stmt;
2683 edge_iterator ei;
2684 edge e;
2685 bool ret;
2686
2687 if (flag_sanitize & SANITIZE_UNREACHABLE)
2688 return false;
2689
2690 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2691 {
2692 stmt = gsi_stmt (gsi);
2693
2694 if (is_gimple_debug (stmt))
2695 continue;
2696
2697 if (glabel *label_stmt = dyn_cast <glabel *> (stmt))
2698 {
2699 /* Verify we do not need to preserve the label. */
2700 if (FORCED_LABEL (gimple_label_label (label_stmt)))
2701 return false;
2702
2703 continue;
2704 }
2705
2706 /* Only handle the case that __builtin_unreachable is the first statement
2707 in the block. We rely on DCE to remove stmts without side-effects
2708 before __builtin_unreachable. */
2709 if (gsi_stmt (gsi) != gsi_stmt (i))
2710 return false;
2711 }
2712
2713 ret = false;
2714 FOR_EACH_EDGE (e, ei, bb->preds)
2715 {
2716 gsi = gsi_last_bb (e->src);
2717 if (gsi_end_p (gsi))
2718 continue;
2719
2720 stmt = gsi_stmt (gsi);
2721 if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
2722 {
2723 if (e->flags & EDGE_TRUE_VALUE)
2724 gimple_cond_make_false (cond_stmt);
2725 else if (e->flags & EDGE_FALSE_VALUE)
2726 gimple_cond_make_true (cond_stmt);
2727 else
2728 gcc_unreachable ();
2729 update_stmt (cond_stmt);
2730 }
2731 else
2732 {
2733 /* Todo: handle other cases. Note that unreachable switch case
2734 statements have already been removed. */
2735 continue;
2736 }
2737
2738 ret = true;
2739 }
2740
2741 return ret;
2742 }
2743
2744 /* Optimize
2745 mask_2 = 1 << cnt_1;
2746 _4 = __atomic_fetch_or_* (ptr_6, mask_2, _3);
2747 _5 = _4 & mask_2;
2748 to
2749 _4 = ATOMIC_BIT_TEST_AND_SET (ptr_6, cnt_1, 0, _3);
2750 _5 = _4;
2751 If _5 is only used in _5 != 0 or _5 == 0 comparisons, 1
2752 is passed instead of 0, and the builtin just returns a zero
2753 or 1 value instead of the actual bit.
2754 Similarly for __sync_fetch_and_or_* (without the ", _3" part
2755 in there), and/or if mask_2 is a power of 2 constant.
2756 Similarly for xor instead of or, use ATOMIC_BIT_TEST_AND_COMPLEMENT
2757 in that case. And similarly for and instead of or, except that
2758 the second argument to the builtin needs to be one's complement
2759 of the mask instead of mask. */
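/* A typical source pattern that matches (illustrative):

     if (__atomic_fetch_or (&flags, 1u << bit, __ATOMIC_SEQ_CST) & (1u << bit))
       ...

   which, on targets providing the atomic_bit_test_and_set optab, can become
   a single bit-test-and-set style instruction plus a branch on its result.  */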
2760
2761 static void
2762 optimize_atomic_bit_test_and (gimple_stmt_iterator *gsip,
2763 enum internal_fn fn, bool has_model_arg,
2764 bool after)
2765 {
2766 gimple *call = gsi_stmt (*gsip);
2767 tree lhs = gimple_call_lhs (call);
2768 use_operand_p use_p;
2769 gimple *use_stmt;
2770 tree mask, bit;
2771 optab optab;
2772
2773 if (!flag_inline_atomics
2774 || optimize_debug
2775 || !gimple_call_builtin_p (call, BUILT_IN_NORMAL)
2776 || !lhs
2777 || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs)
2778 || !single_imm_use (lhs, &use_p, &use_stmt)
2779 || !is_gimple_assign (use_stmt)
2780 || gimple_assign_rhs_code (use_stmt) != BIT_AND_EXPR
2781 || !gimple_vdef (call))
2782 return;
2783
2784 switch (fn)
2785 {
2786 case IFN_ATOMIC_BIT_TEST_AND_SET:
2787 optab = atomic_bit_test_and_set_optab;
2788 break;
2789 case IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT:
2790 optab = atomic_bit_test_and_complement_optab;
2791 break;
2792 case IFN_ATOMIC_BIT_TEST_AND_RESET:
2793 optab = atomic_bit_test_and_reset_optab;
2794 break;
2795 default:
2796 return;
2797 }
2798
2799 if (optab_handler (optab, TYPE_MODE (TREE_TYPE (lhs))) == CODE_FOR_nothing)
2800 return;
2801
2802 mask = gimple_call_arg (call, 1);
2803 tree use_lhs = gimple_assign_lhs (use_stmt);
2804 if (!use_lhs)
2805 return;
2806
2807 if (TREE_CODE (mask) == INTEGER_CST)
2808 {
2809 if (fn == IFN_ATOMIC_BIT_TEST_AND_RESET)
2810 mask = const_unop (BIT_NOT_EXPR, TREE_TYPE (mask), mask);
2811 mask = fold_convert (TREE_TYPE (lhs), mask);
2812 int ibit = tree_log2 (mask);
2813 if (ibit < 0)
2814 return;
2815 bit = build_int_cst (TREE_TYPE (lhs), ibit);
2816 }
2817 else if (TREE_CODE (mask) == SSA_NAME)
2818 {
2819 gimple *g = SSA_NAME_DEF_STMT (mask);
2820 if (fn == IFN_ATOMIC_BIT_TEST_AND_RESET)
2821 {
2822 if (!is_gimple_assign (g)
2823 || gimple_assign_rhs_code (g) != BIT_NOT_EXPR)
2824 return;
2825 mask = gimple_assign_rhs1 (g);
2826 if (TREE_CODE (mask) != SSA_NAME)
2827 return;
2828 g = SSA_NAME_DEF_STMT (mask);
2829 }
2830 if (!is_gimple_assign (g)
2831 || gimple_assign_rhs_code (g) != LSHIFT_EXPR
2832 || !integer_onep (gimple_assign_rhs1 (g)))
2833 return;
2834 bit = gimple_assign_rhs2 (g);
2835 }
2836 else
2837 return;
2838
2839 if (gimple_assign_rhs1 (use_stmt) == lhs)
2840 {
2841 if (!operand_equal_p (gimple_assign_rhs2 (use_stmt), mask, 0))
2842 return;
2843 }
2844 else if (gimple_assign_rhs2 (use_stmt) != lhs
2845 || !operand_equal_p (gimple_assign_rhs1 (use_stmt), mask, 0))
2846 return;
2847
2848 bool use_bool = true;
2849 bool has_debug_uses = false;
2850 imm_use_iterator iter;
2851 gimple *g;
2852
2853 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use_lhs))
2854 use_bool = false;
2855 FOR_EACH_IMM_USE_STMT (g, iter, use_lhs)
2856 {
2857 enum tree_code code = ERROR_MARK;
2858 tree op0 = NULL_TREE, op1 = NULL_TREE;
2859 if (is_gimple_debug (g))
2860 {
2861 has_debug_uses = true;
2862 continue;
2863 }
2864 else if (is_gimple_assign (g))
2865 switch (gimple_assign_rhs_code (g))
2866 {
2867 case COND_EXPR:
2868 op1 = gimple_assign_rhs1 (g);
2869 code = TREE_CODE (op1);
2870 op0 = TREE_OPERAND (op1, 0);
2871 op1 = TREE_OPERAND (op1, 1);
2872 break;
2873 case EQ_EXPR:
2874 case NE_EXPR:
2875 code = gimple_assign_rhs_code (g);
2876 op0 = gimple_assign_rhs1 (g);
2877 op1 = gimple_assign_rhs2 (g);
2878 break;
2879 default:
2880 break;
2881 }
2882 else if (gimple_code (g) == GIMPLE_COND)
2883 {
2884 code = gimple_cond_code (g);
2885 op0 = gimple_cond_lhs (g);
2886 op1 = gimple_cond_rhs (g);
2887 }
2888
2889 if ((code == EQ_EXPR || code == NE_EXPR)
2890 && op0 == use_lhs
2891 && integer_zerop (op1))
2892 {
2893 use_operand_p use_p;
2894 int n = 0;
2895 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2896 n++;
2897 if (n == 1)
2898 continue;
2899 }
2900
2901 use_bool = false;
2902 BREAK_FROM_IMM_USE_STMT (iter);
2903 }
2904
2905 tree new_lhs = make_ssa_name (TREE_TYPE (lhs));
2906 tree flag = build_int_cst (TREE_TYPE (lhs), use_bool);
2907 if (has_model_arg)
2908 g = gimple_build_call_internal (fn, 4, gimple_call_arg (call, 0),
2909 bit, flag, gimple_call_arg (call, 2));
2910 else
2911 g = gimple_build_call_internal (fn, 3, gimple_call_arg (call, 0),
2912 bit, flag);
2913 gimple_call_set_lhs (g, new_lhs);
2914 gimple_set_location (g, gimple_location (call));
2915 gimple_set_vuse (g, gimple_vuse (call));
2916 gimple_set_vdef (g, gimple_vdef (call));
2917 bool throws = stmt_can_throw_internal (call);
2918 gimple_call_set_nothrow (as_a <gcall *> (g),
2919 gimple_call_nothrow_p (as_a <gcall *> (call)));
2920 SSA_NAME_DEF_STMT (gimple_vdef (call)) = g;
2921 gimple_stmt_iterator gsi = *gsip;
2922 gsi_insert_after (&gsi, g, GSI_NEW_STMT);
2923 edge e = NULL;
2924 if (throws)
2925 {
2926 maybe_clean_or_replace_eh_stmt (call, g);
2927 if (after || (use_bool && has_debug_uses))
2928 e = find_fallthru_edge (gsi_bb (gsi)->succs);
2929 }
2930 if (after)
2931 {
2932 /* The internal function returns the value of the specified bit
2933 before the atomic operation. If we are interested in the value
2934 of the specified bit after the atomic operation (which only makes
2935 sense for xor, otherwise the bit content is known at compile time),
2936 we need to invert the bit. */
2937 g = gimple_build_assign (make_ssa_name (TREE_TYPE (lhs)),
2938 BIT_XOR_EXPR, new_lhs,
2939 use_bool ? build_int_cst (TREE_TYPE (lhs), 1)
2940 : mask);
2941 new_lhs = gimple_assign_lhs (g);
2942 if (throws)
2943 {
2944 gsi_insert_on_edge_immediate (e, g);
2945 gsi = gsi_for_stmt (g);
2946 }
2947 else
2948 gsi_insert_after (&gsi, g, GSI_NEW_STMT);
2949 }
2950 if (use_bool && has_debug_uses)
2951 {
2952 tree temp = NULL_TREE;
2953 if (!throws || after || single_pred_p (e->dest))
2954 {
2955 temp = make_node (DEBUG_EXPR_DECL);
2956 DECL_ARTIFICIAL (temp) = 1;
2957 TREE_TYPE (temp) = TREE_TYPE (lhs);
2958 SET_DECL_MODE (temp, TYPE_MODE (TREE_TYPE (lhs)));
2959 tree t = build2 (LSHIFT_EXPR, TREE_TYPE (lhs), new_lhs, bit);
2960 g = gimple_build_debug_bind (temp, t, g);
2961 if (throws && !after)
2962 {
2963 gsi = gsi_after_labels (e->dest);
2964 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
2965 }
2966 else
2967 gsi_insert_after (&gsi, g, GSI_NEW_STMT);
2968 }
2969 FOR_EACH_IMM_USE_STMT (g, iter, use_lhs)
2970 if (is_gimple_debug (g))
2971 {
2972 use_operand_p use_p;
2973 if (temp == NULL_TREE)
2974 gimple_debug_bind_reset_value (g);
2975 else
2976 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2977 SET_USE (use_p, temp);
2978 update_stmt (g);
2979 }
2980 }
2981 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_lhs)
2982 = SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use_lhs);
2983 replace_uses_by (use_lhs, new_lhs);
2984 gsi = gsi_for_stmt (use_stmt);
2985 gsi_remove (&gsi, true);
2986 release_defs (use_stmt);
2987 gsi_remove (gsip, true);
2988 release_ssa_name (lhs);
2989 }
2990
2991 /* Optimize
2992 a = {};
2993 b = a;
2994 into
2995 a = {};
2996 b = {};
2997 Similarly for memset (&a, ..., sizeof (a)); instead of a = {};
2998 and/or memcpy (&b, &a, sizeof (a)); instead of b = a; */
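/* At the source level this covers cases like (illustrative):

     struct S a, b;
     memset (&a, 0, sizeof (a));
     b = a;			// becomes b = {};

   provided the memset covers at least the bytes read by the copy.  */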
2999
3000 static void
3001 optimize_memcpy (gimple_stmt_iterator *gsip, tree dest, tree src, tree len)
3002 {
3003 gimple *stmt = gsi_stmt (*gsip);
3004 if (gimple_has_volatile_ops (stmt))
3005 return;
3006
3007 tree vuse = gimple_vuse (stmt);
3008 if (vuse == NULL)
3009 return;
3010
3011 gimple *defstmt = SSA_NAME_DEF_STMT (vuse);
3012 tree src2 = NULL_TREE, len2 = NULL_TREE;
3013 HOST_WIDE_INT offset, offset2;
3014 tree val = integer_zero_node;
3015 if (gimple_store_p (defstmt)
3016 && gimple_assign_single_p (defstmt)
3017 && TREE_CODE (gimple_assign_rhs1 (defstmt)) == CONSTRUCTOR
3018 && !gimple_clobber_p (defstmt))
3019 src2 = gimple_assign_lhs (defstmt);
3020 else if (gimple_call_builtin_p (defstmt, BUILT_IN_MEMSET)
3021 && TREE_CODE (gimple_call_arg (defstmt, 0)) == ADDR_EXPR
3022 && TREE_CODE (gimple_call_arg (defstmt, 1)) == INTEGER_CST)
3023 {
3024 src2 = TREE_OPERAND (gimple_call_arg (defstmt, 0), 0);
3025 len2 = gimple_call_arg (defstmt, 2);
3026 val = gimple_call_arg (defstmt, 1);
3027 /* For non-0 val, we'd have to transform stmt from assignment
3028 into memset (only if dest is addressable). */
3029 if (!integer_zerop (val) && is_gimple_assign (stmt))
3030 src2 = NULL_TREE;
3031 }
3032
3033 if (src2 == NULL_TREE)
3034 return;
3035
3036 if (len == NULL_TREE)
3037 len = (TREE_CODE (src) == COMPONENT_REF
3038 ? DECL_SIZE_UNIT (TREE_OPERAND (src, 1))
3039 : TYPE_SIZE_UNIT (TREE_TYPE (src)));
3040 if (len2 == NULL_TREE)
3041 len2 = (TREE_CODE (src2) == COMPONENT_REF
3042 ? DECL_SIZE_UNIT (TREE_OPERAND (src2, 1))
3043 : TYPE_SIZE_UNIT (TREE_TYPE (src2)));
3044 if (len == NULL_TREE
3045 || TREE_CODE (len) != INTEGER_CST
3046 || len2 == NULL_TREE
3047 || TREE_CODE (len2) != INTEGER_CST)
3048 return;
3049
3050 src = get_addr_base_and_unit_offset (src, &offset);
3051 src2 = get_addr_base_and_unit_offset (src2, &offset2);
3052 if (src == NULL_TREE
3053 || src2 == NULL_TREE
3054 || offset < offset2)
3055 return;
3056
3057 if (!operand_equal_p (src, src2, 0))
3058 return;
3059
3060 /* [ src + offset2, src + offset2 + len2 - 1 ] is set to val.
3061 Make sure that
3062 [ src + offset, src + offset + len - 1 ] is a subset of that. */
3063 if (wi::to_offset (len) + (offset - offset2) > wi::to_offset (len2))
3064 return;
3065
3066 if (dump_file && (dump_flags & TDF_DETAILS))
3067 {
3068 fprintf (dump_file, "Simplified\n ");
3069 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
3070 fprintf (dump_file, "after previous\n ");
3071 print_gimple_stmt (dump_file, defstmt, 0, dump_flags);
3072 }
3073
3074 /* For simplicity, don't change the kind of the stmt:
3075 turn dest = src; into dest = {}; and memcpy (&dest, &src, len);
3076 into memset (&dest, val, len).
3077 In theory we could change dest = src into memset if dest
3078 is addressable (maybe beneficial if val is not 0), or
3079 memcpy (&dest, &src, len) into dest = {} if len is the size
3080 of dest and dest isn't volatile. */
3081 if (is_gimple_assign (stmt))
3082 {
3083 tree ctor = build_constructor (TREE_TYPE (dest), NULL);
3084 gimple_assign_set_rhs_from_tree (gsip, ctor);
3085 update_stmt (stmt);
3086 }
3087 else /* If stmt is memcpy, transform it into memset. */
3088 {
3089 gcall *call = as_a <gcall *> (stmt);
3090 tree fndecl = builtin_decl_implicit (BUILT_IN_MEMSET);
3091 gimple_call_set_fndecl (call, fndecl);
3092 gimple_call_set_fntype (call, TREE_TYPE (fndecl));
3093 gimple_call_set_arg (call, 1, val);
3094 update_stmt (stmt);
3095 }
3096
3097 if (dump_file && (dump_flags & TDF_DETAILS))
3098 {
3099 fprintf (dump_file, "into\n ");
3100 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
3101 }
3102 }
3103
3104 /* A simple pass that attempts to fold all builtin functions. This pass
3105 is run after we've propagated as many constants as we can. */
3106
3107 namespace {
3108
3109 const pass_data pass_data_fold_builtins =
3110 {
3111 GIMPLE_PASS, /* type */
3112 "fab", /* name */
3113 OPTGROUP_NONE, /* optinfo_flags */
3114 TV_NONE, /* tv_id */
3115 ( PROP_cfg | PROP_ssa ), /* properties_required */
3116 0, /* properties_provided */
3117 0, /* properties_destroyed */
3118 0, /* todo_flags_start */
3119 TODO_update_ssa, /* todo_flags_finish */
3120 };
3121
3122 class pass_fold_builtins : public gimple_opt_pass
3123 {
3124 public:
3125 pass_fold_builtins (gcc::context *ctxt)
3126 : gimple_opt_pass (pass_data_fold_builtins, ctxt)
3127 {}
3128
3129 /* opt_pass methods: */
3130 opt_pass * clone () { return new pass_fold_builtins (m_ctxt); }
3131 virtual unsigned int execute (function *);
3132
3133 }; // class pass_fold_builtins
3134
3135 unsigned int
3136 pass_fold_builtins::execute (function *fun)
3137 {
3138 bool cfg_changed = false;
3139 basic_block bb;
3140 unsigned int todoflags = 0;
3141
3142 FOR_EACH_BB_FN (bb, fun)
3143 {
3144 gimple_stmt_iterator i;
3145 for (i = gsi_start_bb (bb); !gsi_end_p (i); )
3146 {
3147 gimple *stmt, *old_stmt;
3148 tree callee;
3149 enum built_in_function fcode;
3150
3151 stmt = gsi_stmt (i);
3152
3153 if (gimple_code (stmt) != GIMPLE_CALL)
3154 {
3155 /* Remove all *ssaname_N ={v} {CLOBBER}; stmts;
3156 after the last GIMPLE DSE they aren't needed and might
3157 unnecessarily keep the SSA_NAMEs live. */
3158 if (gimple_clobber_p (stmt))
3159 {
3160 tree lhs = gimple_assign_lhs (stmt);
3161 if (TREE_CODE (lhs) == MEM_REF
3162 && TREE_CODE (TREE_OPERAND (lhs, 0)) == SSA_NAME)
3163 {
3164 unlink_stmt_vdef (stmt);
3165 gsi_remove (&i, true);
3166 release_defs (stmt);
3167 continue;
3168 }
3169 }
3170 else if (gimple_assign_load_p (stmt) && gimple_store_p (stmt))
3171 optimize_memcpy (&i, gimple_assign_lhs (stmt),
3172 gimple_assign_rhs1 (stmt), NULL_TREE);
3173 gsi_next (&i);
3174 continue;
3175 }
3176
3177 callee = gimple_call_fndecl (stmt);
3178 if (!callee || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL)
3179 {
3180 gsi_next (&i);
3181 continue;
3182 }
3183
3184 fcode = DECL_FUNCTION_CODE (callee);
3185 if (fold_stmt (&i))
3186 ;
3187 else
3188 {
3189 tree result = NULL_TREE;
3190 switch (DECL_FUNCTION_CODE (callee))
3191 {
3192 case BUILT_IN_CONSTANT_P:
3193 /* Resolve __builtin_constant_p. If it hasn't been
3194 folded to integer_one_node by now, it's fairly
3195 certain that the value simply isn't constant. */
3196 result = integer_zero_node;
3197 break;
3198
3199 case BUILT_IN_ASSUME_ALIGNED:
3200 /* Remove __builtin_assume_aligned. */
3201 result = gimple_call_arg (stmt, 0);
3202 break;
3203
3204 case BUILT_IN_STACK_RESTORE:
3205 result = optimize_stack_restore (i);
3206 if (result)
3207 break;
3208 gsi_next (&i);
3209 continue;
3210
3211 case BUILT_IN_UNREACHABLE:
3212 if (optimize_unreachable (i))
3213 cfg_changed = true;
3214 break;
3215
3216 case BUILT_IN_ATOMIC_FETCH_OR_1:
3217 case BUILT_IN_ATOMIC_FETCH_OR_2:
3218 case BUILT_IN_ATOMIC_FETCH_OR_4:
3219 case BUILT_IN_ATOMIC_FETCH_OR_8:
3220 case BUILT_IN_ATOMIC_FETCH_OR_16:
3221 optimize_atomic_bit_test_and (&i,
3222 IFN_ATOMIC_BIT_TEST_AND_SET,
3223 true, false);
3224 break;
3225 case BUILT_IN_SYNC_FETCH_AND_OR_1:
3226 case BUILT_IN_SYNC_FETCH_AND_OR_2:
3227 case BUILT_IN_SYNC_FETCH_AND_OR_4:
3228 case BUILT_IN_SYNC_FETCH_AND_OR_8:
3229 case BUILT_IN_SYNC_FETCH_AND_OR_16:
3230 optimize_atomic_bit_test_and (&i,
3231 IFN_ATOMIC_BIT_TEST_AND_SET,
3232 false, false);
3233 break;
3234
3235 case BUILT_IN_ATOMIC_FETCH_XOR_1:
3236 case BUILT_IN_ATOMIC_FETCH_XOR_2:
3237 case BUILT_IN_ATOMIC_FETCH_XOR_4:
3238 case BUILT_IN_ATOMIC_FETCH_XOR_8:
3239 case BUILT_IN_ATOMIC_FETCH_XOR_16:
3240 optimize_atomic_bit_test_and
3241 (&i, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, true, false);
3242 break;
3243 case BUILT_IN_SYNC_FETCH_AND_XOR_1:
3244 case BUILT_IN_SYNC_FETCH_AND_XOR_2:
3245 case BUILT_IN_SYNC_FETCH_AND_XOR_4:
3246 case BUILT_IN_SYNC_FETCH_AND_XOR_8:
3247 case BUILT_IN_SYNC_FETCH_AND_XOR_16:
3248 optimize_atomic_bit_test_and
3249 (&i, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, false, false);
3250 break;
3251
3252 case BUILT_IN_ATOMIC_XOR_FETCH_1:
3253 case BUILT_IN_ATOMIC_XOR_FETCH_2:
3254 case BUILT_IN_ATOMIC_XOR_FETCH_4:
3255 case BUILT_IN_ATOMIC_XOR_FETCH_8:
3256 case BUILT_IN_ATOMIC_XOR_FETCH_16:
3257 optimize_atomic_bit_test_and
3258 (&i, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, true, true);
3259 break;
3260 case BUILT_IN_SYNC_XOR_AND_FETCH_1:
3261 case BUILT_IN_SYNC_XOR_AND_FETCH_2:
3262 case BUILT_IN_SYNC_XOR_AND_FETCH_4:
3263 case BUILT_IN_SYNC_XOR_AND_FETCH_8:
3264 case BUILT_IN_SYNC_XOR_AND_FETCH_16:
3265 optimize_atomic_bit_test_and
3266 (&i, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, false, true);
3267 break;
3268
3269 case BUILT_IN_ATOMIC_FETCH_AND_1:
3270 case BUILT_IN_ATOMIC_FETCH_AND_2:
3271 case BUILT_IN_ATOMIC_FETCH_AND_4:
3272 case BUILT_IN_ATOMIC_FETCH_AND_8:
3273 case BUILT_IN_ATOMIC_FETCH_AND_16:
3274 optimize_atomic_bit_test_and (&i,
3275 IFN_ATOMIC_BIT_TEST_AND_RESET,
3276 true, false);
3277 break;
3278 case BUILT_IN_SYNC_FETCH_AND_AND_1:
3279 case BUILT_IN_SYNC_FETCH_AND_AND_2:
3280 case BUILT_IN_SYNC_FETCH_AND_AND_4:
3281 case BUILT_IN_SYNC_FETCH_AND_AND_8:
3282 case BUILT_IN_SYNC_FETCH_AND_AND_16:
3283 optimize_atomic_bit_test_and (&i,
3284 IFN_ATOMIC_BIT_TEST_AND_RESET,
3285 false, false);
3286 break;
3287
3288 case BUILT_IN_MEMCPY:
3289 if (gimple_call_builtin_p (stmt, BUILT_IN_NORMAL)
3290 && TREE_CODE (gimple_call_arg (stmt, 0)) == ADDR_EXPR
3291 && TREE_CODE (gimple_call_arg (stmt, 1)) == ADDR_EXPR
3292 && TREE_CODE (gimple_call_arg (stmt, 2)) == INTEGER_CST)
3293 {
3294 tree dest = TREE_OPERAND (gimple_call_arg (stmt, 0), 0);
3295 tree src = TREE_OPERAND (gimple_call_arg (stmt, 1), 0);
3296 tree len = gimple_call_arg (stmt, 2);
3297 optimize_memcpy (&i, dest, src, len);
3298 }
3299 break;
3300
3301 case BUILT_IN_VA_START:
3302 case BUILT_IN_VA_END:
3303 case BUILT_IN_VA_COPY:
3304 /* These shouldn't be folded before pass_stdarg. */
3305 result = optimize_stdarg_builtin (stmt);
3306 break;
3307
3308 default:;
3309 }
3310
3311 if (!result)
3312 {
3313 gsi_next (&i);
3314 continue;
3315 }
3316
3317 if (!update_call_from_tree (&i, result))
3318 gimplify_and_update_call_from_tree (&i, result);
3319 }
3320
3321 todoflags |= TODO_update_address_taken;
3322
3323 if (dump_file && (dump_flags & TDF_DETAILS))
3324 {
3325 fprintf (dump_file, "Simplified\n ");
3326 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
3327 }
3328
3329 old_stmt = stmt;
3330 stmt = gsi_stmt (i);
3331 update_stmt (stmt);
3332
3333 if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt)
3334 && gimple_purge_dead_eh_edges (bb))
3335 cfg_changed = true;
3336
3337 if (dump_file && (dump_flags & TDF_DETAILS))
3338 {
3339 fprintf (dump_file, "to\n ");
3340 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
3341 fprintf (dump_file, "\n");
3342 }
3343
3344 /* Retry the same statement if it changed into another
3345 builtin; there might be new opportunities now. */
3346 if (gimple_code (stmt) != GIMPLE_CALL)
3347 {
3348 gsi_next (&i);
3349 continue;
3350 }
3351 callee = gimple_call_fndecl (stmt);
3352 if (!callee
3353 || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL
3354 || DECL_FUNCTION_CODE (callee) == fcode)
3355 gsi_next (&i);
3356 }
3357 }
3358
3359 /* Delete unreachable blocks. */
3360 if (cfg_changed)
3361 todoflags |= TODO_cleanup_cfg;
3362
3363 return todoflags;
3364 }
3365
3366 } // anon namespace
3367
3368 gimple_opt_pass *
3369 make_pass_fold_builtins (gcc::context *ctxt)
3370 {
3371 return new pass_fold_builtins (ctxt);
3372 }
3373
3374 /* A simple pass that emits some warnings post IPA. */
3375
3376 namespace {
3377
3378 const pass_data pass_data_post_ipa_warn =
3379 {
3380 GIMPLE_PASS, /* type */
3381 "post_ipa_warn", /* name */
3382 OPTGROUP_NONE, /* optinfo_flags */
3383 TV_NONE, /* tv_id */
3384 ( PROP_cfg | PROP_ssa ), /* properties_required */
3385 0, /* properties_provided */
3386 0, /* properties_destroyed */
3387 0, /* todo_flags_start */
3388 0, /* todo_flags_finish */
3389 };
3390
3391 class pass_post_ipa_warn : public gimple_opt_pass
3392 {
3393 public:
3394 pass_post_ipa_warn (gcc::context *ctxt)
3395 : gimple_opt_pass (pass_data_post_ipa_warn, ctxt)
3396 {}
3397
3398 /* opt_pass methods: */
3399 opt_pass * clone () { return new pass_post_ipa_warn (m_ctxt); }
3400 virtual bool gate (function *) { return warn_nonnull != 0; }
3401 virtual unsigned int execute (function *);
3402
3403 }; // class pass_post_ipa_warn
3404
3405 unsigned int
3406 pass_post_ipa_warn::execute (function *fun)
3407 {
3408 basic_block bb;
3409
3410 FOR_EACH_BB_FN (bb, fun)
3411 {
3412 gimple_stmt_iterator gsi;
3413 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3414 {
3415 gimple *stmt = gsi_stmt (gsi);
3416 if (!is_gimple_call (stmt) || gimple_no_warning_p (stmt))
3417 continue;
3418
3419 if (warn_nonnull)
3420 {
3421 bitmap nonnullargs
3422 = get_nonnull_args (gimple_call_fntype (stmt));
3423 if (nonnullargs)
3424 {
3425 for (unsigned i = 0; i < gimple_call_num_args (stmt); i++)
3426 {
3427 tree arg = gimple_call_arg (stmt, i);
3428 if (TREE_CODE (TREE_TYPE (arg)) != POINTER_TYPE)
3429 continue;
3430 if (!integer_zerop (arg))
3431 continue;
3432 if (!bitmap_empty_p (nonnullargs)
3433 && !bitmap_bit_p (nonnullargs, i))
3434 continue;
3435
3436 location_t loc = gimple_location (stmt);
3437 if (warning_at (loc, OPT_Wnonnull,
3438 "argument %u null where non-null "
3439 "expected", i + 1))
3440 {
3441 tree fndecl = gimple_call_fndecl (stmt);
3442 if (fndecl && DECL_IS_BUILTIN (fndecl))
3443 inform (loc, "in a call to built-in function %qD",
3444 fndecl);
3445 else if (fndecl)
3446 inform (DECL_SOURCE_LOCATION (fndecl),
3447 "in a call to function %qD declared here",
3448 fndecl);
3449
3450 }
3451 }
3452 BITMAP_FREE (nonnullargs);
3453 }
3454 }
3455 }
3456 }
3457 return 0;
3458 }
3459
3460 } // anon namespace
3461
3462 gimple_opt_pass *
3463 make_pass_post_ipa_warn (gcc::context *ctxt)
3464 {
3465 return new pass_post_ipa_warn (ctxt);
3466 }