Make get_value_for_expr check for INTEGER_CSTs
[gcc.git] / gcc / tree-ssa-ccp.c
1 /* Conditional constant propagation pass for the GNU compiler.
2 Copyright (C) 2000-2019 Free Software Foundation, Inc.
3 Adapted from original RTL SSA-CCP by Daniel Berlin <dberlin@dberlin.org>
4 Adapted to GIMPLE trees by Diego Novillo <dnovillo@redhat.com>
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it
9 under the terms of the GNU General Public License as published by the
10 Free Software Foundation; either version 3, or (at your option) any
11 later version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT
14 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 /* Conditional constant propagation (CCP) is based on the SSA
23 propagation engine (tree-ssa-propagate.c). Constant assignments of
24 the form VAR = CST are propagated from the assignments into uses of
25 VAR, which in turn may generate new constants. The simulation uses
26 a four level lattice to keep track of constant values associated
27 with SSA names. Given an SSA name V_i, it may take one of the
28 following values:
29
30 UNINITIALIZED -> the initial state of the value. This value
31 is replaced with a correct initial value
32 the first time the value is used, so the
33 rest of the pass does not need to care about
34 it. Using this value simplifies initialization
35 of the pass, and prevents us from needlessly
36 scanning statements that are never reached.
37
38 UNDEFINED -> V_i is a local variable whose definition
39 has not been processed yet. Therefore we
40 don't yet know if its value is a constant
41 or not.
42
43 CONSTANT -> V_i has been found to hold a constant
44 value C.
45
46 VARYING -> V_i cannot take a constant value, or if it
47 does, it is not possible to determine it
48 at compile time.
49
50 The core of SSA-CCP is in ccp_visit_stmt and ccp_visit_phi_node:
51
52 1- In ccp_visit_stmt, we are interested in assignments whose RHS
53 evaluates into a constant and conditional jumps whose predicate
54 evaluates into a boolean true or false. When an assignment of
55 the form V_i = CONST is found, V_i's lattice value is set to
56 CONSTANT and CONST is associated with it. This causes the
    57      propagation engine to add all the SSA edges coming out of the
58 assignment into the worklists, so that statements that use V_i
59 can be visited.
60
61 If the statement is a conditional with a constant predicate, we
62 mark the outgoing edges as executable or not executable
63 depending on the predicate's value. This is then used when
64 visiting PHI nodes to know when a PHI argument can be ignored.
65
66
67 2- In ccp_visit_phi_node, if all the PHI arguments evaluate to the
68 same constant C, then the LHS of the PHI is set to C. This
69 evaluation is known as the "meet operation". Since one of the
70 goals of this evaluation is to optimistically return constant
71 values as often as possible, it uses two main short cuts:
72
73 - If an argument is flowing in through a non-executable edge, it
74 is ignored. This is useful in cases like this:
75
76 if (PRED)
77 a_9 = 3;
78 else
79 a_10 = 100;
80 a_11 = PHI (a_9, a_10)
81
82 If PRED is known to always evaluate to false, then we can
83 assume that a_11 will always take its value from a_10, meaning
    84      that instead of considering it VARYING (a_9 and a_10 have
85 different values), we can consider it CONSTANT 100.
86
87 - If an argument has an UNDEFINED value, then it does not affect
88 the outcome of the meet operation. If a variable V_i has an
89 UNDEFINED value, it means that either its defining statement
90 hasn't been visited yet or V_i has no defining statement, in
91 which case the original symbol 'V' is being used
92 uninitialized. Since 'V' is a local variable, the compiler
93 may assume any initial value for it.
94
95
96 After propagation, every variable V_i that ends up with a lattice
97 value of CONSTANT will have the associated constant value in the
98 array CONST_VAL[i].VALUE. That is fed into substitute_and_fold for
99 final substitution and folding.
100
101 This algorithm uses wide-ints at the max precision of the target.
102 This means that, with one uninteresting exception, variables with
103 UNSIGNED types never go to VARYING because the bits above the
104 precision of the type of the variable are always zero. The
105 uninteresting case is a variable of UNSIGNED type that has the
106 maximum precision of the target. Such variables can go to VARYING,
   107      but this causes no loss of information since these variables will
108 never be extended.
109
110 References:
111
112 Constant propagation with conditional branches,
113 Wegman and Zadeck, ACM TOPLAS 13(2):181-210.
114
115 Building an Optimizing Compiler,
116 Robert Morgan, Butterworth-Heinemann, 1998, Section 8.9.
117
118 Advanced Compiler Design and Implementation,
119 Steven Muchnick, Morgan Kaufmann, 1997, Section 12.6 */
120
121 #include "config.h"
122 #include "system.h"
123 #include "coretypes.h"
124 #include "backend.h"
125 #include "target.h"
126 #include "tree.h"
127 #include "gimple.h"
128 #include "tree-pass.h"
129 #include "ssa.h"
130 #include "gimple-pretty-print.h"
131 #include "fold-const.h"
132 #include "gimple-fold.h"
133 #include "tree-eh.h"
134 #include "gimplify.h"
135 #include "gimple-iterator.h"
136 #include "tree-cfg.h"
137 #include "tree-ssa-propagate.h"
138 #include "dbgcnt.h"
139 #include "params.h"
140 #include "builtins.h"
141 #include "cfgloop.h"
142 #include "stor-layout.h"
143 #include "optabs-query.h"
144 #include "tree-ssa-ccp.h"
145 #include "tree-dfa.h"
146 #include "diagnostic-core.h"
147 #include "stringpool.h"
148 #include "attribs.h"
149 #include "tree-vector-builder.h"
150
151 /* Possible lattice values. */
152 typedef enum
153 {
154 UNINITIALIZED,
155 UNDEFINED,
156 CONSTANT,
157 VARYING
158 } ccp_lattice_t;
159
160 class ccp_prop_value_t {
161 public:
162 /* Lattice value. */
163 ccp_lattice_t lattice_val;
164
165 /* Propagated value. */
166 tree value;
167
168 /* Mask that applies to the propagated value during CCP. For X
169 with a CONSTANT lattice value X & ~mask == value & ~mask. The
170 zero bits in the mask cover constant values. The ones mean no
171 information. */
172 widest_int mask;
173 };
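/* As an illustration of the value/mask encoding above: a CONSTANT entry
   with value == 0x10 and mask == 0x3 says that every bit of X except the
   two least significant ones is known, so X must be one of 0x10, 0x11,
   0x12 or 0x13.  A mask of zero means the value is fully known; a mask
   of all ones carries no information at all.  */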
174
175 class ccp_propagate : public ssa_propagation_engine
176 {
177 public:
178 enum ssa_prop_result visit_stmt (gimple *, edge *, tree *) FINAL OVERRIDE;
179 enum ssa_prop_result visit_phi (gphi *) FINAL OVERRIDE;
180 };
181
182 /* Array of propagated constant values. After propagation,
183 CONST_VAL[I].VALUE holds the constant value for SSA_NAME(I). If
184 the constant is held in an SSA name representing a memory store
185 (i.e., a VDEF), CONST_VAL[I].MEM_REF will contain the actual
186 memory reference used to store (i.e., the LHS of the assignment
187 doing the store). */
188 static ccp_prop_value_t *const_val;
189 static unsigned n_const_val;
190
191 static void canonicalize_value (ccp_prop_value_t *);
192 static void ccp_lattice_meet (ccp_prop_value_t *, ccp_prop_value_t *);
193
194 /* Dump constant propagation value VAL to file OUTF prefixed by PREFIX. */
195
196 static void
197 dump_lattice_value (FILE *outf, const char *prefix, ccp_prop_value_t val)
198 {
199 switch (val.lattice_val)
200 {
201 case UNINITIALIZED:
202 fprintf (outf, "%sUNINITIALIZED", prefix);
203 break;
204 case UNDEFINED:
205 fprintf (outf, "%sUNDEFINED", prefix);
206 break;
207 case VARYING:
208 fprintf (outf, "%sVARYING", prefix);
209 break;
210 case CONSTANT:
211 if (TREE_CODE (val.value) != INTEGER_CST
212 || val.mask == 0)
213 {
214 fprintf (outf, "%sCONSTANT ", prefix);
215 print_generic_expr (outf, val.value, dump_flags);
216 }
217 else
218 {
219 widest_int cval = wi::bit_and_not (wi::to_widest (val.value),
220 val.mask);
221 fprintf (outf, "%sCONSTANT ", prefix);
222 print_hex (cval, outf);
223 fprintf (outf, " (");
224 print_hex (val.mask, outf);
225 fprintf (outf, ")");
226 }
227 break;
228 default:
229 gcc_unreachable ();
230 }
231 }
232
233
234 /* Print lattice value VAL to stderr. */
235
236 void debug_lattice_value (ccp_prop_value_t val);
237
238 DEBUG_FUNCTION void
239 debug_lattice_value (ccp_prop_value_t val)
240 {
241 dump_lattice_value (stderr, "", val);
242 fprintf (stderr, "\n");
243 }
244
245 /* Extend NONZERO_BITS to a full mask, based on sgn. */
246
247 static widest_int
248 extend_mask (const wide_int &nonzero_bits, signop sgn)
249 {
250 return widest_int::from (nonzero_bits, sgn);
251 }
252
253 /* Compute a default value for variable VAR and store it in the
254 CONST_VAL array. The following rules are used to get default
255 values:
256
257 1- Global and static variables that are declared constant are
258 considered CONSTANT.
259
260 2- Any other value is considered UNDEFINED. This is useful when
261 considering PHI nodes. PHI arguments that are undefined do not
262 change the constant value of the PHI node, which allows for more
263 constants to be propagated.
264
265 3- Variables defined by statements other than assignments and PHI
266 nodes are considered VARYING.
267
268 4- Initial values of variables that are not GIMPLE registers are
269 considered VARYING. */
270
271 static ccp_prop_value_t
272 get_default_value (tree var)
273 {
274 ccp_prop_value_t val = { UNINITIALIZED, NULL_TREE, 0 };
275 gimple *stmt;
276
277 stmt = SSA_NAME_DEF_STMT (var);
278
279 if (gimple_nop_p (stmt))
280 {
281 /* Variables defined by an empty statement are those used
282 before being initialized. If VAR is a local variable, we
283 can assume initially that it is UNDEFINED, otherwise we must
284 consider it VARYING. */
285 if (!virtual_operand_p (var)
286 && SSA_NAME_VAR (var)
287 && TREE_CODE (SSA_NAME_VAR (var)) == VAR_DECL)
288 val.lattice_val = UNDEFINED;
289 else
290 {
291 val.lattice_val = VARYING;
292 val.mask = -1;
293 if (flag_tree_bit_ccp)
294 {
295 wide_int nonzero_bits = get_nonzero_bits (var);
296 if (nonzero_bits != -1)
297 {
298 val.lattice_val = CONSTANT;
299 val.value = build_zero_cst (TREE_TYPE (var));
300 val.mask = extend_mask (nonzero_bits, TYPE_SIGN (TREE_TYPE (var)));
301 }
302 }
303 }
304 }
305 else if (is_gimple_assign (stmt))
306 {
307 tree cst;
308 if (gimple_assign_single_p (stmt)
309 && DECL_P (gimple_assign_rhs1 (stmt))
310 && (cst = get_symbol_constant_value (gimple_assign_rhs1 (stmt))))
311 {
312 val.lattice_val = CONSTANT;
313 val.value = cst;
314 }
315 else
316 {
317 /* Any other variable defined by an assignment is considered
318 UNDEFINED. */
319 val.lattice_val = UNDEFINED;
320 }
321 }
322 else if ((is_gimple_call (stmt)
323 && gimple_call_lhs (stmt) != NULL_TREE)
324 || gimple_code (stmt) == GIMPLE_PHI)
325 {
326 /* A variable defined by a call or a PHI node is considered
327 UNDEFINED. */
328 val.lattice_val = UNDEFINED;
329 }
330 else
331 {
332 /* Otherwise, VAR will never take on a constant value. */
333 val.lattice_val = VARYING;
334 val.mask = -1;
335 }
336
337 return val;
338 }
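/* For instance, an SSA name defined by a load from "static const int c = 42;"
   starts out as CONSTANT 42 (rule 1 above), while the default definition of
   an uninitialized local variable starts out as UNDEFINED (rule 2), which
   lets PHI nodes merging it stay optimistic.  */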
339
340
341 /* Get the constant value associated with variable VAR. */
342
343 static inline ccp_prop_value_t *
344 get_value (tree var)
345 {
346 ccp_prop_value_t *val;
347
348 if (const_val == NULL
349 || SSA_NAME_VERSION (var) >= n_const_val)
350 return NULL;
351
352 val = &const_val[SSA_NAME_VERSION (var)];
353 if (val->lattice_val == UNINITIALIZED)
354 *val = get_default_value (var);
355
356 canonicalize_value (val);
357
358 return val;
359 }
360
361 /* Return the constant tree value associated with VAR. */
362
363 static inline tree
364 get_constant_value (tree var)
365 {
366 ccp_prop_value_t *val;
367 if (TREE_CODE (var) != SSA_NAME)
368 {
369 if (is_gimple_min_invariant (var))
370 return var;
371 return NULL_TREE;
372 }
373 val = get_value (var);
374 if (val
375 && val->lattice_val == CONSTANT
376 && (TREE_CODE (val->value) != INTEGER_CST
377 || val->mask == 0))
378 return val->value;
379 return NULL_TREE;
380 }
381
382 /* Sets the value associated with VAR to VARYING. */
383
384 static inline void
385 set_value_varying (tree var)
386 {
387 ccp_prop_value_t *val = &const_val[SSA_NAME_VERSION (var)];
388
389 val->lattice_val = VARYING;
390 val->value = NULL_TREE;
391 val->mask = -1;
392 }
393
394 /* For integer constants, make sure to drop TREE_OVERFLOW. */
395
396 static void
397 canonicalize_value (ccp_prop_value_t *val)
398 {
399 if (val->lattice_val != CONSTANT)
400 return;
401
402 if (TREE_OVERFLOW_P (val->value))
403 val->value = drop_tree_overflow (val->value);
404 }
405
406 /* Return whether the lattice transition is valid. */
407
408 static bool
409 valid_lattice_transition (ccp_prop_value_t old_val, ccp_prop_value_t new_val)
410 {
411 /* Lattice transitions must always be monotonically increasing in
412 value. */
413 if (old_val.lattice_val < new_val.lattice_val)
414 return true;
415
416 if (old_val.lattice_val != new_val.lattice_val)
417 return false;
418
419 if (!old_val.value && !new_val.value)
420 return true;
421
422 /* Now both lattice values are CONSTANT. */
423
424 /* Allow arbitrary copy changes as we might look through PHI <a_1, ...>
425 when only a single copy edge is executable. */
426 if (TREE_CODE (old_val.value) == SSA_NAME
427 && TREE_CODE (new_val.value) == SSA_NAME)
428 return true;
429
430 /* Allow transitioning from a constant to a copy. */
431 if (is_gimple_min_invariant (old_val.value)
432 && TREE_CODE (new_val.value) == SSA_NAME)
433 return true;
434
435 /* Allow transitioning from PHI <&x, not executable> == &x
436 to PHI <&x, &y> == common alignment. */
437 if (TREE_CODE (old_val.value) != INTEGER_CST
438 && TREE_CODE (new_val.value) == INTEGER_CST)
439 return true;
440
441 /* Bit-lattices have to agree in the still valid bits. */
442 if (TREE_CODE (old_val.value) == INTEGER_CST
443 && TREE_CODE (new_val.value) == INTEGER_CST)
444 return (wi::bit_and_not (wi::to_widest (old_val.value), new_val.mask)
445 == wi::bit_and_not (wi::to_widest (new_val.value), new_val.mask));
446
447 /* Otherwise constant values have to agree. */
448 if (operand_equal_p (old_val.value, new_val.value, 0))
449 return true;
450
451 /* At least the kinds and types should agree now. */
452 if (TREE_CODE (old_val.value) != TREE_CODE (new_val.value)
453 || !types_compatible_p (TREE_TYPE (old_val.value),
454 TREE_TYPE (new_val.value)))
455 return false;
456
457 /* For floats and !HONOR_NANS allow transitions from (partial) NaN
458 to non-NaN. */
459 tree type = TREE_TYPE (new_val.value);
460 if (SCALAR_FLOAT_TYPE_P (type)
461 && !HONOR_NANS (type))
462 {
463 if (REAL_VALUE_ISNAN (TREE_REAL_CST (old_val.value)))
464 return true;
465 }
466 else if (VECTOR_FLOAT_TYPE_P (type)
467 && !HONOR_NANS (type))
468 {
469 unsigned int count
470 = tree_vector_builder::binary_encoded_nelts (old_val.value,
471 new_val.value);
472 for (unsigned int i = 0; i < count; ++i)
473 if (!REAL_VALUE_ISNAN
474 (TREE_REAL_CST (VECTOR_CST_ENCODED_ELT (old_val.value, i)))
475 && !operand_equal_p (VECTOR_CST_ENCODED_ELT (old_val.value, i),
476 VECTOR_CST_ENCODED_ELT (new_val.value, i), 0))
477 return false;
478 return true;
479 }
480 else if (COMPLEX_FLOAT_TYPE_P (type)
481 && !HONOR_NANS (type))
482 {
483 if (!REAL_VALUE_ISNAN (TREE_REAL_CST (TREE_REALPART (old_val.value)))
484 && !operand_equal_p (TREE_REALPART (old_val.value),
485 TREE_REALPART (new_val.value), 0))
486 return false;
487 if (!REAL_VALUE_ISNAN (TREE_REAL_CST (TREE_IMAGPART (old_val.value)))
488 && !operand_equal_p (TREE_IMAGPART (old_val.value),
489 TREE_IMAGPART (new_val.value), 0))
490 return false;
491 return true;
492 }
493 return false;
494 }
495
496 /* Set the value for variable VAR to NEW_VAL. Return true if the new
497 value is different from VAR's previous value. */
498
499 static bool
500 set_lattice_value (tree var, ccp_prop_value_t *new_val)
501 {
502 /* We can deal with old UNINITIALIZED values just fine here. */
503 ccp_prop_value_t *old_val = &const_val[SSA_NAME_VERSION (var)];
504
505 canonicalize_value (new_val);
506
507 /* We have to be careful to not go up the bitwise lattice
508 represented by the mask. Instead of dropping to VARYING
509 use the meet operator to retain a conservative value.
510 Missed optimizations like PR65851 makes this necessary.
511 It also ensures we converge to a stable lattice solution. */
512 if (old_val->lattice_val != UNINITIALIZED)
513 ccp_lattice_meet (new_val, old_val);
514
515 gcc_checking_assert (valid_lattice_transition (*old_val, *new_val));
516
517 /* If *OLD_VAL and NEW_VAL are the same, return false to inform the
518 caller that this was a non-transition. */
519 if (old_val->lattice_val != new_val->lattice_val
520 || (new_val->lattice_val == CONSTANT
521 && (TREE_CODE (new_val->value) != TREE_CODE (old_val->value)
522 || (TREE_CODE (new_val->value) == INTEGER_CST
523 && (new_val->mask != old_val->mask
524 || (wi::bit_and_not (wi::to_widest (old_val->value),
525 new_val->mask)
526 != wi::bit_and_not (wi::to_widest (new_val->value),
527 new_val->mask))))
528 || (TREE_CODE (new_val->value) != INTEGER_CST
529 && !operand_equal_p (new_val->value, old_val->value, 0)))))
530 {
531 /* ??? We would like to delay creation of INTEGER_CSTs from
532 partially constants here. */
533
534 if (dump_file && (dump_flags & TDF_DETAILS))
535 {
536 dump_lattice_value (dump_file, "Lattice value changed to ", *new_val);
537 fprintf (dump_file, ". Adding SSA edges to worklist.\n");
538 }
539
540 *old_val = *new_val;
541
542 gcc_assert (new_val->lattice_val != UNINITIALIZED);
543 return true;
544 }
545
546 return false;
547 }
548
549 static ccp_prop_value_t get_value_for_expr (tree, bool);
550 static ccp_prop_value_t bit_value_binop (enum tree_code, tree, tree, tree);
551 void bit_value_binop (enum tree_code, signop, int, widest_int *, widest_int *,
552 signop, int, const widest_int &, const widest_int &,
553 signop, int, const widest_int &, const widest_int &);
554
555 /* Return a widest_int that can be used for bitwise simplifications
556 from VAL. */
557
558 static widest_int
559 value_to_wide_int (ccp_prop_value_t val)
560 {
561 if (val.value
562 && TREE_CODE (val.value) == INTEGER_CST)
563 return wi::to_widest (val.value);
564
565 return 0;
566 }
567
568 /* Return the value for the address expression EXPR based on alignment
569 information. */
570
571 static ccp_prop_value_t
572 get_value_from_alignment (tree expr)
573 {
574 tree type = TREE_TYPE (expr);
575 ccp_prop_value_t val;
576 unsigned HOST_WIDE_INT bitpos;
577 unsigned int align;
578
579 gcc_assert (TREE_CODE (expr) == ADDR_EXPR);
580
581 get_pointer_alignment_1 (expr, &align, &bitpos);
582 val.mask = wi::bit_and_not
583 (POINTER_TYPE_P (type) || TYPE_UNSIGNED (type)
584 ? wi::mask <widest_int> (TYPE_PRECISION (type), false)
585 : -1,
586 align / BITS_PER_UNIT - 1);
587 val.lattice_val
588 = wi::sext (val.mask, TYPE_PRECISION (type)) == -1 ? VARYING : CONSTANT;
589 if (val.lattice_val == CONSTANT)
590 val.value = build_int_cstu (type, bitpos / BITS_PER_UNIT);
591 else
592 val.value = NULL_TREE;
593
594 return val;
595 }
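/* For example, an ADDR_EXPR known to be 16-byte aligned with a byte
   misalignment of 4 (align == 128 bits, bitpos == 32 bits) yields
   value == 4 and a mask whose low four bits are clear: the low four
   bits of the address are known to be 0100 in binary, while all higher
   bits remain unknown.  */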
596
597 /* Return the value for the tree operand EXPR. If FOR_BITS_P is true
598 return constant bits extracted from alignment information for
599 invariant addresses. */
600
601 static ccp_prop_value_t
602 get_value_for_expr (tree expr, bool for_bits_p)
603 {
604 ccp_prop_value_t val;
605
606 if (TREE_CODE (expr) == SSA_NAME)
607 {
608 ccp_prop_value_t *val_ = get_value (expr);
609 if (val_)
610 val = *val_;
611 else
612 {
613 val.lattice_val = VARYING;
614 val.value = NULL_TREE;
615 val.mask = -1;
616 }
617 if (for_bits_p
618 && val.lattice_val == CONSTANT)
619 {
620 if (TREE_CODE (val.value) == ADDR_EXPR)
621 val = get_value_from_alignment (val.value);
622 else if (TREE_CODE (val.value) != INTEGER_CST)
623 {
624 val.lattice_val = VARYING;
625 val.value = NULL_TREE;
626 val.mask = -1;
627 }
628 }
629 /* Fall back to a copy value. */
630 if (!for_bits_p
631 && val.lattice_val == VARYING
632 && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (expr))
633 {
634 val.lattice_val = CONSTANT;
635 val.value = expr;
636 val.mask = -1;
637 }
638 }
639 else if (is_gimple_min_invariant (expr)
640 && (!for_bits_p || TREE_CODE (expr) == INTEGER_CST))
641 {
642 val.lattice_val = CONSTANT;
643 val.value = expr;
644 val.mask = 0;
645 canonicalize_value (&val);
646 }
647 else if (TREE_CODE (expr) == ADDR_EXPR)
648 val = get_value_from_alignment (expr);
649 else
650 {
651 val.lattice_val = VARYING;
652 val.mask = -1;
653 val.value = NULL_TREE;
654 }
655
656 if (val.lattice_val == VARYING
657 && TYPE_UNSIGNED (TREE_TYPE (expr)))
658 val.mask = wi::zext (val.mask, TYPE_PRECISION (TREE_TYPE (expr)));
659
660 return val;
661 }
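/* Note that with FOR_BITS_P the only CONSTANT lattice values that survive
   are INTEGER_CSTs and the alignment bits derived from ADDR_EXPRs; any
   other constant (a REAL_CST, say) is dropped to VARYING because the
   bit-CCP machinery below only reasons about integer value/mask pairs.  */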
662
663 /* Return the likely CCP lattice value for STMT.
664
665 If STMT has no operands, then return CONSTANT.
666
667 Else if undefinedness of operands of STMT cause its value to be
668 undefined, then return UNDEFINED.
669
670 Else if any operands of STMT are constants, then return CONSTANT.
671
672 Else return VARYING. */
673
674 static ccp_lattice_t
675 likely_value (gimple *stmt)
676 {
677 bool has_constant_operand, has_undefined_operand, all_undefined_operands;
678 bool has_nsa_operand;
679 tree use;
680 ssa_op_iter iter;
681 unsigned i;
682
683 enum gimple_code code = gimple_code (stmt);
684
685 /* This function appears to be called only for assignments, calls,
686 conditionals, and switches, due to the logic in visit_stmt. */
687 gcc_assert (code == GIMPLE_ASSIGN
688 || code == GIMPLE_CALL
689 || code == GIMPLE_COND
690 || code == GIMPLE_SWITCH);
691
692 /* If the statement has volatile operands, it won't fold to a
693 constant value. */
694 if (gimple_has_volatile_ops (stmt))
695 return VARYING;
696
697 /* Arrive here for more complex cases. */
698 has_constant_operand = false;
699 has_undefined_operand = false;
700 all_undefined_operands = true;
701 has_nsa_operand = false;
702 FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
703 {
704 ccp_prop_value_t *val = get_value (use);
705
706 if (val && val->lattice_val == UNDEFINED)
707 has_undefined_operand = true;
708 else
709 all_undefined_operands = false;
710
711 if (val && val->lattice_val == CONSTANT)
712 has_constant_operand = true;
713
714 if (SSA_NAME_IS_DEFAULT_DEF (use)
715 || !prop_simulate_again_p (SSA_NAME_DEF_STMT (use)))
716 has_nsa_operand = true;
717 }
718
719 /* There may be constants in regular rhs operands. For calls we
720 have to ignore lhs, fndecl and static chain, otherwise only
721 the lhs. */
722 for (i = (is_gimple_call (stmt) ? 2 : 0) + gimple_has_lhs (stmt);
723 i < gimple_num_ops (stmt); ++i)
724 {
725 tree op = gimple_op (stmt, i);
726 if (!op || TREE_CODE (op) == SSA_NAME)
727 continue;
728 if (is_gimple_min_invariant (op))
729 has_constant_operand = true;
730 }
731
732 if (has_constant_operand)
733 all_undefined_operands = false;
734
735 if (has_undefined_operand
736 && code == GIMPLE_CALL
737 && gimple_call_internal_p (stmt))
738 switch (gimple_call_internal_fn (stmt))
739 {
740 /* These 3 builtins use the first argument just as a magic
   741 	 way to find out a decl uid.  */
742 case IFN_GOMP_SIMD_LANE:
743 case IFN_GOMP_SIMD_VF:
744 case IFN_GOMP_SIMD_LAST_LANE:
745 has_undefined_operand = false;
746 break;
747 default:
748 break;
749 }
750
751 /* If the operation combines operands like COMPLEX_EXPR make sure to
752 not mark the result UNDEFINED if only one part of the result is
753 undefined. */
754 if (has_undefined_operand && all_undefined_operands)
755 return UNDEFINED;
756 else if (code == GIMPLE_ASSIGN && has_undefined_operand)
757 {
758 switch (gimple_assign_rhs_code (stmt))
759 {
760 /* Unary operators are handled with all_undefined_operands. */
761 case PLUS_EXPR:
762 case MINUS_EXPR:
763 case POINTER_PLUS_EXPR:
764 case BIT_XOR_EXPR:
765 /* Not MIN_EXPR, MAX_EXPR. One VARYING operand may be selected.
766 Not bitwise operators, one VARYING operand may specify the
767 result completely.
768 Not logical operators for the same reason, apart from XOR.
769 Not COMPLEX_EXPR as one VARYING operand makes the result partly
770 not UNDEFINED. Not *DIV_EXPR, comparisons and shifts because
771 the undefined operand may be promoted. */
772 return UNDEFINED;
773
774 case ADDR_EXPR:
775 /* If any part of an address is UNDEFINED, like the index
776 of an ARRAY_EXPR, then treat the result as UNDEFINED. */
777 return UNDEFINED;
778
779 default:
780 ;
781 }
782 }
   783   /* If there was an UNDEFINED operand but the result may not be UNDEFINED,
   784      fall back to CONSTANT.  During iteration UNDEFINED may still drop
785 to CONSTANT. */
786 if (has_undefined_operand)
787 return CONSTANT;
788
789 /* We do not consider virtual operands here -- load from read-only
790 memory may have only VARYING virtual operands, but still be
791 constant. Also we can combine the stmt with definitions from
792 operands whose definitions are not simulated again. */
793 if (has_constant_operand
794 || has_nsa_operand
795 || gimple_references_memory_p (stmt))
796 return CONSTANT;
797
798 return VARYING;
799 }
800
801 /* Returns true if STMT cannot be constant. */
802
803 static bool
804 surely_varying_stmt_p (gimple *stmt)
805 {
806 /* If the statement has operands that we cannot handle, it cannot be
807 constant. */
808 if (gimple_has_volatile_ops (stmt))
809 return true;
810
811 /* If it is a call and does not return a value or is not a
   812      builtin and not an indirect call or a call to a function with an
   813      assume_aligned/alloc_align attribute, it is varying.  */
814 if (is_gimple_call (stmt))
815 {
816 tree fndecl, fntype = gimple_call_fntype (stmt);
817 if (!gimple_call_lhs (stmt)
818 || ((fndecl = gimple_call_fndecl (stmt)) != NULL_TREE
819 && !fndecl_built_in_p (fndecl)
820 && !lookup_attribute ("assume_aligned",
821 TYPE_ATTRIBUTES (fntype))
822 && !lookup_attribute ("alloc_align",
823 TYPE_ATTRIBUTES (fntype))))
824 return true;
825 }
826
827 /* Any other store operation is not interesting. */
828 else if (gimple_vdef (stmt))
829 return true;
830
831 /* Anything other than assignments and conditional jumps are not
832 interesting for CCP. */
833 if (gimple_code (stmt) != GIMPLE_ASSIGN
834 && gimple_code (stmt) != GIMPLE_COND
835 && gimple_code (stmt) != GIMPLE_SWITCH
836 && gimple_code (stmt) != GIMPLE_CALL)
837 return true;
838
839 return false;
840 }
841
842 /* Initialize local data structures for CCP. */
843
844 static void
845 ccp_initialize (void)
846 {
847 basic_block bb;
848
849 n_const_val = num_ssa_names;
850 const_val = XCNEWVEC (ccp_prop_value_t, n_const_val);
851
852 /* Initialize simulation flags for PHI nodes and statements. */
853 FOR_EACH_BB_FN (bb, cfun)
854 {
855 gimple_stmt_iterator i;
856
857 for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i))
858 {
859 gimple *stmt = gsi_stmt (i);
860 bool is_varying;
861
   862 	  /* If the statement is a control insn, we always want to
   863 	     simulate it at least once.  Failing to do so means that
   864 	     its outgoing edges will never get added.  */
865 if (stmt_ends_bb_p (stmt))
866 is_varying = false;
867 else
868 is_varying = surely_varying_stmt_p (stmt);
869
870 if (is_varying)
871 {
872 tree def;
873 ssa_op_iter iter;
874
875 /* If the statement will not produce a constant, mark
876 all its outputs VARYING. */
877 FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
878 set_value_varying (def);
879 }
880 prop_set_simulate_again (stmt, !is_varying);
881 }
882 }
883
884 /* Now process PHI nodes. We never clear the simulate_again flag on
885 phi nodes, since we do not know which edges are executable yet,
886 except for phi nodes for virtual operands when we do not do store ccp. */
887 FOR_EACH_BB_FN (bb, cfun)
888 {
889 gphi_iterator i;
890
891 for (i = gsi_start_phis (bb); !gsi_end_p (i); gsi_next (&i))
892 {
893 gphi *phi = i.phi ();
894
895 if (virtual_operand_p (gimple_phi_result (phi)))
896 prop_set_simulate_again (phi, false);
897 else
898 prop_set_simulate_again (phi, true);
899 }
900 }
901 }
902
   903 /* Debug count support.  Reset the values of ssa names to
   904    VARYING when the total number of ssa names analyzed
   905    exceeds the specified debug count.  */
906
907 static void
908 do_dbg_cnt (void)
909 {
910 unsigned i;
911 for (i = 0; i < num_ssa_names; i++)
912 {
913 if (!dbg_cnt (ccp))
914 {
915 const_val[i].lattice_val = VARYING;
916 const_val[i].mask = -1;
917 const_val[i].value = NULL_TREE;
918 }
919 }
920 }
921
922
923 /* We want to provide our own GET_VALUE and FOLD_STMT virtual methods. */
924 class ccp_folder : public substitute_and_fold_engine
925 {
926 public:
927 tree get_value (tree) FINAL OVERRIDE;
928 bool fold_stmt (gimple_stmt_iterator *) FINAL OVERRIDE;
929 };
930
931 /* This method just wraps GET_CONSTANT_VALUE for now. Over time
932 naked calls to GET_CONSTANT_VALUE should be eliminated in favor
933 of calling member functions. */
934
935 tree
936 ccp_folder::get_value (tree op)
937 {
938 return get_constant_value (op);
939 }
940
941 /* Do final substitution of propagated values, cleanup the flowgraph and
942 free allocated storage. If NONZERO_P, record nonzero bits.
943
944 Return TRUE when something was optimized. */
945
946 static bool
947 ccp_finalize (bool nonzero_p)
948 {
949 bool something_changed;
950 unsigned i;
951 tree name;
952
953 do_dbg_cnt ();
954
955 /* Derive alignment and misalignment information from partially
956 constant pointers in the lattice or nonzero bits from partially
957 constant integers. */
958 FOR_EACH_SSA_NAME (i, name, cfun)
959 {
960 ccp_prop_value_t *val;
961 unsigned int tem, align;
962
963 if (!POINTER_TYPE_P (TREE_TYPE (name))
964 && (!INTEGRAL_TYPE_P (TREE_TYPE (name))
965 /* Don't record nonzero bits before IPA to avoid
966 using too much memory. */
967 || !nonzero_p))
968 continue;
969
970 val = get_value (name);
971 if (val->lattice_val != CONSTANT
972 || TREE_CODE (val->value) != INTEGER_CST
973 || val->mask == 0)
974 continue;
975
976 if (POINTER_TYPE_P (TREE_TYPE (name)))
977 {
978 /* Trailing mask bits specify the alignment, trailing value
979 bits the misalignment. */
980 tem = val->mask.to_uhwi ();
981 align = least_bit_hwi (tem);
982 if (align > 1)
983 set_ptr_info_alignment (get_ptr_info (name), align,
984 (TREE_INT_CST_LOW (val->value)
985 & (align - 1)));
986 }
987 else
988 {
989 unsigned int precision = TYPE_PRECISION (TREE_TYPE (val->value));
990 wide_int nonzero_bits
991 = (wide_int::from (val->mask, precision, UNSIGNED)
992 | wi::to_wide (val->value));
993 nonzero_bits &= get_nonzero_bits (name);
994 set_nonzero_bits (name, nonzero_bits);
995 }
996 }
997
998 /* Perform substitutions based on the known constant values. */
999 class ccp_folder ccp_folder;
1000 something_changed = ccp_folder.substitute_and_fold ();
1001
1002 free (const_val);
1003 const_val = NULL;
1004 return something_changed;
1005 }
1006
1007
1008 /* Compute the meet operator between *VAL1 and *VAL2. Store the result
1009 in VAL1.
1010
1011 any M UNDEFINED = any
1012 any M VARYING = VARYING
1013 Ci M Cj = Ci if (i == j)
1014 Ci M Cj = VARYING if (i != j)
1015 */
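/* For instance, meeting the INTEGER_CSTs 12 and 8 does not drop to VARYING
   right away: the differing bit (12 ^ 8 == 4) is added to the mask, giving
   the partially constant result value 12, mask 4, i.e. "either 8 or 12".
   Only when no known bits remain does the meet fall back to VARYING.  */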
1016
1017 static void
1018 ccp_lattice_meet (ccp_prop_value_t *val1, ccp_prop_value_t *val2)
1019 {
1020 if (val1->lattice_val == UNDEFINED
  1021       /* For UNDEFINED M SSA we can't always use the SSA name because its
  1022 	 definition may not dominate the PHI node.  Doing optimistic copy
  1023 	 propagation also causes a lot of gcc.dg/uninit-pred*.c FAILs.  */
1024 && (val2->lattice_val != CONSTANT
1025 || TREE_CODE (val2->value) != SSA_NAME))
1026 {
1027 /* UNDEFINED M any = any */
1028 *val1 = *val2;
1029 }
1030 else if (val2->lattice_val == UNDEFINED
1031 /* See above. */
1032 && (val1->lattice_val != CONSTANT
1033 || TREE_CODE (val1->value) != SSA_NAME))
1034 {
1035 /* any M UNDEFINED = any
1036 Nothing to do. VAL1 already contains the value we want. */
1037 ;
1038 }
1039 else if (val1->lattice_val == VARYING
1040 || val2->lattice_val == VARYING)
1041 {
1042 /* any M VARYING = VARYING. */
1043 val1->lattice_val = VARYING;
1044 val1->mask = -1;
1045 val1->value = NULL_TREE;
1046 }
1047 else if (val1->lattice_val == CONSTANT
1048 && val2->lattice_val == CONSTANT
1049 && TREE_CODE (val1->value) == INTEGER_CST
1050 && TREE_CODE (val2->value) == INTEGER_CST)
1051 {
1052 /* Ci M Cj = Ci if (i == j)
1053 Ci M Cj = VARYING if (i != j)
1054
1055 For INTEGER_CSTs mask unequal bits. If no equal bits remain,
1056 drop to varying. */
1057 val1->mask = (val1->mask | val2->mask
1058 | (wi::to_widest (val1->value)
1059 ^ wi::to_widest (val2->value)));
1060 if (wi::sext (val1->mask, TYPE_PRECISION (TREE_TYPE (val1->value))) == -1)
1061 {
1062 val1->lattice_val = VARYING;
1063 val1->value = NULL_TREE;
1064 }
1065 }
1066 else if (val1->lattice_val == CONSTANT
1067 && val2->lattice_val == CONSTANT
1068 && operand_equal_p (val1->value, val2->value, 0))
1069 {
1070 /* Ci M Cj = Ci if (i == j)
1071 Ci M Cj = VARYING if (i != j)
1072
1073 VAL1 already contains the value we want for equivalent values. */
1074 }
1075 else if (val1->lattice_val == CONSTANT
1076 && val2->lattice_val == CONSTANT
1077 && (TREE_CODE (val1->value) == ADDR_EXPR
1078 || TREE_CODE (val2->value) == ADDR_EXPR))
1079 {
1080 /* When not equal addresses are involved try meeting for
1081 alignment. */
1082 ccp_prop_value_t tem = *val2;
1083 if (TREE_CODE (val1->value) == ADDR_EXPR)
1084 *val1 = get_value_for_expr (val1->value, true);
1085 if (TREE_CODE (val2->value) == ADDR_EXPR)
1086 tem = get_value_for_expr (val2->value, true);
1087 ccp_lattice_meet (val1, &tem);
1088 }
1089 else
1090 {
1091 /* Any other combination is VARYING. */
1092 val1->lattice_val = VARYING;
1093 val1->mask = -1;
1094 val1->value = NULL_TREE;
1095 }
1096 }
1097
1098
1099 /* Loop through the PHI_NODE's parameters for BLOCK and compare their
1100 lattice values to determine PHI_NODE's lattice value. The value of a
  1101    PHI node is determined by calling ccp_lattice_meet with all the arguments
1102 of the PHI node that are incoming via executable edges. */
1103
1104 enum ssa_prop_result
1105 ccp_propagate::visit_phi (gphi *phi)
1106 {
1107 unsigned i;
1108 ccp_prop_value_t new_val;
1109
1110 if (dump_file && (dump_flags & TDF_DETAILS))
1111 {
1112 fprintf (dump_file, "\nVisiting PHI node: ");
1113 print_gimple_stmt (dump_file, phi, 0, dump_flags);
1114 }
1115
1116 new_val.lattice_val = UNDEFINED;
1117 new_val.value = NULL_TREE;
1118 new_val.mask = 0;
1119
1120 bool first = true;
1121 bool non_exec_edge = false;
1122 for (i = 0; i < gimple_phi_num_args (phi); i++)
1123 {
1124 /* Compute the meet operator over all the PHI arguments flowing
1125 through executable edges. */
1126 edge e = gimple_phi_arg_edge (phi, i);
1127
1128 if (dump_file && (dump_flags & TDF_DETAILS))
1129 {
1130 fprintf (dump_file,
1131 "\tArgument #%d (%d -> %d %sexecutable)\n",
1132 i, e->src->index, e->dest->index,
1133 (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
1134 }
1135
  1136       /* If the incoming edge is executable, compute the meet operator for
1137 the existing value of the PHI node and the current PHI argument. */
1138 if (e->flags & EDGE_EXECUTABLE)
1139 {
1140 tree arg = gimple_phi_arg (phi, i)->def;
1141 ccp_prop_value_t arg_val = get_value_for_expr (arg, false);
1142
1143 if (first)
1144 {
1145 new_val = arg_val;
1146 first = false;
1147 }
1148 else
1149 ccp_lattice_meet (&new_val, &arg_val);
1150
1151 if (dump_file && (dump_flags & TDF_DETAILS))
1152 {
1153 fprintf (dump_file, "\t");
1154 print_generic_expr (dump_file, arg, dump_flags);
1155 dump_lattice_value (dump_file, "\tValue: ", arg_val);
1156 fprintf (dump_file, "\n");
1157 }
1158
1159 if (new_val.lattice_val == VARYING)
1160 break;
1161 }
1162 else
1163 non_exec_edge = true;
1164 }
1165
  1166   /* In case there were non-executable edges and the value is a copy,
1167 make sure its definition dominates the PHI node. */
1168 if (non_exec_edge
1169 && new_val.lattice_val == CONSTANT
1170 && TREE_CODE (new_val.value) == SSA_NAME
1171 && ! SSA_NAME_IS_DEFAULT_DEF (new_val.value)
1172 && ! dominated_by_p (CDI_DOMINATORS, gimple_bb (phi),
1173 gimple_bb (SSA_NAME_DEF_STMT (new_val.value))))
1174 {
1175 new_val.lattice_val = VARYING;
1176 new_val.value = NULL_TREE;
1177 new_val.mask = -1;
1178 }
1179
1180 if (dump_file && (dump_flags & TDF_DETAILS))
1181 {
1182 dump_lattice_value (dump_file, "\n PHI node value: ", new_val);
1183 fprintf (dump_file, "\n\n");
1184 }
1185
1186 /* Make the transition to the new value. */
1187 if (set_lattice_value (gimple_phi_result (phi), &new_val))
1188 {
1189 if (new_val.lattice_val == VARYING)
1190 return SSA_PROP_VARYING;
1191 else
1192 return SSA_PROP_INTERESTING;
1193 }
1194 else
1195 return SSA_PROP_NOT_INTERESTING;
1196 }
1197
1198 /* Return the constant value for OP or OP otherwise. */
1199
1200 static tree
1201 valueize_op (tree op)
1202 {
1203 if (TREE_CODE (op) == SSA_NAME)
1204 {
1205 tree tem = get_constant_value (op);
1206 if (tem)
1207 return tem;
1208 }
1209 return op;
1210 }
1211
1212 /* Return the constant value for OP, but signal to not follow SSA
1213 edges if the definition may be simulated again. */
1214
1215 static tree
1216 valueize_op_1 (tree op)
1217 {
1218 if (TREE_CODE (op) == SSA_NAME)
1219 {
1220 /* If the definition may be simulated again we cannot follow
1221 this SSA edge as the SSA propagator does not necessarily
1222 re-visit the use. */
1223 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
1224 if (!gimple_nop_p (def_stmt)
1225 && prop_simulate_again_p (def_stmt))
1226 return NULL_TREE;
1227 tree tem = get_constant_value (op);
1228 if (tem)
1229 return tem;
1230 }
1231 return op;
1232 }
1233
1234 /* CCP specific front-end to the non-destructive constant folding
1235 routines.
1236
1237 Attempt to simplify the RHS of STMT knowing that one or more
1238 operands are constants.
1239
1240 If simplification is possible, return the simplified RHS,
1241 otherwise return the original RHS or NULL_TREE. */
1242
1243 static tree
1244 ccp_fold (gimple *stmt)
1245 {
1246 location_t loc = gimple_location (stmt);
1247 switch (gimple_code (stmt))
1248 {
1249 case GIMPLE_COND:
1250 {
1251 /* Handle comparison operators that can appear in GIMPLE form. */
1252 tree op0 = valueize_op (gimple_cond_lhs (stmt));
1253 tree op1 = valueize_op (gimple_cond_rhs (stmt));
1254 enum tree_code code = gimple_cond_code (stmt);
1255 return fold_binary_loc (loc, code, boolean_type_node, op0, op1);
1256 }
1257
1258 case GIMPLE_SWITCH:
1259 {
1260 /* Return the constant switch index. */
1261 return valueize_op (gimple_switch_index (as_a <gswitch *> (stmt)));
1262 }
1263
1264 case GIMPLE_ASSIGN:
1265 case GIMPLE_CALL:
1266 return gimple_fold_stmt_to_constant_1 (stmt,
1267 valueize_op, valueize_op_1);
1268
1269 default:
1270 gcc_unreachable ();
1271 }
1272 }
1273
1274 /* Apply the operation CODE in type TYPE to the value, mask pair
1275 RVAL and RMASK representing a value of type RTYPE and set
1276 the value, mask pair *VAL and *MASK to the result. */
1277
1278 void
1279 bit_value_unop (enum tree_code code, signop type_sgn, int type_precision,
1280 widest_int *val, widest_int *mask,
1281 signop rtype_sgn, int rtype_precision,
1282 const widest_int &rval, const widest_int &rmask)
1283 {
1284 switch (code)
1285 {
1286 case BIT_NOT_EXPR:
1287 *mask = rmask;
1288 *val = ~rval;
1289 break;
1290
1291 case NEGATE_EXPR:
1292 {
1293 widest_int temv, temm;
1294 /* Return ~rval + 1. */
1295 bit_value_unop (BIT_NOT_EXPR, type_sgn, type_precision, &temv, &temm,
1296 type_sgn, type_precision, rval, rmask);
1297 bit_value_binop (PLUS_EXPR, type_sgn, type_precision, val, mask,
1298 type_sgn, type_precision, temv, temm,
1299 type_sgn, type_precision, 1, 0);
1300 break;
1301 }
1302
1303 CASE_CONVERT:
1304 {
1305 /* First extend mask and value according to the original type. */
1306 *mask = wi::ext (rmask, rtype_precision, rtype_sgn);
1307 *val = wi::ext (rval, rtype_precision, rtype_sgn);
1308
1309 /* Then extend mask and value according to the target type. */
1310 *mask = wi::ext (*mask, type_precision, type_sgn);
1311 *val = wi::ext (*val, type_precision, type_sgn);
1312 break;
1313 }
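      /* Note on the conversion case above: extending the mask with the sign
	 of the source type means that widening a signed value whose sign bit
	 is unknown also marks every new high bit as unknown, since those
	 bits are copies of the unknown sign bit.  */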
1314
1315 default:
1316 *mask = -1;
1317 break;
1318 }
1319 }
1320
1321 /* Apply the operation CODE in type TYPE to the value, mask pairs
  1322    R1VAL, R1MASK and R2VAL, R2MASK representing values of type R1TYPE
1323 and R2TYPE and set the value, mask pair *VAL and *MASK to the result. */
1324
1325 void
1326 bit_value_binop (enum tree_code code, signop sgn, int width,
1327 widest_int *val, widest_int *mask,
1328 signop r1type_sgn, int r1type_precision,
1329 const widest_int &r1val, const widest_int &r1mask,
1330 signop r2type_sgn, int r2type_precision,
1331 const widest_int &r2val, const widest_int &r2mask)
1332 {
1333 bool swap_p = false;
1334
1335 /* Assume we'll get a constant result. Use an initial non varying
1336 value, we fall back to varying in the end if necessary. */
1337 *mask = -1;
1338
1339 switch (code)
1340 {
1341 case BIT_AND_EXPR:
1342 /* The mask is constant where there is a known not
1343 set bit, (m1 | m2) & ((v1 | m1) & (v2 | m2)) */
1344 *mask = (r1mask | r2mask) & (r1val | r1mask) & (r2val | r2mask);
1345 *val = r1val & r2val;
1346 break;
1347
1348 case BIT_IOR_EXPR:
1349 /* The mask is constant where there is a known
1350 set bit, (m1 | m2) & ~((v1 & ~m1) | (v2 & ~m2)). */
1351 *mask = wi::bit_and_not (r1mask | r2mask,
1352 wi::bit_and_not (r1val, r1mask)
1353 | wi::bit_and_not (r2val, r2mask));
1354 *val = r1val | r2val;
1355 break;
1356
1357 case BIT_XOR_EXPR:
1358 /* m1 | m2 */
1359 *mask = r1mask | r2mask;
1360 *val = r1val ^ r2val;
1361 break;
1362
1363 case LROTATE_EXPR:
1364 case RROTATE_EXPR:
1365 if (r2mask == 0)
1366 {
1367 widest_int shift = r2val;
1368 if (shift == 0)
1369 {
1370 *mask = r1mask;
1371 *val = r1val;
1372 }
1373 else
1374 {
1375 if (wi::neg_p (shift))
1376 {
1377 shift = -shift;
1378 if (code == RROTATE_EXPR)
1379 code = LROTATE_EXPR;
1380 else
1381 code = RROTATE_EXPR;
1382 }
1383 if (code == RROTATE_EXPR)
1384 {
1385 *mask = wi::rrotate (r1mask, shift, width);
1386 *val = wi::rrotate (r1val, shift, width);
1387 }
1388 else
1389 {
1390 *mask = wi::lrotate (r1mask, shift, width);
1391 *val = wi::lrotate (r1val, shift, width);
1392 }
1393 }
1394 }
1395 break;
1396
1397 case LSHIFT_EXPR:
1398 case RSHIFT_EXPR:
1399 /* ??? We can handle partially known shift counts if we know
1400 its sign. That way we can tell that (x << (y | 8)) & 255
1401 is zero. */
1402 if (r2mask == 0)
1403 {
1404 widest_int shift = r2val;
1405 if (shift == 0)
1406 {
1407 *mask = r1mask;
1408 *val = r1val;
1409 }
1410 else
1411 {
1412 if (wi::neg_p (shift))
1413 {
1414 shift = -shift;
1415 if (code == RSHIFT_EXPR)
1416 code = LSHIFT_EXPR;
1417 else
1418 code = RSHIFT_EXPR;
1419 }
1420 if (code == RSHIFT_EXPR)
1421 {
1422 *mask = wi::rshift (wi::ext (r1mask, width, sgn), shift, sgn);
1423 *val = wi::rshift (wi::ext (r1val, width, sgn), shift, sgn);
1424 }
1425 else
1426 {
1427 *mask = wi::ext (r1mask << shift, width, sgn);
1428 *val = wi::ext (r1val << shift, width, sgn);
1429 }
1430 }
1431 }
1432 break;
1433
1434 case PLUS_EXPR:
1435 case POINTER_PLUS_EXPR:
1436 {
1437 /* Do the addition with unknown bits set to zero, to give carry-ins of
1438 zero wherever possible. */
1439 widest_int lo = (wi::bit_and_not (r1val, r1mask)
1440 + wi::bit_and_not (r2val, r2mask));
1441 lo = wi::ext (lo, width, sgn);
1442 /* Do the addition with unknown bits set to one, to give carry-ins of
1443 one wherever possible. */
1444 widest_int hi = (r1val | r1mask) + (r2val | r2mask);
1445 hi = wi::ext (hi, width, sgn);
1446 /* Each bit in the result is known if (a) the corresponding bits in
1447 both inputs are known, and (b) the carry-in to that bit position
1448 is known. We can check condition (b) by seeing if we got the same
1449 result with minimised carries as with maximised carries. */
1450 *mask = r1mask | r2mask | (lo ^ hi);
1451 *mask = wi::ext (*mask, width, sgn);
1452 /* It shouldn't matter whether we choose lo or hi here. */
1453 *val = lo;
1454 break;
1455 }
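      /* As a small worked example of the carry trick above, with width == 8:
	 adding {value 4, mask 3} (i.e. some value in 4..7) to the constant 8
	 gives lo == 12 and hi == 15, so lo ^ hi == 3 and the result is
	 {value 12, mask 3}, i.e. some value in 12..15 with only the two low
	 bits unknown.  */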
1456
1457 case MINUS_EXPR:
1458 {
1459 widest_int temv, temm;
1460 bit_value_unop (NEGATE_EXPR, r2type_sgn, r2type_precision, &temv, &temm,
1461 r2type_sgn, r2type_precision, r2val, r2mask);
1462 bit_value_binop (PLUS_EXPR, sgn, width, val, mask,
1463 r1type_sgn, r1type_precision, r1val, r1mask,
1464 r2type_sgn, r2type_precision, temv, temm);
1465 break;
1466 }
1467
1468 case MULT_EXPR:
1469 {
1470 /* Just track trailing zeros in both operands and transfer
1471 them to the other. */
1472 int r1tz = wi::ctz (r1val | r1mask);
1473 int r2tz = wi::ctz (r2val | r2mask);
1474 if (r1tz + r2tz >= width)
1475 {
1476 *mask = 0;
1477 *val = 0;
1478 }
1479 else if (r1tz + r2tz > 0)
1480 {
1481 *mask = wi::ext (wi::mask <widest_int> (r1tz + r2tz, true),
1482 width, sgn);
1483 *val = 0;
1484 }
1485 break;
1486 }
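      /* E.g. if one factor is known to be a multiple of 4 (two trailing zero
	 bits) and the other a multiple of 8 (three trailing zero bits), the
	 product is known to be a multiple of 32: its low five bits are
	 recorded as known zeros.  */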
1487
1488 case EQ_EXPR:
1489 case NE_EXPR:
1490 {
1491 widest_int m = r1mask | r2mask;
1492 if (wi::bit_and_not (r1val, m) != wi::bit_and_not (r2val, m))
1493 {
1494 *mask = 0;
1495 *val = ((code == EQ_EXPR) ? 0 : 1);
1496 }
1497 else
1498 {
1499 /* We know the result of a comparison is always one or zero. */
1500 *mask = 1;
1501 *val = 0;
1502 }
1503 break;
1504 }
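      /* For example, comparing {value 4, mask 3} against the constant 16
	 folds the comparison: the bits outside the mask already differ
	 (4 vs 16), so EQ_EXPR yields 0 and NE_EXPR yields 1.  When the known
	 bits agree, only the "result is zero or one" fact remains.  */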
1505
1506 case GE_EXPR:
1507 case GT_EXPR:
1508 swap_p = true;
1509 code = swap_tree_comparison (code);
1510 /* Fall through. */
1511 case LT_EXPR:
1512 case LE_EXPR:
1513 {
1514 int minmax, maxmin;
1515
1516 const widest_int &o1val = swap_p ? r2val : r1val;
1517 const widest_int &o1mask = swap_p ? r2mask : r1mask;
1518 const widest_int &o2val = swap_p ? r1val : r2val;
1519 const widest_int &o2mask = swap_p ? r1mask : r2mask;
1520
1521 /* If the most significant bits are not known we know nothing. */
1522 if (wi::neg_p (o1mask) || wi::neg_p (o2mask))
1523 break;
1524
1525 /* For comparisons the signedness is in the comparison operands. */
1526 sgn = r1type_sgn;
1527
  1528 	/* If we know the most significant bits we know the value
  1529 	   ranges by means of treating varying bits as zero
1530 or one. Do a cross comparison of the max/min pairs. */
1531 maxmin = wi::cmp (o1val | o1mask,
1532 wi::bit_and_not (o2val, o2mask), sgn);
1533 minmax = wi::cmp (wi::bit_and_not (o1val, o1mask),
1534 o2val | o2mask, sgn);
1535 if (maxmin < 0) /* o1 is less than o2. */
1536 {
1537 *mask = 0;
1538 *val = 1;
1539 }
1540 else if (minmax > 0) /* o1 is not less or equal to o2. */
1541 {
1542 *mask = 0;
1543 *val = 0;
1544 }
1545 else if (maxmin == minmax) /* o1 and o2 are equal. */
1546 {
1547 /* This probably should never happen as we'd have
1548 folded the thing during fully constant value folding. */
1549 *mask = 0;
1550 *val = (code == LE_EXPR ? 1 : 0);
1551 }
1552 else
1553 {
1554 /* We know the result of a comparison is always one or zero. */
1555 *mask = 1;
1556 *val = 0;
1557 }
1558 break;
1559 }
1560
1561 default:;
1562 }
1563 }
1564
1565 /* Return the propagation value when applying the operation CODE to
1566 the value RHS yielding type TYPE. */
1567
1568 static ccp_prop_value_t
1569 bit_value_unop (enum tree_code code, tree type, tree rhs)
1570 {
1571 ccp_prop_value_t rval = get_value_for_expr (rhs, true);
1572 widest_int value, mask;
1573 ccp_prop_value_t val;
1574
1575 if (rval.lattice_val == UNDEFINED)
1576 return rval;
1577
1578 gcc_assert ((rval.lattice_val == CONSTANT
1579 && TREE_CODE (rval.value) == INTEGER_CST)
1580 || wi::sext (rval.mask, TYPE_PRECISION (TREE_TYPE (rhs))) == -1);
1581 bit_value_unop (code, TYPE_SIGN (type), TYPE_PRECISION (type), &value, &mask,
1582 TYPE_SIGN (TREE_TYPE (rhs)), TYPE_PRECISION (TREE_TYPE (rhs)),
1583 value_to_wide_int (rval), rval.mask);
1584 if (wi::sext (mask, TYPE_PRECISION (type)) != -1)
1585 {
1586 val.lattice_val = CONSTANT;
1587 val.mask = mask;
1588 /* ??? Delay building trees here. */
1589 val.value = wide_int_to_tree (type, value);
1590 }
1591 else
1592 {
1593 val.lattice_val = VARYING;
1594 val.value = NULL_TREE;
1595 val.mask = -1;
1596 }
1597 return val;
1598 }
1599
1600 /* Return the propagation value when applying the operation CODE to
1601 the values RHS1 and RHS2 yielding type TYPE. */
1602
1603 static ccp_prop_value_t
1604 bit_value_binop (enum tree_code code, tree type, tree rhs1, tree rhs2)
1605 {
1606 ccp_prop_value_t r1val = get_value_for_expr (rhs1, true);
1607 ccp_prop_value_t r2val = get_value_for_expr (rhs2, true);
1608 widest_int value, mask;
1609 ccp_prop_value_t val;
1610
1611 if (r1val.lattice_val == UNDEFINED
1612 || r2val.lattice_val == UNDEFINED)
1613 {
1614 val.lattice_val = VARYING;
1615 val.value = NULL_TREE;
1616 val.mask = -1;
1617 return val;
1618 }
1619
1620 gcc_assert ((r1val.lattice_val == CONSTANT
1621 && TREE_CODE (r1val.value) == INTEGER_CST)
1622 || wi::sext (r1val.mask,
1623 TYPE_PRECISION (TREE_TYPE (rhs1))) == -1);
1624 gcc_assert ((r2val.lattice_val == CONSTANT
1625 && TREE_CODE (r2val.value) == INTEGER_CST)
1626 || wi::sext (r2val.mask,
1627 TYPE_PRECISION (TREE_TYPE (rhs2))) == -1);
1628 bit_value_binop (code, TYPE_SIGN (type), TYPE_PRECISION (type), &value, &mask,
1629 TYPE_SIGN (TREE_TYPE (rhs1)), TYPE_PRECISION (TREE_TYPE (rhs1)),
1630 value_to_wide_int (r1val), r1val.mask,
1631 TYPE_SIGN (TREE_TYPE (rhs2)), TYPE_PRECISION (TREE_TYPE (rhs2)),
1632 value_to_wide_int (r2val), r2val.mask);
1633
1634 if (wi::sext (mask, TYPE_PRECISION (type)) != -1)
1635 {
1636 val.lattice_val = CONSTANT;
1637 val.mask = mask;
1638 /* ??? Delay building trees here. */
1639 val.value = wide_int_to_tree (type, value);
1640 }
1641 else
1642 {
1643 val.lattice_val = VARYING;
1644 val.value = NULL_TREE;
1645 val.mask = -1;
1646 }
1647 return val;
1648 }
1649
1650 /* Return the propagation value for __builtin_assume_aligned
1651 and functions with assume_aligned or alloc_aligned attribute.
1652 For __builtin_assume_aligned, ATTR is NULL_TREE,
1653 for assume_aligned attribute ATTR is non-NULL and ALLOC_ALIGNED
1654 is false, for alloc_aligned attribute ATTR is non-NULL and
1655 ALLOC_ALIGNED is true. */
1656
1657 static ccp_prop_value_t
1658 bit_value_assume_aligned (gimple *stmt, tree attr, ccp_prop_value_t ptrval,
1659 bool alloc_aligned)
1660 {
1661 tree align, misalign = NULL_TREE, type;
1662 unsigned HOST_WIDE_INT aligni, misaligni = 0;
1663 ccp_prop_value_t alignval;
1664 widest_int value, mask;
1665 ccp_prop_value_t val;
1666
1667 if (attr == NULL_TREE)
1668 {
1669 tree ptr = gimple_call_arg (stmt, 0);
1670 type = TREE_TYPE (ptr);
1671 ptrval = get_value_for_expr (ptr, true);
1672 }
1673 else
1674 {
1675 tree lhs = gimple_call_lhs (stmt);
1676 type = TREE_TYPE (lhs);
1677 }
1678
1679 if (ptrval.lattice_val == UNDEFINED)
1680 return ptrval;
1681 gcc_assert ((ptrval.lattice_val == CONSTANT
1682 && TREE_CODE (ptrval.value) == INTEGER_CST)
1683 || wi::sext (ptrval.mask, TYPE_PRECISION (type)) == -1);
1684 if (attr == NULL_TREE)
1685 {
1686 /* Get aligni and misaligni from __builtin_assume_aligned. */
1687 align = gimple_call_arg (stmt, 1);
1688 if (!tree_fits_uhwi_p (align))
1689 return ptrval;
1690 aligni = tree_to_uhwi (align);
1691 if (gimple_call_num_args (stmt) > 2)
1692 {
1693 misalign = gimple_call_arg (stmt, 2);
1694 if (!tree_fits_uhwi_p (misalign))
1695 return ptrval;
1696 misaligni = tree_to_uhwi (misalign);
1697 }
1698 }
1699 else
1700 {
1701 /* Get aligni and misaligni from assume_aligned or
1702 alloc_align attributes. */
1703 if (TREE_VALUE (attr) == NULL_TREE)
1704 return ptrval;
1705 attr = TREE_VALUE (attr);
1706 align = TREE_VALUE (attr);
1707 if (!tree_fits_uhwi_p (align))
1708 return ptrval;
1709 aligni = tree_to_uhwi (align);
1710 if (alloc_aligned)
1711 {
1712 if (aligni == 0 || aligni > gimple_call_num_args (stmt))
1713 return ptrval;
1714 align = gimple_call_arg (stmt, aligni - 1);
1715 if (!tree_fits_uhwi_p (align))
1716 return ptrval;
1717 aligni = tree_to_uhwi (align);
1718 }
1719 else if (TREE_CHAIN (attr) && TREE_VALUE (TREE_CHAIN (attr)))
1720 {
1721 misalign = TREE_VALUE (TREE_CHAIN (attr));
1722 if (!tree_fits_uhwi_p (misalign))
1723 return ptrval;
1724 misaligni = tree_to_uhwi (misalign);
1725 }
1726 }
1727 if (aligni <= 1 || (aligni & (aligni - 1)) != 0 || misaligni >= aligni)
1728 return ptrval;
1729
1730 align = build_int_cst_type (type, -aligni);
1731 alignval = get_value_for_expr (align, true);
1732 bit_value_binop (BIT_AND_EXPR, TYPE_SIGN (type), TYPE_PRECISION (type), &value, &mask,
1733 TYPE_SIGN (type), TYPE_PRECISION (type), value_to_wide_int (ptrval), ptrval.mask,
1734 TYPE_SIGN (type), TYPE_PRECISION (type), value_to_wide_int (alignval), alignval.mask);
1735
1736 if (wi::sext (mask, TYPE_PRECISION (type)) != -1)
1737 {
1738 val.lattice_val = CONSTANT;
1739 val.mask = mask;
1740 gcc_assert ((mask.to_uhwi () & (aligni - 1)) == 0);
1741 gcc_assert ((value.to_uhwi () & (aligni - 1)) == 0);
1742 value |= misaligni;
1743 /* ??? Delay building trees here. */
1744 val.value = wide_int_to_tree (type, value);
1745 }
1746 else
1747 {
1748 val.lattice_val = VARYING;
1749 val.value = NULL_TREE;
1750 val.mask = -1;
1751 }
1752 return val;
1753 }
1754
1755 /* Evaluate statement STMT.
1756 Valid only for assignments, calls, conditionals, and switches. */
1757
1758 static ccp_prop_value_t
1759 evaluate_stmt (gimple *stmt)
1760 {
1761 ccp_prop_value_t val;
1762 tree simplified = NULL_TREE;
1763 ccp_lattice_t likelyvalue = likely_value (stmt);
1764 bool is_constant = false;
1765 unsigned int align;
1766
1767 if (dump_file && (dump_flags & TDF_DETAILS))
1768 {
1769 fprintf (dump_file, "which is likely ");
1770 switch (likelyvalue)
1771 {
1772 case CONSTANT:
1773 fprintf (dump_file, "CONSTANT");
1774 break;
1775 case UNDEFINED:
1776 fprintf (dump_file, "UNDEFINED");
1777 break;
1778 case VARYING:
1779 fprintf (dump_file, "VARYING");
1780 break;
1781 default:;
1782 }
1783 fprintf (dump_file, "\n");
1784 }
1785
1786 /* If the statement is likely to have a CONSTANT result, then try
1787 to fold the statement to determine the constant value. */
1788 /* FIXME. This is the only place that we call ccp_fold.
1789 Since likely_value never returns CONSTANT for calls, we will
1790 not attempt to fold them, including builtins that may profit. */
1791 if (likelyvalue == CONSTANT)
1792 {
1793 fold_defer_overflow_warnings ();
1794 simplified = ccp_fold (stmt);
1795 if (simplified
1796 && TREE_CODE (simplified) == SSA_NAME)
1797 {
1798 /* We may not use values of something that may be simulated again,
1799 see valueize_op_1. */
1800 if (SSA_NAME_IS_DEFAULT_DEF (simplified)
1801 || ! prop_simulate_again_p (SSA_NAME_DEF_STMT (simplified)))
1802 {
1803 ccp_prop_value_t *val = get_value (simplified);
1804 if (val && val->lattice_val != VARYING)
1805 {
1806 fold_undefer_overflow_warnings (true, stmt, 0);
1807 return *val;
1808 }
1809 }
1810 else
1811 /* We may also not place a non-valueized copy in the lattice
1812 as that might become stale if we never re-visit this stmt. */
1813 simplified = NULL_TREE;
1814 }
1815 is_constant = simplified && is_gimple_min_invariant (simplified);
1816 fold_undefer_overflow_warnings (is_constant, stmt, 0);
1817 if (is_constant)
1818 {
1819 /* The statement produced a constant value. */
1820 val.lattice_val = CONSTANT;
1821 val.value = simplified;
1822 val.mask = 0;
1823 return val;
1824 }
1825 }
1826 /* If the statement is likely to have a VARYING result, then do not
1827 bother folding the statement. */
1828 else if (likelyvalue == VARYING)
1829 {
1830 enum gimple_code code = gimple_code (stmt);
1831 if (code == GIMPLE_ASSIGN)
1832 {
1833 enum tree_code subcode = gimple_assign_rhs_code (stmt);
1834
1835 /* Other cases cannot satisfy is_gimple_min_invariant
1836 without folding. */
1837 if (get_gimple_rhs_class (subcode) == GIMPLE_SINGLE_RHS)
1838 simplified = gimple_assign_rhs1 (stmt);
1839 }
1840 else if (code == GIMPLE_SWITCH)
1841 simplified = gimple_switch_index (as_a <gswitch *> (stmt));
1842 else
1843 /* These cannot satisfy is_gimple_min_invariant without folding. */
1844 gcc_assert (code == GIMPLE_CALL || code == GIMPLE_COND);
1845 is_constant = simplified && is_gimple_min_invariant (simplified);
1846 if (is_constant)
1847 {
1848 /* The statement produced a constant value. */
1849 val.lattice_val = CONSTANT;
1850 val.value = simplified;
1851 val.mask = 0;
1852 }
1853 }
1854 /* If the statement result is likely UNDEFINED, make it so. */
1855 else if (likelyvalue == UNDEFINED)
1856 {
1857 val.lattice_val = UNDEFINED;
1858 val.value = NULL_TREE;
1859 val.mask = 0;
1860 return val;
1861 }
1862
1863 /* Resort to simplification for bitwise tracking. */
1864 if (flag_tree_bit_ccp
1865 && (likelyvalue == CONSTANT || is_gimple_call (stmt)
1866 || (gimple_assign_single_p (stmt)
1867 && gimple_assign_rhs_code (stmt) == ADDR_EXPR))
1868 && !is_constant)
1869 {
1870 enum gimple_code code = gimple_code (stmt);
1871 val.lattice_val = VARYING;
1872 val.value = NULL_TREE;
1873 val.mask = -1;
1874 if (code == GIMPLE_ASSIGN)
1875 {
1876 enum tree_code subcode = gimple_assign_rhs_code (stmt);
1877 tree rhs1 = gimple_assign_rhs1 (stmt);
1878 tree lhs = gimple_assign_lhs (stmt);
1879 if ((INTEGRAL_TYPE_P (TREE_TYPE (lhs))
1880 || POINTER_TYPE_P (TREE_TYPE (lhs)))
1881 && (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
1882 || POINTER_TYPE_P (TREE_TYPE (rhs1))))
1883 switch (get_gimple_rhs_class (subcode))
1884 {
1885 case GIMPLE_SINGLE_RHS:
1886 val = get_value_for_expr (rhs1, true);
1887 break;
1888
1889 case GIMPLE_UNARY_RHS:
1890 val = bit_value_unop (subcode, TREE_TYPE (lhs), rhs1);
1891 break;
1892
1893 case GIMPLE_BINARY_RHS:
1894 val = bit_value_binop (subcode, TREE_TYPE (lhs), rhs1,
1895 gimple_assign_rhs2 (stmt));
1896 break;
1897
1898 default:;
1899 }
1900 }
1901 else if (code == GIMPLE_COND)
1902 {
1903 enum tree_code code = gimple_cond_code (stmt);
1904 tree rhs1 = gimple_cond_lhs (stmt);
1905 tree rhs2 = gimple_cond_rhs (stmt);
1906 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
1907 || POINTER_TYPE_P (TREE_TYPE (rhs1)))
1908 val = bit_value_binop (code, TREE_TYPE (rhs1), rhs1, rhs2);
1909 }
1910 else if (gimple_call_builtin_p (stmt, BUILT_IN_NORMAL))
1911 {
1912 tree fndecl = gimple_call_fndecl (stmt);
1913 switch (DECL_FUNCTION_CODE (fndecl))
1914 {
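	      /* For the allocation builtins handled first below, the result is
		 modelled as the constant pointer 0 together with a mask whose
		 clear low bits encode the known alignment: e.g. with a
		 MALLOC_ABI_ALIGNMENT of 128 bits the lattice value is value 0,
		 mask ~0xf, i.e. the low four bits are known to be zero and all
		 higher bits are unknown (illustrative; the actual alignment is
		 target-specific).  */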
1915 case BUILT_IN_MALLOC:
1916 case BUILT_IN_REALLOC:
1917 case BUILT_IN_CALLOC:
1918 case BUILT_IN_STRDUP:
1919 case BUILT_IN_STRNDUP:
1920 val.lattice_val = CONSTANT;
1921 val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0);
1922 val.mask = ~((HOST_WIDE_INT) MALLOC_ABI_ALIGNMENT
1923 / BITS_PER_UNIT - 1);
1924 break;
1925
1926 CASE_BUILT_IN_ALLOCA:
1927 align = (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_ALLOCA
1928 ? BIGGEST_ALIGNMENT
1929 : TREE_INT_CST_LOW (gimple_call_arg (stmt, 1)));
1930 val.lattice_val = CONSTANT;
1931 val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0);
1932 val.mask = ~((HOST_WIDE_INT) align / BITS_PER_UNIT - 1);
1933 break;
1934
1935 /* These builtins return their first argument, unmodified. */
1936 case BUILT_IN_MEMCPY:
1937 case BUILT_IN_MEMMOVE:
1938 case BUILT_IN_MEMSET:
1939 case BUILT_IN_STRCPY:
1940 case BUILT_IN_STRNCPY:
1941 case BUILT_IN_MEMCPY_CHK:
1942 case BUILT_IN_MEMMOVE_CHK:
1943 case BUILT_IN_MEMSET_CHK:
1944 case BUILT_IN_STRCPY_CHK:
1945 case BUILT_IN_STRNCPY_CHK:
1946 val = get_value_for_expr (gimple_call_arg (stmt, 0), true);
1947 break;
1948
1949 case BUILT_IN_ASSUME_ALIGNED:
1950 val = bit_value_assume_aligned (stmt, NULL_TREE, val, false);
1951 break;
1952
1953 case BUILT_IN_ALIGNED_ALLOC:
1954 {
1955 tree align = get_constant_value (gimple_call_arg (stmt, 0));
1956 if (align
1957 && tree_fits_uhwi_p (align))
1958 {
1959 unsigned HOST_WIDE_INT aligni = tree_to_uhwi (align);
1960 if (aligni > 1
1961 		     /* align must be a power of two */
1962 && (aligni & (aligni - 1)) == 0)
1963 {
1964 val.lattice_val = CONSTANT;
1965 val.value = build_int_cst (ptr_type_node, 0);
1966 val.mask = -aligni;
1967 }
1968 }
1969 break;
1970 }
1971
1972 case BUILT_IN_BSWAP16:
1973 case BUILT_IN_BSWAP32:
1974 case BUILT_IN_BSWAP64:
1975 val = get_value_for_expr (gimple_call_arg (stmt, 0), true);
1976 if (val.lattice_val == UNDEFINED)
1977 break;
1978 else if (val.lattice_val == CONSTANT
1979 && val.value
1980 && TREE_CODE (val.value) == INTEGER_CST)
1981 {
1982 tree type = TREE_TYPE (gimple_call_lhs (stmt));
1983 int prec = TYPE_PRECISION (type);
1984 wide_int wval = wi::to_wide (val.value);
1985 val.value
1986 = wide_int_to_tree (type,
1987 wide_int::from (wval, prec,
1988 UNSIGNED).bswap ());
1989 val.mask
1990 = widest_int::from (wide_int::from (val.mask, prec,
1991 UNSIGNED).bswap (),
1992 UNSIGNED);
1993 if (wi::sext (val.mask, prec) != -1)
1994 break;
1995 }
1996 val.lattice_val = VARYING;
1997 val.value = NULL_TREE;
1998 val.mask = -1;
1999 break;
2000
2001 default:;
2002 }
2003 }
2004 if (is_gimple_call (stmt) && gimple_call_lhs (stmt))
2005 {
2006 tree fntype = gimple_call_fntype (stmt);
2007 if (fntype)
2008 {
2009 tree attrs = lookup_attribute ("assume_aligned",
2010 TYPE_ATTRIBUTES (fntype));
2011 if (attrs)
2012 val = bit_value_assume_aligned (stmt, attrs, val, false);
2013 attrs = lookup_attribute ("alloc_align",
2014 TYPE_ATTRIBUTES (fntype));
2015 if (attrs)
2016 val = bit_value_assume_aligned (stmt, attrs, val, true);
2017 }
2018 }
2019 is_constant = (val.lattice_val == CONSTANT);
2020 }
2021
2022 if (flag_tree_bit_ccp
2023 && ((is_constant && TREE_CODE (val.value) == INTEGER_CST)
2024 || !is_constant)
2025 && gimple_get_lhs (stmt)
2026 && TREE_CODE (gimple_get_lhs (stmt)) == SSA_NAME)
2027 {
2028 tree lhs = gimple_get_lhs (stmt);
2029 wide_int nonzero_bits = get_nonzero_bits (lhs);
2030 if (nonzero_bits != -1)
2031 {
2032 if (!is_constant)
2033 {
2034 val.lattice_val = CONSTANT;
2035 val.value = build_zero_cst (TREE_TYPE (lhs));
2036 val.mask = extend_mask (nonzero_bits, TYPE_SIGN (TREE_TYPE (lhs)));
2037 is_constant = true;
2038 }
2039 else
2040 {
2041 if (wi::bit_and_not (wi::to_wide (val.value), nonzero_bits) != 0)
2042 val.value = wide_int_to_tree (TREE_TYPE (lhs),
2043 nonzero_bits
2044 & wi::to_wide (val.value));
2045 if (nonzero_bits == 0)
2046 val.mask = 0;
2047 else
2048 val.mask = val.mask & extend_mask (nonzero_bits,
2049 TYPE_SIGN (TREE_TYPE (lhs)));
2050 }
2051 }
2052 }
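      /* Worked example for the intersection above (illustrative): if bit-CCP
	 computed value 0x2 with mask 0xc (bits 2 and 3 unknown) and the
	 recorded nonzero bits of LHS are 0x7 (bit 3 and above known zero),
	 the mask is narrowed to 0x4, i.e. bit 3 becomes known zero and only
	 bit 2 remains unknown.  */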
2053
2054 /* The statement produced a nonconstant value. */
2055 if (!is_constant)
2056 {
2057 /* The statement produced a copy. */
2058 if (simplified && TREE_CODE (simplified) == SSA_NAME
2059 && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (simplified))
2060 {
2061 val.lattice_val = CONSTANT;
2062 val.value = simplified;
2063 val.mask = -1;
2064 }
2065 /* The statement is VARYING. */
2066 else
2067 {
2068 val.lattice_val = VARYING;
2069 val.value = NULL_TREE;
2070 val.mask = -1;
2071 }
2072 }
2073
2074 return val;
2075 }
2076
2077 typedef hash_table<nofree_ptr_hash<gimple> > gimple_htab;
2078
2079 /* Given a BUILT_IN_STACK_SAVE value SAVED_VAL, insert a clobber of VAR before
2080 each matching BUILT_IN_STACK_RESTORE. Mark visited phis in VISITED. */
2081
2082 static void
2083 insert_clobber_before_stack_restore (tree saved_val, tree var,
2084 gimple_htab **visited)
2085 {
2086 gimple *stmt;
2087 gassign *clobber_stmt;
2088 tree clobber;
2089 imm_use_iterator iter;
2090 gimple_stmt_iterator i;
2091 gimple **slot;
2092
2093 FOR_EACH_IMM_USE_STMT (stmt, iter, saved_val)
2094 if (gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
2095 {
2096 clobber = build_constructor (TREE_TYPE (var),
2097 NULL);
2098 TREE_THIS_VOLATILE (clobber) = 1;
2099 clobber_stmt = gimple_build_assign (var, clobber);
2100
2101 i = gsi_for_stmt (stmt);
2102 gsi_insert_before (&i, clobber_stmt, GSI_SAME_STMT);
2103 }
2104 else if (gimple_code (stmt) == GIMPLE_PHI)
2105 {
2106 if (!*visited)
2107 *visited = new gimple_htab (10);
2108
2109 slot = (*visited)->find_slot (stmt, INSERT);
2110 if (*slot != NULL)
2111 continue;
2112
2113 *slot = stmt;
2114 insert_clobber_before_stack_restore (gimple_phi_result (stmt), var,
2115 visited);
2116 }
2117 else if (gimple_assign_ssa_name_copy_p (stmt))
2118 insert_clobber_before_stack_restore (gimple_assign_lhs (stmt), var,
2119 visited);
2120 else
2121 gcc_assert (is_gimple_debug (stmt));
2122 }
2123
2124 /* Advance the iterator to the previous non-debug gimple statement in the same
2125 or dominating basic block. */
2126
2127 static inline void
2128 gsi_prev_dom_bb_nondebug (gimple_stmt_iterator *i)
2129 {
2130 basic_block dom;
2131
2132 gsi_prev_nondebug (i);
2133 while (gsi_end_p (*i))
2134 {
2135 dom = get_immediate_dominator (CDI_DOMINATORS, i->bb);
2136 if (dom == NULL || dom == ENTRY_BLOCK_PTR_FOR_FN (cfun))
2137 return;
2138
2139 *i = gsi_last_bb (dom);
2140 }
2141 }
2142
2143 /* Find a BUILT_IN_STACK_SAVE dominating gsi_stmt (I), and insert
2144 a clobber of VAR before each matching BUILT_IN_STACK_RESTORE.
2145
2146    It is possible that BUILT_IN_STACK_SAVE cannot be found in a dominator when a
2147 previous pass (such as DOM) duplicated it along multiple paths to a BB. In
2148 that case the function gives up without inserting the clobbers. */
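/* Illustrative sketch (hypothetical GIMPLE, names made up):

     saved_1 = __builtin_stack_save ();
     ptr_2 = __builtin_alloca_with_align (16, 64);   <-- folded to &D.1234
     ...
     __builtin_stack_restore (saved_1);

   Once fold_builtin_alloca_with_align has turned the alloca into the address
   of the fixed-size array D.1234, this walks back to the dominating stack
   save and inserts "D.1234 ={v} {CLOBBER};" before each matching restore, so
   the array's lifetime still ends where the alloca'd memory was released.  */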
2149
2150 static void
2151 insert_clobbers_for_var (gimple_stmt_iterator i, tree var)
2152 {
2153 gimple *stmt;
2154 tree saved_val;
2155 gimple_htab *visited = NULL;
2156
2157 for (; !gsi_end_p (i); gsi_prev_dom_bb_nondebug (&i))
2158 {
2159 stmt = gsi_stmt (i);
2160
2161 if (!gimple_call_builtin_p (stmt, BUILT_IN_STACK_SAVE))
2162 continue;
2163
2164 saved_val = gimple_call_lhs (stmt);
2165 if (saved_val == NULL_TREE)
2166 continue;
2167
2168 insert_clobber_before_stack_restore (saved_val, var, &visited);
2169 break;
2170 }
2171
2172 delete visited;
2173 }
2174
2175 /* Detects a __builtin_alloca_with_align with a constant size argument.  If
2176    found, declares a fixed-size array of that size and returns its address;
2177    otherwise returns NULL_TREE.  */
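/* A minimal sketch of the folding (illustrative identifiers only):

     ptr_1 = __builtin_alloca_with_align (16, 64);

   becomes, when the constant size is below the large-stack-frame threshold,

     unsigned char D.1234[16];   <-- DECL_ALIGN taken from the second argument
     ptr_1 = &D.1234;  */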
2178
2179 static tree
2180 fold_builtin_alloca_with_align (gimple *stmt)
2181 {
2182 unsigned HOST_WIDE_INT size, threshold, n_elem;
2183 tree lhs, arg, block, var, elem_type, array_type;
2184
2185 /* Get lhs. */
2186 lhs = gimple_call_lhs (stmt);
2187 if (lhs == NULL_TREE)
2188 return NULL_TREE;
2189
2190 /* Detect constant argument. */
2191 arg = get_constant_value (gimple_call_arg (stmt, 0));
2192 if (arg == NULL_TREE
2193 || TREE_CODE (arg) != INTEGER_CST
2194 || !tree_fits_uhwi_p (arg))
2195 return NULL_TREE;
2196
2197 size = tree_to_uhwi (arg);
2198
2199 /* Heuristic: don't fold large allocas. */
2200 threshold = (unsigned HOST_WIDE_INT)PARAM_VALUE (PARAM_LARGE_STACK_FRAME);
2201 /* In case the alloca is located at function entry, it has the same lifetime
2202 as a declared array, so we allow a larger size. */
2203 block = gimple_block (stmt);
2204 if (!(cfun->after_inlining
2205 && block
2206 && TREE_CODE (BLOCK_SUPERCONTEXT (block)) == FUNCTION_DECL))
2207 threshold /= 10;
2208 if (size > threshold)
2209 return NULL_TREE;
2210
2211 /* We have to be able to move points-to info. We used to assert
2212      that we can, but IPA PTA might end up with two UIDs here
2213 as it might need to handle more than one instance being
2214 live at the same time. Instead of trying to detect this case
2215 (using the first UID would be OK) just give up for now. */
2216 struct ptr_info_def *pi = SSA_NAME_PTR_INFO (lhs);
2217 unsigned uid = 0;
2218 if (pi != NULL
2219 && !pi->pt.anything
2220 && !pt_solution_singleton_or_null_p (&pi->pt, &uid))
2221 return NULL_TREE;
2222
2223 /* Declare array. */
2224 elem_type = build_nonstandard_integer_type (BITS_PER_UNIT, 1);
2225 n_elem = size * 8 / BITS_PER_UNIT;
2226 array_type = build_array_type_nelts (elem_type, n_elem);
2227 var = create_tmp_var (array_type);
2228 SET_DECL_ALIGN (var, TREE_INT_CST_LOW (gimple_call_arg (stmt, 1)));
2229 if (uid != 0)
2230 SET_DECL_PT_UID (var, uid);
2231
2232 /* Fold alloca to the address of the array. */
2233 return fold_convert (TREE_TYPE (lhs), build_fold_addr_expr (var));
2234 }
2235
2236 /* Fold the stmt at *GSI with CCP specific information that propagating
2237 and regular folding does not catch. */
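/* For example (illustrative), when evaluate_stmt determines that the
   predicate of

     if (x_1 != 0) goto <bb 3>; else goto <bb 4>;

   evaluates to the constant 0 with a fully known mask, the condition is
   rewritten to false here even if the constant could not be propagated into
   every use of x_1 for type reasons.  */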
2238
2239 bool
2240 ccp_folder::fold_stmt (gimple_stmt_iterator *gsi)
2241 {
2242 gimple *stmt = gsi_stmt (*gsi);
2243
2244 switch (gimple_code (stmt))
2245 {
2246 case GIMPLE_COND:
2247 {
2248 gcond *cond_stmt = as_a <gcond *> (stmt);
2249 ccp_prop_value_t val;
2250 /* Statement evaluation will handle type mismatches in constants
2251 more gracefully than the final propagation. This allows us to
2252 fold more conditionals here. */
2253 val = evaluate_stmt (stmt);
2254 if (val.lattice_val != CONSTANT
2255 || val.mask != 0)
2256 return false;
2257
2258 if (dump_file)
2259 {
2260 fprintf (dump_file, "Folding predicate ");
2261 print_gimple_expr (dump_file, stmt, 0);
2262 fprintf (dump_file, " to ");
2263 print_generic_expr (dump_file, val.value);
2264 fprintf (dump_file, "\n");
2265 }
2266
2267 if (integer_zerop (val.value))
2268 gimple_cond_make_false (cond_stmt);
2269 else
2270 gimple_cond_make_true (cond_stmt);
2271
2272 return true;
2273 }
2274
2275 case GIMPLE_CALL:
2276 {
2277 tree lhs = gimple_call_lhs (stmt);
2278 int flags = gimple_call_flags (stmt);
2279 tree val;
2280 tree argt;
2281 bool changed = false;
2282 unsigned i;
2283
2284 	/* If the call was folded into a constant, make sure it goes
2285 away even if we cannot propagate into all uses because of
2286 type issues. */
2287 if (lhs
2288 && TREE_CODE (lhs) == SSA_NAME
2289 && (val = get_constant_value (lhs))
2290 /* Don't optimize away calls that have side-effects. */
2291 && (flags & (ECF_CONST|ECF_PURE)) != 0
2292 && (flags & ECF_LOOPING_CONST_OR_PURE) == 0)
2293 {
2294 tree new_rhs = unshare_expr (val);
2295 bool res;
2296 if (!useless_type_conversion_p (TREE_TYPE (lhs),
2297 TREE_TYPE (new_rhs)))
2298 new_rhs = fold_convert (TREE_TYPE (lhs), new_rhs);
2299 res = update_call_from_tree (gsi, new_rhs);
2300 gcc_assert (res);
2301 return true;
2302 }
2303
2304 /* Internal calls provide no argument types, so the extra laxity
2305 for normal calls does not apply. */
2306 if (gimple_call_internal_p (stmt))
2307 return false;
2308
2309 /* The heuristic of fold_builtin_alloca_with_align differs before and
2310 	 after inlining, so we don't require folding to have changed the arg
2311 	 into a constant; it just has to be constant.  */
2312 if (gimple_call_builtin_p (stmt, BUILT_IN_ALLOCA_WITH_ALIGN)
2313 || gimple_call_builtin_p (stmt, BUILT_IN_ALLOCA_WITH_ALIGN_AND_MAX))
2314 {
2315 tree new_rhs = fold_builtin_alloca_with_align (stmt);
2316 if (new_rhs)
2317 {
2318 bool res = update_call_from_tree (gsi, new_rhs);
2319 tree var = TREE_OPERAND (TREE_OPERAND (new_rhs, 0),0);
2320 gcc_assert (res);
2321 insert_clobbers_for_var (*gsi, var);
2322 return true;
2323 }
2324 }
2325
2326 /* If there's no extra info from an assume_aligned call,
2327 	 drop it so it doesn't act as an otherwise useless dataflow
2328 barrier. */
2329 if (gimple_call_builtin_p (stmt, BUILT_IN_ASSUME_ALIGNED))
2330 {
2331 tree ptr = gimple_call_arg (stmt, 0);
2332 ccp_prop_value_t ptrval = get_value_for_expr (ptr, true);
2333 if (ptrval.lattice_val == CONSTANT
2334 && TREE_CODE (ptrval.value) == INTEGER_CST
2335 && ptrval.mask != 0)
2336 {
2337 ccp_prop_value_t val
2338 = bit_value_assume_aligned (stmt, NULL_TREE, ptrval, false);
2339 unsigned int ptralign = least_bit_hwi (ptrval.mask.to_uhwi ());
2340 unsigned int align = least_bit_hwi (val.mask.to_uhwi ());
2341 if (ptralign == align
2342 && ((TREE_INT_CST_LOW (ptrval.value) & (align - 1))
2343 == (TREE_INT_CST_LOW (val.value) & (align - 1))))
2344 {
2345 bool res = update_call_from_tree (gsi, ptr);
2346 gcc_assert (res);
2347 return true;
2348 }
2349 }
2350 }
2351
2352 /* Propagate into the call arguments. Compared to replace_uses_in
2353 this can use the argument slot types for type verification
2354 instead of the current argument type. We also can safely
2355 drop qualifiers here as we are dealing with constants anyway. */
2356 argt = TYPE_ARG_TYPES (gimple_call_fntype (stmt));
2357 for (i = 0; i < gimple_call_num_args (stmt) && argt;
2358 ++i, argt = TREE_CHAIN (argt))
2359 {
2360 tree arg = gimple_call_arg (stmt, i);
2361 if (TREE_CODE (arg) == SSA_NAME
2362 && (val = get_constant_value (arg))
2363 && useless_type_conversion_p
2364 (TYPE_MAIN_VARIANT (TREE_VALUE (argt)),
2365 TYPE_MAIN_VARIANT (TREE_TYPE (val))))
2366 {
2367 gimple_call_set_arg (stmt, i, unshare_expr (val));
2368 changed = true;
2369 }
2370 }
2371
2372 return changed;
2373 }
2374
2375 case GIMPLE_ASSIGN:
2376 {
2377 tree lhs = gimple_assign_lhs (stmt);
2378 tree val;
2379
2380 	/* If we have a load that turned out to be constant, replace it
2381 as we cannot propagate into all uses in all cases. */
2382 if (gimple_assign_single_p (stmt)
2383 && TREE_CODE (lhs) == SSA_NAME
2384 && (val = get_constant_value (lhs)))
2385 {
2386 tree rhs = unshare_expr (val);
2387 if (!useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (rhs)))
2388 rhs = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (lhs), rhs);
2389 gimple_assign_set_rhs_from_tree (gsi, rhs);
2390 return true;
2391 }
2392
2393 return false;
2394 }
2395
2396 default:
2397 return false;
2398 }
2399 }
2400
2401 /* Visit the assignment statement STMT. Set the value of its LHS to the
2402 value computed by the RHS and store LHS in *OUTPUT_P. If STMT
2403 creates virtual definitions, set the value of each new name to that
2404 of the RHS (if we can derive a constant out of the RHS).
2405 Value-returning call statements also perform an assignment, and
2406 are handled here. */
2407
2408 static enum ssa_prop_result
2409 visit_assignment (gimple *stmt, tree *output_p)
2410 {
2411 ccp_prop_value_t val;
2412 enum ssa_prop_result retval = SSA_PROP_NOT_INTERESTING;
2413
2414 tree lhs = gimple_get_lhs (stmt);
2415 if (TREE_CODE (lhs) == SSA_NAME)
2416 {
2417 /* Evaluate the statement, which could be
2418 either a GIMPLE_ASSIGN or a GIMPLE_CALL. */
2419 val = evaluate_stmt (stmt);
2420
2421 /* If STMT is an assignment to an SSA_NAME, we only have one
2422 value to set. */
2423 if (set_lattice_value (lhs, &val))
2424 {
2425 *output_p = lhs;
2426 if (val.lattice_val == VARYING)
2427 retval = SSA_PROP_VARYING;
2428 else
2429 retval = SSA_PROP_INTERESTING;
2430 }
2431 }
2432
2433 return retval;
2434 }
2435
2436
2437 /* Visit the conditional statement STMT. Return SSA_PROP_INTERESTING
2438 if it can determine which edge will be taken. Otherwise, return
2439 SSA_PROP_VARYING. */
2440
2441 static enum ssa_prop_result
2442 visit_cond_stmt (gimple *stmt, edge *taken_edge_p)
2443 {
2444 ccp_prop_value_t val;
2445 basic_block block;
2446
2447 block = gimple_bb (stmt);
2448 val = evaluate_stmt (stmt);
2449 if (val.lattice_val != CONSTANT
2450 || val.mask != 0)
2451 return SSA_PROP_VARYING;
2452
2453 /* Find which edge out of the conditional block will be taken and add it
2454 to the worklist. If no single edge can be determined statically,
2455 return SSA_PROP_VARYING to feed all the outgoing edges to the
2456 propagation engine. */
2457 *taken_edge_p = find_taken_edge (block, val.value);
2458 if (*taken_edge_p)
2459 return SSA_PROP_INTERESTING;
2460 else
2461 return SSA_PROP_VARYING;
2462 }
2463
2464
2465 /* Evaluate statement STMT. If the statement produces an output value and
2466 its evaluation changes the lattice value of its output, return
2467 SSA_PROP_INTERESTING and set *OUTPUT_P to the SSA_NAME holding the
2468 output value.
2469
2470 If STMT is a conditional branch and we can determine its truth
2471 value, set *TAKEN_EDGE_P accordingly. If STMT produces a varying
2472 value, return SSA_PROP_VARYING. */
2473
2474 enum ssa_prop_result
2475 ccp_propagate::visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
2476 {
2477 tree def;
2478 ssa_op_iter iter;
2479
2480 if (dump_file && (dump_flags & TDF_DETAILS))
2481 {
2482 fprintf (dump_file, "\nVisiting statement:\n");
2483 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
2484 }
2485
2486 switch (gimple_code (stmt))
2487 {
2488 case GIMPLE_ASSIGN:
2489 /* If the statement is an assignment that produces a single
2490 output value, evaluate its RHS to see if the lattice value of
2491 its output has changed. */
2492 return visit_assignment (stmt, output_p);
2493
2494 case GIMPLE_CALL:
2495 /* A value-returning call also performs an assignment. */
2496 if (gimple_call_lhs (stmt) != NULL_TREE)
2497 return visit_assignment (stmt, output_p);
2498 break;
2499
2500 case GIMPLE_COND:
2501 case GIMPLE_SWITCH:
2502 /* If STMT is a conditional branch, see if we can determine
2503 which branch will be taken. */
2504 /* FIXME. It appears that we should be able to optimize
2505 computed GOTOs here as well. */
2506 return visit_cond_stmt (stmt, taken_edge_p);
2507
2508 default:
2509 break;
2510 }
2511
2512 /* Any other kind of statement is not interesting for constant
2513 propagation and, therefore, not worth simulating. */
2514 if (dump_file && (dump_flags & TDF_DETAILS))
2515 fprintf (dump_file, "No interesting values produced. Marked VARYING.\n");
2516
2517 /* Definitions made by statements other than assignments to
2518 SSA_NAMEs represent unknown modifications to their outputs.
2519 Mark them VARYING. */
2520 FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
2521 set_value_varying (def);
2522
2523 return SSA_PROP_VARYING;
2524 }
2525
2526
2527 /* Main entry point for SSA Conditional Constant Propagation. If NONZERO_P,
2528 record nonzero bits. */
2529
2530 static unsigned int
2531 do_ssa_ccp (bool nonzero_p)
2532 {
2533 unsigned int todo = 0;
2534 calculate_dominance_info (CDI_DOMINATORS);
2535
2536 ccp_initialize ();
2537 class ccp_propagate ccp_propagate;
2538 ccp_propagate.ssa_propagate ();
2539 if (ccp_finalize (nonzero_p || flag_ipa_bit_cp))
2540 {
2541 todo = (TODO_cleanup_cfg | TODO_update_ssa);
2542
2543 /* ccp_finalize does not preserve loop-closed ssa. */
2544 loops_state_clear (LOOP_CLOSED_SSA);
2545 }
2546
2547 free_dominance_info (CDI_DOMINATORS);
2548 return todo;
2549 }
2550
2551
2552 namespace {
2553
2554 const pass_data pass_data_ccp =
2555 {
2556 GIMPLE_PASS, /* type */
2557 "ccp", /* name */
2558 OPTGROUP_NONE, /* optinfo_flags */
2559 TV_TREE_CCP, /* tv_id */
2560 ( PROP_cfg | PROP_ssa ), /* properties_required */
2561 0, /* properties_provided */
2562 0, /* properties_destroyed */
2563 0, /* todo_flags_start */
2564 TODO_update_address_taken, /* todo_flags_finish */
2565 };
2566
2567 class pass_ccp : public gimple_opt_pass
2568 {
2569 public:
2570 pass_ccp (gcc::context *ctxt)
2571 : gimple_opt_pass (pass_data_ccp, ctxt), nonzero_p (false)
2572 {}
2573
2574 /* opt_pass methods: */
2575 opt_pass * clone () { return new pass_ccp (m_ctxt); }
2576 void set_pass_param (unsigned int n, bool param)
2577 {
2578 gcc_assert (n == 0);
2579 nonzero_p = param;
2580 }
2581 virtual bool gate (function *) { return flag_tree_ccp != 0; }
2582 virtual unsigned int execute (function *) { return do_ssa_ccp (nonzero_p); }
2583
2584 private:
2585 /* Determines whether the pass instance records nonzero bits. */
2586 bool nonzero_p;
2587 }; // class pass_ccp
2588
2589 } // anon namespace
2590
2591 gimple_opt_pass *
2592 make_pass_ccp (gcc::context *ctxt)
2593 {
2594 return new pass_ccp (ctxt);
2595 }
2596
2597
2598
2599 /* Try to optimize out __builtin_stack_restore. Optimize it out
2600 if there is another __builtin_stack_restore in the same basic
2601 block and no calls or ASM_EXPRs are in between, or if this block's
2602 only outgoing edge is to EXIT_BLOCK and there are no calls or
2603 ASM_EXPRs after this __builtin_stack_restore. */
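/* Sketch of the first case (hypothetical GIMPLE):

     __builtin_stack_restore (saved_1);   <-- removable
     x_2 = y_3 + 1;                       (no calls or asm in between)
     __builtin_stack_restore (saved_4);

   The earlier restore has no observable effect: nothing between the two
   calls can allocate stack or inspect the stack pointer, and the later
   restore takes effect regardless, so the first call is replaced by a
   no-op.  */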
2604
2605 static tree
2606 optimize_stack_restore (gimple_stmt_iterator i)
2607 {
2608 tree callee;
2609 gimple *stmt;
2610
2611 basic_block bb = gsi_bb (i);
2612 gimple *call = gsi_stmt (i);
2613
2614 if (gimple_code (call) != GIMPLE_CALL
2615 || gimple_call_num_args (call) != 1
2616 || TREE_CODE (gimple_call_arg (call, 0)) != SSA_NAME
2617 || !POINTER_TYPE_P (TREE_TYPE (gimple_call_arg (call, 0))))
2618 return NULL_TREE;
2619
2620 for (gsi_next (&i); !gsi_end_p (i); gsi_next (&i))
2621 {
2622 stmt = gsi_stmt (i);
2623 if (gimple_code (stmt) == GIMPLE_ASM)
2624 return NULL_TREE;
2625 if (gimple_code (stmt) != GIMPLE_CALL)
2626 continue;
2627
2628 callee = gimple_call_fndecl (stmt);
2629 if (!callee
2630 || !fndecl_built_in_p (callee, BUILT_IN_NORMAL)
2631 /* All regular builtins are ok, just obviously not alloca. */
2632 || ALLOCA_FUNCTION_CODE_P (DECL_FUNCTION_CODE (callee)))
2633 return NULL_TREE;
2634
2635 if (fndecl_built_in_p (callee, BUILT_IN_STACK_RESTORE))
2636 goto second_stack_restore;
2637 }
2638
2639 if (!gsi_end_p (i))
2640 return NULL_TREE;
2641
2642   /* Allow zero successors, or a single successor that is the exit block.  */
2643 switch (EDGE_COUNT (bb->succs))
2644 {
2645 case 0:
2646 break;
2647 case 1:
2648 if (single_succ_edge (bb)->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
2649 return NULL_TREE;
2650 break;
2651 default:
2652 return NULL_TREE;
2653 }
2654 second_stack_restore:
2655
2656 /* If there's exactly one use, then zap the call to __builtin_stack_save.
2657 If there are multiple uses, then the last one should remove the call.
2658 In any case, whether the call to __builtin_stack_save can be removed
2659 or not is irrelevant to removing the call to __builtin_stack_restore. */
2660 if (has_single_use (gimple_call_arg (call, 0)))
2661 {
2662 gimple *stack_save = SSA_NAME_DEF_STMT (gimple_call_arg (call, 0));
2663 if (is_gimple_call (stack_save))
2664 {
2665 callee = gimple_call_fndecl (stack_save);
2666 if (callee && fndecl_built_in_p (callee, BUILT_IN_STACK_SAVE))
2667 {
2668 gimple_stmt_iterator stack_save_gsi;
2669 tree rhs;
2670
2671 stack_save_gsi = gsi_for_stmt (stack_save);
2672 rhs = build_int_cst (TREE_TYPE (gimple_call_arg (call, 0)), 0);
2673 update_call_from_tree (&stack_save_gsi, rhs);
2674 }
2675 }
2676 }
2677
2678 /* No effect, so the statement will be deleted. */
2679 return integer_zero_node;
2680 }
2681
2682 /* If va_list type is a simple pointer and nothing special is needed,
2683 optimize __builtin_va_start (&ap, 0) into ap = __builtin_next_arg (0),
2684    optimize __builtin_va_end (&ap) away as a no-op, and turn
2685    __builtin_va_copy into a simple pointer assignment.  */
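/* On such targets the rewrites amount to (illustrative):

     __builtin_va_start (&ap, 0);   ->   ap = __builtin_next_arg (0);
     __builtin_va_copy (&d, s);     ->   d = s;
     __builtin_va_end (&ap);        ->   removed (folded to a no-op)  */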
2686
2687 static tree
2688 optimize_stdarg_builtin (gimple *call)
2689 {
2690 tree callee, lhs, rhs, cfun_va_list;
2691 bool va_list_simple_ptr;
2692 location_t loc = gimple_location (call);
2693
2694 callee = gimple_call_fndecl (call);
2695
2696 cfun_va_list = targetm.fn_abi_va_list (callee);
2697 va_list_simple_ptr = POINTER_TYPE_P (cfun_va_list)
2698 && (TREE_TYPE (cfun_va_list) == void_type_node
2699 || TREE_TYPE (cfun_va_list) == char_type_node);
2700
2701 switch (DECL_FUNCTION_CODE (callee))
2702 {
2703 case BUILT_IN_VA_START:
2704 if (!va_list_simple_ptr
2705 || targetm.expand_builtin_va_start != NULL
2706 || !builtin_decl_explicit_p (BUILT_IN_NEXT_ARG))
2707 return NULL_TREE;
2708
2709 if (gimple_call_num_args (call) != 2)
2710 return NULL_TREE;
2711
2712 lhs = gimple_call_arg (call, 0);
2713 if (!POINTER_TYPE_P (TREE_TYPE (lhs))
2714 || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs)))
2715 != TYPE_MAIN_VARIANT (cfun_va_list))
2716 return NULL_TREE;
2717
2718 lhs = build_fold_indirect_ref_loc (loc, lhs);
2719 rhs = build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_NEXT_ARG),
2720 1, integer_zero_node);
2721 rhs = fold_convert_loc (loc, TREE_TYPE (lhs), rhs);
2722 return build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, rhs);
2723
2724 case BUILT_IN_VA_COPY:
2725 if (!va_list_simple_ptr)
2726 return NULL_TREE;
2727
2728 if (gimple_call_num_args (call) != 2)
2729 return NULL_TREE;
2730
2731 lhs = gimple_call_arg (call, 0);
2732 if (!POINTER_TYPE_P (TREE_TYPE (lhs))
2733 || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs)))
2734 != TYPE_MAIN_VARIANT (cfun_va_list))
2735 return NULL_TREE;
2736
2737 lhs = build_fold_indirect_ref_loc (loc, lhs);
2738 rhs = gimple_call_arg (call, 1);
2739 if (TYPE_MAIN_VARIANT (TREE_TYPE (rhs))
2740 != TYPE_MAIN_VARIANT (cfun_va_list))
2741 return NULL_TREE;
2742
2743 rhs = fold_convert_loc (loc, TREE_TYPE (lhs), rhs);
2744 return build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, rhs);
2745
2746 case BUILT_IN_VA_END:
2747 /* No effect, so the statement will be deleted. */
2748 return integer_zero_node;
2749
2750 default:
2751 gcc_unreachable ();
2752 }
2753 }
2754
2755 /* Attempt to make the block of the __builtin_unreachable call at I unreachable
2756    by changing the incoming jumps.  Return true if at least one jump was changed.  */
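/* For example (hypothetical CFG): if <bb 5> starts with
   __builtin_unreachable () and is reached through the true edge of

     if (x_1 > 100) goto <bb 5>; else goto <bb 6>;

   the condition is forced to false, so <bb 5> really becomes unreachable
   and a later CFG cleanup can delete it.  */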
2757
2758 static bool
2759 optimize_unreachable (gimple_stmt_iterator i)
2760 {
2761 basic_block bb = gsi_bb (i);
2762 gimple_stmt_iterator gsi;
2763 gimple *stmt;
2764 edge_iterator ei;
2765 edge e;
2766 bool ret;
2767
2768 if (flag_sanitize & SANITIZE_UNREACHABLE)
2769 return false;
2770
2771 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2772 {
2773 stmt = gsi_stmt (gsi);
2774
2775 if (is_gimple_debug (stmt))
2776 continue;
2777
2778 if (glabel *label_stmt = dyn_cast <glabel *> (stmt))
2779 {
2780 /* Verify we do not need to preserve the label. */
2781 if (FORCED_LABEL (gimple_label_label (label_stmt)))
2782 return false;
2783
2784 continue;
2785 }
2786
2787 /* Only handle the case that __builtin_unreachable is the first statement
2788 in the block. We rely on DCE to remove stmts without side-effects
2789 before __builtin_unreachable. */
2790 if (gsi_stmt (gsi) != gsi_stmt (i))
2791 return false;
2792 }
2793
2794 ret = false;
2795 FOR_EACH_EDGE (e, ei, bb->preds)
2796 {
2797 gsi = gsi_last_bb (e->src);
2798 if (gsi_end_p (gsi))
2799 continue;
2800
2801 stmt = gsi_stmt (gsi);
2802 if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
2803 {
2804 if (e->flags & EDGE_TRUE_VALUE)
2805 gimple_cond_make_false (cond_stmt);
2806 else if (e->flags & EDGE_FALSE_VALUE)
2807 gimple_cond_make_true (cond_stmt);
2808 else
2809 gcc_unreachable ();
2810 update_stmt (cond_stmt);
2811 }
2812 else
2813 {
2814 	  /* TODO: handle other cases.  Note that unreachable switch case
2815 statements have already been removed. */
2816 continue;
2817 }
2818
2819 ret = true;
2820 }
2821
2822 return ret;
2823 }
2824
2825 /* Optimize
2826 mask_2 = 1 << cnt_1;
2827 _4 = __atomic_fetch_or_* (ptr_6, mask_2, _3);
2828 _5 = _4 & mask_2;
2829 to
2830 _4 = ATOMIC_BIT_TEST_AND_SET (ptr_6, cnt_1, 0, _3);
2831 _5 = _4;
2832 If _5 is only used in _5 != 0 or _5 == 0 comparisons, 1
2833 is passed instead of 0, and the builtin just returns a zero
2834 or 1 value instead of the actual bit.
2835 Similarly for __sync_fetch_and_or_* (without the ", _3" part
2836 in there), and/or if mask_2 is a power of 2 constant.
2837 Similarly for xor instead of or, use ATOMIC_BIT_TEST_AND_COMPLEMENT
2838 in that case. And similarly for and instead of or, except that
2839 the second argument to the builtin needs to be one's complement
2840 of the mask instead of mask. */
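/* At the source level the targeted pattern looks like (illustrative):

     unsigned old = __atomic_fetch_or (&word, 1u << bit, __ATOMIC_SEQ_CST);
     if (old & (1u << bit))
       ...

   which many targets can implement with a single atomic bit-test-and-set
   style instruction instead of a full fetch_or followed by a mask.  */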
2841
2842 static void
2843 optimize_atomic_bit_test_and (gimple_stmt_iterator *gsip,
2844 enum internal_fn fn, bool has_model_arg,
2845 bool after)
2846 {
2847 gimple *call = gsi_stmt (*gsip);
2848 tree lhs = gimple_call_lhs (call);
2849 use_operand_p use_p;
2850 gimple *use_stmt;
2851 tree mask, bit;
2852 optab optab;
2853
2854 if (!flag_inline_atomics
2855 || optimize_debug
2856 || !gimple_call_builtin_p (call, BUILT_IN_NORMAL)
2857 || !lhs
2858 || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs)
2859 || !single_imm_use (lhs, &use_p, &use_stmt)
2860 || !is_gimple_assign (use_stmt)
2861 || gimple_assign_rhs_code (use_stmt) != BIT_AND_EXPR
2862 || !gimple_vdef (call))
2863 return;
2864
2865 switch (fn)
2866 {
2867 case IFN_ATOMIC_BIT_TEST_AND_SET:
2868 optab = atomic_bit_test_and_set_optab;
2869 break;
2870 case IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT:
2871 optab = atomic_bit_test_and_complement_optab;
2872 break;
2873 case IFN_ATOMIC_BIT_TEST_AND_RESET:
2874 optab = atomic_bit_test_and_reset_optab;
2875 break;
2876 default:
2877 return;
2878 }
2879
2880 if (optab_handler (optab, TYPE_MODE (TREE_TYPE (lhs))) == CODE_FOR_nothing)
2881 return;
2882
2883 mask = gimple_call_arg (call, 1);
2884 tree use_lhs = gimple_assign_lhs (use_stmt);
2885 if (!use_lhs)
2886 return;
2887
2888 if (TREE_CODE (mask) == INTEGER_CST)
2889 {
2890 if (fn == IFN_ATOMIC_BIT_TEST_AND_RESET)
2891 mask = const_unop (BIT_NOT_EXPR, TREE_TYPE (mask), mask);
2892 mask = fold_convert (TREE_TYPE (lhs), mask);
2893 int ibit = tree_log2 (mask);
2894 if (ibit < 0)
2895 return;
2896 bit = build_int_cst (TREE_TYPE (lhs), ibit);
2897 }
2898 else if (TREE_CODE (mask) == SSA_NAME)
2899 {
2900 gimple *g = SSA_NAME_DEF_STMT (mask);
2901 if (fn == IFN_ATOMIC_BIT_TEST_AND_RESET)
2902 {
2903 if (!is_gimple_assign (g)
2904 || gimple_assign_rhs_code (g) != BIT_NOT_EXPR)
2905 return;
2906 mask = gimple_assign_rhs1 (g);
2907 if (TREE_CODE (mask) != SSA_NAME)
2908 return;
2909 g = SSA_NAME_DEF_STMT (mask);
2910 }
2911 if (!is_gimple_assign (g)
2912 || gimple_assign_rhs_code (g) != LSHIFT_EXPR
2913 || !integer_onep (gimple_assign_rhs1 (g)))
2914 return;
2915 bit = gimple_assign_rhs2 (g);
2916 }
2917 else
2918 return;
2919
2920 if (gimple_assign_rhs1 (use_stmt) == lhs)
2921 {
2922 if (!operand_equal_p (gimple_assign_rhs2 (use_stmt), mask, 0))
2923 return;
2924 }
2925 else if (gimple_assign_rhs2 (use_stmt) != lhs
2926 || !operand_equal_p (gimple_assign_rhs1 (use_stmt), mask, 0))
2927 return;
2928
2929 bool use_bool = true;
2930 bool has_debug_uses = false;
2931 imm_use_iterator iter;
2932 gimple *g;
2933
2934 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use_lhs))
2935 use_bool = false;
2936 FOR_EACH_IMM_USE_STMT (g, iter, use_lhs)
2937 {
2938 enum tree_code code = ERROR_MARK;
2939 tree op0 = NULL_TREE, op1 = NULL_TREE;
2940 if (is_gimple_debug (g))
2941 {
2942 has_debug_uses = true;
2943 continue;
2944 }
2945 else if (is_gimple_assign (g))
2946 switch (gimple_assign_rhs_code (g))
2947 {
2948 case COND_EXPR:
2949 op1 = gimple_assign_rhs1 (g);
2950 code = TREE_CODE (op1);
2951 op0 = TREE_OPERAND (op1, 0);
2952 op1 = TREE_OPERAND (op1, 1);
2953 break;
2954 case EQ_EXPR:
2955 case NE_EXPR:
2956 code = gimple_assign_rhs_code (g);
2957 op0 = gimple_assign_rhs1 (g);
2958 op1 = gimple_assign_rhs2 (g);
2959 break;
2960 default:
2961 break;
2962 }
2963 else if (gimple_code (g) == GIMPLE_COND)
2964 {
2965 code = gimple_cond_code (g);
2966 op0 = gimple_cond_lhs (g);
2967 op1 = gimple_cond_rhs (g);
2968 }
2969
2970 if ((code == EQ_EXPR || code == NE_EXPR)
2971 && op0 == use_lhs
2972 && integer_zerop (op1))
2973 {
2974 use_operand_p use_p;
2975 int n = 0;
2976 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2977 n++;
2978 if (n == 1)
2979 continue;
2980 }
2981
2982 use_bool = false;
2983 BREAK_FROM_IMM_USE_STMT (iter);
2984 }
2985
2986 tree new_lhs = make_ssa_name (TREE_TYPE (lhs));
2987 tree flag = build_int_cst (TREE_TYPE (lhs), use_bool);
2988 if (has_model_arg)
2989 g = gimple_build_call_internal (fn, 4, gimple_call_arg (call, 0),
2990 bit, flag, gimple_call_arg (call, 2));
2991 else
2992 g = gimple_build_call_internal (fn, 3, gimple_call_arg (call, 0),
2993 bit, flag);
2994 gimple_call_set_lhs (g, new_lhs);
2995 gimple_set_location (g, gimple_location (call));
2996 gimple_move_vops (g, call);
2997 bool throws = stmt_can_throw_internal (cfun, call);
2998 gimple_call_set_nothrow (as_a <gcall *> (g),
2999 gimple_call_nothrow_p (as_a <gcall *> (call)));
3000 gimple_stmt_iterator gsi = *gsip;
3001 gsi_insert_after (&gsi, g, GSI_NEW_STMT);
3002 edge e = NULL;
3003 if (throws)
3004 {
3005 maybe_clean_or_replace_eh_stmt (call, g);
3006 if (after || (use_bool && has_debug_uses))
3007 e = find_fallthru_edge (gsi_bb (gsi)->succs);
3008 }
3009 if (after)
3010 {
3011 /* The internal function returns the value of the specified bit
3012 before the atomic operation. If we are interested in the value
3013 	 of the specified bit after the atomic operation (this only makes
3014 	 sense for xor; otherwise the bit content is known at compile time),
3015 we need to invert the bit. */
3016 g = gimple_build_assign (make_ssa_name (TREE_TYPE (lhs)),
3017 BIT_XOR_EXPR, new_lhs,
3018 use_bool ? build_int_cst (TREE_TYPE (lhs), 1)
3019 : mask);
3020 new_lhs = gimple_assign_lhs (g);
3021 if (throws)
3022 {
3023 gsi_insert_on_edge_immediate (e, g);
3024 gsi = gsi_for_stmt (g);
3025 }
3026 else
3027 gsi_insert_after (&gsi, g, GSI_NEW_STMT);
3028 }
3029 if (use_bool && has_debug_uses)
3030 {
3031 tree temp = NULL_TREE;
3032 if (!throws || after || single_pred_p (e->dest))
3033 {
3034 temp = make_node (DEBUG_EXPR_DECL);
3035 DECL_ARTIFICIAL (temp) = 1;
3036 TREE_TYPE (temp) = TREE_TYPE (lhs);
3037 SET_DECL_MODE (temp, TYPE_MODE (TREE_TYPE (lhs)));
3038 tree t = build2 (LSHIFT_EXPR, TREE_TYPE (lhs), new_lhs, bit);
3039 g = gimple_build_debug_bind (temp, t, g);
3040 if (throws && !after)
3041 {
3042 gsi = gsi_after_labels (e->dest);
3043 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
3044 }
3045 else
3046 gsi_insert_after (&gsi, g, GSI_NEW_STMT);
3047 }
3048 FOR_EACH_IMM_USE_STMT (g, iter, use_lhs)
3049 if (is_gimple_debug (g))
3050 {
3051 use_operand_p use_p;
3052 if (temp == NULL_TREE)
3053 gimple_debug_bind_reset_value (g);
3054 else
3055 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
3056 SET_USE (use_p, temp);
3057 update_stmt (g);
3058 }
3059 }
3060 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_lhs)
3061 = SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use_lhs);
3062 replace_uses_by (use_lhs, new_lhs);
3063 gsi = gsi_for_stmt (use_stmt);
3064 gsi_remove (&gsi, true);
3065 release_defs (use_stmt);
3066 gsi_remove (gsip, true);
3067 release_ssa_name (lhs);
3068 }
3069
3070 /* Optimize
3071 a = {};
3072 b = a;
3073 into
3074 a = {};
3075 b = {};
3076 Similarly for memset (&a, ..., sizeof (a)); instead of a = {};
3077 and/or memcpy (&b, &a, sizeof (a)); instead of b = a; */
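/* The bytes read have to be covered by the bytes the earlier store wrote,
   e.g. (illustrative):

     memset (&a, 0, sizeof (a));
     memcpy (&b, &a.f, sizeof (a.f));   ->   memset (&b, 0, sizeof (a.f));

   because the member f lies entirely within the zeroed object a.  */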
3078
3079 static void
3080 optimize_memcpy (gimple_stmt_iterator *gsip, tree dest, tree src, tree len)
3081 {
3082 gimple *stmt = gsi_stmt (*gsip);
3083 if (gimple_has_volatile_ops (stmt))
3084 return;
3085
3086 tree vuse = gimple_vuse (stmt);
3087 if (vuse == NULL)
3088 return;
3089
3090 gimple *defstmt = SSA_NAME_DEF_STMT (vuse);
3091 tree src2 = NULL_TREE, len2 = NULL_TREE;
3092 poly_int64 offset, offset2;
3093 tree val = integer_zero_node;
3094 if (gimple_store_p (defstmt)
3095 && gimple_assign_single_p (defstmt)
3096 && TREE_CODE (gimple_assign_rhs1 (defstmt)) == CONSTRUCTOR
3097 && !gimple_clobber_p (defstmt))
3098 src2 = gimple_assign_lhs (defstmt);
3099 else if (gimple_call_builtin_p (defstmt, BUILT_IN_MEMSET)
3100 && TREE_CODE (gimple_call_arg (defstmt, 0)) == ADDR_EXPR
3101 && TREE_CODE (gimple_call_arg (defstmt, 1)) == INTEGER_CST)
3102 {
3103 src2 = TREE_OPERAND (gimple_call_arg (defstmt, 0), 0);
3104 len2 = gimple_call_arg (defstmt, 2);
3105 val = gimple_call_arg (defstmt, 1);
3106 /* For non-0 val, we'd have to transform stmt from assignment
3107 into memset (only if dest is addressable). */
3108 if (!integer_zerop (val) && is_gimple_assign (stmt))
3109 src2 = NULL_TREE;
3110 }
3111
3112 if (src2 == NULL_TREE)
3113 return;
3114
3115 if (len == NULL_TREE)
3116 len = (TREE_CODE (src) == COMPONENT_REF
3117 ? DECL_SIZE_UNIT (TREE_OPERAND (src, 1))
3118 : TYPE_SIZE_UNIT (TREE_TYPE (src)));
3119 if (len2 == NULL_TREE)
3120 len2 = (TREE_CODE (src2) == COMPONENT_REF
3121 ? DECL_SIZE_UNIT (TREE_OPERAND (src2, 1))
3122 : TYPE_SIZE_UNIT (TREE_TYPE (src2)));
3123 if (len == NULL_TREE
3124 || !poly_int_tree_p (len)
3125 || len2 == NULL_TREE
3126 || !poly_int_tree_p (len2))
3127 return;
3128
3129 src = get_addr_base_and_unit_offset (src, &offset);
3130 src2 = get_addr_base_and_unit_offset (src2, &offset2);
3131 if (src == NULL_TREE
3132 || src2 == NULL_TREE
3133 || maybe_lt (offset, offset2))
3134 return;
3135
3136 if (!operand_equal_p (src, src2, 0))
3137 return;
3138
3139 /* [ src + offset2, src + offset2 + len2 - 1 ] is set to val.
3140 Make sure that
3141 [ src + offset, src + offset + len - 1 ] is a subset of that. */
3142 if (maybe_gt (wi::to_poly_offset (len) + (offset - offset2),
3143 wi::to_poly_offset (len2)))
3144 return;
3145
3146 if (dump_file && (dump_flags & TDF_DETAILS))
3147 {
3148 fprintf (dump_file, "Simplified\n ");
3149 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
3150 fprintf (dump_file, "after previous\n ");
3151 print_gimple_stmt (dump_file, defstmt, 0, dump_flags);
3152 }
3153
3154   /* For simplicity, don't change the kind of the stmt: turn
3155      dest = src; into dest = {}; and memcpy (&dest, &src, len);
3156      into memset (&dest, val, len).
3157      In theory we could change dest = src into memset if dest
3158      is addressable (maybe beneficial if val is not 0), or
3159      memcpy (&dest, &src, len) into dest = {} if len is the size
3160      of dest and dest isn't volatile.  */
3161 if (is_gimple_assign (stmt))
3162 {
3163 tree ctor = build_constructor (TREE_TYPE (dest), NULL);
3164 gimple_assign_set_rhs_from_tree (gsip, ctor);
3165 update_stmt (stmt);
3166 }
3167 else /* If stmt is memcpy, transform it into memset. */
3168 {
3169 gcall *call = as_a <gcall *> (stmt);
3170 tree fndecl = builtin_decl_implicit (BUILT_IN_MEMSET);
3171 gimple_call_set_fndecl (call, fndecl);
3172 gimple_call_set_fntype (call, TREE_TYPE (fndecl));
3173 gimple_call_set_arg (call, 1, val);
3174 update_stmt (stmt);
3175 }
3176
3177 if (dump_file && (dump_flags & TDF_DETAILS))
3178 {
3179 fprintf (dump_file, "into\n ");
3180 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
3181 }
3182 }
3183
3184 /* A simple pass that attempts to fold all builtin functions. This pass
3185 is run after we've propagated as many constants as we can. */
3186
3187 namespace {
3188
3189 const pass_data pass_data_fold_builtins =
3190 {
3191 GIMPLE_PASS, /* type */
3192 "fab", /* name */
3193 OPTGROUP_NONE, /* optinfo_flags */
3194 TV_NONE, /* tv_id */
3195 ( PROP_cfg | PROP_ssa ), /* properties_required */
3196 0, /* properties_provided */
3197 0, /* properties_destroyed */
3198 0, /* todo_flags_start */
3199 TODO_update_ssa, /* todo_flags_finish */
3200 };
3201
3202 class pass_fold_builtins : public gimple_opt_pass
3203 {
3204 public:
3205 pass_fold_builtins (gcc::context *ctxt)
3206 : gimple_opt_pass (pass_data_fold_builtins, ctxt)
3207 {}
3208
3209 /* opt_pass methods: */
3210 opt_pass * clone () { return new pass_fold_builtins (m_ctxt); }
3211 virtual unsigned int execute (function *);
3212
3213 }; // class pass_fold_builtins
3214
3215 unsigned int
3216 pass_fold_builtins::execute (function *fun)
3217 {
3218 bool cfg_changed = false;
3219 basic_block bb;
3220 unsigned int todoflags = 0;
3221
3222 FOR_EACH_BB_FN (bb, fun)
3223 {
3224 gimple_stmt_iterator i;
3225 for (i = gsi_start_bb (bb); !gsi_end_p (i); )
3226 {
3227 gimple *stmt, *old_stmt;
3228 tree callee;
3229 enum built_in_function fcode;
3230
3231 stmt = gsi_stmt (i);
3232
3233 if (gimple_code (stmt) != GIMPLE_CALL)
3234 {
3235 	    /* Remove all *ssaname_N ={v} {CLOBBER}; stmts;
3236 after the last GIMPLE DSE they aren't needed and might
3237 unnecessarily keep the SSA_NAMEs live. */
3238 if (gimple_clobber_p (stmt))
3239 {
3240 tree lhs = gimple_assign_lhs (stmt);
3241 if (TREE_CODE (lhs) == MEM_REF
3242 && TREE_CODE (TREE_OPERAND (lhs, 0)) == SSA_NAME)
3243 {
3244 unlink_stmt_vdef (stmt);
3245 gsi_remove (&i, true);
3246 release_defs (stmt);
3247 continue;
3248 }
3249 }
3250 else if (gimple_assign_load_p (stmt) && gimple_store_p (stmt))
3251 optimize_memcpy (&i, gimple_assign_lhs (stmt),
3252 gimple_assign_rhs1 (stmt), NULL_TREE);
3253 gsi_next (&i);
3254 continue;
3255 }
3256
3257 callee = gimple_call_fndecl (stmt);
3258 if (!callee || !fndecl_built_in_p (callee, BUILT_IN_NORMAL))
3259 {
3260 gsi_next (&i);
3261 continue;
3262 }
3263
3264 fcode = DECL_FUNCTION_CODE (callee);
3265 if (fold_stmt (&i))
3266 ;
3267 else
3268 {
3269 tree result = NULL_TREE;
3270 switch (DECL_FUNCTION_CODE (callee))
3271 {
3272 case BUILT_IN_CONSTANT_P:
3273 /* Resolve __builtin_constant_p. If it hasn't been
3274 folded to integer_one_node by now, it's fairly
3275 certain that the value simply isn't constant. */
3276 result = integer_zero_node;
3277 break;
3278
3279 case BUILT_IN_ASSUME_ALIGNED:
3280 /* Remove __builtin_assume_aligned. */
3281 result = gimple_call_arg (stmt, 0);
3282 break;
3283
3284 case BUILT_IN_STACK_RESTORE:
3285 result = optimize_stack_restore (i);
3286 if (result)
3287 break;
3288 gsi_next (&i);
3289 continue;
3290
3291 case BUILT_IN_UNREACHABLE:
3292 if (optimize_unreachable (i))
3293 cfg_changed = true;
3294 break;
3295
3296 case BUILT_IN_ATOMIC_FETCH_OR_1:
3297 case BUILT_IN_ATOMIC_FETCH_OR_2:
3298 case BUILT_IN_ATOMIC_FETCH_OR_4:
3299 case BUILT_IN_ATOMIC_FETCH_OR_8:
3300 case BUILT_IN_ATOMIC_FETCH_OR_16:
3301 optimize_atomic_bit_test_and (&i,
3302 IFN_ATOMIC_BIT_TEST_AND_SET,
3303 true, false);
3304 break;
3305 case BUILT_IN_SYNC_FETCH_AND_OR_1:
3306 case BUILT_IN_SYNC_FETCH_AND_OR_2:
3307 case BUILT_IN_SYNC_FETCH_AND_OR_4:
3308 case BUILT_IN_SYNC_FETCH_AND_OR_8:
3309 case BUILT_IN_SYNC_FETCH_AND_OR_16:
3310 optimize_atomic_bit_test_and (&i,
3311 IFN_ATOMIC_BIT_TEST_AND_SET,
3312 false, false);
3313 break;
3314
3315 case BUILT_IN_ATOMIC_FETCH_XOR_1:
3316 case BUILT_IN_ATOMIC_FETCH_XOR_2:
3317 case BUILT_IN_ATOMIC_FETCH_XOR_4:
3318 case BUILT_IN_ATOMIC_FETCH_XOR_8:
3319 case BUILT_IN_ATOMIC_FETCH_XOR_16:
3320 optimize_atomic_bit_test_and
3321 (&i, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, true, false);
3322 break;
3323 case BUILT_IN_SYNC_FETCH_AND_XOR_1:
3324 case BUILT_IN_SYNC_FETCH_AND_XOR_2:
3325 case BUILT_IN_SYNC_FETCH_AND_XOR_4:
3326 case BUILT_IN_SYNC_FETCH_AND_XOR_8:
3327 case BUILT_IN_SYNC_FETCH_AND_XOR_16:
3328 optimize_atomic_bit_test_and
3329 (&i, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, false, false);
3330 break;
3331
3332 case BUILT_IN_ATOMIC_XOR_FETCH_1:
3333 case BUILT_IN_ATOMIC_XOR_FETCH_2:
3334 case BUILT_IN_ATOMIC_XOR_FETCH_4:
3335 case BUILT_IN_ATOMIC_XOR_FETCH_8:
3336 case BUILT_IN_ATOMIC_XOR_FETCH_16:
3337 optimize_atomic_bit_test_and
3338 (&i, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, true, true);
3339 break;
3340 case BUILT_IN_SYNC_XOR_AND_FETCH_1:
3341 case BUILT_IN_SYNC_XOR_AND_FETCH_2:
3342 case BUILT_IN_SYNC_XOR_AND_FETCH_4:
3343 case BUILT_IN_SYNC_XOR_AND_FETCH_8:
3344 case BUILT_IN_SYNC_XOR_AND_FETCH_16:
3345 optimize_atomic_bit_test_and
3346 (&i, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, false, true);
3347 break;
3348
3349 case BUILT_IN_ATOMIC_FETCH_AND_1:
3350 case BUILT_IN_ATOMIC_FETCH_AND_2:
3351 case BUILT_IN_ATOMIC_FETCH_AND_4:
3352 case BUILT_IN_ATOMIC_FETCH_AND_8:
3353 case BUILT_IN_ATOMIC_FETCH_AND_16:
3354 optimize_atomic_bit_test_and (&i,
3355 IFN_ATOMIC_BIT_TEST_AND_RESET,
3356 true, false);
3357 break;
3358 case BUILT_IN_SYNC_FETCH_AND_AND_1:
3359 case BUILT_IN_SYNC_FETCH_AND_AND_2:
3360 case BUILT_IN_SYNC_FETCH_AND_AND_4:
3361 case BUILT_IN_SYNC_FETCH_AND_AND_8:
3362 case BUILT_IN_SYNC_FETCH_AND_AND_16:
3363 optimize_atomic_bit_test_and (&i,
3364 IFN_ATOMIC_BIT_TEST_AND_RESET,
3365 false, false);
3366 break;
3367
3368 case BUILT_IN_MEMCPY:
3369 if (gimple_call_builtin_p (stmt, BUILT_IN_NORMAL)
3370 && TREE_CODE (gimple_call_arg (stmt, 0)) == ADDR_EXPR
3371 && TREE_CODE (gimple_call_arg (stmt, 1)) == ADDR_EXPR
3372 && TREE_CODE (gimple_call_arg (stmt, 2)) == INTEGER_CST)
3373 {
3374 tree dest = TREE_OPERAND (gimple_call_arg (stmt, 0), 0);
3375 tree src = TREE_OPERAND (gimple_call_arg (stmt, 1), 0);
3376 tree len = gimple_call_arg (stmt, 2);
3377 optimize_memcpy (&i, dest, src, len);
3378 }
3379 break;
3380
3381 case BUILT_IN_VA_START:
3382 case BUILT_IN_VA_END:
3383 case BUILT_IN_VA_COPY:
3384 /* These shouldn't be folded before pass_stdarg. */
3385 result = optimize_stdarg_builtin (stmt);
3386 break;
3387
3388 default:;
3389 }
3390
3391 if (!result)
3392 {
3393 gsi_next (&i);
3394 continue;
3395 }
3396
3397 if (!update_call_from_tree (&i, result))
3398 gimplify_and_update_call_from_tree (&i, result);
3399 }
3400
3401 todoflags |= TODO_update_address_taken;
3402
3403 if (dump_file && (dump_flags & TDF_DETAILS))
3404 {
3405 fprintf (dump_file, "Simplified\n ");
3406 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
3407 }
3408
3409 old_stmt = stmt;
3410 stmt = gsi_stmt (i);
3411 update_stmt (stmt);
3412
3413 if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt)
3414 && gimple_purge_dead_eh_edges (bb))
3415 cfg_changed = true;
3416
3417 if (dump_file && (dump_flags & TDF_DETAILS))
3418 {
3419 fprintf (dump_file, "to\n ");
3420 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
3421 fprintf (dump_file, "\n");
3422 }
3423
3424 /* Retry the same statement if it changed into another
3425 	     builtin; there might be new opportunities now.  */
3426 if (gimple_code (stmt) != GIMPLE_CALL)
3427 {
3428 gsi_next (&i);
3429 continue;
3430 }
3431 callee = gimple_call_fndecl (stmt);
3432 if (!callee
3433 || !fndecl_built_in_p (callee, fcode))
3434 gsi_next (&i);
3435 }
3436 }
3437
3438 /* Delete unreachable blocks. */
3439 if (cfg_changed)
3440 todoflags |= TODO_cleanup_cfg;
3441
3442 return todoflags;
3443 }
3444
3445 } // anon namespace
3446
3447 gimple_opt_pass *
3448 make_pass_fold_builtins (gcc::context *ctxt)
3449 {
3450 return new pass_fold_builtins (ctxt);
3451 }
3452
3453 /* A simple pass that emits some warnings post IPA. */
3454
3455 namespace {
3456
3457 const pass_data pass_data_post_ipa_warn =
3458 {
3459 GIMPLE_PASS, /* type */
3460 "post_ipa_warn", /* name */
3461 OPTGROUP_NONE, /* optinfo_flags */
3462 TV_NONE, /* tv_id */
3463 ( PROP_cfg | PROP_ssa ), /* properties_required */
3464 0, /* properties_provided */
3465 0, /* properties_destroyed */
3466 0, /* todo_flags_start */
3467 0, /* todo_flags_finish */
3468 };
3469
3470 class pass_post_ipa_warn : public gimple_opt_pass
3471 {
3472 public:
3473 pass_post_ipa_warn (gcc::context *ctxt)
3474 : gimple_opt_pass (pass_data_post_ipa_warn, ctxt)
3475 {}
3476
3477 /* opt_pass methods: */
3478 opt_pass * clone () { return new pass_post_ipa_warn (m_ctxt); }
3479 virtual bool gate (function *) { return warn_nonnull != 0; }
3480 virtual unsigned int execute (function *);
3481
3482 }; // class pass_post_ipa_warn
3483
3484 unsigned int
3485 pass_post_ipa_warn::execute (function *fun)
3486 {
3487 basic_block bb;
3488
3489 FOR_EACH_BB_FN (bb, fun)
3490 {
3491 gimple_stmt_iterator gsi;
3492 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3493 {
3494 gimple *stmt = gsi_stmt (gsi);
3495 if (!is_gimple_call (stmt) || gimple_no_warning_p (stmt))
3496 continue;
3497
3498 if (warn_nonnull)
3499 {
3500 bitmap nonnullargs
3501 = get_nonnull_args (gimple_call_fntype (stmt));
3502 if (nonnullargs)
3503 {
3504 for (unsigned i = 0; i < gimple_call_num_args (stmt); i++)
3505 {
3506 tree arg = gimple_call_arg (stmt, i);
3507 if (TREE_CODE (TREE_TYPE (arg)) != POINTER_TYPE)
3508 continue;
3509 if (!integer_zerop (arg))
3510 continue;
3511 if (!bitmap_empty_p (nonnullargs)
3512 && !bitmap_bit_p (nonnullargs, i))
3513 continue;
3514
3515 location_t loc = gimple_location (stmt);
3516 auto_diagnostic_group d;
3517 if (warning_at (loc, OPT_Wnonnull,
3518 "%Gargument %u null where non-null "
3519 "expected", stmt, i + 1))
3520 {
3521 tree fndecl = gimple_call_fndecl (stmt);
3522 if (fndecl && DECL_IS_BUILTIN (fndecl))
3523 inform (loc, "in a call to built-in function %qD",
3524 fndecl);
3525 else if (fndecl)
3526 inform (DECL_SOURCE_LOCATION (fndecl),
3527 "in a call to function %qD declared here",
3528 fndecl);
3529
3530 }
3531 }
3532 BITMAP_FREE (nonnullargs);
3533 }
3534 }
3535 }
3536 }
3537 return 0;
3538 }
3539
3540 } // anon namespace
3541
3542 gimple_opt_pass *
3543 make_pass_post_ipa_warn (gcc::context *ctxt)
3544 {
3545 return new pass_post_ipa_warn (ctxt);
3546 }