BASE-VER: Set to 4.8.0.
[gcc.git] / gcc / tree-ssa-uninit.c
1 /* Predicate aware uninitialized variable warning.
2 Copyright (C) 2001, 2002, 2003, 2004, 2005, 2007, 2008, 2010 Free Software
3 Foundation, Inc.
4 Contributed by Xinliang David Li <davidxl@google.com>
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
12
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "tree.h"
27 #include "flags.h"
28 #include "tm_p.h"
29 #include "langhooks.h"
30 #include "basic-block.h"
31 #include "output.h"
32 #include "function.h"
33 #include "gimple-pretty-print.h"
34 #include "bitmap.h"
35 #include "pointer-set.h"
36 #include "tree-flow.h"
37 #include "gimple.h"
38 #include "tree-inline.h"
39 #include "timevar.h"
40 #include "hashtab.h"
41 #include "tree-dump.h"
42 #include "tree-pass.h"
43 #include "diagnostic-core.h"
44 #include "timevar.h"
45
46 /* This implements the pass that does predicate aware warning on uses of
47 possibly uninitialized variables. The pass first collects the set of
48 possibly uninitialized SSA names. For each such name, it walks through
49 all its immediate uses. For each immediate use, it rebuilds the condition
50 expression (the predicate) that guards the use. The predicate is then
51 examined to see if the variable is always defined under that same condition.
52 This is done either by pruning the unrealizable paths that lead to the
53 default definitions or by checking if the predicate set that guards the
54 defining paths is a superset of the use predicate. */
55
56
57 /* Pointer set of potentially undefined ssa names, i.e.,
58 ssa names that are defined by phi with operands that
59 are not defined or potentially undefined. */
60 static struct pointer_set_t *possibly_undefined_names = 0;
61
62 /* Bit mask handling macros. */
63 #define MASK_SET_BIT(mask, pos) mask |= (1 << pos)
64 #define MASK_TEST_BIT(mask, pos) (mask & (1 << pos))
65 #define MASK_EMPTY(mask) (mask == 0)
66
67 /* Returns the first bit position (starting from LSB)
68 in mask that is non zero. Returns -1 if the mask is empty. */
69 static int
70 get_mask_first_set_bit (unsigned mask)
71 {
72 int pos = 0;
73 if (mask == 0)
74 return -1;
75
76 while ((mask & (1 << pos)) == 0)
77 pos++;
78
79 return pos;
80 }
81 #define MASK_FIRST_SET_BIT(mask) get_mask_first_set_bit (mask)
82
83
84 /* Return true if T, an SSA_NAME, has an undefined value. */
85
86 bool
87 ssa_undefined_value_p (tree t)
88 {
89 tree var = SSA_NAME_VAR (t);
90
91 /* Parameters get their initial value from the function entry. */
92 if (TREE_CODE (var) == PARM_DECL)
93 return false;
94
95 /* When returning by reference the return address is actually a hidden
96 parameter. */
97 if (TREE_CODE (SSA_NAME_VAR (t)) == RESULT_DECL
98 && DECL_BY_REFERENCE (SSA_NAME_VAR (t)))
99 return false;
100
101 /* Hard register variables get their initial value from the ether. */
102 if (TREE_CODE (var) == VAR_DECL && DECL_HARD_REGISTER (var))
103 return false;
104
105 /* The value is undefined iff its definition statement is empty. */
106 return (gimple_nop_p (SSA_NAME_DEF_STMT (t))
107 || (possibly_undefined_names
108 && pointer_set_contains (possibly_undefined_names, t)));
109 }
110
111 /* Checks if the operand OPND of PHI is defined by
112 another phi with one operand defined by this PHI,
113 but the rest operands are all defined. If yes,
114 returns true to skip this this operand as being
115 redundant. Can be enhanced to be more general. */
116
117 static bool
118 can_skip_redundant_opnd (tree opnd, gimple phi)
119 {
120 gimple op_def;
121 tree phi_def;
122 int i, n;
123
124 phi_def = gimple_phi_result (phi);
125 op_def = SSA_NAME_DEF_STMT (opnd);
126 if (gimple_code (op_def) != GIMPLE_PHI)
127 return false;
128 n = gimple_phi_num_args (op_def);
129 for (i = 0; i < n; ++i)
130 {
131 tree op = gimple_phi_arg_def (op_def, i);
132 if (TREE_CODE (op) != SSA_NAME)
133 continue;
134 if (op != phi_def && ssa_undefined_value_p (op))
135 return false;
136 }
137
138 return true;
139 }
140
141 /* Returns a bit mask holding the positions of arguments in PHI
142 that have empty (or possibly empty) definitions. */
143
144 static unsigned
145 compute_uninit_opnds_pos (gimple phi)
146 {
147 size_t i, n;
148 unsigned uninit_opnds = 0;
149
150 n = gimple_phi_num_args (phi);
151 /* Bail out for phi with too many args. */
152 if (n > 32)
153 return 0;
154
155 for (i = 0; i < n; ++i)
156 {
157 tree op = gimple_phi_arg_def (phi, i);
158 if (TREE_CODE (op) == SSA_NAME
159 && ssa_undefined_value_p (op)
160 && !can_skip_redundant_opnd (op, phi))
161 MASK_SET_BIT (uninit_opnds, i);
162 }
163 return uninit_opnds;
164 }
165
166 /* Find the immediate postdominator PDOM of the specified
167 basic block BLOCK. */
168
169 static inline basic_block
170 find_pdom (basic_block block)
171 {
172 if (block == EXIT_BLOCK_PTR)
173 return EXIT_BLOCK_PTR;
174 else
175 {
176 basic_block bb
177 = get_immediate_dominator (CDI_POST_DOMINATORS, block);
178 if (! bb)
179 return EXIT_BLOCK_PTR;
180 return bb;
181 }
182 }
183
184 /* Find the immediate DOM of the specified
185 basic block BLOCK. */
186
187 static inline basic_block
188 find_dom (basic_block block)
189 {
190 if (block == ENTRY_BLOCK_PTR)
191 return ENTRY_BLOCK_PTR;
192 else
193 {
194 basic_block bb = get_immediate_dominator (CDI_DOMINATORS, block);
195 if (! bb)
196 return ENTRY_BLOCK_PTR;
197 return bb;
198 }
199 }
200
201 /* Returns true if BB1 is postdominating BB2 and BB1 is
202 not a loop exit bb. The loop exit bb check is simple and does
203 not cover all cases. */
204
205 static bool
206 is_non_loop_exit_postdominating (basic_block bb1, basic_block bb2)
207 {
208 if (!dominated_by_p (CDI_POST_DOMINATORS, bb2, bb1))
209 return false;
210
211 if (single_pred_p (bb1) && !single_succ_p (bb2))
212 return false;
213
214 return true;
215 }
216
217 /* Find the closest postdominator of a specified BB, which is control
218 equivalent to BB. */
219
220 static inline basic_block
221 find_control_equiv_block (basic_block bb)
222 {
223 basic_block pdom;
224
225 pdom = find_pdom (bb);
226
227 /* Skip the postdominating bb that is also loop exit. */
228 if (!is_non_loop_exit_postdominating (pdom, bb))
229 return NULL;
230
231 if (dominated_by_p (CDI_DOMINATORS, pdom, bb))
232 return pdom;
233
234 return NULL;
235 }
236
237 #define MAX_NUM_CHAINS 8
238 #define MAX_CHAIN_LEN 5
239
240 /* Computes the control dependence chains (paths of edges)
241 for DEP_BB up to the dominating basic block BB (the head node of a
242 chain should be dominated by it). CD_CHAINS is pointer to a
243 dynamic array holding the result chains. CUR_CD_CHAIN is the current
244 chain being computed. *NUM_CHAINS is total number of chains. The
245 function returns true if the information is successfully computed,
246 return false if there is no control dependence or not computed. */
247
248 static bool
249 compute_control_dep_chain (basic_block bb, basic_block dep_bb,
250 VEC(edge, heap) **cd_chains,
251 size_t *num_chains,
252 VEC(edge, heap) **cur_cd_chain)
253 {
254 edge_iterator ei;
255 edge e;
256 size_t i;
257 bool found_cd_chain = false;
258 size_t cur_chain_len = 0;
259
260 if (EDGE_COUNT (bb->succs) < 2)
261 return false;
262
263 /* Could use a set instead. */
264 cur_chain_len = VEC_length (edge, *cur_cd_chain);
265 if (cur_chain_len > MAX_CHAIN_LEN)
266 return false;
267
268 for (i = 0; i < cur_chain_len; i++)
269 {
270 edge e = VEC_index (edge, *cur_cd_chain, i);
271 /* cycle detected. */
272 if (e->src == bb)
273 return false;
274 }
275
276 FOR_EACH_EDGE (e, ei, bb->succs)
277 {
278 basic_block cd_bb;
279 if (e->flags & (EDGE_FAKE | EDGE_ABNORMAL))
280 continue;
281
282 cd_bb = e->dest;
283 VEC_safe_push (edge, heap, *cur_cd_chain, e);
284 while (!is_non_loop_exit_postdominating (cd_bb, bb))
285 {
286 if (cd_bb == dep_bb)
287 {
288 /* Found a direct control dependence. */
289 if (*num_chains < MAX_NUM_CHAINS)
290 {
291 cd_chains[*num_chains]
292 = VEC_copy (edge, heap, *cur_cd_chain);
293 (*num_chains)++;
294 }
295 found_cd_chain = true;
296 /* check path from next edge. */
297 break;
298 }
299
300 /* Now check if DEP_BB is indirectly control dependent on BB. */
301 if (compute_control_dep_chain (cd_bb, dep_bb, cd_chains,
302 num_chains, cur_cd_chain))
303 {
304 found_cd_chain = true;
305 break;
306 }
307
308 cd_bb = find_pdom (cd_bb);
309 if (cd_bb == EXIT_BLOCK_PTR)
310 break;
311 }
312 VEC_pop (edge, *cur_cd_chain);
313 gcc_assert (VEC_length (edge, *cur_cd_chain) == cur_chain_len);
314 }
315 gcc_assert (VEC_length (edge, *cur_cd_chain) == cur_chain_len);
316
317 return found_cd_chain;
318 }
319
320 typedef struct use_pred_info
321 {
322 gimple cond;
323 bool invert;
324 } *use_pred_info_t;
325
326 DEF_VEC_P(use_pred_info_t);
327 DEF_VEC_ALLOC_P(use_pred_info_t, heap);
328
329
330 /* Converts the chains of control dependence edges into a set of
331 predicates. A control dependence chain is represented by a vector
332 edges. DEP_CHAINS points to an array of dependence chains.
333 NUM_CHAINS is the size of the chain array. One edge in a dependence
334 chain is mapped to predicate expression represented by use_pred_info_t
335 type. One dependence chain is converted to a composite predicate that
336 is the result of AND operation of use_pred_info_t mapped to each edge.
337 A composite predicate is presented by a vector of use_pred_info_t. On
338 return, *PREDS points to the resulting array of composite predicates.
339 *NUM_PREDS is the number of composite predictes. */
340
341 static bool
342 convert_control_dep_chain_into_preds (VEC(edge, heap) **dep_chains,
343 size_t num_chains,
344 VEC(use_pred_info_t, heap) ***preds,
345 size_t *num_preds)
346 {
347 bool has_valid_pred = false;
348 size_t i, j;
349 if (num_chains == 0 || num_chains >= MAX_NUM_CHAINS)
350 return false;
351
352 /* Now convert the control dep chain into a set
353 of predicates. */
354 *preds = XCNEWVEC (VEC(use_pred_info_t, heap) *,
355 num_chains);
356 *num_preds = num_chains;
357
358 for (i = 0; i < num_chains; i++)
359 {
360 VEC(edge, heap) *one_cd_chain = dep_chains[i];
361
362 has_valid_pred = false;
363 for (j = 0; j < VEC_length (edge, one_cd_chain); j++)
364 {
365 gimple cond_stmt;
366 gimple_stmt_iterator gsi;
367 basic_block guard_bb;
368 use_pred_info_t one_pred;
369 edge e;
370
371 e = VEC_index (edge, one_cd_chain, j);
372 guard_bb = e->src;
373 gsi = gsi_last_bb (guard_bb);
374 if (gsi_end_p (gsi))
375 {
376 has_valid_pred = false;
377 break;
378 }
379 cond_stmt = gsi_stmt (gsi);
380 if (gimple_code (cond_stmt) == GIMPLE_CALL
381 && EDGE_COUNT (e->src->succs) >= 2)
382 {
383 /* Ignore EH edge. Can add assertion
384 on the other edge's flag. */
385 continue;
386 }
387 /* Skip if there is essentially one succesor. */
388 if (EDGE_COUNT (e->src->succs) == 2)
389 {
390 edge e1;
391 edge_iterator ei1;
392 bool skip = false;
393
394 FOR_EACH_EDGE (e1, ei1, e->src->succs)
395 {
396 if (EDGE_COUNT (e1->dest->succs) == 0)
397 {
398 skip = true;
399 break;
400 }
401 }
402 if (skip)
403 continue;
404 }
405 if (gimple_code (cond_stmt) != GIMPLE_COND)
406 {
407 has_valid_pred = false;
408 break;
409 }
410 one_pred = XNEW (struct use_pred_info);
411 one_pred->cond = cond_stmt;
412 one_pred->invert = !!(e->flags & EDGE_FALSE_VALUE);
413 VEC_safe_push (use_pred_info_t, heap, (*preds)[i], one_pred);
414 has_valid_pred = true;
415 }
416
417 if (!has_valid_pred)
418 break;
419 }
420 return has_valid_pred;
421 }
422
423 /* Computes all control dependence chains for USE_BB. The control
424 dependence chains are then converted to an array of composite
425 predicates pointed to by PREDS. PHI_BB is the basic block of
426 the phi whose result is used in USE_BB. */
427
428 static bool
429 find_predicates (VEC(use_pred_info_t, heap) ***preds,
430 size_t *num_preds,
431 basic_block phi_bb,
432 basic_block use_bb)
433 {
434 size_t num_chains = 0, i;
435 VEC(edge, heap) **dep_chains = 0;
436 VEC(edge, heap) *cur_chain = 0;
437 bool has_valid_pred = false;
438 basic_block cd_root = 0;
439
440 dep_chains = XCNEWVEC (VEC(edge, heap) *, MAX_NUM_CHAINS);
441
442 /* First find the closest bb that is control equivalent to PHI_BB
443 that also dominates USE_BB. */
444 cd_root = phi_bb;
445 while (dominated_by_p (CDI_DOMINATORS, use_bb, cd_root))
446 {
447 basic_block ctrl_eq_bb = find_control_equiv_block (cd_root);
448 if (ctrl_eq_bb && dominated_by_p (CDI_DOMINATORS, use_bb, ctrl_eq_bb))
449 cd_root = ctrl_eq_bb;
450 else
451 break;
452 }
453
454 compute_control_dep_chain (cd_root, use_bb,
455 dep_chains, &num_chains,
456 &cur_chain);
457
458 has_valid_pred
459 = convert_control_dep_chain_into_preds (dep_chains,
460 num_chains,
461 preds,
462 num_preds);
463 /* Free individual chain */
464 VEC_free (edge, heap, cur_chain);
465 for (i = 0; i < num_chains; i++)
466 VEC_free (edge, heap, dep_chains[i]);
467 free (dep_chains);
468 return has_valid_pred;
469 }
470
471 /* Computes the set of incoming edges of PHI that have non empty
472 definitions of a phi chain. The collection will be done
473 recursively on operands that are defined by phis. CD_ROOT
474 is the control dependence root. *EDGES holds the result, and
475 VISITED_PHIS is a pointer set for detecting cycles. */
476
477 static void
478 collect_phi_def_edges (gimple phi, basic_block cd_root,
479 VEC(edge, heap) **edges,
480 struct pointer_set_t *visited_phis)
481 {
482 size_t i, n;
483 edge opnd_edge;
484 tree opnd;
485
486 if (pointer_set_insert (visited_phis, phi))
487 return;
488
489 n = gimple_phi_num_args (phi);
490 for (i = 0; i < n; i++)
491 {
492 opnd_edge = gimple_phi_arg_edge (phi, i);
493 opnd = gimple_phi_arg_def (phi, i);
494
495 if (TREE_CODE (opnd) != SSA_NAME)
496 {
497 if (dump_file && (dump_flags & TDF_DETAILS))
498 {
499 fprintf (dump_file, "\n[CHECK] Found def edge %d in ", (int)i);
500 print_gimple_stmt (dump_file, phi, 0, 0);
501 }
502 VEC_safe_push (edge, heap, *edges, opnd_edge);
503 }
504 else
505 {
506 gimple def = SSA_NAME_DEF_STMT (opnd);
507
508 if (gimple_code (def) == GIMPLE_PHI
509 && dominated_by_p (CDI_DOMINATORS,
510 gimple_bb (def), cd_root))
511 collect_phi_def_edges (def, cd_root, edges,
512 visited_phis);
513 else if (!ssa_undefined_value_p (opnd))
514 {
515 if (dump_file && (dump_flags & TDF_DETAILS))
516 {
517 fprintf (dump_file, "\n[CHECK] Found def edge %d in ", (int)i);
518 print_gimple_stmt (dump_file, phi, 0, 0);
519 }
520 VEC_safe_push (edge, heap, *edges, opnd_edge);
521 }
522 }
523 }
524 }
525
526 /* For each use edge of PHI, computes all control dependence chains.
527 The control dependence chains are then converted to an array of
528 composite predicates pointed to by PREDS. */
529
530 static bool
531 find_def_preds (VEC(use_pred_info_t, heap) ***preds,
532 size_t *num_preds, gimple phi)
533 {
534 size_t num_chains = 0, i, n;
535 VEC(edge, heap) **dep_chains = 0;
536 VEC(edge, heap) *cur_chain = 0;
537 VEC(edge, heap) *def_edges = 0;
538 bool has_valid_pred = false;
539 basic_block phi_bb, cd_root = 0;
540 struct pointer_set_t *visited_phis;
541
542 dep_chains = XCNEWVEC (VEC(edge, heap) *, MAX_NUM_CHAINS);
543
544 phi_bb = gimple_bb (phi);
545 /* First find the closest dominating bb to be
546 the control dependence root */
547 cd_root = find_dom (phi_bb);
548 if (!cd_root)
549 return false;
550
551 visited_phis = pointer_set_create ();
552 collect_phi_def_edges (phi, cd_root, &def_edges, visited_phis);
553 pointer_set_destroy (visited_phis);
554
555 n = VEC_length (edge, def_edges);
556 if (n == 0)
557 return false;
558
559 for (i = 0; i < n; i++)
560 {
561 size_t prev_nc, j;
562 edge opnd_edge;
563
564 opnd_edge = VEC_index (edge, def_edges, i);
565 prev_nc = num_chains;
566 compute_control_dep_chain (cd_root, opnd_edge->src,
567 dep_chains, &num_chains,
568 &cur_chain);
569 /* Free individual chain */
570 VEC_free (edge, heap, cur_chain);
571 cur_chain = 0;
572
573 /* Now update the newly added chains with
574 the phi operand edge: */
575 if (EDGE_COUNT (opnd_edge->src->succs) > 1)
576 {
577 if (prev_nc == num_chains
578 && num_chains < MAX_NUM_CHAINS)
579 num_chains++;
580 for (j = prev_nc; j < num_chains; j++)
581 {
582 VEC_safe_push (edge, heap, dep_chains[j], opnd_edge);
583 }
584 }
585 }
586
587 has_valid_pred
588 = convert_control_dep_chain_into_preds (dep_chains,
589 num_chains,
590 preds,
591 num_preds);
592 for (i = 0; i < num_chains; i++)
593 VEC_free (edge, heap, dep_chains[i]);
594 free (dep_chains);
595 return has_valid_pred;
596 }
597
598 /* Dumps the predicates (PREDS) for USESTMT. */
599
600 static void
601 dump_predicates (gimple usestmt, size_t num_preds,
602 VEC(use_pred_info_t, heap) **preds,
603 const char* msg)
604 {
605 size_t i, j;
606 VEC(use_pred_info_t, heap) *one_pred_chain;
607 fprintf (dump_file, msg);
608 print_gimple_stmt (dump_file, usestmt, 0, 0);
609 fprintf (dump_file, "is guarded by :\n");
610 /* do some dumping here: */
611 for (i = 0; i < num_preds; i++)
612 {
613 size_t np;
614
615 one_pred_chain = preds[i];
616 np = VEC_length (use_pred_info_t, one_pred_chain);
617
618 for (j = 0; j < np; j++)
619 {
620 use_pred_info_t one_pred
621 = VEC_index (use_pred_info_t, one_pred_chain, j);
622 if (one_pred->invert)
623 fprintf (dump_file, " (.NOT.) ");
624 print_gimple_stmt (dump_file, one_pred->cond, 0, 0);
625 if (j < np - 1)
626 fprintf (dump_file, "(.AND.)\n");
627 }
628 if (i < num_preds - 1)
629 fprintf (dump_file, "(.OR.)\n");
630 }
631 }
632
633 /* Destroys the predicate set *PREDS. */
634
635 static void
636 destroy_predicate_vecs (size_t n,
637 VEC(use_pred_info_t, heap) ** preds)
638 {
639 size_t i, j;
640 for (i = 0; i < n; i++)
641 {
642 for (j = 0; j < VEC_length (use_pred_info_t, preds[i]); j++)
643 free (VEC_index (use_pred_info_t, preds[i], j));
644 VEC_free (use_pred_info_t, heap, preds[i]);
645 }
646 free (preds);
647 }
648
649
650 /* Computes the 'normalized' conditional code with operand
651 swapping and condition inversion. */
652
653 static enum tree_code
654 get_cmp_code (enum tree_code orig_cmp_code,
655 bool swap_cond, bool invert)
656 {
657 enum tree_code tc = orig_cmp_code;
658
659 if (swap_cond)
660 tc = swap_tree_comparison (orig_cmp_code);
661 if (invert)
662 tc = invert_tree_comparison (tc, false);
663
664 switch (tc)
665 {
666 case LT_EXPR:
667 case LE_EXPR:
668 case GT_EXPR:
669 case GE_EXPR:
670 case EQ_EXPR:
671 case NE_EXPR:
672 break;
673 default:
674 return ERROR_MARK;
675 }
676 return tc;
677 }
678
679 /* Returns true if VAL falls in the range defined by BOUNDARY and CMPC, i.e.
680 all values in the range satisfies (x CMPC BOUNDARY) == true. */
681
682 static bool
683 is_value_included_in (tree val, tree boundary, enum tree_code cmpc)
684 {
685 bool inverted = false;
686 bool is_unsigned;
687 bool result;
688
689 /* Only handle integer constant here. */
690 if (TREE_CODE (val) != INTEGER_CST
691 || TREE_CODE (boundary) != INTEGER_CST)
692 return true;
693
694 is_unsigned = TYPE_UNSIGNED (TREE_TYPE (val));
695
696 if (cmpc == GE_EXPR || cmpc == GT_EXPR
697 || cmpc == NE_EXPR)
698 {
699 cmpc = invert_tree_comparison (cmpc, false);
700 inverted = true;
701 }
702
703 if (is_unsigned)
704 {
705 if (cmpc == EQ_EXPR)
706 result = tree_int_cst_equal (val, boundary);
707 else if (cmpc == LT_EXPR)
708 result = INT_CST_LT_UNSIGNED (val, boundary);
709 else
710 {
711 gcc_assert (cmpc == LE_EXPR);
712 result = (tree_int_cst_equal (val, boundary)
713 || INT_CST_LT_UNSIGNED (val, boundary));
714 }
715 }
716 else
717 {
718 if (cmpc == EQ_EXPR)
719 result = tree_int_cst_equal (val, boundary);
720 else if (cmpc == LT_EXPR)
721 result = INT_CST_LT (val, boundary);
722 else
723 {
724 gcc_assert (cmpc == LE_EXPR);
725 result = (tree_int_cst_equal (val, boundary)
726 || INT_CST_LT (val, boundary));
727 }
728 }
729
730 if (inverted)
731 result ^= 1;
732
733 return result;
734 }
735
736 /* Returns true if PRED is common among all the predicate
737 chains (PREDS) (and therefore can be factored out).
738 NUM_PRED_CHAIN is the size of array PREDS. */
739
740 static bool
741 find_matching_predicate_in_rest_chains (use_pred_info_t pred,
742 VEC(use_pred_info_t, heap) **preds,
743 size_t num_pred_chains)
744 {
745 size_t i, j, n;
746
747 /* trival case */
748 if (num_pred_chains == 1)
749 return true;
750
751 for (i = 1; i < num_pred_chains; i++)
752 {
753 bool found = false;
754 VEC(use_pred_info_t, heap) *one_chain = preds[i];
755 n = VEC_length (use_pred_info_t, one_chain);
756 for (j = 0; j < n; j++)
757 {
758 use_pred_info_t pred2
759 = VEC_index (use_pred_info_t, one_chain, j);
760 /* can relax the condition comparison to not
761 use address comparison. However, the most common
762 case is that multiple control dependent paths share
763 a common path prefix, so address comparison should
764 be ok. */
765
766 if (pred2->cond == pred->cond
767 && pred2->invert == pred->invert)
768 {
769 found = true;
770 break;
771 }
772 }
773 if (!found)
774 return false;
775 }
776 return true;
777 }
778
779 /* Forward declaration. */
780 static bool
781 is_use_properly_guarded (gimple use_stmt,
782 basic_block use_bb,
783 gimple phi,
784 unsigned uninit_opnds,
785 struct pointer_set_t *visited_phis);
786
787 /* Returns true if all uninitialized opnds are pruned. Returns false
788 otherwise. PHI is the phi node with uninitialized operands,
789 UNINIT_OPNDS is the bitmap of the uninitialize operand positions,
790 FLAG_DEF is the statement defining the flag guarding the use of the
791 PHI output, BOUNDARY_CST is the const value used in the predicate
792 associated with the flag, CMP_CODE is the comparison code used in
793 the predicate, VISITED_PHIS is the pointer set of phis visited, and
794 VISITED_FLAG_PHIS is the pointer to the pointer set of flag definitions
795 that are also phis.
796
797 Example scenario:
798
799 BB1:
800 flag_1 = phi <0, 1> // (1)
801 var_1 = phi <undef, some_val>
802
803
804 BB2:
805 flag_2 = phi <0, flag_1, flag_1> // (2)
806 var_2 = phi <undef, var_1, var_1>
807 if (flag_2 == 1)
808 goto BB3;
809
810 BB3:
811 use of var_2 // (3)
812
813 Because some flag arg in (1) is not constant, if we do not look into the
814 flag phis recursively, it is conservatively treated as unknown and var_1
815 is thought to be flowed into use at (3). Since var_1 is potentially uninitialized
816 a false warning will be emitted. Checking recursively into (1), the compiler can
817 find out that only some_val (which is defined) can flow into (3) which is OK.
818
819 */
820
821 static bool
822 prune_uninit_phi_opnds_in_unrealizable_paths (
823 gimple phi, unsigned uninit_opnds,
824 gimple flag_def, tree boundary_cst,
825 enum tree_code cmp_code,
826 struct pointer_set_t *visited_phis,
827 bitmap *visited_flag_phis)
828 {
829 unsigned i;
830
831 for (i = 0; i < MIN (32, gimple_phi_num_args (flag_def)); i++)
832 {
833 tree flag_arg;
834
835 if (!MASK_TEST_BIT (uninit_opnds, i))
836 continue;
837
838 flag_arg = gimple_phi_arg_def (flag_def, i);
839 if (!is_gimple_constant (flag_arg))
840 {
841 gimple flag_arg_def, phi_arg_def;
842 tree phi_arg;
843 unsigned uninit_opnds_arg_phi;
844
845 if (TREE_CODE (flag_arg) != SSA_NAME)
846 return false;
847 flag_arg_def = SSA_NAME_DEF_STMT (flag_arg);
848 if (gimple_code (flag_arg_def) != GIMPLE_PHI)
849 return false;
850
851 phi_arg = gimple_phi_arg_def (phi, i);
852 if (TREE_CODE (phi_arg) != SSA_NAME)
853 return false;
854
855 phi_arg_def = SSA_NAME_DEF_STMT (phi_arg);
856 if (gimple_code (phi_arg_def) != GIMPLE_PHI)
857 return false;
858
859 if (gimple_bb (phi_arg_def) != gimple_bb (flag_arg_def))
860 return false;
861
862 if (!*visited_flag_phis)
863 *visited_flag_phis = BITMAP_ALLOC (NULL);
864
865 if (bitmap_bit_p (*visited_flag_phis,
866 SSA_NAME_VERSION (gimple_phi_result (flag_arg_def))))
867 return false;
868
869 bitmap_set_bit (*visited_flag_phis,
870 SSA_NAME_VERSION (gimple_phi_result (flag_arg_def)));
871
872 /* Now recursively prune the uninitialized phi args. */
873 uninit_opnds_arg_phi = compute_uninit_opnds_pos (phi_arg_def);
874 if (!prune_uninit_phi_opnds_in_unrealizable_paths (
875 phi_arg_def, uninit_opnds_arg_phi,
876 flag_arg_def, boundary_cst, cmp_code,
877 visited_phis, visited_flag_phis))
878 return false;
879
880 bitmap_clear_bit (*visited_flag_phis,
881 SSA_NAME_VERSION (gimple_phi_result (flag_arg_def)));
882 continue;
883 }
884
885 /* Now check if the constant is in the guarded range. */
886 if (is_value_included_in (flag_arg, boundary_cst, cmp_code))
887 {
888 tree opnd;
889 gimple opnd_def;
890
891 /* Now that we know that this undefined edge is not
892 pruned. If the operand is defined by another phi,
893 we can further prune the incoming edges of that
894 phi by checking the predicates of this operands. */
895
896 opnd = gimple_phi_arg_def (phi, i);
897 opnd_def = SSA_NAME_DEF_STMT (opnd);
898 if (gimple_code (opnd_def) == GIMPLE_PHI)
899 {
900 edge opnd_edge;
901 unsigned uninit_opnds2
902 = compute_uninit_opnds_pos (opnd_def);
903 gcc_assert (!MASK_EMPTY (uninit_opnds2));
904 opnd_edge = gimple_phi_arg_edge (phi, i);
905 if (!is_use_properly_guarded (phi,
906 opnd_edge->src,
907 opnd_def,
908 uninit_opnds2,
909 visited_phis))
910 return false;
911 }
912 else
913 return false;
914 }
915 }
916
917 return true;
918 }
919
920 /* A helper function that determines if the predicate set
921 of the use is not overlapping with that of the uninit paths.
922 The most common senario of guarded use is in Example 1:
923 Example 1:
924 if (some_cond)
925 {
926 x = ...;
927 flag = true;
928 }
929
930 ... some code ...
931
932 if (flag)
933 use (x);
934
935 The real world examples are usually more complicated, but similar
936 and usually result from inlining:
937
938 bool init_func (int * x)
939 {
940 if (some_cond)
941 return false;
942 *x = ..
943 return true;
944 }
945
946 void foo(..)
947 {
948 int x;
949
950 if (!init_func(&x))
951 return;
952
953 .. some_code ...
954 use (x);
955 }
956
957 Another possible use scenario is in the following trivial example:
958
959 Example 2:
960 if (n > 0)
961 x = 1;
962 ...
963 if (n > 0)
964 {
965 if (m < 2)
966 .. = x;
967 }
968
969 Predicate analysis needs to compute the composite predicate:
970
971 1) 'x' use predicate: (n > 0) .AND. (m < 2)
972 2) 'x' default value (non-def) predicate: .NOT. (n > 0)
973 (the predicate chain for phi operand defs can be computed
974 starting from a bb that is control equivalent to the phi's
975 bb and is dominating the operand def.)
976
977 and check overlapping:
978 (n > 0) .AND. (m < 2) .AND. (.NOT. (n > 0))
979 <==> false
980
981 This implementation provides framework that can handle
982 scenarios. (Note that many simple cases are handled properly
983 without the predicate analysis -- this is due to jump threading
984 transformation which eliminates the merge point thus makes
985 path sensitive analysis unnecessary.)
986
987 NUM_PREDS is the number is the number predicate chains, PREDS is
988 the array of chains, PHI is the phi node whose incoming (undefined)
989 paths need to be pruned, and UNINIT_OPNDS is the bitmap holding
990 uninit operand positions. VISITED_PHIS is the pointer set of phi
991 stmts being checked. */
992
993
994 static bool
995 use_pred_not_overlap_with_undef_path_pred (
996 size_t num_preds,
997 VEC(use_pred_info_t, heap) **preds,
998 gimple phi, unsigned uninit_opnds,
999 struct pointer_set_t *visited_phis)
1000 {
1001 unsigned int i, n;
1002 gimple flag_def = 0;
1003 tree boundary_cst = 0;
1004 enum tree_code cmp_code;
1005 bool swap_cond = false;
1006 bool invert = false;
1007 VEC(use_pred_info_t, heap) *the_pred_chain;
1008 bitmap visited_flag_phis = NULL;
1009 bool all_pruned = false;
1010
1011 gcc_assert (num_preds > 0);
1012 /* Find within the common prefix of multiple predicate chains
1013 a predicate that is a comparison of a flag variable against
1014 a constant. */
1015 the_pred_chain = preds[0];
1016 n = VEC_length (use_pred_info_t, the_pred_chain);
1017 for (i = 0; i < n; i++)
1018 {
1019 gimple cond;
1020 tree cond_lhs, cond_rhs, flag = 0;
1021
1022 use_pred_info_t the_pred
1023 = VEC_index (use_pred_info_t, the_pred_chain, i);
1024
1025 cond = the_pred->cond;
1026 invert = the_pred->invert;
1027 cond_lhs = gimple_cond_lhs (cond);
1028 cond_rhs = gimple_cond_rhs (cond);
1029 cmp_code = gimple_cond_code (cond);
1030
1031 if (cond_lhs != NULL_TREE && TREE_CODE (cond_lhs) == SSA_NAME
1032 && cond_rhs != NULL_TREE && is_gimple_constant (cond_rhs))
1033 {
1034 boundary_cst = cond_rhs;
1035 flag = cond_lhs;
1036 }
1037 else if (cond_rhs != NULL_TREE && TREE_CODE (cond_rhs) == SSA_NAME
1038 && cond_lhs != NULL_TREE && is_gimple_constant (cond_lhs))
1039 {
1040 boundary_cst = cond_lhs;
1041 flag = cond_rhs;
1042 swap_cond = true;
1043 }
1044
1045 if (!flag)
1046 continue;
1047
1048 flag_def = SSA_NAME_DEF_STMT (flag);
1049
1050 if (!flag_def)
1051 continue;
1052
1053 if ((gimple_code (flag_def) == GIMPLE_PHI)
1054 && (gimple_bb (flag_def) == gimple_bb (phi))
1055 && find_matching_predicate_in_rest_chains (
1056 the_pred, preds, num_preds))
1057 break;
1058
1059 flag_def = 0;
1060 }
1061
1062 if (!flag_def)
1063 return false;
1064
1065 /* Now check all the uninit incoming edge has a constant flag value
1066 that is in conflict with the use guard/predicate. */
1067 cmp_code = get_cmp_code (cmp_code, swap_cond, invert);
1068
1069 if (cmp_code == ERROR_MARK)
1070 return false;
1071
1072 all_pruned = prune_uninit_phi_opnds_in_unrealizable_paths (phi,
1073 uninit_opnds,
1074 flag_def,
1075 boundary_cst,
1076 cmp_code,
1077 visited_phis,
1078 &visited_flag_phis);
1079
1080 if (visited_flag_phis)
1081 BITMAP_FREE (visited_flag_phis);
1082
1083 return all_pruned;
1084 }
1085
1086 /* Returns true if TC is AND or OR */
1087
1088 static inline bool
1089 is_and_or_or (enum tree_code tc, tree typ)
1090 {
1091 return (tc == BIT_IOR_EXPR
1092 || (tc == BIT_AND_EXPR
1093 && (typ == 0 || TREE_CODE (typ) == BOOLEAN_TYPE)));
1094 }
1095
1096 typedef struct norm_cond
1097 {
1098 VEC(gimple, heap) *conds;
1099 enum tree_code cond_code;
1100 bool invert;
1101 } *norm_cond_t;
1102
1103
1104 /* Normalizes gimple condition COND. The normalization follows
1105 UD chains to form larger condition expression trees. NORM_COND
1106 holds the normalized result. COND_CODE is the logical opcode
1107 (AND or OR) of the normalized tree. */
1108
1109 static void
1110 normalize_cond_1 (gimple cond,
1111 norm_cond_t norm_cond,
1112 enum tree_code cond_code)
1113 {
1114 enum gimple_code gc;
1115 enum tree_code cur_cond_code;
1116 tree rhs1, rhs2;
1117
1118 gc = gimple_code (cond);
1119 if (gc != GIMPLE_ASSIGN)
1120 {
1121 VEC_safe_push (gimple, heap, norm_cond->conds, cond);
1122 return;
1123 }
1124
1125 cur_cond_code = gimple_assign_rhs_code (cond);
1126 rhs1 = gimple_assign_rhs1 (cond);
1127 rhs2 = gimple_assign_rhs2 (cond);
1128 if (cur_cond_code == NE_EXPR)
1129 {
1130 if (integer_zerop (rhs2)
1131 && (TREE_CODE (rhs1) == SSA_NAME))
1132 normalize_cond_1 (
1133 SSA_NAME_DEF_STMT (rhs1),
1134 norm_cond, cond_code);
1135 else if (integer_zerop (rhs1)
1136 && (TREE_CODE (rhs2) == SSA_NAME))
1137 normalize_cond_1 (
1138 SSA_NAME_DEF_STMT (rhs2),
1139 norm_cond, cond_code);
1140 else
1141 VEC_safe_push (gimple, heap, norm_cond->conds, cond);
1142
1143 return;
1144 }
1145
1146 if (is_and_or_or (cur_cond_code, TREE_TYPE (rhs1))
1147 && (cond_code == cur_cond_code || cond_code == ERROR_MARK)
1148 && (TREE_CODE (rhs1) == SSA_NAME && TREE_CODE (rhs2) == SSA_NAME))
1149 {
1150 normalize_cond_1 (SSA_NAME_DEF_STMT (rhs1),
1151 norm_cond, cur_cond_code);
1152 normalize_cond_1 (SSA_NAME_DEF_STMT (rhs2),
1153 norm_cond, cur_cond_code);
1154 norm_cond->cond_code = cur_cond_code;
1155 }
1156 else
1157 VEC_safe_push (gimple, heap, norm_cond->conds, cond);
1158 }
1159
1160 /* See normalize_cond_1 for details. INVERT is a flag to indicate
1161 if COND needs to be inverted or not. */
1162
1163 static void
1164 normalize_cond (gimple cond, norm_cond_t norm_cond, bool invert)
1165 {
1166 enum tree_code cond_code;
1167
1168 norm_cond->cond_code = ERROR_MARK;
1169 norm_cond->invert = false;
1170 norm_cond->conds = NULL;
1171 gcc_assert (gimple_code (cond) == GIMPLE_COND);
1172 cond_code = gimple_cond_code (cond);
1173 if (invert)
1174 cond_code = invert_tree_comparison (cond_code, false);
1175
1176 if (cond_code == NE_EXPR)
1177 {
1178 if (integer_zerop (gimple_cond_rhs (cond))
1179 && (TREE_CODE (gimple_cond_lhs (cond)) == SSA_NAME))
1180 normalize_cond_1 (
1181 SSA_NAME_DEF_STMT (gimple_cond_lhs (cond)),
1182 norm_cond, ERROR_MARK);
1183 else if (integer_zerop (gimple_cond_lhs (cond))
1184 && (TREE_CODE (gimple_cond_rhs (cond)) == SSA_NAME))
1185 normalize_cond_1 (
1186 SSA_NAME_DEF_STMT (gimple_cond_rhs (cond)),
1187 norm_cond, ERROR_MARK);
1188 else
1189 {
1190 VEC_safe_push (gimple, heap, norm_cond->conds, cond);
1191 norm_cond->invert = invert;
1192 }
1193 }
1194 else
1195 {
1196 VEC_safe_push (gimple, heap, norm_cond->conds, cond);
1197 norm_cond->invert = invert;
1198 }
1199
1200 gcc_assert (VEC_length (gimple, norm_cond->conds) == 1
1201 || is_and_or_or (norm_cond->cond_code, NULL));
1202 }
1203
1204 /* Returns true if the domain for condition COND1 is a subset of
1205 COND2. REVERSE is a flag. when it is true the function checks
1206 if COND1 is a superset of COND2. INVERT1 and INVERT2 are flags
1207 to indicate if COND1 and COND2 need to be inverted or not. */
1208
1209 static bool
1210 is_gcond_subset_of (gimple cond1, bool invert1,
1211 gimple cond2, bool invert2,
1212 bool reverse)
1213 {
1214 enum gimple_code gc1, gc2;
1215 enum tree_code cond1_code, cond2_code;
1216 gimple tmp;
1217 tree cond1_lhs, cond1_rhs, cond2_lhs, cond2_rhs;
1218
1219 /* Take the short cut. */
1220 if (cond1 == cond2)
1221 return true;
1222
1223 if (reverse)
1224 {
1225 tmp = cond1;
1226 cond1 = cond2;
1227 cond2 = tmp;
1228 }
1229
1230 gc1 = gimple_code (cond1);
1231 gc2 = gimple_code (cond2);
1232
1233 if ((gc1 != GIMPLE_ASSIGN && gc1 != GIMPLE_COND)
1234 || (gc2 != GIMPLE_ASSIGN && gc2 != GIMPLE_COND))
1235 return cond1 == cond2;
1236
1237 cond1_code = ((gc1 == GIMPLE_ASSIGN)
1238 ? gimple_assign_rhs_code (cond1)
1239 : gimple_cond_code (cond1));
1240
1241 cond2_code = ((gc2 == GIMPLE_ASSIGN)
1242 ? gimple_assign_rhs_code (cond2)
1243 : gimple_cond_code (cond2));
1244
1245 if (TREE_CODE_CLASS (cond1_code) != tcc_comparison
1246 || TREE_CODE_CLASS (cond2_code) != tcc_comparison)
1247 return false;
1248
1249 if (invert1)
1250 cond1_code = invert_tree_comparison (cond1_code, false);
1251 if (invert2)
1252 cond2_code = invert_tree_comparison (cond2_code, false);
1253
1254 cond1_lhs = ((gc1 == GIMPLE_ASSIGN)
1255 ? gimple_assign_rhs1 (cond1)
1256 : gimple_cond_lhs (cond1));
1257 cond1_rhs = ((gc1 == GIMPLE_ASSIGN)
1258 ? gimple_assign_rhs2 (cond1)
1259 : gimple_cond_rhs (cond1));
1260 cond2_lhs = ((gc2 == GIMPLE_ASSIGN)
1261 ? gimple_assign_rhs1 (cond2)
1262 : gimple_cond_lhs (cond2));
1263 cond2_rhs = ((gc2 == GIMPLE_ASSIGN)
1264 ? gimple_assign_rhs2 (cond2)
1265 : gimple_cond_rhs (cond2));
1266
1267 /* Assuming const operands have been swapped to the
1268 rhs at this point of the analysis. */
1269
1270 if (cond1_lhs != cond2_lhs)
1271 return false;
1272
1273 if (!is_gimple_constant (cond1_rhs)
1274 || TREE_CODE (cond1_rhs) != INTEGER_CST)
1275 return (cond1_rhs == cond2_rhs);
1276
1277 if (!is_gimple_constant (cond2_rhs)
1278 || TREE_CODE (cond2_rhs) != INTEGER_CST)
1279 return (cond1_rhs == cond2_rhs);
1280
1281 if (cond1_code == EQ_EXPR)
1282 return is_value_included_in (cond1_rhs,
1283 cond2_rhs, cond2_code);
1284 if (cond1_code == NE_EXPR || cond2_code == EQ_EXPR)
1285 return ((cond2_code == cond1_code)
1286 && tree_int_cst_equal (cond1_rhs, cond2_rhs));
1287
1288 if (((cond1_code == GE_EXPR || cond1_code == GT_EXPR)
1289 && (cond2_code == LE_EXPR || cond2_code == LT_EXPR))
1290 || ((cond1_code == LE_EXPR || cond1_code == LT_EXPR)
1291 && (cond2_code == GE_EXPR || cond2_code == GT_EXPR)))
1292 return false;
1293
1294 if (cond1_code != GE_EXPR && cond1_code != GT_EXPR
1295 && cond1_code != LE_EXPR && cond1_code != LT_EXPR)
1296 return false;
1297
1298 if (cond1_code == GT_EXPR)
1299 {
1300 cond1_code = GE_EXPR;
1301 cond1_rhs = fold_binary (PLUS_EXPR, TREE_TYPE (cond1_rhs),
1302 cond1_rhs,
1303 fold_convert (TREE_TYPE (cond1_rhs),
1304 integer_one_node));
1305 }
1306 else if (cond1_code == LT_EXPR)
1307 {
1308 cond1_code = LE_EXPR;
1309 cond1_rhs = fold_binary (MINUS_EXPR, TREE_TYPE (cond1_rhs),
1310 cond1_rhs,
1311 fold_convert (TREE_TYPE (cond1_rhs),
1312 integer_one_node));
1313 }
1314
1315 if (!cond1_rhs)
1316 return false;
1317
1318 gcc_assert (cond1_code == GE_EXPR || cond1_code == LE_EXPR);
1319
1320 if (cond2_code == GE_EXPR || cond2_code == GT_EXPR ||
1321 cond2_code == LE_EXPR || cond2_code == LT_EXPR)
1322 return is_value_included_in (cond1_rhs,
1323 cond2_rhs, cond2_code);
1324 else if (cond2_code == NE_EXPR)
1325 return
1326 (is_value_included_in (cond1_rhs,
1327 cond2_rhs, cond2_code)
1328 && !is_value_included_in (cond2_rhs,
1329 cond1_rhs, cond1_code));
1330 return false;
1331 }
1332
1333 /* Returns true if the domain of the condition expression
1334 in COND is a subset of any of the sub-conditions
1335 of the normalized condtion NORM_COND. INVERT is a flag
1336 to indicate of the COND needs to be inverted.
1337 REVERSE is a flag. When it is true, the check is reversed --
1338 it returns true if COND is a superset of any of the subconditions
1339 of NORM_COND. */
1340
1341 static bool
1342 is_subset_of_any (gimple cond, bool invert,
1343 norm_cond_t norm_cond, bool reverse)
1344 {
1345 size_t i;
1346 size_t len = VEC_length (gimple, norm_cond->conds);
1347
1348 for (i = 0; i < len; i++)
1349 {
1350 if (is_gcond_subset_of (cond, invert,
1351 VEC_index (gimple, norm_cond->conds, i),
1352 false, reverse))
1353 return true;
1354 }
1355 return false;
1356 }
1357
1358 /* NORM_COND1 and NORM_COND2 are normalized logical/BIT OR
1359 expressions (formed by following UD chains not control
1360 dependence chains). The function returns true of domain
1361 of and expression NORM_COND1 is a subset of NORM_COND2's.
1362 The implementation is conservative, and it returns false if
1363 it the inclusion relationship may not hold. */
1364
1365 static bool
1366 is_or_set_subset_of (norm_cond_t norm_cond1,
1367 norm_cond_t norm_cond2)
1368 {
1369 size_t i;
1370 size_t len = VEC_length (gimple, norm_cond1->conds);
1371
1372 for (i = 0; i < len; i++)
1373 {
1374 if (!is_subset_of_any (VEC_index (gimple, norm_cond1->conds, i),
1375 false, norm_cond2, false))
1376 return false;
1377 }
1378 return true;
1379 }
1380
1381 /* NORM_COND1 and NORM_COND2 are normalized logical AND
1382 expressions (formed by following UD chains not control
1383 dependence chains). The function returns true of domain
1384 of and expression NORM_COND1 is a subset of NORM_COND2's. */
1385
1386 static bool
1387 is_and_set_subset_of (norm_cond_t norm_cond1,
1388 norm_cond_t norm_cond2)
1389 {
1390 size_t i;
1391 size_t len = VEC_length (gimple, norm_cond2->conds);
1392
1393 for (i = 0; i < len; i++)
1394 {
1395 if (!is_subset_of_any (VEC_index (gimple, norm_cond2->conds, i),
1396 false, norm_cond1, true))
1397 return false;
1398 }
1399 return true;
1400 }
1401
1402 /* Returns true of the domain if NORM_COND1 is a subset
1403 of that of NORM_COND2. Returns false if it can not be
1404 proved to be so. */
1405
1406 static bool
1407 is_norm_cond_subset_of (norm_cond_t norm_cond1,
1408 norm_cond_t norm_cond2)
1409 {
1410 size_t i;
1411 enum tree_code code1, code2;
1412
1413 code1 = norm_cond1->cond_code;
1414 code2 = norm_cond2->cond_code;
1415
1416 if (code1 == BIT_AND_EXPR)
1417 {
1418 /* Both conditions are AND expressions. */
1419 if (code2 == BIT_AND_EXPR)
1420 return is_and_set_subset_of (norm_cond1, norm_cond2);
1421 /* NORM_COND1 is an AND expression, and NORM_COND2 is an OR
1422 expression. In this case, returns true if any subexpression
1423 of NORM_COND1 is a subset of any subexpression of NORM_COND2. */
1424 else if (code2 == BIT_IOR_EXPR)
1425 {
1426 size_t len1;
1427 len1 = VEC_length (gimple, norm_cond1->conds);
1428 for (i = 0; i < len1; i++)
1429 {
1430 gimple cond1 = VEC_index (gimple, norm_cond1->conds, i);
1431 if (is_subset_of_any (cond1, false, norm_cond2, false))
1432 return true;
1433 }
1434 return false;
1435 }
1436 else
1437 {
1438 gcc_assert (code2 == ERROR_MARK);
1439 gcc_assert (VEC_length (gimple, norm_cond2->conds) == 1);
1440 return is_subset_of_any (VEC_index (gimple, norm_cond2->conds, 0),
1441 norm_cond2->invert, norm_cond1, true);
1442 }
1443 }
1444 /* NORM_COND1 is an OR expression */
1445 else if (code1 == BIT_IOR_EXPR)
1446 {
1447 if (code2 != code1)
1448 return false;
1449
1450 return is_or_set_subset_of (norm_cond1, norm_cond2);
1451 }
1452 else
1453 {
1454 gcc_assert (code1 == ERROR_MARK);
1455 gcc_assert (VEC_length (gimple, norm_cond1->conds) == 1);
1456 /* Conservatively returns false if NORM_COND1 is non-decomposible
1457 and NORM_COND2 is an AND expression. */
1458 if (code2 == BIT_AND_EXPR)
1459 return false;
1460
1461 if (code2 == BIT_IOR_EXPR)
1462 return is_subset_of_any (VEC_index (gimple, norm_cond1->conds, 0),
1463 norm_cond1->invert, norm_cond2, false);
1464
1465 gcc_assert (code2 == ERROR_MARK);
1466 gcc_assert (VEC_length (gimple, norm_cond2->conds) == 1);
1467 return is_gcond_subset_of (VEC_index (gimple, norm_cond1->conds, 0),
1468 norm_cond1->invert,
1469 VEC_index (gimple, norm_cond2->conds, 0),
1470 norm_cond2->invert, false);
1471 }
1472 }
1473
1474 /* Returns true of the domain of single predicate expression
1475 EXPR1 is a subset of that of EXPR2. Returns false if it
1476 can not be proved. */
1477
1478 static bool
1479 is_pred_expr_subset_of (use_pred_info_t expr1,
1480 use_pred_info_t expr2)
1481 {
1482 gimple cond1, cond2;
1483 enum tree_code code1, code2;
1484 struct norm_cond norm_cond1, norm_cond2;
1485 bool is_subset = false;
1486
1487 cond1 = expr1->cond;
1488 cond2 = expr2->cond;
1489 code1 = gimple_cond_code (cond1);
1490 code2 = gimple_cond_code (cond2);
1491
1492 if (expr1->invert)
1493 code1 = invert_tree_comparison (code1, false);
1494 if (expr2->invert)
1495 code2 = invert_tree_comparison (code2, false);
1496
1497 /* Fast path -- match exactly */
1498 if ((gimple_cond_lhs (cond1) == gimple_cond_lhs (cond2))
1499 && (gimple_cond_rhs (cond1) == gimple_cond_rhs (cond2))
1500 && (code1 == code2))
1501 return true;
1502
1503 /* Normalize conditions. To keep NE_EXPR, do not invert
1504 with both need inversion. */
1505 normalize_cond (cond1, &norm_cond1, (expr1->invert));
1506 normalize_cond (cond2, &norm_cond2, (expr2->invert));
1507
1508 is_subset = is_norm_cond_subset_of (&norm_cond1, &norm_cond2);
1509
1510 /* Free memory */
1511 VEC_free (gimple, heap, norm_cond1.conds);
1512 VEC_free (gimple, heap, norm_cond2.conds);
1513 return is_subset ;
1514 }
1515
1516 /* Returns true if the domain of PRED1 is a subset
1517 of that of PRED2. Returns false if it can not be proved so. */
1518
1519 static bool
1520 is_pred_chain_subset_of (VEC(use_pred_info_t, heap) *pred1,
1521 VEC(use_pred_info_t, heap) *pred2)
1522 {
1523 size_t np1, np2, i1, i2;
1524
1525 np1 = VEC_length (use_pred_info_t, pred1);
1526 np2 = VEC_length (use_pred_info_t, pred2);
1527
1528 for (i2 = 0; i2 < np2; i2++)
1529 {
1530 bool found = false;
1531 use_pred_info_t info2
1532 = VEC_index (use_pred_info_t, pred2, i2);
1533 for (i1 = 0; i1 < np1; i1++)
1534 {
1535 use_pred_info_t info1
1536 = VEC_index (use_pred_info_t, pred1, i1);
1537 if (is_pred_expr_subset_of (info1, info2))
1538 {
1539 found = true;
1540 break;
1541 }
1542 }
1543 if (!found)
1544 return false;
1545 }
1546 return true;
1547 }
1548
1549 /* Returns true if the domain defined by
1550 one pred chain ONE_PRED is a subset of the domain
1551 of *PREDS. It returns false if ONE_PRED's domain is
1552 not a subset of any of the sub-domains of PREDS (
1553 corresponding to each individual chains in it), even
1554 though it may be still be a subset of whole domain
1555 of PREDS which is the union (ORed) of all its subdomains.
1556 In other words, the result is conservative. */
1557
1558 static bool
1559 is_included_in (VEC(use_pred_info_t, heap) *one_pred,
1560 VEC(use_pred_info_t, heap) **preds,
1561 size_t n)
1562 {
1563 size_t i;
1564
1565 for (i = 0; i < n; i++)
1566 {
1567 if (is_pred_chain_subset_of (one_pred, preds[i]))
1568 return true;
1569 }
1570
1571 return false;
1572 }
1573
1574 /* compares two predicate sets PREDS1 and PREDS2 and returns
1575 true if the domain defined by PREDS1 is a superset
1576 of PREDS2's domain. N1 and N2 are array sizes of PREDS1 and
1577 PREDS2 respectively. The implementation chooses not to build
1578 generic trees (and relying on the folding capability of the
1579 compiler), but instead performs brute force comparison of
1580 individual predicate chains (won't be a compile time problem
1581 as the chains are pretty short). When the function returns
1582 false, it does not necessarily mean *PREDS1 is not a superset
1583 of *PREDS2, but mean it may not be so since the analysis can
1584 not prove it. In such cases, false warnings may still be
1585 emitted. */
1586
1587 static bool
1588 is_superset_of (VEC(use_pred_info_t, heap) **preds1,
1589 size_t n1,
1590 VEC(use_pred_info_t, heap) **preds2,
1591 size_t n2)
1592 {
1593 size_t i;
1594 VEC(use_pred_info_t, heap) *one_pred_chain;
1595
1596 for (i = 0; i < n2; i++)
1597 {
1598 one_pred_chain = preds2[i];
1599 if (!is_included_in (one_pred_chain, preds1, n1))
1600 return false;
1601 }
1602
1603 return true;
1604 }
1605
1606 /* Comparison function used by qsort. It is used to
1607 sort predicate chains to allow predicate
1608 simplification. */
1609
1610 static int
1611 pred_chain_length_cmp (const void *p1, const void *p2)
1612 {
1613 use_pred_info_t i1, i2;
1614 VEC(use_pred_info_t, heap) * const *chain1
1615 = (VEC(use_pred_info_t, heap) * const *)p1;
1616 VEC(use_pred_info_t, heap) * const *chain2
1617 = (VEC(use_pred_info_t, heap) * const *)p2;
1618
1619 if (VEC_length (use_pred_info_t, *chain1)
1620 != VEC_length (use_pred_info_t, *chain2))
1621 return (VEC_length (use_pred_info_t, *chain1)
1622 - VEC_length (use_pred_info_t, *chain2));
1623
1624 i1 = VEC_index (use_pred_info_t, *chain1, 0);
1625 i2 = VEC_index (use_pred_info_t, *chain2, 0);
1626
1627 /* Allow predicates with similar prefix come together. */
1628 if (!i1->invert && i2->invert)
1629 return -1;
1630 else if (i1->invert && !i2->invert)
1631 return 1;
1632
1633 return gimple_uid (i1->cond) - gimple_uid (i2->cond);
1634 }
1635
1636 /* x OR (!x AND y) is equivalent to x OR y.
1637 This function normalizes x1 OR (!x1 AND x2) OR (!x1 AND !x2 AND x3)
1638 into x1 OR x2 OR x3. PREDS is the predicate chains, and N is
1639 the number of chains. Returns true if normalization happens. */
1640
1641 static bool
1642 normalize_preds (VEC(use_pred_info_t, heap) **preds, size_t *n)
1643 {
1644 size_t i, j, ll;
1645 VEC(use_pred_info_t, heap) *pred_chain;
1646 VEC(use_pred_info_t, heap) *x = 0;
1647 use_pred_info_t xj = 0, nxj = 0;
1648
1649 if (*n < 2)
1650 return false;
1651
1652 /* First sort the chains in ascending order of lengths. */
1653 qsort (preds, *n, sizeof (void *), pred_chain_length_cmp);
1654 pred_chain = preds[0];
1655 ll = VEC_length (use_pred_info_t, pred_chain);
1656 if (ll != 1)
1657 {
1658 if (ll == 2)
1659 {
1660 use_pred_info_t xx, yy, xx2, nyy;
1661 VEC(use_pred_info_t, heap) *pred_chain2 = preds[1];
1662 if (VEC_length (use_pred_info_t, pred_chain2) != 2)
1663 return false;
1664
1665 /* See if simplification x AND y OR x AND !y is possible. */
1666 xx = VEC_index (use_pred_info_t, pred_chain, 0);
1667 yy = VEC_index (use_pred_info_t, pred_chain, 1);
1668 xx2 = VEC_index (use_pred_info_t, pred_chain2, 0);
1669 nyy = VEC_index (use_pred_info_t, pred_chain2, 1);
1670 if (gimple_cond_lhs (xx->cond) != gimple_cond_lhs (xx2->cond)
1671 || gimple_cond_rhs (xx->cond) != gimple_cond_rhs (xx2->cond)
1672 || gimple_cond_code (xx->cond) != gimple_cond_code (xx2->cond)
1673 || (xx->invert != xx2->invert))
1674 return false;
1675 if (gimple_cond_lhs (yy->cond) != gimple_cond_lhs (nyy->cond)
1676 || gimple_cond_rhs (yy->cond) != gimple_cond_rhs (nyy->cond)
1677 || gimple_cond_code (yy->cond) != gimple_cond_code (nyy->cond)
1678 || (yy->invert == nyy->invert))
1679 return false;
1680
1681 /* Now merge the first two chains. */
1682 free (yy);
1683 free (nyy);
1684 free (xx2);
1685 VEC_free (use_pred_info_t, heap, pred_chain);
1686 VEC_free (use_pred_info_t, heap, pred_chain2);
1687 pred_chain = 0;
1688 VEC_safe_push (use_pred_info_t, heap, pred_chain, xx);
1689 preds[0] = pred_chain;
1690 for (i = 1; i < *n - 1; i++)
1691 preds[i] = preds[i + 1];
1692
1693 preds[*n - 1] = 0;
1694 *n = *n - 1;
1695 }
1696 else
1697 return false;
1698 }
1699
1700 VEC_safe_push (use_pred_info_t, heap, x,
1701 VEC_index (use_pred_info_t, pred_chain, 0));
1702
1703 /* The loop extracts x1, x2, x3, etc from chains
1704 x1 OR (!x1 AND x2) OR (!x1 AND !x2 AND x3) OR ... */
1705 for (i = 1; i < *n; i++)
1706 {
1707 pred_chain = preds[i];
1708 if (VEC_length (use_pred_info_t, pred_chain) != i + 1)
1709 return false;
1710
1711 for (j = 0; j < i; j++)
1712 {
1713 xj = VEC_index (use_pred_info_t, x, j);
1714 nxj = VEC_index (use_pred_info_t, pred_chain, j);
1715
1716 /* Check if nxj is !xj */
1717 if (gimple_cond_lhs (xj->cond) != gimple_cond_lhs (nxj->cond)
1718 || gimple_cond_rhs (xj->cond) != gimple_cond_rhs (nxj->cond)
1719 || gimple_cond_code (xj->cond) != gimple_cond_code (nxj->cond)
1720 || (xj->invert == nxj->invert))
1721 return false;
1722 }
1723
1724 VEC_safe_push (use_pred_info_t, heap, x,
1725 VEC_index (use_pred_info_t, pred_chain, i));
1726 }
1727
1728 /* Now normalize the pred chains using the extraced x1, x2, x3 etc. */
1729 for (j = 0; j < *n; j++)
1730 {
1731 use_pred_info_t t;
1732 xj = VEC_index (use_pred_info_t, x, j);
1733
1734 t = XNEW (struct use_pred_info);
1735 *t = *xj;
1736
1737 VEC_replace (use_pred_info_t, x, j, t);
1738 }
1739
1740 for (i = 0; i < *n; i++)
1741 {
1742 pred_chain = preds[i];
1743 for (j = 0; j < VEC_length (use_pred_info_t, pred_chain); j++)
1744 free (VEC_index (use_pred_info_t, pred_chain, j));
1745 VEC_free (use_pred_info_t, heap, pred_chain);
1746 pred_chain = 0;
1747 /* A new chain. */
1748 VEC_safe_push (use_pred_info_t, heap, pred_chain,
1749 VEC_index (use_pred_info_t, x, i));
1750 preds[i] = pred_chain;
1751 }
1752 return true;
1753 }
1754
1755
1756
1757 /* Computes the predicates that guard the use and checks
1758 if the incoming paths that have empty (or possibly
1759 empty) defintion can be pruned/filtered. The function returns
1760 true if it can be determined that the use of PHI's def in
1761 USE_STMT is guarded with a predicate set not overlapping with
1762 predicate sets of all runtime paths that do not have a definition.
1763 Returns false if it is not or it can not be determined. USE_BB is
1764 the bb of the use (for phi operand use, the bb is not the bb of
1765 the phi stmt, but the src bb of the operand edge). UNINIT_OPNDS
1766 is a bit vector. If an operand of PHI is uninitialized, the
1767 correponding bit in the vector is 1. VISIED_PHIS is a pointer
1768 set of phis being visted. */
1769
1770 static bool
1771 is_use_properly_guarded (gimple use_stmt,
1772 basic_block use_bb,
1773 gimple phi,
1774 unsigned uninit_opnds,
1775 struct pointer_set_t *visited_phis)
1776 {
1777 basic_block phi_bb;
1778 VEC(use_pred_info_t, heap) **preds = 0;
1779 VEC(use_pred_info_t, heap) **def_preds = 0;
1780 size_t num_preds = 0, num_def_preds = 0;
1781 bool has_valid_preds = false;
1782 bool is_properly_guarded = false;
1783
1784 if (pointer_set_insert (visited_phis, phi))
1785 return false;
1786
1787 phi_bb = gimple_bb (phi);
1788
1789 if (is_non_loop_exit_postdominating (use_bb, phi_bb))
1790 return false;
1791
1792 has_valid_preds = find_predicates (&preds, &num_preds,
1793 phi_bb, use_bb);
1794
1795 if (!has_valid_preds)
1796 {
1797 destroy_predicate_vecs (num_preds, preds);
1798 return false;
1799 }
1800
1801 if (dump_file)
1802 dump_predicates (use_stmt, num_preds, preds,
1803 "\nUse in stmt ");
1804
1805 has_valid_preds = find_def_preds (&def_preds,
1806 &num_def_preds, phi);
1807
1808 if (has_valid_preds)
1809 {
1810 bool normed;
1811 if (dump_file)
1812 dump_predicates (phi, num_def_preds, def_preds,
1813 "Operand defs of phi ");
1814
1815 normed = normalize_preds (def_preds, &num_def_preds);
1816 if (normed && dump_file)
1817 {
1818 fprintf (dump_file, "\nNormalized to\n");
1819 dump_predicates (phi, num_def_preds, def_preds,
1820 "Operand defs of phi ");
1821 }
1822 is_properly_guarded =
1823 is_superset_of (def_preds, num_def_preds,
1824 preds, num_preds);
1825 }
1826
1827 /* further prune the dead incoming phi edges. */
1828 if (!is_properly_guarded)
1829 is_properly_guarded
1830 = use_pred_not_overlap_with_undef_path_pred (
1831 num_preds, preds, phi, uninit_opnds, visited_phis);
1832
1833 destroy_predicate_vecs (num_preds, preds);
1834 destroy_predicate_vecs (num_def_preds, def_preds);
1835 return is_properly_guarded;
1836 }
1837
1838 /* Searches through all uses of a potentially
1839 uninitialized variable defined by PHI and returns a use
1840 statement if the use is not properly guarded. It returns
1841 NULL if all uses are guarded. UNINIT_OPNDS is a bitvector
1842 holding the position(s) of uninit PHI operands. WORKLIST
1843 is the vector of candidate phis that may be updated by this
1844 function. ADDED_TO_WORKLIST is the pointer set tracking
1845 if the new phi is already in the worklist. */
1846
1847 static gimple
1848 find_uninit_use (gimple phi, unsigned uninit_opnds,
1849 VEC(gimple, heap) **worklist,
1850 struct pointer_set_t *added_to_worklist)
1851 {
1852 tree phi_result;
1853 use_operand_p use_p;
1854 gimple use_stmt;
1855 imm_use_iterator iter;
1856
1857 phi_result = gimple_phi_result (phi);
1858
1859 FOR_EACH_IMM_USE_FAST (use_p, iter, phi_result)
1860 {
1861 struct pointer_set_t *visited_phis;
1862 basic_block use_bb;
1863
1864 use_stmt = USE_STMT (use_p);
1865 if (is_gimple_debug (use_stmt))
1866 continue;
1867
1868 visited_phis = pointer_set_create ();
1869
1870 if (gimple_code (use_stmt) == GIMPLE_PHI)
1871 use_bb = gimple_phi_arg_edge (use_stmt,
1872 PHI_ARG_INDEX_FROM_USE (use_p))->src;
1873 else
1874 use_bb = gimple_bb (use_stmt);
1875
1876 if (is_use_properly_guarded (use_stmt,
1877 use_bb,
1878 phi,
1879 uninit_opnds,
1880 visited_phis))
1881 {
1882 pointer_set_destroy (visited_phis);
1883 continue;
1884 }
1885 pointer_set_destroy (visited_phis);
1886
1887 if (dump_file && (dump_flags & TDF_DETAILS))
1888 {
1889 fprintf (dump_file, "[CHECK]: Found unguarded use: ");
1890 print_gimple_stmt (dump_file, use_stmt, 0, 0);
1891 }
1892 /* Found one real use, return. */
1893 if (gimple_code (use_stmt) != GIMPLE_PHI)
1894 return use_stmt;
1895
1896 /* Found a phi use that is not guarded,
1897 add the phi to the worklist. */
1898 if (!pointer_set_insert (added_to_worklist,
1899 use_stmt))
1900 {
1901 if (dump_file && (dump_flags & TDF_DETAILS))
1902 {
1903 fprintf (dump_file, "[WORKLIST]: Update worklist with phi: ");
1904 print_gimple_stmt (dump_file, use_stmt, 0, 0);
1905 }
1906
1907 VEC_safe_push (gimple, heap, *worklist, use_stmt);
1908 pointer_set_insert (possibly_undefined_names,
1909 phi_result);
1910 }
1911 }
1912
1913 return NULL;
1914 }
1915
1916 /* Look for inputs to PHI that are SSA_NAMEs that have empty definitions
1917 and gives warning if there exists a runtime path from the entry to a
1918 use of the PHI def that does not contain a definition. In other words,
1919 the warning is on the real use. The more dead paths that can be pruned
1920 by the compiler, the fewer false positives the warning is. WORKLIST
1921 is a vector of candidate phis to be examined. ADDED_TO_WORKLIST is
1922 a pointer set tracking if the new phi is added to the worklist or not. */
1923
1924 static void
1925 warn_uninitialized_phi (gimple phi, VEC(gimple, heap) **worklist,
1926 struct pointer_set_t *added_to_worklist)
1927 {
1928 unsigned uninit_opnds;
1929 gimple uninit_use_stmt = 0;
1930 tree uninit_op;
1931
1932 /* Don't look at memory tags. */
1933 if (!is_gimple_reg (gimple_phi_result (phi)))
1934 return;
1935
1936 uninit_opnds = compute_uninit_opnds_pos (phi);
1937
1938 if (MASK_EMPTY (uninit_opnds))
1939 return;
1940
1941 if (dump_file && (dump_flags & TDF_DETAILS))
1942 {
1943 fprintf (dump_file, "[CHECK]: examining phi: ");
1944 print_gimple_stmt (dump_file, phi, 0, 0);
1945 }
1946
1947 /* Now check if we have any use of the value without proper guard. */
1948 uninit_use_stmt = find_uninit_use (phi, uninit_opnds,
1949 worklist, added_to_worklist);
1950
1951 /* All uses are properly guarded. */
1952 if (!uninit_use_stmt)
1953 return;
1954
1955 uninit_op = gimple_phi_arg_def (phi, MASK_FIRST_SET_BIT (uninit_opnds));
1956 warn_uninit (OPT_Wmaybe_uninitialized, uninit_op, SSA_NAME_VAR (uninit_op),
1957 SSA_NAME_VAR (uninit_op),
1958 "%qD may be used uninitialized in this function",
1959 uninit_use_stmt);
1960
1961 }
1962
1963
1964 /* Entry point to the late uninitialized warning pass. */
1965
1966 static unsigned int
1967 execute_late_warn_uninitialized (void)
1968 {
1969 basic_block bb;
1970 gimple_stmt_iterator gsi;
1971 VEC(gimple, heap) *worklist = 0;
1972 struct pointer_set_t *added_to_worklist;
1973
1974 calculate_dominance_info (CDI_DOMINATORS);
1975 calculate_dominance_info (CDI_POST_DOMINATORS);
1976 /* Re-do the plain uninitialized variable check, as optimization may have
1977 straightened control flow. Do this first so that we don't accidentally
1978 get a "may be" warning when we'd have seen an "is" warning later. */
1979 warn_uninitialized_vars (/*warn_possibly_uninitialized=*/1);
1980
1981 timevar_push (TV_TREE_UNINIT);
1982
1983 possibly_undefined_names = pointer_set_create ();
1984 added_to_worklist = pointer_set_create ();
1985
1986 /* Initialize worklist */
1987 FOR_EACH_BB (bb)
1988 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1989 {
1990 gimple phi = gsi_stmt (gsi);
1991 size_t n, i;
1992
1993 n = gimple_phi_num_args (phi);
1994
1995 /* Don't look at memory tags. */
1996 if (!is_gimple_reg (gimple_phi_result (phi)))
1997 continue;
1998
1999 for (i = 0; i < n; ++i)
2000 {
2001 tree op = gimple_phi_arg_def (phi, i);
2002 if (TREE_CODE (op) == SSA_NAME
2003 && ssa_undefined_value_p (op))
2004 {
2005 VEC_safe_push (gimple, heap, worklist, phi);
2006 pointer_set_insert (added_to_worklist, phi);
2007 if (dump_file && (dump_flags & TDF_DETAILS))
2008 {
2009 fprintf (dump_file, "[WORKLIST]: add to initial list: ");
2010 print_gimple_stmt (dump_file, phi, 0, 0);
2011 }
2012 break;
2013 }
2014 }
2015 }
2016
2017 while (VEC_length (gimple, worklist) != 0)
2018 {
2019 gimple cur_phi = 0;
2020 cur_phi = VEC_pop (gimple, worklist);
2021 warn_uninitialized_phi (cur_phi, &worklist, added_to_worklist);
2022 }
2023
2024 VEC_free (gimple, heap, worklist);
2025 pointer_set_destroy (added_to_worklist);
2026 pointer_set_destroy (possibly_undefined_names);
2027 possibly_undefined_names = NULL;
2028 free_dominance_info (CDI_POST_DOMINATORS);
2029 timevar_pop (TV_TREE_UNINIT);
2030 return 0;
2031 }
2032
2033 static bool
2034 gate_warn_uninitialized (void)
2035 {
2036 return warn_uninitialized != 0;
2037 }
2038
2039 struct gimple_opt_pass pass_late_warn_uninitialized =
2040 {
2041 {
2042 GIMPLE_PASS,
2043 "uninit", /* name */
2044 gate_warn_uninitialized, /* gate */
2045 execute_late_warn_uninitialized, /* execute */
2046 NULL, /* sub */
2047 NULL, /* next */
2048 0, /* static_pass_number */
2049 TV_NONE, /* tv_id */
2050 PROP_ssa, /* properties_required */
2051 0, /* properties_provided */
2052 0, /* properties_destroyed */
2053 0, /* todo_flags_start */
2054 0 /* todo_flags_finish */
2055 }
2056 };