1 /* Predicate aware uninitialized variable warning.
2 Copyright (C) 2001, 2002, 2003, 2004, 2005, 2007, 2008, 2010 Free Software
3 Foundation, Inc.
4 Contributed by Xinliang David Li <davidxl@google.com>
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
12
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "tree.h"
27 #include "flags.h"
28 #include "tm_p.h"
29 #include "basic-block.h"
30 #include "function.h"
31 #include "gimple-pretty-print.h"
32 #include "bitmap.h"
33 #include "pointer-set.h"
34 #include "tree-flow.h"
35 #include "gimple.h"
36 #include "tree-inline.h"
37 #include "hashtab.h"
38 #include "tree-pass.h"
39 #include "diagnostic-core.h"
40
41 /* This implements the pass that does predicate aware warning on uses of
42 possibly uninitialized variables. The pass first collects the set of
43 possibly uninitialized SSA names. For each such name, it walks through
44 all its immediate uses. For each immediate use, it rebuilds the condition
45 expression (the predicate) that guards the use. The predicate is then
46 examined to see if the variable is always defined under that same condition.
47 This is done either by pruning the unrealizable paths that lead to the
48 default definitions or by checking if the predicate set that guards the
49 defining paths is a superset of the use predicate. */
50
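/* As an illustrative sketch (not part of the original comment),
   consider the kind of user code the pass reasons about:

     int x;           // no initializer
     if (cond)
       x = 1;         // definition guarded by (cond)
     ...
     if (cond)
       use (x);       // use guarded by the same predicate

   The use predicate (cond) is covered by the definition predicate
   (cond), so no executable path reaches the use with x undefined and
   no warning is emitted.  */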
51
52 /* Pointer set of potentially undefined ssa names, i.e.,
53 ssa names that are defined by a phi with operands that
54 are undefined or potentially undefined. */
55 static struct pointer_set_t *possibly_undefined_names = 0;
56
57 /* Bit mask handling macros. */
58 #define MASK_SET_BIT(mask, pos) mask |= (1 << pos)
59 #define MASK_TEST_BIT(mask, pos) (mask & (1 << pos))
60 #define MASK_EMPTY(mask) (mask == 0)
61
62 /* Returns the first bit position (starting from LSB)
63 in mask that is non zero. Returns -1 if the mask is empty. */
64 static int
65 get_mask_first_set_bit (unsigned mask)
66 {
67 int pos = 0;
68 if (mask == 0)
69 return -1;
70
71 while ((mask & (1 << pos)) == 0)
72 pos++;
73
74 return pos;
75 }
76 #define MASK_FIRST_SET_BIT(mask) get_mask_first_set_bit (mask)
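/* For example (illustrative values only): for a three-argument phi
   whose arguments 0 and 2 are possibly undefined, the mask computed
   by compute_uninit_opnds_pos below has bits 0 and 2 set (value 5),
   MASK_TEST_BIT (mask, 1) is 0, and MASK_FIRST_SET_BIT (mask) is 0.  */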
77
78
79 /* Return true if T, an SSA_NAME, has an undefined value. */
80
81 bool
82 ssa_undefined_value_p (tree t)
83 {
84 tree var = SSA_NAME_VAR (t);
85
86 /* Parameters get their initial value from the function entry. */
87 if (TREE_CODE (var) == PARM_DECL)
88 return false;
89
90 /* When returning by reference the return address is actually a hidden
91 parameter. */
92 if (TREE_CODE (SSA_NAME_VAR (t)) == RESULT_DECL
93 && DECL_BY_REFERENCE (SSA_NAME_VAR (t)))
94 return false;
95
96 /* Hard register variables get their initial value from the ether. */
97 if (TREE_CODE (var) == VAR_DECL && DECL_HARD_REGISTER (var))
98 return false;
99
100 /* The value is undefined iff its definition statement is empty. */
101 return (gimple_nop_p (SSA_NAME_DEF_STMT (t))
102 || (possibly_undefined_names
103 && pointer_set_contains (possibly_undefined_names, t)));
104 }
105
106 /* Checks if the operand OPND of PHI is defined by
107 another phi with one operand defined by this PHI,
108 but the remaining operands are all defined. If so,
109 returns true to skip this operand as being
110 redundant. Can be enhanced to be more general. */
111
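/* A hypothetical instance of the pattern (SSA names made up):

     # a_1 = PHI <undef_2(D), b_3>
     # b_3 = PHI <a_1, c_4>        <-- OPND b_3 of the first phi

   Apart from a_1 (the result of PHI itself), every operand of b_3's
   defining phi (here c_4) is defined, so b_3 can be skipped as a
   redundant operand.  */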
112 static bool
113 can_skip_redundant_opnd (tree opnd, gimple phi)
114 {
115 gimple op_def;
116 tree phi_def;
117 int i, n;
118
119 phi_def = gimple_phi_result (phi);
120 op_def = SSA_NAME_DEF_STMT (opnd);
121 if (gimple_code (op_def) != GIMPLE_PHI)
122 return false;
123 n = gimple_phi_num_args (op_def);
124 for (i = 0; i < n; ++i)
125 {
126 tree op = gimple_phi_arg_def (op_def, i);
127 if (TREE_CODE (op) != SSA_NAME)
128 continue;
129 if (op != phi_def && ssa_undefined_value_p (op))
130 return false;
131 }
132
133 return true;
134 }
135
136 /* Returns a bit mask holding the positions of arguments in PHI
137 that have empty (or possibly empty) definitions. */
138
139 static unsigned
140 compute_uninit_opnds_pos (gimple phi)
141 {
142 size_t i, n;
143 unsigned uninit_opnds = 0;
144
145 n = gimple_phi_num_args (phi);
146 /* Bail out for phi with too many args. */
147 if (n > 32)
148 return 0;
149
150 for (i = 0; i < n; ++i)
151 {
152 tree op = gimple_phi_arg_def (phi, i);
153 if (TREE_CODE (op) == SSA_NAME
154 && ssa_undefined_value_p (op)
155 && !can_skip_redundant_opnd (op, phi))
156 MASK_SET_BIT (uninit_opnds, i);
157 }
158 return uninit_opnds;
159 }
160
161 /* Find the immediate postdominator PDOM of the specified
162 basic block BLOCK. */
163
164 static inline basic_block
165 find_pdom (basic_block block)
166 {
167 if (block == EXIT_BLOCK_PTR)
168 return EXIT_BLOCK_PTR;
169 else
170 {
171 basic_block bb
172 = get_immediate_dominator (CDI_POST_DOMINATORS, block);
173 if (! bb)
174 return EXIT_BLOCK_PTR;
175 return bb;
176 }
177 }
178
179 /* Find the immediate DOM of the specified
180 basic block BLOCK. */
181
182 static inline basic_block
183 find_dom (basic_block block)
184 {
185 if (block == ENTRY_BLOCK_PTR)
186 return ENTRY_BLOCK_PTR;
187 else
188 {
189 basic_block bb = get_immediate_dominator (CDI_DOMINATORS, block);
190 if (! bb)
191 return ENTRY_BLOCK_PTR;
192 return bb;
193 }
194 }
195
196 /* Returns true if BB1 postdominates BB2 and BB1 is
197 not a loop exit bb. The loop exit bb check is simple and does
198 not cover all cases. */
199
200 static bool
201 is_non_loop_exit_postdominating (basic_block bb1, basic_block bb2)
202 {
203 if (!dominated_by_p (CDI_POST_DOMINATORS, bb2, bb1))
204 return false;
205
206 if (single_pred_p (bb1) && !single_succ_p (bb2))
207 return false;
208
209 return true;
210 }
211
212 /* Find the closest postdominator of a specified BB, which is control
213 equivalent to BB. */
214
215 static inline basic_block
216 find_control_equiv_block (basic_block bb)
217 {
218 basic_block pdom;
219
220 pdom = find_pdom (bb);
221
222 /* Skip the postdominating bb that is also loop exit. */
223 if (!is_non_loop_exit_postdominating (pdom, bb))
224 return NULL;
225
226 if (dominated_by_p (CDI_DOMINATORS, pdom, bb))
227 return pdom;
228
229 return NULL;
230 }
231
232 #define MAX_NUM_CHAINS 8
233 #define MAX_CHAIN_LEN 5
234
235 /* Computes the control dependence chains (paths of edges)
236 for DEP_BB up to the dominating basic block BB (the head node of a
237 chain should be dominated by it). CD_CHAINS is a pointer to a
238 dynamic array holding the result chains. CUR_CD_CHAIN is the current
239 chain being computed. *NUM_CHAINS is the total number of chains. The
240 function returns true if the information is successfully computed,
241 and false if there is no control dependence or it could not be computed. */
242
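/* A rough, made-up illustration: if BB branches to blocks T and F,
   T in turn branches to T1 and T2, and DEP_BB is reachable only
   through T1, then the chain recorded for DEP_BB relative to BB is
   the edge sequence (BB->T, T->T1).  */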
243 static bool
244 compute_control_dep_chain (basic_block bb, basic_block dep_bb,
245 VEC(edge, heap) **cd_chains,
246 size_t *num_chains,
247 VEC(edge, heap) **cur_cd_chain)
248 {
249 edge_iterator ei;
250 edge e;
251 size_t i;
252 bool found_cd_chain = false;
253 size_t cur_chain_len = 0;
254
255 if (EDGE_COUNT (bb->succs) < 2)
256 return false;
257
258 /* Could use a set instead. */
259 cur_chain_len = VEC_length (edge, *cur_cd_chain);
260 if (cur_chain_len > MAX_CHAIN_LEN)
261 return false;
262
263 for (i = 0; i < cur_chain_len; i++)
264 {
265 edge e = VEC_index (edge, *cur_cd_chain, i);
266 /* cycle detected. */
267 if (e->src == bb)
268 return false;
269 }
270
271 FOR_EACH_EDGE (e, ei, bb->succs)
272 {
273 basic_block cd_bb;
274 if (e->flags & (EDGE_FAKE | EDGE_ABNORMAL))
275 continue;
276
277 cd_bb = e->dest;
278 VEC_safe_push (edge, heap, *cur_cd_chain, e);
279 while (!is_non_loop_exit_postdominating (cd_bb, bb))
280 {
281 if (cd_bb == dep_bb)
282 {
283 /* Found a direct control dependence. */
284 if (*num_chains < MAX_NUM_CHAINS)
285 {
286 cd_chains[*num_chains]
287 = VEC_copy (edge, heap, *cur_cd_chain);
288 (*num_chains)++;
289 }
290 found_cd_chain = true;
291 /* check path from next edge. */
292 break;
293 }
294
295 /* Now check if DEP_BB is indirectly control dependent on BB. */
296 if (compute_control_dep_chain (cd_bb, dep_bb, cd_chains,
297 num_chains, cur_cd_chain))
298 {
299 found_cd_chain = true;
300 break;
301 }
302
303 cd_bb = find_pdom (cd_bb);
304 if (cd_bb == EXIT_BLOCK_PTR)
305 break;
306 }
307 VEC_pop (edge, *cur_cd_chain);
308 gcc_assert (VEC_length (edge, *cur_cd_chain) == cur_chain_len);
309 }
310 gcc_assert (VEC_length (edge, *cur_cd_chain) == cur_chain_len);
311
312 return found_cd_chain;
313 }
314
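/* One predicate in a chain: the GIMPLE_COND statement guarding an
   edge, plus a flag telling whether the edge taken corresponds to
   the false (inverted) branch of that condition.  */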
315 typedef struct use_pred_info
316 {
317 gimple cond;
318 bool invert;
319 } *use_pred_info_t;
320
321 DEF_VEC_P(use_pred_info_t);
322 DEF_VEC_ALLOC_P(use_pred_info_t, heap);
323
324
325 /* Converts the chains of control dependence edges into a set of
326 predicates. A control dependence chain is represented by a vector
327 of edges. DEP_CHAINS points to an array of dependence chains.
328 NUM_CHAINS is the size of the chain array. One edge in a dependence
329 chain is mapped to a predicate expression represented by the
330 use_pred_info_t type. One dependence chain is converted to a composite
331 predicate that is the AND of the use_pred_info_t entries mapped to each edge.
332 A composite predicate is represented by a vector of use_pred_info_t. On
333 return, *PREDS points to the resulting array of composite predicates.
334 *NUM_PREDS is the number of composite predicates. */
335
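/* For instance (made-up GIMPLE): a chain consisting of the true edge
   out of "if (a_1 > 10)" followed by the false edge out of
   "if (b_2 != 0)" becomes a composite predicate with two
   use_pred_info_t entries, { cond = "a_1 > 10", invert = false } and
   { cond = "b_2 != 0", invert = true }.  */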
336 static bool
337 convert_control_dep_chain_into_preds (VEC(edge, heap) **dep_chains,
338 size_t num_chains,
339 VEC(use_pred_info_t, heap) ***preds,
340 size_t *num_preds)
341 {
342 bool has_valid_pred = false;
343 size_t i, j;
344 if (num_chains == 0 || num_chains >= MAX_NUM_CHAINS)
345 return false;
346
347 /* Now convert the control dep chain into a set
348 of predicates. */
349 *preds = XCNEWVEC (VEC(use_pred_info_t, heap) *,
350 num_chains);
351 *num_preds = num_chains;
352
353 for (i = 0; i < num_chains; i++)
354 {
355 VEC(edge, heap) *one_cd_chain = dep_chains[i];
356
357 has_valid_pred = false;
358 for (j = 0; j < VEC_length (edge, one_cd_chain); j++)
359 {
360 gimple cond_stmt;
361 gimple_stmt_iterator gsi;
362 basic_block guard_bb;
363 use_pred_info_t one_pred;
364 edge e;
365
366 e = VEC_index (edge, one_cd_chain, j);
367 guard_bb = e->src;
368 gsi = gsi_last_bb (guard_bb);
369 if (gsi_end_p (gsi))
370 {
371 has_valid_pred = false;
372 break;
373 }
374 cond_stmt = gsi_stmt (gsi);
375 if (gimple_code (cond_stmt) == GIMPLE_CALL
376 && EDGE_COUNT (e->src->succs) >= 2)
377 {
378 /* Ignore EH edge. Can add assertion
379 on the other edge's flag. */
380 continue;
381 }
382 /* Skip if there is essentially one successor. */
383 if (EDGE_COUNT (e->src->succs) == 2)
384 {
385 edge e1;
386 edge_iterator ei1;
387 bool skip = false;
388
389 FOR_EACH_EDGE (e1, ei1, e->src->succs)
390 {
391 if (EDGE_COUNT (e1->dest->succs) == 0)
392 {
393 skip = true;
394 break;
395 }
396 }
397 if (skip)
398 continue;
399 }
400 if (gimple_code (cond_stmt) != GIMPLE_COND)
401 {
402 has_valid_pred = false;
403 break;
404 }
405 one_pred = XNEW (struct use_pred_info);
406 one_pred->cond = cond_stmt;
407 one_pred->invert = !!(e->flags & EDGE_FALSE_VALUE);
408 VEC_safe_push (use_pred_info_t, heap, (*preds)[i], one_pred);
409 has_valid_pred = true;
410 }
411
412 if (!has_valid_pred)
413 break;
414 }
415 return has_valid_pred;
416 }
417
418 /* Computes all control dependence chains for USE_BB. The control
419 dependence chains are then converted to an array of composite
420 predicates pointed to by PREDS. PHI_BB is the basic block of
421 the phi whose result is used in USE_BB. */
422
423 static bool
424 find_predicates (VEC(use_pred_info_t, heap) ***preds,
425 size_t *num_preds,
426 basic_block phi_bb,
427 basic_block use_bb)
428 {
429 size_t num_chains = 0, i;
430 VEC(edge, heap) **dep_chains = 0;
431 VEC(edge, heap) *cur_chain = 0;
432 bool has_valid_pred = false;
433 basic_block cd_root = 0;
434
435 dep_chains = XCNEWVEC (VEC(edge, heap) *, MAX_NUM_CHAINS);
436
437 /* First find the closest bb that is control equivalent to PHI_BB
438 that also dominates USE_BB. */
439 cd_root = phi_bb;
440 while (dominated_by_p (CDI_DOMINATORS, use_bb, cd_root))
441 {
442 basic_block ctrl_eq_bb = find_control_equiv_block (cd_root);
443 if (ctrl_eq_bb && dominated_by_p (CDI_DOMINATORS, use_bb, ctrl_eq_bb))
444 cd_root = ctrl_eq_bb;
445 else
446 break;
447 }
448
449 compute_control_dep_chain (cd_root, use_bb,
450 dep_chains, &num_chains,
451 &cur_chain);
452
453 has_valid_pred
454 = convert_control_dep_chain_into_preds (dep_chains,
455 num_chains,
456 preds,
457 num_preds);
458 /* Free individual chain */
459 VEC_free (edge, heap, cur_chain);
460 for (i = 0; i < num_chains; i++)
461 VEC_free (edge, heap, dep_chains[i]);
462 free (dep_chains);
463 return has_valid_pred;
464 }
465
466 /* Computes the set of incoming edges of PHI that have non-empty
467 definitions of a phi chain. The collection will be done
468 recursively on operands that are defined by phis. CD_ROOT
469 is the control dependence root. *EDGES holds the result, and
470 VISITED_PHIS is a pointer set for detecting cycles. */
471
472 static void
473 collect_phi_def_edges (gimple phi, basic_block cd_root,
474 VEC(edge, heap) **edges,
475 struct pointer_set_t *visited_phis)
476 {
477 size_t i, n;
478 edge opnd_edge;
479 tree opnd;
480
481 if (pointer_set_insert (visited_phis, phi))
482 return;
483
484 n = gimple_phi_num_args (phi);
485 for (i = 0; i < n; i++)
486 {
487 opnd_edge = gimple_phi_arg_edge (phi, i);
488 opnd = gimple_phi_arg_def (phi, i);
489
490 if (TREE_CODE (opnd) != SSA_NAME)
491 {
492 if (dump_file && (dump_flags & TDF_DETAILS))
493 {
494 fprintf (dump_file, "\n[CHECK] Found def edge %d in ", (int)i);
495 print_gimple_stmt (dump_file, phi, 0, 0);
496 }
497 VEC_safe_push (edge, heap, *edges, opnd_edge);
498 }
499 else
500 {
501 gimple def = SSA_NAME_DEF_STMT (opnd);
502
503 if (gimple_code (def) == GIMPLE_PHI
504 && dominated_by_p (CDI_DOMINATORS,
505 gimple_bb (def), cd_root))
506 collect_phi_def_edges (def, cd_root, edges,
507 visited_phis);
508 else if (!ssa_undefined_value_p (opnd))
509 {
510 if (dump_file && (dump_flags & TDF_DETAILS))
511 {
512 fprintf (dump_file, "\n[CHECK] Found def edge %d in ", (int)i);
513 print_gimple_stmt (dump_file, phi, 0, 0);
514 }
515 VEC_safe_push (edge, heap, *edges, opnd_edge);
516 }
517 }
518 }
519 }
520
521 /* For each use edge of PHI, computes all control dependence chains.
522 The control dependence chains are then converted to an array of
523 composite predicates pointed to by PREDS. */
524
525 static bool
526 find_def_preds (VEC(use_pred_info_t, heap) ***preds,
527 size_t *num_preds, gimple phi)
528 {
529 size_t num_chains = 0, i, n;
530 VEC(edge, heap) **dep_chains = 0;
531 VEC(edge, heap) *cur_chain = 0;
532 VEC(edge, heap) *def_edges = 0;
533 bool has_valid_pred = false;
534 basic_block phi_bb, cd_root = 0;
535 struct pointer_set_t *visited_phis;
536
537 dep_chains = XCNEWVEC (VEC(edge, heap) *, MAX_NUM_CHAINS);
538
539 phi_bb = gimple_bb (phi);
540 /* First find the closest dominating bb to be
541 the control dependence root */
542 cd_root = find_dom (phi_bb);
543 if (!cd_root)
544 return false;
545
546 visited_phis = pointer_set_create ();
547 collect_phi_def_edges (phi, cd_root, &def_edges, visited_phis);
548 pointer_set_destroy (visited_phis);
549
550 n = VEC_length (edge, def_edges);
551 if (n == 0)
552 return false;
553
554 for (i = 0; i < n; i++)
555 {
556 size_t prev_nc, j;
557 edge opnd_edge;
558
559 opnd_edge = VEC_index (edge, def_edges, i);
560 prev_nc = num_chains;
561 compute_control_dep_chain (cd_root, opnd_edge->src,
562 dep_chains, &num_chains,
563 &cur_chain);
564 /* Free individual chain */
565 VEC_free (edge, heap, cur_chain);
566 cur_chain = 0;
567
568 /* Now update the newly added chains with
569 the phi operand edge: */
570 if (EDGE_COUNT (opnd_edge->src->succs) > 1)
571 {
572 if (prev_nc == num_chains
573 && num_chains < MAX_NUM_CHAINS)
574 num_chains++;
575 for (j = prev_nc; j < num_chains; j++)
576 {
577 VEC_safe_push (edge, heap, dep_chains[j], opnd_edge);
578 }
579 }
580 }
581
582 has_valid_pred
583 = convert_control_dep_chain_into_preds (dep_chains,
584 num_chains,
585 preds,
586 num_preds);
587 for (i = 0; i < num_chains; i++)
588 VEC_free (edge, heap, dep_chains[i]);
589 free (dep_chains);
590 return has_valid_pred;
591 }
592
593 /* Dumps the predicates (PREDS) for USESTMT. */
594
595 static void
596 dump_predicates (gimple usestmt, size_t num_preds,
597 VEC(use_pred_info_t, heap) **preds,
598 const char* msg)
599 {
600 size_t i, j;
601 VEC(use_pred_info_t, heap) *one_pred_chain;
602 fprintf (dump_file, msg);
603 print_gimple_stmt (dump_file, usestmt, 0, 0);
604 fprintf (dump_file, "is guarded by :\n");
605 /* do some dumping here: */
606 for (i = 0; i < num_preds; i++)
607 {
608 size_t np;
609
610 one_pred_chain = preds[i];
611 np = VEC_length (use_pred_info_t, one_pred_chain);
612
613 for (j = 0; j < np; j++)
614 {
615 use_pred_info_t one_pred
616 = VEC_index (use_pred_info_t, one_pred_chain, j);
617 if (one_pred->invert)
618 fprintf (dump_file, " (.NOT.) ");
619 print_gimple_stmt (dump_file, one_pred->cond, 0, 0);
620 if (j < np - 1)
621 fprintf (dump_file, "(.AND.)\n");
622 }
623 if (i < num_preds - 1)
624 fprintf (dump_file, "(.OR.)\n");
625 }
626 }
627
628 /* Destroys the predicate set *PREDS. */
629
630 static void
631 destroy_predicate_vecs (size_t n,
632 VEC(use_pred_info_t, heap) ** preds)
633 {
634 size_t i, j;
635 for (i = 0; i < n; i++)
636 {
637 for (j = 0; j < VEC_length (use_pred_info_t, preds[i]); j++)
638 free (VEC_index (use_pred_info_t, preds[i], j));
639 VEC_free (use_pred_info_t, heap, preds[i]);
640 }
641 free (preds);
642 }
643
644
645 /* Computes the 'normalized' conditional code with operand
646 swapping and condition inversion. */
647
648 static enum tree_code
649 get_cmp_code (enum tree_code orig_cmp_code,
650 bool swap_cond, bool invert)
651 {
652 enum tree_code tc = orig_cmp_code;
653
654 if (swap_cond)
655 tc = swap_tree_comparison (orig_cmp_code);
656 if (invert)
657 tc = invert_tree_comparison (tc, false);
658
659 switch (tc)
660 {
661 case LT_EXPR:
662 case LE_EXPR:
663 case GT_EXPR:
664 case GE_EXPR:
665 case EQ_EXPR:
666 case NE_EXPR:
667 break;
668 default:
669 return ERROR_MARK;
670 }
671 return tc;
672 }
673
674 /* Returns true if VAL falls in the range defined by BOUNDARY and CMPC, i.e.
675 all values x in the range satisfy (x CMPC BOUNDARY) == true. */
676
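/* For example (illustrative constants): with BOUNDARY == 10 and
   CMPC == LT_EXPR, VAL == 3 is included (3 < 10) but VAL == 10 is
   not; with CMPC == NE_EXPR, any VAL other than 10 is included.  */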
677 static bool
678 is_value_included_in (tree val, tree boundary, enum tree_code cmpc)
679 {
680 bool inverted = false;
681 bool is_unsigned;
682 bool result;
683
684 /* Only handle integer constant here. */
685 if (TREE_CODE (val) != INTEGER_CST
686 || TREE_CODE (boundary) != INTEGER_CST)
687 return true;
688
689 is_unsigned = TYPE_UNSIGNED (TREE_TYPE (val));
690
691 if (cmpc == GE_EXPR || cmpc == GT_EXPR
692 || cmpc == NE_EXPR)
693 {
694 cmpc = invert_tree_comparison (cmpc, false);
695 inverted = true;
696 }
697
698 if (is_unsigned)
699 {
700 if (cmpc == EQ_EXPR)
701 result = tree_int_cst_equal (val, boundary);
702 else if (cmpc == LT_EXPR)
703 result = INT_CST_LT_UNSIGNED (val, boundary);
704 else
705 {
706 gcc_assert (cmpc == LE_EXPR);
707 result = (tree_int_cst_equal (val, boundary)
708 || INT_CST_LT_UNSIGNED (val, boundary));
709 }
710 }
711 else
712 {
713 if (cmpc == EQ_EXPR)
714 result = tree_int_cst_equal (val, boundary);
715 else if (cmpc == LT_EXPR)
716 result = INT_CST_LT (val, boundary);
717 else
718 {
719 gcc_assert (cmpc == LE_EXPR);
720 result = (tree_int_cst_equal (val, boundary)
721 || INT_CST_LT (val, boundary));
722 }
723 }
724
725 if (inverted)
726 result ^= 1;
727
728 return result;
729 }
730
731 /* Returns true if PRED is common among all the predicate
732 chains (PREDS) (and therefore can be factored out).
733 NUM_PRED_CHAINS is the size of the array PREDS. */
734
735 static bool
736 find_matching_predicate_in_rest_chains (use_pred_info_t pred,
737 VEC(use_pred_info_t, heap) **preds,
738 size_t num_pred_chains)
739 {
740 size_t i, j, n;
741
742 /* Trivial case. */
743 if (num_pred_chains == 1)
744 return true;
745
746 for (i = 1; i < num_pred_chains; i++)
747 {
748 bool found = false;
749 VEC(use_pred_info_t, heap) *one_chain = preds[i];
750 n = VEC_length (use_pred_info_t, one_chain);
751 for (j = 0; j < n; j++)
752 {
753 use_pred_info_t pred2
754 = VEC_index (use_pred_info_t, one_chain, j);
755 /* The condition comparison could be relaxed to not
756 use address comparison. However, the most common
757 case is that multiple control dependent paths share
758 a common path prefix, so address comparison should
759 be ok. */
760
761 if (pred2->cond == pred->cond
762 && pred2->invert == pred->invert)
763 {
764 found = true;
765 break;
766 }
767 }
768 if (!found)
769 return false;
770 }
771 return true;
772 }
773
774 /* Forward declaration. */
775 static bool
776 is_use_properly_guarded (gimple use_stmt,
777 basic_block use_bb,
778 gimple phi,
779 unsigned uninit_opnds,
780 struct pointer_set_t *visited_phis);
781
782 /* Returns true if all uninitialized opnds are pruned. Returns false
783 otherwise. PHI is the phi node with uninitialized operands,
784 UNINIT_OPNDS is the bitmap of the uninitialized operand positions,
785 FLAG_DEF is the statement defining the flag guarding the use of the
786 PHI output, BOUNDARY_CST is the const value used in the predicate
787 associated with the flag, CMP_CODE is the comparison code used in
788 the predicate, VISITED_PHIS is the pointer set of phis visited, and
789 VISITED_FLAG_PHIS is the pointer to the pointer set of flag definitions
790 that are also phis.
791
792 Example scenario:
793
794 BB1:
795 flag_1 = phi <0, 1> // (1)
796 var_1 = phi <undef, some_val>
797
798
799 BB2:
800 flag_2 = phi <0, flag_1, flag_1> // (2)
801 var_2 = phi <undef, var_1, var_1>
802 if (flag_2 == 1)
803 goto BB3;
804
805 BB3:
806 use of var_2 // (3)
807
808 Because some flag arg in (1) is not constant, if we do not look into the
809 flag phis recursively, it is conservatively treated as unknown and var_1
810 is thought to flow into the use at (3). Since var_1 is potentially uninitialized,
811 a false warning will be emitted. By checking into (1) recursively, the compiler can
812 find out that only some_val (which is defined) can flow into (3), which is OK.
813
814 */
815
816 static bool
817 prune_uninit_phi_opnds_in_unrealizable_paths (
818 gimple phi, unsigned uninit_opnds,
819 gimple flag_def, tree boundary_cst,
820 enum tree_code cmp_code,
821 struct pointer_set_t *visited_phis,
822 bitmap *visited_flag_phis)
823 {
824 unsigned i;
825
826 for (i = 0; i < MIN (32, gimple_phi_num_args (flag_def)); i++)
827 {
828 tree flag_arg;
829
830 if (!MASK_TEST_BIT (uninit_opnds, i))
831 continue;
832
833 flag_arg = gimple_phi_arg_def (flag_def, i);
834 if (!is_gimple_constant (flag_arg))
835 {
836 gimple flag_arg_def, phi_arg_def;
837 tree phi_arg;
838 unsigned uninit_opnds_arg_phi;
839
840 if (TREE_CODE (flag_arg) != SSA_NAME)
841 return false;
842 flag_arg_def = SSA_NAME_DEF_STMT (flag_arg);
843 if (gimple_code (flag_arg_def) != GIMPLE_PHI)
844 return false;
845
846 phi_arg = gimple_phi_arg_def (phi, i);
847 if (TREE_CODE (phi_arg) != SSA_NAME)
848 return false;
849
850 phi_arg_def = SSA_NAME_DEF_STMT (phi_arg);
851 if (gimple_code (phi_arg_def) != GIMPLE_PHI)
852 return false;
853
854 if (gimple_bb (phi_arg_def) != gimple_bb (flag_arg_def))
855 return false;
856
857 if (!*visited_flag_phis)
858 *visited_flag_phis = BITMAP_ALLOC (NULL);
859
860 if (bitmap_bit_p (*visited_flag_phis,
861 SSA_NAME_VERSION (gimple_phi_result (flag_arg_def))))
862 return false;
863
864 bitmap_set_bit (*visited_flag_phis,
865 SSA_NAME_VERSION (gimple_phi_result (flag_arg_def)));
866
867 /* Now recursively prune the uninitialized phi args. */
868 uninit_opnds_arg_phi = compute_uninit_opnds_pos (phi_arg_def);
869 if (!prune_uninit_phi_opnds_in_unrealizable_paths (
870 phi_arg_def, uninit_opnds_arg_phi,
871 flag_arg_def, boundary_cst, cmp_code,
872 visited_phis, visited_flag_phis))
873 return false;
874
875 bitmap_clear_bit (*visited_flag_phis,
876 SSA_NAME_VERSION (gimple_phi_result (flag_arg_def)));
877 continue;
878 }
879
880 /* Now check if the constant is in the guarded range. */
881 if (is_value_included_in (flag_arg, boundary_cst, cmp_code))
882 {
883 tree opnd;
884 gimple opnd_def;
885
886 /* Now that we know that this undefined edge is not
887 pruned. If the operand is defined by another phi,
888 we can further prune the incoming edges of that
889 phi by checking the predicates of this operands. */
890
891 opnd = gimple_phi_arg_def (phi, i);
892 opnd_def = SSA_NAME_DEF_STMT (opnd);
893 if (gimple_code (opnd_def) == GIMPLE_PHI)
894 {
895 edge opnd_edge;
896 unsigned uninit_opnds2
897 = compute_uninit_opnds_pos (opnd_def);
898 gcc_assert (!MASK_EMPTY (uninit_opnds2));
899 opnd_edge = gimple_phi_arg_edge (phi, i);
900 if (!is_use_properly_guarded (phi,
901 opnd_edge->src,
902 opnd_def,
903 uninit_opnds2,
904 visited_phis))
905 return false;
906 }
907 else
908 return false;
909 }
910 }
911
912 return true;
913 }
914
915 /* A helper function that determines if the predicate set
916 of the use does not overlap with that of the uninit paths.
917 The most common scenario of a guarded use is in Example 1:
918 Example 1:
919 if (some_cond)
920 {
921 x = ...;
922 flag = true;
923 }
924
925 ... some code ...
926
927 if (flag)
928 use (x);
929
930 The real world examples are usually more complicated, but similar
931 and usually result from inlining:
932
933 bool init_func (int * x)
934 {
935 if (some_cond)
936 return false;
937 *x = ..
938 return true;
939 }
940
941 void foo(..)
942 {
943 int x;
944
945 if (!init_func(&x))
946 return;
947
948 .. some_code ...
949 use (x);
950 }
951
952 Another possible use scenario is in the following trivial example:
953
954 Example 2:
955 if (n > 0)
956 x = 1;
957 ...
958 if (n > 0)
959 {
960 if (m < 2)
961 .. = x;
962 }
963
964 Predicate analysis needs to compute the composite predicate:
965
966 1) 'x' use predicate: (n > 0) .AND. (m < 2)
967 2) 'x' default value (non-def) predicate: .NOT. (n > 0)
968 (the predicate chain for phi operand defs can be computed
969 starting from a bb that is control equivalent to the phi's
970 bb and is dominating the operand def.)
971
972 and check overlapping:
973 (n > 0) .AND. (m < 2) .AND. (.NOT. (n > 0))
974 <==> false
975
976 This implementation provides a framework that can handle
977 such scenarios. (Note that many simple cases are handled properly
978 without the predicate analysis -- this is due to the jump threading
979 transformation, which eliminates the merge point and thus makes
980 path-sensitive analysis unnecessary.)
981
982 NUM_PREDS is the number of predicate chains, PREDS is
983 the array of chains, PHI is the phi node whose incoming (undefined)
984 paths need to be pruned, and UNINIT_OPNDS is the bitmap holding
985 uninit operand positions. VISITED_PHIS is the pointer set of phi
986 stmts being checked. */
987
988
989 static bool
990 use_pred_not_overlap_with_undef_path_pred (
991 size_t num_preds,
992 VEC(use_pred_info_t, heap) **preds,
993 gimple phi, unsigned uninit_opnds,
994 struct pointer_set_t *visited_phis)
995 {
996 unsigned int i, n;
997 gimple flag_def = 0;
998 tree boundary_cst = 0;
999 enum tree_code cmp_code;
1000 bool swap_cond = false;
1001 bool invert = false;
1002 VEC(use_pred_info_t, heap) *the_pred_chain;
1003 bitmap visited_flag_phis = NULL;
1004 bool all_pruned = false;
1005
1006 gcc_assert (num_preds > 0);
1007 /* Find within the common prefix of multiple predicate chains
1008 a predicate that is a comparison of a flag variable against
1009 a constant. */
1010 the_pred_chain = preds[0];
1011 n = VEC_length (use_pred_info_t, the_pred_chain);
1012 for (i = 0; i < n; i++)
1013 {
1014 gimple cond;
1015 tree cond_lhs, cond_rhs, flag = 0;
1016
1017 use_pred_info_t the_pred
1018 = VEC_index (use_pred_info_t, the_pred_chain, i);
1019
1020 cond = the_pred->cond;
1021 invert = the_pred->invert;
1022 cond_lhs = gimple_cond_lhs (cond);
1023 cond_rhs = gimple_cond_rhs (cond);
1024 cmp_code = gimple_cond_code (cond);
1025
1026 if (cond_lhs != NULL_TREE && TREE_CODE (cond_lhs) == SSA_NAME
1027 && cond_rhs != NULL_TREE && is_gimple_constant (cond_rhs))
1028 {
1029 boundary_cst = cond_rhs;
1030 flag = cond_lhs;
1031 }
1032 else if (cond_rhs != NULL_TREE && TREE_CODE (cond_rhs) == SSA_NAME
1033 && cond_lhs != NULL_TREE && is_gimple_constant (cond_lhs))
1034 {
1035 boundary_cst = cond_lhs;
1036 flag = cond_rhs;
1037 swap_cond = true;
1038 }
1039
1040 if (!flag)
1041 continue;
1042
1043 flag_def = SSA_NAME_DEF_STMT (flag);
1044
1045 if (!flag_def)
1046 continue;
1047
1048 if ((gimple_code (flag_def) == GIMPLE_PHI)
1049 && (gimple_bb (flag_def) == gimple_bb (phi))
1050 && find_matching_predicate_in_rest_chains (
1051 the_pred, preds, num_preds))
1052 break;
1053
1054 flag_def = 0;
1055 }
1056
1057 if (!flag_def)
1058 return false;
1059
1060 /* Now check that every uninit incoming edge has a constant flag value
1061 that conflicts with the use guard/predicate. */
1062 cmp_code = get_cmp_code (cmp_code, swap_cond, invert);
1063
1064 if (cmp_code == ERROR_MARK)
1065 return false;
1066
1067 all_pruned = prune_uninit_phi_opnds_in_unrealizable_paths (phi,
1068 uninit_opnds,
1069 flag_def,
1070 boundary_cst,
1071 cmp_code,
1072 visited_phis,
1073 &visited_flag_phis);
1074
1075 if (visited_flag_phis)
1076 BITMAP_FREE (visited_flag_phis);
1077
1078 return all_pruned;
1079 }
1080
1081 /* Returns true if TC is AND or OR */
1082
1083 static inline bool
1084 is_and_or_or (enum tree_code tc, tree typ)
1085 {
1086 return (tc == BIT_IOR_EXPR
1087 || (tc == BIT_AND_EXPR
1088 && (typ == 0 || TREE_CODE (typ) == BOOLEAN_TYPE)));
1089 }
1090
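/* A normalized condition: the leaf conditions collected by following
   UD chains, the logical opcode combining them (BIT_AND_EXPR or
   BIT_IOR_EXPR, or ERROR_MARK for a single leaf), and whether the
   whole expression is inverted.  */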
1091 typedef struct norm_cond
1092 {
1093 VEC(gimple, heap) *conds;
1094 enum tree_code cond_code;
1095 bool invert;
1096 } *norm_cond_t;
1097
1098
1099 /* Normalizes gimple condition COND. The normalization follows
1100 UD chains to form larger condition expression trees. NORM_COND
1101 holds the normalized result. COND_CODE is the logical opcode
1102 (AND or OR) of the normalized tree. */
1103
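/* Illustrative sketch (made-up SSA names): for the condition
   "if (t_3 != 0)" where t_3 = a_1 & b_2 and a_1, b_2 are boolean
   comparison results, the normalized form is the leaf list holding
   the defining statements of a_1 and b_2 with cond_code set to
   BIT_AND_EXPR.  */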
1104 static void
1105 normalize_cond_1 (gimple cond,
1106 norm_cond_t norm_cond,
1107 enum tree_code cond_code)
1108 {
1109 enum gimple_code gc;
1110 enum tree_code cur_cond_code;
1111 tree rhs1, rhs2;
1112
1113 gc = gimple_code (cond);
1114 if (gc != GIMPLE_ASSIGN)
1115 {
1116 VEC_safe_push (gimple, heap, norm_cond->conds, cond);
1117 return;
1118 }
1119
1120 cur_cond_code = gimple_assign_rhs_code (cond);
1121 rhs1 = gimple_assign_rhs1 (cond);
1122 rhs2 = gimple_assign_rhs2 (cond);
1123 if (cur_cond_code == NE_EXPR)
1124 {
1125 if (integer_zerop (rhs2)
1126 && (TREE_CODE (rhs1) == SSA_NAME))
1127 normalize_cond_1 (
1128 SSA_NAME_DEF_STMT (rhs1),
1129 norm_cond, cond_code);
1130 else if (integer_zerop (rhs1)
1131 && (TREE_CODE (rhs2) == SSA_NAME))
1132 normalize_cond_1 (
1133 SSA_NAME_DEF_STMT (rhs2),
1134 norm_cond, cond_code);
1135 else
1136 VEC_safe_push (gimple, heap, norm_cond->conds, cond);
1137
1138 return;
1139 }
1140
1141 if (is_and_or_or (cur_cond_code, TREE_TYPE (rhs1))
1142 && (cond_code == cur_cond_code || cond_code == ERROR_MARK)
1143 && (TREE_CODE (rhs1) == SSA_NAME && TREE_CODE (rhs2) == SSA_NAME))
1144 {
1145 normalize_cond_1 (SSA_NAME_DEF_STMT (rhs1),
1146 norm_cond, cur_cond_code);
1147 normalize_cond_1 (SSA_NAME_DEF_STMT (rhs2),
1148 norm_cond, cur_cond_code);
1149 norm_cond->cond_code = cur_cond_code;
1150 }
1151 else
1152 VEC_safe_push (gimple, heap, norm_cond->conds, cond);
1153 }
1154
1155 /* See normalize_cond_1 for details. INVERT is a flag to indicate
1156 if COND needs to be inverted or not. */
1157
1158 static void
1159 normalize_cond (gimple cond, norm_cond_t norm_cond, bool invert)
1160 {
1161 enum tree_code cond_code;
1162
1163 norm_cond->cond_code = ERROR_MARK;
1164 norm_cond->invert = false;
1165 norm_cond->conds = NULL;
1166 gcc_assert (gimple_code (cond) == GIMPLE_COND);
1167 cond_code = gimple_cond_code (cond);
1168 if (invert)
1169 cond_code = invert_tree_comparison (cond_code, false);
1170
1171 if (cond_code == NE_EXPR)
1172 {
1173 if (integer_zerop (gimple_cond_rhs (cond))
1174 && (TREE_CODE (gimple_cond_lhs (cond)) == SSA_NAME))
1175 normalize_cond_1 (
1176 SSA_NAME_DEF_STMT (gimple_cond_lhs (cond)),
1177 norm_cond, ERROR_MARK);
1178 else if (integer_zerop (gimple_cond_lhs (cond))
1179 && (TREE_CODE (gimple_cond_rhs (cond)) == SSA_NAME))
1180 normalize_cond_1 (
1181 SSA_NAME_DEF_STMT (gimple_cond_rhs (cond)),
1182 norm_cond, ERROR_MARK);
1183 else
1184 {
1185 VEC_safe_push (gimple, heap, norm_cond->conds, cond);
1186 norm_cond->invert = invert;
1187 }
1188 }
1189 else
1190 {
1191 VEC_safe_push (gimple, heap, norm_cond->conds, cond);
1192 norm_cond->invert = invert;
1193 }
1194
1195 gcc_assert (VEC_length (gimple, norm_cond->conds) == 1
1196 || is_and_or_or (norm_cond->cond_code, NULL));
1197 }
1198
1199 /* Returns true if the domain of condition COND1 is a subset of
1200 that of COND2. REVERSE is a flag. When it is true, the function checks
1201 if COND1 is a superset of COND2. INVERT1 and INVERT2 are flags
1202 to indicate if COND1 and COND2 need to be inverted or not. */
1203
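/* For example (illustrative constants on the same lhs): the domain
   of "x_1 <= 5" is a subset of that of "x_1 < 10", and the domain
   of "x_1 == 3" is a subset of that of "x_1 != 7".  */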
1204 static bool
1205 is_gcond_subset_of (gimple cond1, bool invert1,
1206 gimple cond2, bool invert2,
1207 bool reverse)
1208 {
1209 enum gimple_code gc1, gc2;
1210 enum tree_code cond1_code, cond2_code;
1211 gimple tmp;
1212 tree cond1_lhs, cond1_rhs, cond2_lhs, cond2_rhs;
1213
1214 /* Take the short cut. */
1215 if (cond1 == cond2)
1216 return true;
1217
1218 if (reverse)
1219 {
1220 tmp = cond1;
1221 cond1 = cond2;
1222 cond2 = tmp;
1223 }
1224
1225 gc1 = gimple_code (cond1);
1226 gc2 = gimple_code (cond2);
1227
1228 if ((gc1 != GIMPLE_ASSIGN && gc1 != GIMPLE_COND)
1229 || (gc2 != GIMPLE_ASSIGN && gc2 != GIMPLE_COND))
1230 return cond1 == cond2;
1231
1232 cond1_code = ((gc1 == GIMPLE_ASSIGN)
1233 ? gimple_assign_rhs_code (cond1)
1234 : gimple_cond_code (cond1));
1235
1236 cond2_code = ((gc2 == GIMPLE_ASSIGN)
1237 ? gimple_assign_rhs_code (cond2)
1238 : gimple_cond_code (cond2));
1239
1240 if (TREE_CODE_CLASS (cond1_code) != tcc_comparison
1241 || TREE_CODE_CLASS (cond2_code) != tcc_comparison)
1242 return false;
1243
1244 if (invert1)
1245 cond1_code = invert_tree_comparison (cond1_code, false);
1246 if (invert2)
1247 cond2_code = invert_tree_comparison (cond2_code, false);
1248
1249 cond1_lhs = ((gc1 == GIMPLE_ASSIGN)
1250 ? gimple_assign_rhs1 (cond1)
1251 : gimple_cond_lhs (cond1));
1252 cond1_rhs = ((gc1 == GIMPLE_ASSIGN)
1253 ? gimple_assign_rhs2 (cond1)
1254 : gimple_cond_rhs (cond1));
1255 cond2_lhs = ((gc2 == GIMPLE_ASSIGN)
1256 ? gimple_assign_rhs1 (cond2)
1257 : gimple_cond_lhs (cond2));
1258 cond2_rhs = ((gc2 == GIMPLE_ASSIGN)
1259 ? gimple_assign_rhs2 (cond2)
1260 : gimple_cond_rhs (cond2));
1261
1262 /* Assuming const operands have been swapped to the
1263 rhs at this point of the analysis. */
1264
1265 if (cond1_lhs != cond2_lhs)
1266 return false;
1267
1268 if (!is_gimple_constant (cond1_rhs)
1269 || TREE_CODE (cond1_rhs) != INTEGER_CST)
1270 return (cond1_rhs == cond2_rhs);
1271
1272 if (!is_gimple_constant (cond2_rhs)
1273 || TREE_CODE (cond2_rhs) != INTEGER_CST)
1274 return (cond1_rhs == cond2_rhs);
1275
1276 if (cond1_code == EQ_EXPR)
1277 return is_value_included_in (cond1_rhs,
1278 cond2_rhs, cond2_code);
1279 if (cond1_code == NE_EXPR || cond2_code == EQ_EXPR)
1280 return ((cond2_code == cond1_code)
1281 && tree_int_cst_equal (cond1_rhs, cond2_rhs));
1282
1283 if (((cond1_code == GE_EXPR || cond1_code == GT_EXPR)
1284 && (cond2_code == LE_EXPR || cond2_code == LT_EXPR))
1285 || ((cond1_code == LE_EXPR || cond1_code == LT_EXPR)
1286 && (cond2_code == GE_EXPR || cond2_code == GT_EXPR)))
1287 return false;
1288
1289 if (cond1_code != GE_EXPR && cond1_code != GT_EXPR
1290 && cond1_code != LE_EXPR && cond1_code != LT_EXPR)
1291 return false;
1292
1293 if (cond1_code == GT_EXPR)
1294 {
1295 cond1_code = GE_EXPR;
1296 cond1_rhs = fold_binary (PLUS_EXPR, TREE_TYPE (cond1_rhs),
1297 cond1_rhs,
1298 fold_convert (TREE_TYPE (cond1_rhs),
1299 integer_one_node));
1300 }
1301 else if (cond1_code == LT_EXPR)
1302 {
1303 cond1_code = LE_EXPR;
1304 cond1_rhs = fold_binary (MINUS_EXPR, TREE_TYPE (cond1_rhs),
1305 cond1_rhs,
1306 fold_convert (TREE_TYPE (cond1_rhs),
1307 integer_one_node));
1308 }
1309
1310 if (!cond1_rhs)
1311 return false;
1312
1313 gcc_assert (cond1_code == GE_EXPR || cond1_code == LE_EXPR);
1314
1315 if (cond2_code == GE_EXPR || cond2_code == GT_EXPR ||
1316 cond2_code == LE_EXPR || cond2_code == LT_EXPR)
1317 return is_value_included_in (cond1_rhs,
1318 cond2_rhs, cond2_code);
1319 else if (cond2_code == NE_EXPR)
1320 return
1321 (is_value_included_in (cond1_rhs,
1322 cond2_rhs, cond2_code)
1323 && !is_value_included_in (cond2_rhs,
1324 cond1_rhs, cond1_code));
1325 return false;
1326 }
1327
1328 /* Returns true if the domain of the condition expression
1329 in COND is a subset of any of the sub-conditions
1330 of the normalized condition NORM_COND. INVERT is a flag
1331 to indicate if COND needs to be inverted.
1332 REVERSE is a flag. When it is true, the check is reversed --
1333 it returns true if COND is a superset of any of the subconditions
1334 of NORM_COND. */
1335
1336 static bool
1337 is_subset_of_any (gimple cond, bool invert,
1338 norm_cond_t norm_cond, bool reverse)
1339 {
1340 size_t i;
1341 size_t len = VEC_length (gimple, norm_cond->conds);
1342
1343 for (i = 0; i < len; i++)
1344 {
1345 if (is_gcond_subset_of (cond, invert,
1346 VEC_index (gimple, norm_cond->conds, i),
1347 false, reverse))
1348 return true;
1349 }
1350 return false;
1351 }
1352
1353 /* NORM_COND1 and NORM_COND2 are normalized logical/BIT OR
1354 expressions (formed by following UD chains, not control
1355 dependence chains). The function returns true if the domain
1356 of the OR expression NORM_COND1 is a subset of NORM_COND2's.
1357 The implementation is conservative, and it returns false if
1358 the inclusion relationship may not hold. */
1359
1360 static bool
1361 is_or_set_subset_of (norm_cond_t norm_cond1,
1362 norm_cond_t norm_cond2)
1363 {
1364 size_t i;
1365 size_t len = VEC_length (gimple, norm_cond1->conds);
1366
1367 for (i = 0; i < len; i++)
1368 {
1369 if (!is_subset_of_any (VEC_index (gimple, norm_cond1->conds, i),
1370 false, norm_cond2, false))
1371 return false;
1372 }
1373 return true;
1374 }
1375
1376 /* NORM_COND1 and NORM_COND2 are normalized logical AND
1377 expressions (formed by following UD chains, not control
1378 dependence chains). The function returns true if the domain
1379 of the AND expression NORM_COND1 is a subset of NORM_COND2's. */
1380
1381 static bool
1382 is_and_set_subset_of (norm_cond_t norm_cond1,
1383 norm_cond_t norm_cond2)
1384 {
1385 size_t i;
1386 size_t len = VEC_length (gimple, norm_cond2->conds);
1387
1388 for (i = 0; i < len; i++)
1389 {
1390 if (!is_subset_of_any (VEC_index (gimple, norm_cond2->conds, i),
1391 false, norm_cond1, true))
1392 return false;
1393 }
1394 return true;
1395 }
1396
1397 /* Returns true if the domain of NORM_COND1 is a subset
1398 of that of NORM_COND2. Returns false if it can not be
1399 proved to be so. */
1400
1401 static bool
1402 is_norm_cond_subset_of (norm_cond_t norm_cond1,
1403 norm_cond_t norm_cond2)
1404 {
1405 size_t i;
1406 enum tree_code code1, code2;
1407
1408 code1 = norm_cond1->cond_code;
1409 code2 = norm_cond2->cond_code;
1410
1411 if (code1 == BIT_AND_EXPR)
1412 {
1413 /* Both conditions are AND expressions. */
1414 if (code2 == BIT_AND_EXPR)
1415 return is_and_set_subset_of (norm_cond1, norm_cond2);
1416 /* NORM_COND1 is an AND expression, and NORM_COND2 is an OR
1417 expression. In this case, returns true if any subexpression
1418 of NORM_COND1 is a subset of any subexpression of NORM_COND2. */
1419 else if (code2 == BIT_IOR_EXPR)
1420 {
1421 size_t len1;
1422 len1 = VEC_length (gimple, norm_cond1->conds);
1423 for (i = 0; i < len1; i++)
1424 {
1425 gimple cond1 = VEC_index (gimple, norm_cond1->conds, i);
1426 if (is_subset_of_any (cond1, false, norm_cond2, false))
1427 return true;
1428 }
1429 return false;
1430 }
1431 else
1432 {
1433 gcc_assert (code2 == ERROR_MARK);
1434 gcc_assert (VEC_length (gimple, norm_cond2->conds) == 1);
1435 return is_subset_of_any (VEC_index (gimple, norm_cond2->conds, 0),
1436 norm_cond2->invert, norm_cond1, true);
1437 }
1438 }
1439 /* NORM_COND1 is an OR expression */
1440 else if (code1 == BIT_IOR_EXPR)
1441 {
1442 if (code2 != code1)
1443 return false;
1444
1445 return is_or_set_subset_of (norm_cond1, norm_cond2);
1446 }
1447 else
1448 {
1449 gcc_assert (code1 == ERROR_MARK);
1450 gcc_assert (VEC_length (gimple, norm_cond1->conds) == 1);
1451 /* Conservatively returns false if NORM_COND1 is non-decomposable
1452 and NORM_COND2 is an AND expression. */
1453 if (code2 == BIT_AND_EXPR)
1454 return false;
1455
1456 if (code2 == BIT_IOR_EXPR)
1457 return is_subset_of_any (VEC_index (gimple, norm_cond1->conds, 0),
1458 norm_cond1->invert, norm_cond2, false);
1459
1460 gcc_assert (code2 == ERROR_MARK);
1461 gcc_assert (VEC_length (gimple, norm_cond2->conds) == 1);
1462 return is_gcond_subset_of (VEC_index (gimple, norm_cond1->conds, 0),
1463 norm_cond1->invert,
1464 VEC_index (gimple, norm_cond2->conds, 0),
1465 norm_cond2->invert, false);
1466 }
1467 }
1468
1469 /* Returns true if the domain of the single predicate expression
1470 EXPR1 is a subset of that of EXPR2. Returns false if it
1471 can not be proved. */
1472
1473 static bool
1474 is_pred_expr_subset_of (use_pred_info_t expr1,
1475 use_pred_info_t expr2)
1476 {
1477 gimple cond1, cond2;
1478 enum tree_code code1, code2;
1479 struct norm_cond norm_cond1, norm_cond2;
1480 bool is_subset = false;
1481
1482 cond1 = expr1->cond;
1483 cond2 = expr2->cond;
1484 code1 = gimple_cond_code (cond1);
1485 code2 = gimple_cond_code (cond2);
1486
1487 if (expr1->invert)
1488 code1 = invert_tree_comparison (code1, false);
1489 if (expr2->invert)
1490 code2 = invert_tree_comparison (code2, false);
1491
1492 /* Fast path -- match exactly */
1493 if ((gimple_cond_lhs (cond1) == gimple_cond_lhs (cond2))
1494 && (gimple_cond_rhs (cond1) == gimple_cond_rhs (cond2))
1495 && (code1 == code2))
1496 return true;
1497
1498 /* Normalize conditions. To keep NE_EXPR, do not invert
1499 when both need inversion. */
1500 normalize_cond (cond1, &norm_cond1, (expr1->invert));
1501 normalize_cond (cond2, &norm_cond2, (expr2->invert));
1502
1503 is_subset = is_norm_cond_subset_of (&norm_cond1, &norm_cond2);
1504
1505 /* Free memory */
1506 VEC_free (gimple, heap, norm_cond1.conds);
1507 VEC_free (gimple, heap, norm_cond2.conds);
1508 return is_subset;
1509 }
1510
1511 /* Returns true if the domain of PRED1 is a subset
1512 of that of PRED2. Returns false if it can not be proved so. */
1513
1514 static bool
1515 is_pred_chain_subset_of (VEC(use_pred_info_t, heap) *pred1,
1516 VEC(use_pred_info_t, heap) *pred2)
1517 {
1518 size_t np1, np2, i1, i2;
1519
1520 np1 = VEC_length (use_pred_info_t, pred1);
1521 np2 = VEC_length (use_pred_info_t, pred2);
1522
1523 for (i2 = 0; i2 < np2; i2++)
1524 {
1525 bool found = false;
1526 use_pred_info_t info2
1527 = VEC_index (use_pred_info_t, pred2, i2);
1528 for (i1 = 0; i1 < np1; i1++)
1529 {
1530 use_pred_info_t info1
1531 = VEC_index (use_pred_info_t, pred1, i1);
1532 if (is_pred_expr_subset_of (info1, info2))
1533 {
1534 found = true;
1535 break;
1536 }
1537 }
1538 if (!found)
1539 return false;
1540 }
1541 return true;
1542 }
1543
1544 /* Returns true if the domain defined by
1545 one pred chain ONE_PRED is a subset of the domain
1546 of *PREDS. It returns false if ONE_PRED's domain is
1547 not a subset of any of the sub-domains of PREDS
1548 (corresponding to each individual chain in it), even
1549 though it may still be a subset of the whole domain
1550 of PREDS, which is the union (ORed) of all its subdomains.
1551 In other words, the result is conservative. */
1552
1553 static bool
1554 is_included_in (VEC(use_pred_info_t, heap) *one_pred,
1555 VEC(use_pred_info_t, heap) **preds,
1556 size_t n)
1557 {
1558 size_t i;
1559
1560 for (i = 0; i < n; i++)
1561 {
1562 if (is_pred_chain_subset_of (one_pred, preds[i]))
1563 return true;
1564 }
1565
1566 return false;
1567 }
1568
1569 /* Compares two predicate sets PREDS1 and PREDS2 and returns
1570 true if the domain defined by PREDS1 is a superset
1571 of PREDS2's domain. N1 and N2 are the array sizes of PREDS1 and
1572 PREDS2 respectively. The implementation chooses not to build
1573 generic trees (and rely on the folding capability of the
1574 compiler), but instead performs a brute force comparison of
1575 individual predicate chains (which won't be a compile time problem
1576 as the chains are pretty short). When the function returns
1577 false, it does not necessarily mean *PREDS1 is not a superset
1578 of *PREDS2, but means it may not be so since the analysis
1579 cannot prove it. In such cases, false warnings may still be
1580 emitted. */
1581
1582 static bool
1583 is_superset_of (VEC(use_pred_info_t, heap) **preds1,
1584 size_t n1,
1585 VEC(use_pred_info_t, heap) **preds2,
1586 size_t n2)
1587 {
1588 size_t i;
1589 VEC(use_pred_info_t, heap) *one_pred_chain;
1590
1591 for (i = 0; i < n2; i++)
1592 {
1593 one_pred_chain = preds2[i];
1594 if (!is_included_in (one_pred_chain, preds1, n1))
1595 return false;
1596 }
1597
1598 return true;
1599 }
1600
1601 /* Comparison function used by qsort. It is used to
1602 sort predicate chains to allow predicate
1603 simplification. */
1604
1605 static int
1606 pred_chain_length_cmp (const void *p1, const void *p2)
1607 {
1608 use_pred_info_t i1, i2;
1609 VEC(use_pred_info_t, heap) * const *chain1
1610 = (VEC(use_pred_info_t, heap) * const *)p1;
1611 VEC(use_pred_info_t, heap) * const *chain2
1612 = (VEC(use_pred_info_t, heap) * const *)p2;
1613
1614 if (VEC_length (use_pred_info_t, *chain1)
1615 != VEC_length (use_pred_info_t, *chain2))
1616 return (VEC_length (use_pred_info_t, *chain1)
1617 - VEC_length (use_pred_info_t, *chain2));
1618
1619 i1 = VEC_index (use_pred_info_t, *chain1, 0);
1620 i2 = VEC_index (use_pred_info_t, *chain2, 0);
1621
1622 /* Allow predicates with similar prefixes to come together. */
1623 if (!i1->invert && i2->invert)
1624 return -1;
1625 else if (i1->invert && !i2->invert)
1626 return 1;
1627
1628 return gimple_uid (i1->cond) - gimple_uid (i2->cond);
1629 }
1630
1631 /* x OR (!x AND y) is equivalent to x OR y.
1632 This function normalizes x1 OR (!x1 AND x2) OR (!x1 AND !x2 AND x3)
1633 into x1 OR x2 OR x3. PREDS is the array of predicate chains, and N is
1634 the number of chains. Returns true if normalization happens. */
1635
1636 static bool
1637 normalize_preds (VEC(use_pred_info_t, heap) **preds, size_t *n)
1638 {
1639 size_t i, j, ll;
1640 VEC(use_pred_info_t, heap) *pred_chain;
1641 VEC(use_pred_info_t, heap) *x = 0;
1642 use_pred_info_t xj = 0, nxj = 0;
1643
1644 if (*n < 2)
1645 return false;
1646
1647 /* First sort the chains in ascending order of lengths. */
1648 qsort (preds, *n, sizeof (void *), pred_chain_length_cmp);
1649 pred_chain = preds[0];
1650 ll = VEC_length (use_pred_info_t, pred_chain);
1651 if (ll != 1)
1652 {
1653 if (ll == 2)
1654 {
1655 use_pred_info_t xx, yy, xx2, nyy;
1656 VEC(use_pred_info_t, heap) *pred_chain2 = preds[1];
1657 if (VEC_length (use_pred_info_t, pred_chain2) != 2)
1658 return false;
1659
1660 /* See if simplification x AND y OR x AND !y is possible. */
1661 xx = VEC_index (use_pred_info_t, pred_chain, 0);
1662 yy = VEC_index (use_pred_info_t, pred_chain, 1);
1663 xx2 = VEC_index (use_pred_info_t, pred_chain2, 0);
1664 nyy = VEC_index (use_pred_info_t, pred_chain2, 1);
1665 if (gimple_cond_lhs (xx->cond) != gimple_cond_lhs (xx2->cond)
1666 || gimple_cond_rhs (xx->cond) != gimple_cond_rhs (xx2->cond)
1667 || gimple_cond_code (xx->cond) != gimple_cond_code (xx2->cond)
1668 || (xx->invert != xx2->invert))
1669 return false;
1670 if (gimple_cond_lhs (yy->cond) != gimple_cond_lhs (nyy->cond)
1671 || gimple_cond_rhs (yy->cond) != gimple_cond_rhs (nyy->cond)
1672 || gimple_cond_code (yy->cond) != gimple_cond_code (nyy->cond)
1673 || (yy->invert == nyy->invert))
1674 return false;
1675
1676 /* Now merge the first two chains. */
1677 free (yy);
1678 free (nyy);
1679 free (xx2);
1680 VEC_free (use_pred_info_t, heap, pred_chain);
1681 VEC_free (use_pred_info_t, heap, pred_chain2);
1682 pred_chain = 0;
1683 VEC_safe_push (use_pred_info_t, heap, pred_chain, xx);
1684 preds[0] = pred_chain;
1685 for (i = 1; i < *n - 1; i++)
1686 preds[i] = preds[i + 1];
1687
1688 preds[*n - 1] = 0;
1689 *n = *n - 1;
1690 }
1691 else
1692 return false;
1693 }
1694
1695 VEC_safe_push (use_pred_info_t, heap, x,
1696 VEC_index (use_pred_info_t, pred_chain, 0));
1697
1698 /* The loop extracts x1, x2, x3, etc from chains
1699 x1 OR (!x1 AND x2) OR (!x1 AND !x2 AND x3) OR ... */
1700 for (i = 1; i < *n; i++)
1701 {
1702 pred_chain = preds[i];
1703 if (VEC_length (use_pred_info_t, pred_chain) != i + 1)
1704 return false;
1705
1706 for (j = 0; j < i; j++)
1707 {
1708 xj = VEC_index (use_pred_info_t, x, j);
1709 nxj = VEC_index (use_pred_info_t, pred_chain, j);
1710
1711 /* Check if nxj is !xj */
1712 if (gimple_cond_lhs (xj->cond) != gimple_cond_lhs (nxj->cond)
1713 || gimple_cond_rhs (xj->cond) != gimple_cond_rhs (nxj->cond)
1714 || gimple_cond_code (xj->cond) != gimple_cond_code (nxj->cond)
1715 || (xj->invert == nxj->invert))
1716 return false;
1717 }
1718
1719 VEC_safe_push (use_pred_info_t, heap, x,
1720 VEC_index (use_pred_info_t, pred_chain, i));
1721 }
1722
1723 /* Now normalize the pred chains using the extracted x1, x2, x3, etc. */
1724 for (j = 0; j < *n; j++)
1725 {
1726 use_pred_info_t t;
1727 xj = VEC_index (use_pred_info_t, x, j);
1728
1729 t = XNEW (struct use_pred_info);
1730 *t = *xj;
1731
1732 VEC_replace (use_pred_info_t, x, j, t);
1733 }
1734
1735 for (i = 0; i < *n; i++)
1736 {
1737 pred_chain = preds[i];
1738 for (j = 0; j < VEC_length (use_pred_info_t, pred_chain); j++)
1739 free (VEC_index (use_pred_info_t, pred_chain, j));
1740 VEC_free (use_pred_info_t, heap, pred_chain);
1741 pred_chain = 0;
1742 /* A new chain. */
1743 VEC_safe_push (use_pred_info_t, heap, pred_chain,
1744 VEC_index (use_pred_info_t, x, i));
1745 preds[i] = pred_chain;
1746 }
1747 return true;
1748 }
1749
1750
1751
1752 /* Computes the predicates that guard the use and checks
1753 if the incoming paths that have empty (or possibly
1754 empty) definition can be pruned/filtered. The function returns
1755 true if it can be determined that the use of PHI's def in
1756 USE_STMT is guarded with a predicate set not overlapping with
1757 predicate sets of all runtime paths that do not have a definition.
1758 Returns false if it is not or it can not be determined. USE_BB is
1759 the bb of the use (for phi operand use, the bb is not the bb of
1760 the phi stmt, but the src bb of the operand edge). UNINIT_OPNDS
1761 is a bit vector. If an operand of PHI is uninitialized, the
1762 corresponding bit in the vector is 1. VISITED_PHIS is a pointer
1763 set of phis being visited. */
1764
1765 static bool
1766 is_use_properly_guarded (gimple use_stmt,
1767 basic_block use_bb,
1768 gimple phi,
1769 unsigned uninit_opnds,
1770 struct pointer_set_t *visited_phis)
1771 {
1772 basic_block phi_bb;
1773 VEC(use_pred_info_t, heap) **preds = 0;
1774 VEC(use_pred_info_t, heap) **def_preds = 0;
1775 size_t num_preds = 0, num_def_preds = 0;
1776 bool has_valid_preds = false;
1777 bool is_properly_guarded = false;
1778
1779 if (pointer_set_insert (visited_phis, phi))
1780 return false;
1781
1782 phi_bb = gimple_bb (phi);
1783
1784 if (is_non_loop_exit_postdominating (use_bb, phi_bb))
1785 return false;
1786
1787 has_valid_preds = find_predicates (&preds, &num_preds,
1788 phi_bb, use_bb);
1789
1790 if (!has_valid_preds)
1791 {
1792 destroy_predicate_vecs (num_preds, preds);
1793 return false;
1794 }
1795
1796 if (dump_file)
1797 dump_predicates (use_stmt, num_preds, preds,
1798 "\nUse in stmt ");
1799
1800 has_valid_preds = find_def_preds (&def_preds,
1801 &num_def_preds, phi);
1802
1803 if (has_valid_preds)
1804 {
1805 bool normed;
1806 if (dump_file)
1807 dump_predicates (phi, num_def_preds, def_preds,
1808 "Operand defs of phi ");
1809
1810 normed = normalize_preds (def_preds, &num_def_preds);
1811 if (normed && dump_file)
1812 {
1813 fprintf (dump_file, "\nNormalized to\n");
1814 dump_predicates (phi, num_def_preds, def_preds,
1815 "Operand defs of phi ");
1816 }
1817 is_properly_guarded =
1818 is_superset_of (def_preds, num_def_preds,
1819 preds, num_preds);
1820 }
1821
1822 /* further prune the dead incoming phi edges. */
1823 if (!is_properly_guarded)
1824 is_properly_guarded
1825 = use_pred_not_overlap_with_undef_path_pred (
1826 num_preds, preds, phi, uninit_opnds, visited_phis);
1827
1828 destroy_predicate_vecs (num_preds, preds);
1829 destroy_predicate_vecs (num_def_preds, def_preds);
1830 return is_properly_guarded;
1831 }
1832
1833 /* Searches through all uses of a potentially
1834 uninitialized variable defined by PHI and returns a use
1835 statement if the use is not properly guarded. It returns
1836 NULL if all uses are guarded. UNINIT_OPNDS is a bitvector
1837 holding the position(s) of uninit PHI operands. WORKLIST
1838 is the vector of candidate phis that may be updated by this
1839 function. ADDED_TO_WORKLIST is the pointer set tracking
1840 if the new phi is already in the worklist. */
1841
1842 static gimple
1843 find_uninit_use (gimple phi, unsigned uninit_opnds,
1844 VEC(gimple, heap) **worklist,
1845 struct pointer_set_t *added_to_worklist)
1846 {
1847 tree phi_result;
1848 use_operand_p use_p;
1849 gimple use_stmt;
1850 imm_use_iterator iter;
1851
1852 phi_result = gimple_phi_result (phi);
1853
1854 FOR_EACH_IMM_USE_FAST (use_p, iter, phi_result)
1855 {
1856 struct pointer_set_t *visited_phis;
1857 basic_block use_bb;
1858
1859 use_stmt = USE_STMT (use_p);
1860 if (is_gimple_debug (use_stmt))
1861 continue;
1862
1863 visited_phis = pointer_set_create ();
1864
1865 if (gimple_code (use_stmt) == GIMPLE_PHI)
1866 use_bb = gimple_phi_arg_edge (use_stmt,
1867 PHI_ARG_INDEX_FROM_USE (use_p))->src;
1868 else
1869 use_bb = gimple_bb (use_stmt);
1870
1871 if (is_use_properly_guarded (use_stmt,
1872 use_bb,
1873 phi,
1874 uninit_opnds,
1875 visited_phis))
1876 {
1877 pointer_set_destroy (visited_phis);
1878 continue;
1879 }
1880 pointer_set_destroy (visited_phis);
1881
1882 if (dump_file && (dump_flags & TDF_DETAILS))
1883 {
1884 fprintf (dump_file, "[CHECK]: Found unguarded use: ");
1885 print_gimple_stmt (dump_file, use_stmt, 0, 0);
1886 }
1887 /* Found one real use, return. */
1888 if (gimple_code (use_stmt) != GIMPLE_PHI)
1889 return use_stmt;
1890
1891 /* Found a phi use that is not guarded,
1892 add the phi to the worklist. */
1893 if (!pointer_set_insert (added_to_worklist,
1894 use_stmt))
1895 {
1896 if (dump_file && (dump_flags & TDF_DETAILS))
1897 {
1898 fprintf (dump_file, "[WORKLIST]: Update worklist with phi: ");
1899 print_gimple_stmt (dump_file, use_stmt, 0, 0);
1900 }
1901
1902 VEC_safe_push (gimple, heap, *worklist, use_stmt);
1903 pointer_set_insert (possibly_undefined_names,
1904 phi_result);
1905 }
1906 }
1907
1908 return NULL;
1909 }
1910
1911 /* Look for inputs to PHI that are SSA_NAMEs that have empty definitions
1912 and gives a warning if there exists a runtime path from the entry to a
1913 use of the PHI def that does not contain a definition. In other words,
1914 the warning is on the real use. The more dead paths that can be pruned
1915 by the compiler, the fewer false positives the warning produces. WORKLIST
1916 is a vector of candidate phis to be examined. ADDED_TO_WORKLIST is
1917 a pointer set tracking if the new phi is added to the worklist or not. */
1918
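/* A minimal user-code example (not GCC code) that reaches the
   warning issued below, because the guard of the use differs from
   the guard of the definition:

     int f (int n, int m)
     {
       int x;
       if (n > 0)
         x = 1;
       if (m > 0)
         return x;   // 'x' may be used uninitialized
       return 0;
     }
*/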
1919 static void
1920 warn_uninitialized_phi (gimple phi, VEC(gimple, heap) **worklist,
1921 struct pointer_set_t *added_to_worklist)
1922 {
1923 unsigned uninit_opnds;
1924 gimple uninit_use_stmt = 0;
1925 tree uninit_op;
1926
1927 /* Don't look at memory tags. */
1928 if (!is_gimple_reg (gimple_phi_result (phi)))
1929 return;
1930
1931 uninit_opnds = compute_uninit_opnds_pos (phi);
1932
1933 if (MASK_EMPTY (uninit_opnds))
1934 return;
1935
1936 if (dump_file && (dump_flags & TDF_DETAILS))
1937 {
1938 fprintf (dump_file, "[CHECK]: examining phi: ");
1939 print_gimple_stmt (dump_file, phi, 0, 0);
1940 }
1941
1942 /* Now check if we have any use of the value without proper guard. */
1943 uninit_use_stmt = find_uninit_use (phi, uninit_opnds,
1944 worklist, added_to_worklist);
1945
1946 /* All uses are properly guarded. */
1947 if (!uninit_use_stmt)
1948 return;
1949
1950 uninit_op = gimple_phi_arg_def (phi, MASK_FIRST_SET_BIT (uninit_opnds));
1951 warn_uninit (OPT_Wmaybe_uninitialized, uninit_op, SSA_NAME_VAR (uninit_op),
1952 SSA_NAME_VAR (uninit_op),
1953 "%qD may be used uninitialized in this function",
1954 uninit_use_stmt);
1955
1956 }
1957
1958
1959 /* Entry point to the late uninitialized warning pass. */
1960
1961 static unsigned int
1962 execute_late_warn_uninitialized (void)
1963 {
1964 basic_block bb;
1965 gimple_stmt_iterator gsi;
1966 VEC(gimple, heap) *worklist = 0;
1967 struct pointer_set_t *added_to_worklist;
1968
1969 calculate_dominance_info (CDI_DOMINATORS);
1970 calculate_dominance_info (CDI_POST_DOMINATORS);
1971 /* Re-do the plain uninitialized variable check, as optimization may have
1972 straightened control flow. Do this first so that we don't accidentally
1973 get a "may be" warning when we'd have seen an "is" warning later. */
1974 warn_uninitialized_vars (/*warn_possibly_uninitialized=*/1);
1975
1976 timevar_push (TV_TREE_UNINIT);
1977
1978 possibly_undefined_names = pointer_set_create ();
1979 added_to_worklist = pointer_set_create ();
1980
1981 /* Initialize worklist */
1982 FOR_EACH_BB (bb)
1983 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1984 {
1985 gimple phi = gsi_stmt (gsi);
1986 size_t n, i;
1987
1988 n = gimple_phi_num_args (phi);
1989
1990 /* Don't look at memory tags. */
1991 if (!is_gimple_reg (gimple_phi_result (phi)))
1992 continue;
1993
1994 for (i = 0; i < n; ++i)
1995 {
1996 tree op = gimple_phi_arg_def (phi, i);
1997 if (TREE_CODE (op) == SSA_NAME
1998 && ssa_undefined_value_p (op))
1999 {
2000 VEC_safe_push (gimple, heap, worklist, phi);
2001 pointer_set_insert (added_to_worklist, phi);
2002 if (dump_file && (dump_flags & TDF_DETAILS))
2003 {
2004 fprintf (dump_file, "[WORKLIST]: add to initial list: ");
2005 print_gimple_stmt (dump_file, phi, 0, 0);
2006 }
2007 break;
2008 }
2009 }
2010 }
2011
2012 while (VEC_length (gimple, worklist) != 0)
2013 {
2014 gimple cur_phi = 0;
2015 cur_phi = VEC_pop (gimple, worklist);
2016 warn_uninitialized_phi (cur_phi, &worklist, added_to_worklist);
2017 }
2018
2019 VEC_free (gimple, heap, worklist);
2020 pointer_set_destroy (added_to_worklist);
2021 pointer_set_destroy (possibly_undefined_names);
2022 possibly_undefined_names = NULL;
2023 free_dominance_info (CDI_POST_DOMINATORS);
2024 timevar_pop (TV_TREE_UNINIT);
2025 return 0;
2026 }
2027
2028 static bool
2029 gate_warn_uninitialized (void)
2030 {
2031 return warn_uninitialized != 0;
2032 }
2033
2034 struct gimple_opt_pass pass_late_warn_uninitialized =
2035 {
2036 {
2037 GIMPLE_PASS,
2038 "uninit", /* name */
2039 gate_warn_uninitialized, /* gate */
2040 execute_late_warn_uninitialized, /* execute */
2041 NULL, /* sub */
2042 NULL, /* next */
2043 0, /* static_pass_number */
2044 TV_NONE, /* tv_id */
2045 PROP_ssa, /* properties_required */
2046 0, /* properties_provided */
2047 0, /* properties_destroyed */
2048 0, /* todo_flags_start */
2049 0 /* todo_flags_finish */
2050 }
2051 };