1 /* Predicate aware uninitialized variable warning.
2 Copyright (C) 2001-2013 Free Software Foundation, Inc.
3 Contributed by Xinliang David Li <davidxl@google.com>
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "tree.h"
26 #include "flags.h"
27 #include "tm_p.h"
28 #include "basic-block.h"
29 #include "function.h"
30 #include "gimple-pretty-print.h"
31 #include "bitmap.h"
32 #include "pointer-set.h"
33 #include "gimple.h"
34 #include "gimple-iterator.h"
35 #include "gimple-ssa.h"
36 #include "tree-phinodes.h"
37 #include "ssa-iterators.h"
38 #include "tree-ssa.h"
39 #include "tree-inline.h"
40 #include "hashtab.h"
41 #include "tree-pass.h"
42 #include "diagnostic-core.h"
43
44 /* This implements the pass that does predicate aware warning on uses of
45 possibly uninitialized variables. The pass first collects the set of
46 possibly uninitialized SSA names. For each such name, it walks through
47 all its immediate uses. For each immediate use, it rebuilds the condition
48 expression (the predicate) that guards the use. The predicate is then
49 examined to see if the variable is always defined under that same condition.
50 This is done either by pruning the unrealizable paths that lead to the
51 default definitions or by checking if the predicate set that guards the
52 defining paths is a superset of the use predicate. */
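/* For instance, in

     int x;
     if (cond)
       x = 1;
     ...
     if (cond)
       use (x);

   the use of x is guarded by the same condition that guards its
   definition, so the predicate check above proves the use predicate
   (cond) is covered by the defining-path predicate and no warning is
   issued.  Without the second guard, the use is reachable along the
   path where x only has its default definition, and the second pass
   emits a -Wmaybe-uninitialized warning.  */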
53
54
55 /* Pointer set of potentially undefined ssa names, i.e.,
56 ssa names that are defined by phi with operands that
57 are not defined or potentially undefined. */
58 static struct pointer_set_t *possibly_undefined_names = 0;
59
60 /* Bit mask handling macros. */
61 #define MASK_SET_BIT(mask, pos) mask |= (1 << pos)
62 #define MASK_TEST_BIT(mask, pos) (mask & (1 << pos))
63 #define MASK_EMPTY(mask) (mask == 0)
64
65 /* Returns the first bit position (starting from LSB)
66 in mask that is non-zero. Returns -1 if the mask is empty. */
67 static int
68 get_mask_first_set_bit (unsigned mask)
69 {
70 int pos = 0;
71 if (mask == 0)
72 return -1;
73
74 while ((mask & (1 << pos)) == 0)
75 pos++;
76
77 return pos;
78 }
79 #define MASK_FIRST_SET_BIT(mask) get_mask_first_set_bit (mask)
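/* For example, a phi with arguments <undef_1, x_2, undef_3> yields the
   mask 0b101: MASK_TEST_BIT (mask, 0) and MASK_TEST_BIT (mask, 2) are
   non-zero and MASK_FIRST_SET_BIT (mask) is 0.  The mask is a single
   32-bit word, which is why compute_uninit_opnds_pos below gives up on
   phis with more than 32 arguments.  */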
80
81 /* Return true if T, an SSA_NAME, has an undefined value. */
82 static bool
83 has_undefined_value_p (tree t)
84 {
85 return (ssa_undefined_value_p (t)
86 || (possibly_undefined_names
87 && pointer_set_contains (possibly_undefined_names, t)));
88 }
89
90
91
92 /* Like has_undefined_value_p, but don't return true if TREE_NO_WARNING
93 is set on SSA_NAME_VAR. */
94
95 static inline bool
96 uninit_undefined_value_p (tree t) {
97 if (!has_undefined_value_p (t))
98 return false;
99 if (SSA_NAME_VAR (t) && TREE_NO_WARNING (SSA_NAME_VAR (t)))
100 return false;
101 return true;
102 }
103
104 /* Emit warnings for uninitialized variables. This is done in two passes.
105
106 The first pass notices real uses of SSA names with undefined values.
107 Such uses are unconditionally uninitialized, and we can be certain that
108 such a use is a mistake. This pass is run before most optimizations,
109 so that we catch as many as we can.
110
111 The second pass follows PHI nodes to find uses that are potentially
112 uninitialized. In this case we can't necessarily prove that the use
113 is really uninitialized. This pass is run after most optimizations,
115 so that we thread as many jumps as possible, and delete as much dead
115 code as possible, in order to reduce false positives. We also look
116 again for plain uninitialized variables, since optimization may have
117 changed conditionally uninitialized to unconditionally uninitialized. */
118
119 /* Emit a warning for EXPR based on variable VAR at the point in the
120 program where T, an SSA_NAME, is used uninitialized. The exact
121 warning text is in GMSGID; DATA is the statement using the value, and
122 the warning location is taken from it when available. WC is the warning code. */
123
124 static void
125 warn_uninit (enum opt_code wc, tree t,
126 tree expr, tree var, const char *gmsgid, void *data)
127 {
128 gimple context = (gimple) data;
129 location_t location, cfun_loc;
130 expanded_location xloc, floc;
131
132 if (!has_undefined_value_p (t))
133 return;
134
135 /* TREE_NO_WARNING either means we already warned, or the front end
136 wishes to suppress the warning. */
137 if ((context
138 && (gimple_no_warning_p (context)
139 || (gimple_assign_single_p (context)
140 && TREE_NO_WARNING (gimple_assign_rhs1 (context)))))
141 || TREE_NO_WARNING (expr))
142 return;
143
144 location = (context != NULL && gimple_has_location (context))
145 ? gimple_location (context)
146 : DECL_SOURCE_LOCATION (var);
147 location = linemap_resolve_location (line_table, location,
148 LRK_SPELLING_LOCATION,
149 NULL);
150 cfun_loc = DECL_SOURCE_LOCATION (cfun->decl);
151 xloc = expand_location (location);
152 floc = expand_location (cfun_loc);
153 if (warning_at (location, wc, gmsgid, expr))
154 {
155 TREE_NO_WARNING (expr) = 1;
156
157 if (location == DECL_SOURCE_LOCATION (var))
158 return;
159 if (xloc.file != floc.file
160 || linemap_location_before_p (line_table,
161 location, cfun_loc)
162 || linemap_location_before_p (line_table,
163 cfun->function_end_locus,
164 location))
165 inform (DECL_SOURCE_LOCATION (var), "%qD was declared here", var);
166 }
167 }
168
169 static unsigned int
170 warn_uninitialized_vars (bool warn_possibly_uninitialized)
171 {
172 gimple_stmt_iterator gsi;
173 basic_block bb;
174
175 FOR_EACH_BB (bb)
176 {
177 bool always_executed = dominated_by_p (CDI_POST_DOMINATORS,
178 single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)), bb);
179 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
180 {
181 gimple stmt = gsi_stmt (gsi);
182 use_operand_p use_p;
183 ssa_op_iter op_iter;
184 tree use;
185
186 if (is_gimple_debug (stmt))
187 continue;
188
189 /* We only do data flow with SSA_NAMEs, so that's all we
190 can warn about. */
191 FOR_EACH_SSA_USE_OPERAND (use_p, stmt, op_iter, SSA_OP_USE)
192 {
193 use = USE_FROM_PTR (use_p);
194 if (always_executed)
195 warn_uninit (OPT_Wuninitialized, use,
196 SSA_NAME_VAR (use), SSA_NAME_VAR (use),
197 "%qD is used uninitialized in this function",
198 stmt);
199 else if (warn_possibly_uninitialized)
200 warn_uninit (OPT_Wmaybe_uninitialized, use,
201 SSA_NAME_VAR (use), SSA_NAME_VAR (use),
202 "%qD may be used uninitialized in this function",
203 stmt);
204 }
205
206 /* For memory the only cheap thing we can do is see if we
207 have a use of the default def of the virtual operand.
208 ??? Note that at -O0 we do not have virtual operands.
209 ??? Not so cheap would be to use the alias oracle via
210 walk_aliased_vdefs, if we don't find any aliasing vdef
211 warn as is-used-uninitialized, if we don't find an aliasing
212 vdef that kills our use (stmt_kills_ref_p), warn as
213 may-be-used-uninitialized. But this walk is quadratic and
214 so must be limited which means we would miss warning
215 opportunities. */
216 use = gimple_vuse (stmt);
217 if (use
218 && gimple_assign_single_p (stmt)
219 && !gimple_vdef (stmt)
220 && SSA_NAME_IS_DEFAULT_DEF (use))
221 {
222 tree rhs = gimple_assign_rhs1 (stmt);
223 tree base = get_base_address (rhs);
224
225 /* Do not warn if it can be initialized outside this function. */
226 if (TREE_CODE (base) != VAR_DECL
227 || DECL_HARD_REGISTER (base)
228 || is_global_var (base))
229 continue;
230
231 if (always_executed)
232 warn_uninit (OPT_Wuninitialized, use,
233 gimple_assign_rhs1 (stmt), base,
234 "%qE is used uninitialized in this function",
235 stmt);
236 else if (warn_possibly_uninitialized)
237 warn_uninit (OPT_Wmaybe_uninitialized, use,
238 gimple_assign_rhs1 (stmt), base,
239 "%qE may be used uninitialized in this function",
240 stmt);
241 }
242 }
243 }
244
245 return 0;
246 }
247
248 /* Checks if the operand OPND of PHI is defined by
249 another phi with one operand defined by this PHI,
250 but the rest of the operands are all defined. If yes,
251 returns true to skip this operand as being
252 redundant. Can be enhanced to be more general. */
253
254 static bool
255 can_skip_redundant_opnd (tree opnd, gimple phi)
256 {
257 gimple op_def;
258 tree phi_def;
259 int i, n;
260
261 phi_def = gimple_phi_result (phi);
262 op_def = SSA_NAME_DEF_STMT (opnd);
263 if (gimple_code (op_def) != GIMPLE_PHI)
264 return false;
265 n = gimple_phi_num_args (op_def);
266 for (i = 0; i < n; ++i)
267 {
268 tree op = gimple_phi_arg_def (op_def, i);
269 if (TREE_CODE (op) != SSA_NAME)
270 continue;
271 if (op != phi_def && uninit_undefined_value_p (op))
272 return false;
273 }
274
275 return true;
276 }
277
278 /* Returns a bit mask holding the positions of arguments in PHI
279 that have empty (or possibly empty) definitions. */
280
281 static unsigned
282 compute_uninit_opnds_pos (gimple phi)
283 {
284 size_t i, n;
285 unsigned uninit_opnds = 0;
286
287 n = gimple_phi_num_args (phi);
288 /* Bail out for phi with too many args. */
289 if (n > 32)
290 return 0;
291
292 for (i = 0; i < n; ++i)
293 {
294 tree op = gimple_phi_arg_def (phi, i);
295 if (TREE_CODE (op) == SSA_NAME
296 && uninit_undefined_value_p (op)
297 && !can_skip_redundant_opnd (op, phi))
298 {
299 if (cfun->has_nonlocal_label || cfun->calls_setjmp)
300 {
301 /* Ignore SSA_NAMEs that appear on abnormal edges
302 somewhere. */
303 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
304 continue;
305 }
306 MASK_SET_BIT (uninit_opnds, i);
307 }
308 }
309 return uninit_opnds;
310 }
311
312 /* Find the immediate postdominator PDOM of the specified
313 basic block BLOCK. */
314
315 static inline basic_block
316 find_pdom (basic_block block)
317 {
318 if (block == EXIT_BLOCK_PTR_FOR_FN (cfun))
319 return EXIT_BLOCK_PTR_FOR_FN (cfun);
320 else
321 {
322 basic_block bb
323 = get_immediate_dominator (CDI_POST_DOMINATORS, block);
324 if (! bb)
325 return EXIT_BLOCK_PTR_FOR_FN (cfun);
326 return bb;
327 }
328 }
329
330 /* Find the immediate DOM of the specified
331 basic block BLOCK. */
332
333 static inline basic_block
334 find_dom (basic_block block)
335 {
336 if (block == ENTRY_BLOCK_PTR_FOR_FN (cfun))
337 return ENTRY_BLOCK_PTR_FOR_FN (cfun);
338 else
339 {
340 basic_block bb = get_immediate_dominator (CDI_DOMINATORS, block);
341 if (! bb)
342 return ENTRY_BLOCK_PTR_FOR_FN (cfun);
343 return bb;
344 }
345 }
346
347 /* Returns true if BB1 postdominates BB2 and BB1 is
348 not a loop exit bb. The loop exit bb check is simple and does
349 not cover all cases. */
350
351 static bool
352 is_non_loop_exit_postdominating (basic_block bb1, basic_block bb2)
353 {
354 if (!dominated_by_p (CDI_POST_DOMINATORS, bb2, bb1))
355 return false;
356
357 if (single_pred_p (bb1) && !single_succ_p (bb2))
358 return false;
359
360 return true;
361 }
362
363 /* Find the closest postdominator of a specified BB, which is control
364 equivalent to BB. */
365
366 static inline basic_block
367 find_control_equiv_block (basic_block bb)
368 {
369 basic_block pdom;
370
371 pdom = find_pdom (bb);
372
373 /* Skip the postdominating bb that is also loop exit. */
374 if (!is_non_loop_exit_postdominating (pdom, bb))
375 return NULL;
376
377 if (dominated_by_p (CDI_DOMINATORS, pdom, bb))
378 return pdom;
379
380 return NULL;
381 }
382
383 #define MAX_NUM_CHAINS 8
384 #define MAX_CHAIN_LEN 5
385 #define MAX_POSTDOM_CHECK 8
386
387 /* Computes the control dependence chains (paths of edges)
388 for DEP_BB up to the dominating basic block BB (the head node of a
389 chain should be dominated by it). CD_CHAINS is pointer to a
390 dynamic array holding the result chains. CUR_CD_CHAIN is the current
391 chain being computed. *NUM_CHAINS is total number of chains. The
392 function returns true if the information is successfully computed,
393 and false if there is no control dependence or it is not computed. */
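/* As an illustration, for

     if (a_1)        <-- BB A
       {
         if (b_2)    <-- BB B
           ...       <-- BB D (DEP_BB)
       }

   BB D is control dependent on the true edge out of BB A and on the
   true edge out of BB B, so the chain collected for it (with BB == A)
   is { A->true, B->true }.  */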
394
395 static bool
396 compute_control_dep_chain (basic_block bb, basic_block dep_bb,
397 vec<edge> *cd_chains,
398 size_t *num_chains,
399 vec<edge> *cur_cd_chain)
400 {
401 edge_iterator ei;
402 edge e;
403 size_t i;
404 bool found_cd_chain = false;
405 size_t cur_chain_len = 0;
406
407 if (EDGE_COUNT (bb->succs) < 2)
408 return false;
409
410 /* Could use a set instead. */
411 cur_chain_len = cur_cd_chain->length ();
412 if (cur_chain_len > MAX_CHAIN_LEN)
413 return false;
414
415 for (i = 0; i < cur_chain_len; i++)
416 {
417 edge e = (*cur_cd_chain)[i];
418 /* cycle detected. */
419 if (e->src == bb)
420 return false;
421 }
422
423 FOR_EACH_EDGE (e, ei, bb->succs)
424 {
425 basic_block cd_bb;
426 int post_dom_check = 0;
427 if (e->flags & (EDGE_FAKE | EDGE_ABNORMAL))
428 continue;
429
430 cd_bb = e->dest;
431 cur_cd_chain->safe_push (e);
432 while (!is_non_loop_exit_postdominating (cd_bb, bb))
433 {
434 if (cd_bb == dep_bb)
435 {
436 /* Found a direct control dependence. */
437 if (*num_chains < MAX_NUM_CHAINS)
438 {
439 cd_chains[*num_chains] = cur_cd_chain->copy ();
440 (*num_chains)++;
441 }
442 found_cd_chain = true;
443 /* check path from next edge. */
444 break;
445 }
446
447 /* Now check if DEP_BB is indirectly control dependent on BB. */
448 if (compute_control_dep_chain (cd_bb, dep_bb, cd_chains,
449 num_chains, cur_cd_chain))
450 {
451 found_cd_chain = true;
452 break;
453 }
454
455 cd_bb = find_pdom (cd_bb);
456 post_dom_check++;
457 if (cd_bb == EXIT_BLOCK_PTR_FOR_FN (cfun) || post_dom_check >
458 MAX_POSTDOM_CHECK)
459 break;
460 }
461 cur_cd_chain->pop ();
462 gcc_assert (cur_cd_chain->length () == cur_chain_len);
463 }
464 gcc_assert (cur_cd_chain->length () == cur_chain_len);
465
466 return found_cd_chain;
467 }
468
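/* A single predicate: the controlling GIMPLE_COND statement plus a flag
   that is true when the condition is taken on its false (inverted) edge.
   A predicate chain is the conjunction (AND) of such predicates along one
   control dependence chain; a set of chains represents their disjunction
   (OR).  */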
469 typedef struct use_pred_info
470 {
471 gimple cond;
472 bool invert;
473 } *use_pred_info_t;
474
475
476
477 /* Converts the chains of control dependence edges into a set of
478 predicates. A control dependence chain is represented by a vector of
479 edges. DEP_CHAINS points to an array of dependence chains.
480 NUM_CHAINS is the size of the chain array. One edge in a dependence
481 chain is mapped to a predicate expression represented by the use_pred_info_t
482 type. One dependence chain is converted to a composite predicate that
483 is the result of ANDing the use_pred_info_t mapped to each edge.
484 A composite predicate is represented by a vector of use_pred_info_t. On
485 return, *PREDS points to the resulting array of composite predicates.
486 *NUM_PREDS is the number of composite predicates. */
487
488 static bool
489 convert_control_dep_chain_into_preds (vec<edge> *dep_chains,
490 size_t num_chains,
491 vec<use_pred_info_t> **preds,
492 size_t *num_preds)
493 {
494 bool has_valid_pred = false;
495 size_t i, j;
496 if (num_chains == 0 || num_chains >= MAX_NUM_CHAINS)
497 return false;
498
499 /* Now convert the control dep chain into a set
500 of predicates. */
501 typedef vec<use_pred_info_t> vec_use_pred_info_t_heap;
502 *preds = XCNEWVEC (vec_use_pred_info_t_heap, num_chains);
503 *num_preds = num_chains;
504
505 for (i = 0; i < num_chains; i++)
506 {
507 vec<edge> one_cd_chain = dep_chains[i];
508
509 has_valid_pred = false;
510 for (j = 0; j < one_cd_chain.length (); j++)
511 {
512 gimple cond_stmt;
513 gimple_stmt_iterator gsi;
514 basic_block guard_bb;
515 use_pred_info_t one_pred;
516 edge e;
517
518 e = one_cd_chain[j];
519 guard_bb = e->src;
520 gsi = gsi_last_bb (guard_bb);
521 if (gsi_end_p (gsi))
522 {
523 has_valid_pred = false;
524 break;
525 }
526 cond_stmt = gsi_stmt (gsi);
527 if (gimple_code (cond_stmt) == GIMPLE_CALL
528 && EDGE_COUNT (e->src->succs) >= 2)
529 {
530 /* Ignore EH edge. Can add assertion
531 on the other edge's flag. */
532 continue;
533 }
534 /* Skip if there is essentially one successor. */
535 if (EDGE_COUNT (e->src->succs) == 2)
536 {
537 edge e1;
538 edge_iterator ei1;
539 bool skip = false;
540
541 FOR_EACH_EDGE (e1, ei1, e->src->succs)
542 {
543 if (EDGE_COUNT (e1->dest->succs) == 0)
544 {
545 skip = true;
546 break;
547 }
548 }
549 if (skip)
550 continue;
551 }
552 if (gimple_code (cond_stmt) != GIMPLE_COND)
553 {
554 has_valid_pred = false;
555 break;
556 }
557 one_pred = XNEW (struct use_pred_info);
558 one_pred->cond = cond_stmt;
559 one_pred->invert = !!(e->flags & EDGE_FALSE_VALUE);
560 (*preds)[i].safe_push (one_pred);
561 has_valid_pred = true;
562 }
563
564 if (!has_valid_pred)
565 break;
566 }
567 return has_valid_pred;
568 }
569
570 /* Computes all control dependence chains for USE_BB. The control
571 dependence chains are then converted to an array of composite
572 predicates pointed to by PREDS. PHI_BB is the basic block of
573 the phi whose result is used in USE_BB. */
574
575 static bool
576 find_predicates (vec<use_pred_info_t> **preds,
577 size_t *num_preds,
578 basic_block phi_bb,
579 basic_block use_bb)
580 {
581 size_t num_chains = 0, i;
582 vec<edge> *dep_chains = 0;
583 vec<edge> cur_chain = vNULL;
584 bool has_valid_pred = false;
585 basic_block cd_root = 0;
586
587 typedef vec<edge> vec_edge_heap;
588 dep_chains = XCNEWVEC (vec_edge_heap, MAX_NUM_CHAINS);
589
590 /* First find the closest bb that is control equivalent to PHI_BB
591 that also dominates USE_BB. */
592 cd_root = phi_bb;
593 while (dominated_by_p (CDI_DOMINATORS, use_bb, cd_root))
594 {
595 basic_block ctrl_eq_bb = find_control_equiv_block (cd_root);
596 if (ctrl_eq_bb && dominated_by_p (CDI_DOMINATORS, use_bb, ctrl_eq_bb))
597 cd_root = ctrl_eq_bb;
598 else
599 break;
600 }
601
602 compute_control_dep_chain (cd_root, use_bb,
603 dep_chains, &num_chains,
604 &cur_chain);
605
606 has_valid_pred
607 = convert_control_dep_chain_into_preds (dep_chains,
608 num_chains,
609 preds,
610 num_preds);
611 /* Free individual chain */
612 cur_chain.release ();
613 for (i = 0; i < num_chains; i++)
614 dep_chains[i].release ();
615 free (dep_chains);
616 return has_valid_pred;
617 }
618
619 /* Computes the set of incoming edges of PHI that have non-empty
620 definitions of a phi chain. The collection will be done
621 recursively on operands that are defined by phis. CD_ROOT
622 is the control dependence root. *EDGES holds the result, and
623 VISITED_PHIS is a pointer set for detecting cycles. */
624
625 static void
626 collect_phi_def_edges (gimple phi, basic_block cd_root,
627 vec<edge> *edges,
628 struct pointer_set_t *visited_phis)
629 {
630 size_t i, n;
631 edge opnd_edge;
632 tree opnd;
633
634 if (pointer_set_insert (visited_phis, phi))
635 return;
636
637 n = gimple_phi_num_args (phi);
638 for (i = 0; i < n; i++)
639 {
640 opnd_edge = gimple_phi_arg_edge (phi, i);
641 opnd = gimple_phi_arg_def (phi, i);
642
643 if (TREE_CODE (opnd) != SSA_NAME)
644 {
645 if (dump_file && (dump_flags & TDF_DETAILS))
646 {
647 fprintf (dump_file, "\n[CHECK] Found def edge %d in ", (int)i);
648 print_gimple_stmt (dump_file, phi, 0, 0);
649 }
650 edges->safe_push (opnd_edge);
651 }
652 else
653 {
654 gimple def = SSA_NAME_DEF_STMT (opnd);
655
656 if (gimple_code (def) == GIMPLE_PHI
657 && dominated_by_p (CDI_DOMINATORS,
658 gimple_bb (def), cd_root))
659 collect_phi_def_edges (def, cd_root, edges,
660 visited_phis);
661 else if (!uninit_undefined_value_p (opnd))
662 {
663 if (dump_file && (dump_flags & TDF_DETAILS))
664 {
665 fprintf (dump_file, "\n[CHECK] Found def edge %d in ", (int)i);
666 print_gimple_stmt (dump_file, phi, 0, 0);
667 }
668 edges->safe_push (opnd_edge);
669 }
670 }
671 }
672 }
673
674 /* For each use edge of PHI, computes all control dependence chains.
675 The control dependence chains are then converted to an array of
676 composite predicates pointed to by PREDS. */
677
678 static bool
679 find_def_preds (vec<use_pred_info_t> **preds,
680 size_t *num_preds, gimple phi)
681 {
682 size_t num_chains = 0, i, n;
683 vec<edge> *dep_chains = 0;
684 vec<edge> cur_chain = vNULL;
685 vec<edge> def_edges = vNULL;
686 bool has_valid_pred = false;
687 basic_block phi_bb, cd_root = 0;
688 struct pointer_set_t *visited_phis;
689
690 typedef vec<edge> vec_edge_heap;
691 dep_chains = XCNEWVEC (vec_edge_heap, MAX_NUM_CHAINS);
692
693 phi_bb = gimple_bb (phi);
694 /* First find the closest dominating bb to be
695 the control dependence root */
696 cd_root = find_dom (phi_bb);
697 if (!cd_root)
698 return false;
699
700 visited_phis = pointer_set_create ();
701 collect_phi_def_edges (phi, cd_root, &def_edges, visited_phis);
702 pointer_set_destroy (visited_phis);
703
704 n = def_edges.length ();
705 if (n == 0)
706 return false;
707
708 for (i = 0; i < n; i++)
709 {
710 size_t prev_nc, j;
711 edge opnd_edge;
712
713 opnd_edge = def_edges[i];
714 prev_nc = num_chains;
715 compute_control_dep_chain (cd_root, opnd_edge->src,
716 dep_chains, &num_chains,
717 &cur_chain);
718 /* Free individual chain */
719 cur_chain.release ();
720
721 /* Now update the newly added chains with
722 the phi operand edge: */
723 if (EDGE_COUNT (opnd_edge->src->succs) > 1)
724 {
725 if (prev_nc == num_chains
726 && num_chains < MAX_NUM_CHAINS)
727 num_chains++;
728 for (j = prev_nc; j < num_chains; j++)
729 {
730 dep_chains[j].safe_push (opnd_edge);
731 }
732 }
733 }
734
735 has_valid_pred
736 = convert_control_dep_chain_into_preds (dep_chains,
737 num_chains,
738 preds,
739 num_preds);
740 for (i = 0; i < num_chains; i++)
741 dep_chains[i].release ();
742 free (dep_chains);
743 return has_valid_pred;
744 }
745
746 /* Dumps the predicates (PREDS) for USESTMT. */
747
748 static void
749 dump_predicates (gimple usestmt, size_t num_preds,
750 vec<use_pred_info_t> *preds,
751 const char* msg)
752 {
753 size_t i, j;
754 vec<use_pred_info_t> one_pred_chain;
755 fprintf (dump_file, msg);
756 print_gimple_stmt (dump_file, usestmt, 0, 0);
757 fprintf (dump_file, "is guarded by :\n");
758 /* do some dumping here: */
759 for (i = 0; i < num_preds; i++)
760 {
761 size_t np;
762
763 one_pred_chain = preds[i];
764 np = one_pred_chain.length ();
765
766 for (j = 0; j < np; j++)
767 {
768 use_pred_info_t one_pred
769 = one_pred_chain[j];
770 if (one_pred->invert)
771 fprintf (dump_file, " (.NOT.) ");
772 print_gimple_stmt (dump_file, one_pred->cond, 0, 0);
773 if (j < np - 1)
774 fprintf (dump_file, "(.AND.)\n");
775 }
776 if (i < num_preds - 1)
777 fprintf (dump_file, "(.OR.)\n");
778 }
779 }
780
781 /* Destroys the predicate set *PREDS. */
782
783 static void
784 destroy_predicate_vecs (size_t n,
785 vec<use_pred_info_t> * preds)
786 {
787 size_t i, j;
788 for (i = 0; i < n; i++)
789 {
790 for (j = 0; j < preds[i].length (); j++)
791 free (preds[i][j]);
792 preds[i].release ();
793 }
794 free (preds);
795 }
796
797
798 /* Computes the 'normalized' conditional code with operand
799 swapping and condition inversion. */
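/* For example, get_cmp_code (LT_EXPR, true, true) first swaps the
   comparison to GT_EXPR and then inverts it, yielding LE_EXPR.  */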
800
801 static enum tree_code
802 get_cmp_code (enum tree_code orig_cmp_code,
803 bool swap_cond, bool invert)
804 {
805 enum tree_code tc = orig_cmp_code;
806
807 if (swap_cond)
808 tc = swap_tree_comparison (orig_cmp_code);
809 if (invert)
810 tc = invert_tree_comparison (tc, false);
811
812 switch (tc)
813 {
814 case LT_EXPR:
815 case LE_EXPR:
816 case GT_EXPR:
817 case GE_EXPR:
818 case EQ_EXPR:
819 case NE_EXPR:
820 break;
821 default:
822 return ERROR_MARK;
823 }
824 return tc;
825 }
826
827 /* Returns true if VAL falls in the range defined by BOUNDARY and CMPC, i.e.
828 all values in the range satisfy (x CMPC BOUNDARY) == true. */
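/* For example, with BOUNDARY = 3 and CMPC = LE_EXPR, VAL = 2 is
   included (2 <= 3) while VAL = 4 is not; non-constant operands are
   conservatively treated as included.  */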
829
830 static bool
831 is_value_included_in (tree val, tree boundary, enum tree_code cmpc)
832 {
833 bool inverted = false;
834 bool is_unsigned;
835 bool result;
836
837 /* Only handle integer constant here. */
838 if (TREE_CODE (val) != INTEGER_CST
839 || TREE_CODE (boundary) != INTEGER_CST)
840 return true;
841
842 is_unsigned = TYPE_UNSIGNED (TREE_TYPE (val));
843
844 if (cmpc == GE_EXPR || cmpc == GT_EXPR
845 || cmpc == NE_EXPR)
846 {
847 cmpc = invert_tree_comparison (cmpc, false);
848 inverted = true;
849 }
850
851 if (is_unsigned)
852 {
853 if (cmpc == EQ_EXPR)
854 result = tree_int_cst_equal (val, boundary);
855 else if (cmpc == LT_EXPR)
856 result = INT_CST_LT_UNSIGNED (val, boundary);
857 else
858 {
859 gcc_assert (cmpc == LE_EXPR);
860 result = (tree_int_cst_equal (val, boundary)
861 || INT_CST_LT_UNSIGNED (val, boundary));
862 }
863 }
864 else
865 {
866 if (cmpc == EQ_EXPR)
867 result = tree_int_cst_equal (val, boundary);
868 else if (cmpc == LT_EXPR)
869 result = INT_CST_LT (val, boundary);
870 else
871 {
872 gcc_assert (cmpc == LE_EXPR);
873 result = (tree_int_cst_equal (val, boundary)
874 || INT_CST_LT (val, boundary));
875 }
876 }
877
878 if (inverted)
879 result ^= 1;
880
881 return result;
882 }
883
884 /* Returns true if PRED is common among all the predicate
885 chains (PREDS) (and therefore can be factored out).
886 NUM_PRED_CHAINS is the size of the array PREDS. */
887
888 static bool
889 find_matching_predicate_in_rest_chains (use_pred_info_t pred,
890 vec<use_pred_info_t> *preds,
891 size_t num_pred_chains)
892 {
893 size_t i, j, n;
894
895 /* Trivial case. */
896 if (num_pred_chains == 1)
897 return true;
898
899 for (i = 1; i < num_pred_chains; i++)
900 {
901 bool found = false;
902 vec<use_pred_info_t> one_chain = preds[i];
903 n = one_chain.length ();
904 for (j = 0; j < n; j++)
905 {
906 use_pred_info_t pred2
907 = one_chain[j];
908 /* can relax the condition comparison to not
909 use address comparison. However, the most common
910 case is that multiple control dependent paths share
911 a common path prefix, so address comparison should
912 be ok. */
913
914 if (pred2->cond == pred->cond
915 && pred2->invert == pred->invert)
916 {
917 found = true;
918 break;
919 }
920 }
921 if (!found)
922 return false;
923 }
924 return true;
925 }
926
927 /* Forward declaration. */
928 static bool
929 is_use_properly_guarded (gimple use_stmt,
930 basic_block use_bb,
931 gimple phi,
932 unsigned uninit_opnds,
933 struct pointer_set_t *visited_phis);
934
935 /* Returns true if all uninitialized opnds are pruned. Returns false
936 otherwise. PHI is the phi node with uninitialized operands,
937 UNINIT_OPNDS is the bitmap of the uninitialized operand positions,
938 FLAG_DEF is the statement defining the flag guarding the use of the
939 PHI output, BOUNDARY_CST is the const value used in the predicate
940 associated with the flag, CMP_CODE is the comparison code used in
941 the predicate, VISITED_PHIS is the pointer set of phis visited, and
942 VISITED_FLAG_PHIS is the pointer to the pointer set of flag definitions
943 that are also phis.
944
945 Example scenario:
946
947 BB1:
948 flag_1 = phi <0, 1> // (1)
949 var_1 = phi <undef, some_val>
950
951
952 BB2:
953 flag_2 = phi <0, flag_1, flag_1> // (2)
954 var_2 = phi <undef, var_1, var_1>
955 if (flag_2 == 1)
956 goto BB3;
957
958 BB3:
959 use of var_2 // (3)
960
961 Because some flag arg in (1) is not constant, if we do not look into the
962 flag phis recursively, it is conservatively treated as unknown and var_1
963 is thought to flow into the use at (3). Since var_1 is potentially uninitialized,
964 a false warning will be emitted. Checking recursively into (1), the compiler can
965 find out that only some_val (which is defined) can flow into (3), which is OK.
966
967 */
968
969 static bool
970 prune_uninit_phi_opnds_in_unrealizable_paths (
971 gimple phi, unsigned uninit_opnds,
972 gimple flag_def, tree boundary_cst,
973 enum tree_code cmp_code,
974 struct pointer_set_t *visited_phis,
975 bitmap *visited_flag_phis)
976 {
977 unsigned i;
978
979 for (i = 0; i < MIN (32, gimple_phi_num_args (flag_def)); i++)
980 {
981 tree flag_arg;
982
983 if (!MASK_TEST_BIT (uninit_opnds, i))
984 continue;
985
986 flag_arg = gimple_phi_arg_def (flag_def, i);
987 if (!is_gimple_constant (flag_arg))
988 {
989 gimple flag_arg_def, phi_arg_def;
990 tree phi_arg;
991 unsigned uninit_opnds_arg_phi;
992
993 if (TREE_CODE (flag_arg) != SSA_NAME)
994 return false;
995 flag_arg_def = SSA_NAME_DEF_STMT (flag_arg);
996 if (gimple_code (flag_arg_def) != GIMPLE_PHI)
997 return false;
998
999 phi_arg = gimple_phi_arg_def (phi, i);
1000 if (TREE_CODE (phi_arg) != SSA_NAME)
1001 return false;
1002
1003 phi_arg_def = SSA_NAME_DEF_STMT (phi_arg);
1004 if (gimple_code (phi_arg_def) != GIMPLE_PHI)
1005 return false;
1006
1007 if (gimple_bb (phi_arg_def) != gimple_bb (flag_arg_def))
1008 return false;
1009
1010 if (!*visited_flag_phis)
1011 *visited_flag_phis = BITMAP_ALLOC (NULL);
1012
1013 if (bitmap_bit_p (*visited_flag_phis,
1014 SSA_NAME_VERSION (gimple_phi_result (flag_arg_def))))
1015 return false;
1016
1017 bitmap_set_bit (*visited_flag_phis,
1018 SSA_NAME_VERSION (gimple_phi_result (flag_arg_def)));
1019
1020 /* Now recursively prune the uninitialized phi args. */
1021 uninit_opnds_arg_phi = compute_uninit_opnds_pos (phi_arg_def);
1022 if (!prune_uninit_phi_opnds_in_unrealizable_paths (
1023 phi_arg_def, uninit_opnds_arg_phi,
1024 flag_arg_def, boundary_cst, cmp_code,
1025 visited_phis, visited_flag_phis))
1026 return false;
1027
1028 bitmap_clear_bit (*visited_flag_phis,
1029 SSA_NAME_VERSION (gimple_phi_result (flag_arg_def)));
1030 continue;
1031 }
1032
1033 /* Now check if the constant is in the guarded range. */
1034 if (is_value_included_in (flag_arg, boundary_cst, cmp_code))
1035 {
1036 tree opnd;
1037 gimple opnd_def;
1038
1039 /* Now we know that this undefined edge is not
1040 pruned. If the operand is defined by another phi,
1041 we can further prune the incoming edges of that
1042 phi by checking the predicates of this operand. */
1043
1044 opnd = gimple_phi_arg_def (phi, i);
1045 opnd_def = SSA_NAME_DEF_STMT (opnd);
1046 if (gimple_code (opnd_def) == GIMPLE_PHI)
1047 {
1048 edge opnd_edge;
1049 unsigned uninit_opnds2
1050 = compute_uninit_opnds_pos (opnd_def);
1051 gcc_assert (!MASK_EMPTY (uninit_opnds2));
1052 opnd_edge = gimple_phi_arg_edge (phi, i);
1053 if (!is_use_properly_guarded (phi,
1054 opnd_edge->src,
1055 opnd_def,
1056 uninit_opnds2,
1057 visited_phis))
1058 return false;
1059 }
1060 else
1061 return false;
1062 }
1063 }
1064
1065 return true;
1066 }
1067
1068 /* A helper function that determines if the predicate set
1069 of the use is not overlapping with that of the uninit paths.
1070 The most common scenario of guarded use is in Example 1:
1071 Example 1:
1072 if (some_cond)
1073 {
1074 x = ...;
1075 flag = true;
1076 }
1077
1078 ... some code ...
1079
1080 if (flag)
1081 use (x);
1082
1083 The real world examples are usually more complicated, but similar
1084 and usually result from inlining:
1085
1086 bool init_func (int * x)
1087 {
1088 if (some_cond)
1089 return false;
1090 *x = ..
1091 return true;
1092 }
1093
1094 void foo(..)
1095 {
1096 int x;
1097
1098 if (!init_func(&x))
1099 return;
1100
1101 .. some_code ...
1102 use (x);
1103 }
1104
1105 Another possible use scenario is in the following trivial example:
1106
1107 Example 2:
1108 if (n > 0)
1109 x = 1;
1110 ...
1111 if (n > 0)
1112 {
1113 if (m < 2)
1114 .. = x;
1115 }
1116
1117 Predicate analysis needs to compute the composite predicate:
1118
1119 1) 'x' use predicate: (n > 0) .AND. (m < 2)
1120 2) 'x' default value (non-def) predicate: .NOT. (n > 0)
1121 (the predicate chain for phi operand defs can be computed
1122 starting from a bb that is control equivalent to the phi's
1123 bb and is dominating the operand def.)
1124
1125 and check overlapping:
1126 (n > 0) .AND. (m < 2) .AND. (.NOT. (n > 0))
1127 <==> false
1128
1129 This implementation provides a framework that can handle these
1130 scenarios. (Note that many simple cases are handled properly
1131 without the predicate analysis -- this is due to jump threading
1132 transformation which eliminates the merge point thus makes
1133 path sensitive analysis unnecessary.)
1134
1135 NUM_PREDS is the number of predicate chains, PREDS is
1136 the array of chains, PHI is the phi node whose incoming (undefined)
1137 paths need to be pruned, and UNINIT_OPNDS is the bitmap holding
1138 uninit operand positions. VISITED_PHIS is the pointer set of phi
1139 stmts being checked. */
1140
1141
1142 static bool
1143 use_pred_not_overlap_with_undef_path_pred (
1144 size_t num_preds,
1145 vec<use_pred_info_t> *preds,
1146 gimple phi, unsigned uninit_opnds,
1147 struct pointer_set_t *visited_phis)
1148 {
1149 unsigned int i, n;
1150 gimple flag_def = 0;
1151 tree boundary_cst = 0;
1152 enum tree_code cmp_code;
1153 bool swap_cond = false;
1154 bool invert = false;
1155 vec<use_pred_info_t> the_pred_chain;
1156 bitmap visited_flag_phis = NULL;
1157 bool all_pruned = false;
1158
1159 gcc_assert (num_preds > 0);
1160 /* Find within the common prefix of multiple predicate chains
1161 a predicate that is a comparison of a flag variable against
1162 a constant. */
1163 the_pred_chain = preds[0];
1164 n = the_pred_chain.length ();
1165 for (i = 0; i < n; i++)
1166 {
1167 gimple cond;
1168 tree cond_lhs, cond_rhs, flag = 0;
1169
1170 use_pred_info_t the_pred
1171 = the_pred_chain[i];
1172
1173 cond = the_pred->cond;
1174 invert = the_pred->invert;
1175 cond_lhs = gimple_cond_lhs (cond);
1176 cond_rhs = gimple_cond_rhs (cond);
1177 cmp_code = gimple_cond_code (cond);
1178
1179 if (cond_lhs != NULL_TREE && TREE_CODE (cond_lhs) == SSA_NAME
1180 && cond_rhs != NULL_TREE && is_gimple_constant (cond_rhs))
1181 {
1182 boundary_cst = cond_rhs;
1183 flag = cond_lhs;
1184 }
1185 else if (cond_rhs != NULL_TREE && TREE_CODE (cond_rhs) == SSA_NAME
1186 && cond_lhs != NULL_TREE && is_gimple_constant (cond_lhs))
1187 {
1188 boundary_cst = cond_lhs;
1189 flag = cond_rhs;
1190 swap_cond = true;
1191 }
1192
1193 if (!flag)
1194 continue;
1195
1196 flag_def = SSA_NAME_DEF_STMT (flag);
1197
1198 if (!flag_def)
1199 continue;
1200
1201 if ((gimple_code (flag_def) == GIMPLE_PHI)
1202 && (gimple_bb (flag_def) == gimple_bb (phi))
1203 && find_matching_predicate_in_rest_chains (
1204 the_pred, preds, num_preds))
1205 break;
1206
1207 flag_def = 0;
1208 }
1209
1210 if (!flag_def)
1211 return false;
1212
1213 /* Now check that all the uninit incoming edges have a constant flag value
1214 that is in conflict with the use guard/predicate. */
1215 cmp_code = get_cmp_code (cmp_code, swap_cond, invert);
1216
1217 if (cmp_code == ERROR_MARK)
1218 return false;
1219
1220 all_pruned = prune_uninit_phi_opnds_in_unrealizable_paths (phi,
1221 uninit_opnds,
1222 flag_def,
1223 boundary_cst,
1224 cmp_code,
1225 visited_phis,
1226 &visited_flag_phis);
1227
1228 if (visited_flag_phis)
1229 BITMAP_FREE (visited_flag_phis);
1230
1231 return all_pruned;
1232 }
1233
1234 /* Returns true if TC is BIT_IOR_EXPR, or BIT_AND_EXPR of boolean (or unknown) type. */
1235
1236 static inline bool
1237 is_and_or_or (enum tree_code tc, tree typ)
1238 {
1239 return (tc == BIT_IOR_EXPR
1240 || (tc == BIT_AND_EXPR
1241 && (typ == 0 || TREE_CODE (typ) == BOOLEAN_TYPE)));
1242 }
1243
1244 typedef struct norm_cond
1245 {
1246 vec<gimple> conds;
1247 enum tree_code cond_code;
1248 bool invert;
1249 } *norm_cond_t;
1250
1251
1252 /* Normalizes gimple condition COND. The normalization follows
1253 UD chains to form larger condition expression trees. NORM_COND
1254 holds the normalized result. COND_CODE is the logical opcode
1255 (AND or OR) of the normalized tree. */
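/* For instance, for a boolean t_1 = a_2 & b_3; followed by if (t_1 != 0),
   the walk descends through t_1's definition, recurses into the defining
   statements of a_2 and b_3, and records BIT_AND_EXPR as the combining
   code.  */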
1256
1257 static void
1258 normalize_cond_1 (gimple cond,
1259 norm_cond_t norm_cond,
1260 enum tree_code cond_code)
1261 {
1262 enum gimple_code gc;
1263 enum tree_code cur_cond_code;
1264 tree rhs1, rhs2;
1265
1266 gc = gimple_code (cond);
1267 if (gc != GIMPLE_ASSIGN)
1268 {
1269 norm_cond->conds.safe_push (cond);
1270 return;
1271 }
1272
1273 cur_cond_code = gimple_assign_rhs_code (cond);
1274 rhs1 = gimple_assign_rhs1 (cond);
1275 rhs2 = gimple_assign_rhs2 (cond);
1276 if (cur_cond_code == NE_EXPR)
1277 {
1278 if (integer_zerop (rhs2)
1279 && (TREE_CODE (rhs1) == SSA_NAME))
1280 normalize_cond_1 (
1281 SSA_NAME_DEF_STMT (rhs1),
1282 norm_cond, cond_code);
1283 else if (integer_zerop (rhs1)
1284 && (TREE_CODE (rhs2) == SSA_NAME))
1285 normalize_cond_1 (
1286 SSA_NAME_DEF_STMT (rhs2),
1287 norm_cond, cond_code);
1288 else
1289 norm_cond->conds.safe_push (cond);
1290
1291 return;
1292 }
1293
1294 if (is_and_or_or (cur_cond_code, TREE_TYPE (rhs1))
1295 && (cond_code == cur_cond_code || cond_code == ERROR_MARK)
1296 && (TREE_CODE (rhs1) == SSA_NAME && TREE_CODE (rhs2) == SSA_NAME))
1297 {
1298 normalize_cond_1 (SSA_NAME_DEF_STMT (rhs1),
1299 norm_cond, cur_cond_code);
1300 normalize_cond_1 (SSA_NAME_DEF_STMT (rhs2),
1301 norm_cond, cur_cond_code);
1302 norm_cond->cond_code = cur_cond_code;
1303 }
1304 else
1305 norm_cond->conds.safe_push (cond);
1306 }
1307
1308 /* See normalize_cond_1 for details. INVERT is a flag to indicate
1309 if COND needs to be inverted or not. */
1310
1311 static void
1312 normalize_cond (gimple cond, norm_cond_t norm_cond, bool invert)
1313 {
1314 enum tree_code cond_code;
1315
1316 norm_cond->cond_code = ERROR_MARK;
1317 norm_cond->invert = false;
1318 norm_cond->conds.create (0);
1319 gcc_assert (gimple_code (cond) == GIMPLE_COND);
1320 cond_code = gimple_cond_code (cond);
1321 if (invert)
1322 cond_code = invert_tree_comparison (cond_code, false);
1323
1324 if (cond_code == NE_EXPR)
1325 {
1326 if (integer_zerop (gimple_cond_rhs (cond))
1327 && (TREE_CODE (gimple_cond_lhs (cond)) == SSA_NAME))
1328 normalize_cond_1 (
1329 SSA_NAME_DEF_STMT (gimple_cond_lhs (cond)),
1330 norm_cond, ERROR_MARK);
1331 else if (integer_zerop (gimple_cond_lhs (cond))
1332 && (TREE_CODE (gimple_cond_rhs (cond)) == SSA_NAME))
1333 normalize_cond_1 (
1334 SSA_NAME_DEF_STMT (gimple_cond_rhs (cond)),
1335 norm_cond, ERROR_MARK);
1336 else
1337 {
1338 norm_cond->conds.safe_push (cond);
1339 norm_cond->invert = invert;
1340 }
1341 }
1342 else
1343 {
1344 norm_cond->conds.safe_push (cond);
1345 norm_cond->invert = invert;
1346 }
1347
1348 gcc_assert (norm_cond->conds.length () == 1
1349 || is_and_or_or (norm_cond->cond_code, NULL));
1350 }
1351
1352 /* Returns true if the domain for condition COND1 is a subset of
1353 COND2. REVERSE is a flag; when it is true the function checks
1354 if COND1 is a superset of COND2. INVERT1 and INVERT2 are flags
1355 to indicate if COND1 and COND2 need to be inverted or not. */
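/* For example, a_1 > 3 is a subset of a_1 > 1: the GT comparison is
   first rewritten as a_1 >= 4, and the boundary 4 satisfies (x > 1),
   so is_value_included_in confirms the inclusion.  */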
1356
1357 static bool
1358 is_gcond_subset_of (gimple cond1, bool invert1,
1359 gimple cond2, bool invert2,
1360 bool reverse)
1361 {
1362 enum gimple_code gc1, gc2;
1363 enum tree_code cond1_code, cond2_code;
1364 gimple tmp;
1365 tree cond1_lhs, cond1_rhs, cond2_lhs, cond2_rhs;
1366
1367 /* Take the short cut. */
1368 if (cond1 == cond2)
1369 return true;
1370
1371 if (reverse)
1372 {
1373 tmp = cond1;
1374 cond1 = cond2;
1375 cond2 = tmp;
1376 }
1377
1378 gc1 = gimple_code (cond1);
1379 gc2 = gimple_code (cond2);
1380
1381 if ((gc1 != GIMPLE_ASSIGN && gc1 != GIMPLE_COND)
1382 || (gc2 != GIMPLE_ASSIGN && gc2 != GIMPLE_COND))
1383 return cond1 == cond2;
1384
1385 cond1_code = ((gc1 == GIMPLE_ASSIGN)
1386 ? gimple_assign_rhs_code (cond1)
1387 : gimple_cond_code (cond1));
1388
1389 cond2_code = ((gc2 == GIMPLE_ASSIGN)
1390 ? gimple_assign_rhs_code (cond2)
1391 : gimple_cond_code (cond2));
1392
1393 if (TREE_CODE_CLASS (cond1_code) != tcc_comparison
1394 || TREE_CODE_CLASS (cond2_code) != tcc_comparison)
1395 return false;
1396
1397 if (invert1)
1398 cond1_code = invert_tree_comparison (cond1_code, false);
1399 if (invert2)
1400 cond2_code = invert_tree_comparison (cond2_code, false);
1401
1402 cond1_lhs = ((gc1 == GIMPLE_ASSIGN)
1403 ? gimple_assign_rhs1 (cond1)
1404 : gimple_cond_lhs (cond1));
1405 cond1_rhs = ((gc1 == GIMPLE_ASSIGN)
1406 ? gimple_assign_rhs2 (cond1)
1407 : gimple_cond_rhs (cond1));
1408 cond2_lhs = ((gc2 == GIMPLE_ASSIGN)
1409 ? gimple_assign_rhs1 (cond2)
1410 : gimple_cond_lhs (cond2));
1411 cond2_rhs = ((gc2 == GIMPLE_ASSIGN)
1412 ? gimple_assign_rhs2 (cond2)
1413 : gimple_cond_rhs (cond2));
1414
1415 /* Assuming const operands have been swapped to the
1416 rhs at this point of the analysis. */
1417
1418 if (cond1_lhs != cond2_lhs)
1419 return false;
1420
1421 if (!is_gimple_constant (cond1_rhs)
1422 || TREE_CODE (cond1_rhs) != INTEGER_CST)
1423 return (cond1_rhs == cond2_rhs);
1424
1425 if (!is_gimple_constant (cond2_rhs)
1426 || TREE_CODE (cond2_rhs) != INTEGER_CST)
1427 return (cond1_rhs == cond2_rhs);
1428
1429 if (cond1_code == EQ_EXPR)
1430 return is_value_included_in (cond1_rhs,
1431 cond2_rhs, cond2_code);
1432 if (cond1_code == NE_EXPR || cond2_code == EQ_EXPR)
1433 return ((cond2_code == cond1_code)
1434 && tree_int_cst_equal (cond1_rhs, cond2_rhs));
1435
1436 if (((cond1_code == GE_EXPR || cond1_code == GT_EXPR)
1437 && (cond2_code == LE_EXPR || cond2_code == LT_EXPR))
1438 || ((cond1_code == LE_EXPR || cond1_code == LT_EXPR)
1439 && (cond2_code == GE_EXPR || cond2_code == GT_EXPR)))
1440 return false;
1441
1442 if (cond1_code != GE_EXPR && cond1_code != GT_EXPR
1443 && cond1_code != LE_EXPR && cond1_code != LT_EXPR)
1444 return false;
1445
1446 if (cond1_code == GT_EXPR)
1447 {
1448 cond1_code = GE_EXPR;
1449 cond1_rhs = fold_binary (PLUS_EXPR, TREE_TYPE (cond1_rhs),
1450 cond1_rhs,
1451 fold_convert (TREE_TYPE (cond1_rhs),
1452 integer_one_node));
1453 }
1454 else if (cond1_code == LT_EXPR)
1455 {
1456 cond1_code = LE_EXPR;
1457 cond1_rhs = fold_binary (MINUS_EXPR, TREE_TYPE (cond1_rhs),
1458 cond1_rhs,
1459 fold_convert (TREE_TYPE (cond1_rhs),
1460 integer_one_node));
1461 }
1462
1463 if (!cond1_rhs)
1464 return false;
1465
1466 gcc_assert (cond1_code == GE_EXPR || cond1_code == LE_EXPR);
1467
1468 if (cond2_code == GE_EXPR || cond2_code == GT_EXPR ||
1469 cond2_code == LE_EXPR || cond2_code == LT_EXPR)
1470 return is_value_included_in (cond1_rhs,
1471 cond2_rhs, cond2_code);
1472 else if (cond2_code == NE_EXPR)
1473 return
1474 (is_value_included_in (cond1_rhs,
1475 cond2_rhs, cond2_code)
1476 && !is_value_included_in (cond2_rhs,
1477 cond1_rhs, cond1_code));
1478 return false;
1479 }
1480
1481 /* Returns true if the domain of the condition expression
1482 in COND is a subset of any of the sub-conditions
1483 of the normalized condition NORM_COND. INVERT is a flag
1484 to indicate if COND needs to be inverted.
1485 REVERSE is a flag. When it is true, the check is reversed --
1486 it returns true if COND is a superset of any of the subconditions
1487 of NORM_COND. */
1488
1489 static bool
1490 is_subset_of_any (gimple cond, bool invert,
1491 norm_cond_t norm_cond, bool reverse)
1492 {
1493 size_t i;
1494 size_t len = norm_cond->conds.length ();
1495
1496 for (i = 0; i < len; i++)
1497 {
1498 if (is_gcond_subset_of (cond, invert,
1499 norm_cond->conds[i],
1500 false, reverse))
1501 return true;
1502 }
1503 return false;
1504 }
1505
1506 /* NORM_COND1 and NORM_COND2 are normalized logical/BIT OR
1507 expressions (formed by following UD chains not control
1508 dependence chains). The function returns true if the domain
1509 of the OR expression NORM_COND1 is a subset of NORM_COND2's.
1510 The implementation is conservative, and it returns false if
1511 the inclusion relationship may not hold. */
1512
1513 static bool
1514 is_or_set_subset_of (norm_cond_t norm_cond1,
1515 norm_cond_t norm_cond2)
1516 {
1517 size_t i;
1518 size_t len = norm_cond1->conds.length ();
1519
1520 for (i = 0; i < len; i++)
1521 {
1522 if (!is_subset_of_any (norm_cond1->conds[i],
1523 false, norm_cond2, false))
1524 return false;
1525 }
1526 return true;
1527 }
1528
1529 /* NORM_COND1 and NORM_COND2 are normalized logical AND
1530 expressions (formed by following UD chains not control
1531 dependence chains). The function returns true if the domain
1532 of the AND expression NORM_COND1 is a subset of NORM_COND2's. */
1533
1534 static bool
1535 is_and_set_subset_of (norm_cond_t norm_cond1,
1536 norm_cond_t norm_cond2)
1537 {
1538 size_t i;
1539 size_t len = norm_cond2->conds.length ();
1540
1541 for (i = 0; i < len; i++)
1542 {
1543 if (!is_subset_of_any (norm_cond2->conds[i],
1544 false, norm_cond1, true))
1545 return false;
1546 }
1547 return true;
1548 }
1549
1550 /* Returns true if the domain of NORM_COND1 is a subset
1551 of that of NORM_COND2. Returns false if it cannot be
1552 proved to be so. */
1553
1554 static bool
1555 is_norm_cond_subset_of (norm_cond_t norm_cond1,
1556 norm_cond_t norm_cond2)
1557 {
1558 size_t i;
1559 enum tree_code code1, code2;
1560
1561 code1 = norm_cond1->cond_code;
1562 code2 = norm_cond2->cond_code;
1563
1564 if (code1 == BIT_AND_EXPR)
1565 {
1566 /* Both conditions are AND expressions. */
1567 if (code2 == BIT_AND_EXPR)
1568 return is_and_set_subset_of (norm_cond1, norm_cond2);
1569 /* NORM_COND1 is an AND expression, and NORM_COND2 is an OR
1570 expression. In this case, returns true if any subexpression
1571 of NORM_COND1 is a subset of any subexpression of NORM_COND2. */
1572 else if (code2 == BIT_IOR_EXPR)
1573 {
1574 size_t len1;
1575 len1 = norm_cond1->conds.length ();
1576 for (i = 0; i < len1; i++)
1577 {
1578 gimple cond1 = norm_cond1->conds[i];
1579 if (is_subset_of_any (cond1, false, norm_cond2, false))
1580 return true;
1581 }
1582 return false;
1583 }
1584 else
1585 {
1586 gcc_assert (code2 == ERROR_MARK);
1587 gcc_assert (norm_cond2->conds.length () == 1);
1588 return is_subset_of_any (norm_cond2->conds[0],
1589 norm_cond2->invert, norm_cond1, true);
1590 }
1591 }
1592 /* NORM_COND1 is an OR expression */
1593 else if (code1 == BIT_IOR_EXPR)
1594 {
1595 if (code2 != code1)
1596 return false;
1597
1598 return is_or_set_subset_of (norm_cond1, norm_cond2);
1599 }
1600 else
1601 {
1602 gcc_assert (code1 == ERROR_MARK);
1603 gcc_assert (norm_cond1->conds.length () == 1);
1604 /* Conservatively returns false if NORM_COND1 is non-decomposable
1605 and NORM_COND2 is an AND expression. */
1606 if (code2 == BIT_AND_EXPR)
1607 return false;
1608
1609 if (code2 == BIT_IOR_EXPR)
1610 return is_subset_of_any (norm_cond1->conds[0],
1611 norm_cond1->invert, norm_cond2, false);
1612
1613 gcc_assert (code2 == ERROR_MARK);
1614 gcc_assert (norm_cond2->conds.length () == 1);
1615 return is_gcond_subset_of (norm_cond1->conds[0],
1616 norm_cond1->invert,
1617 norm_cond2->conds[0],
1618 norm_cond2->invert, false);
1619 }
1620 }
1621
1622 /* Returns true if the domain of the single predicate expression
1623 EXPR1 is a subset of that of EXPR2. Returns false if it
1624 cannot be proved. */
1625
1626 static bool
1627 is_pred_expr_subset_of (use_pred_info_t expr1,
1628 use_pred_info_t expr2)
1629 {
1630 gimple cond1, cond2;
1631 enum tree_code code1, code2;
1632 struct norm_cond norm_cond1, norm_cond2;
1633 bool is_subset = false;
1634
1635 cond1 = expr1->cond;
1636 cond2 = expr2->cond;
1637 code1 = gimple_cond_code (cond1);
1638 code2 = gimple_cond_code (cond2);
1639
1640 if (expr1->invert)
1641 code1 = invert_tree_comparison (code1, false);
1642 if (expr2->invert)
1643 code2 = invert_tree_comparison (code2, false);
1644
1645 /* Fast path -- match exactly */
1646 if ((gimple_cond_lhs (cond1) == gimple_cond_lhs (cond2))
1647 && (gimple_cond_rhs (cond1) == gimple_cond_rhs (cond2))
1648 && (code1 == code2))
1649 return true;
1650
1651 /* Normalize conditions. To keep NE_EXPR, do not invert
1652 when both need inversion. */
1653 normalize_cond (cond1, &norm_cond1, (expr1->invert));
1654 normalize_cond (cond2, &norm_cond2, (expr2->invert));
1655
1656 is_subset = is_norm_cond_subset_of (&norm_cond1, &norm_cond2);
1657
1658 /* Free memory */
1659 norm_cond1.conds.release ();
1660 norm_cond2.conds.release ();
1661 return is_subset;
1662 }
1663
1664 /* Returns true if the domain of PRED1 is a subset
1665 of that of PRED2. Returns false if it can not be proved so. */
1666
1667 static bool
1668 is_pred_chain_subset_of (vec<use_pred_info_t> pred1,
1669 vec<use_pred_info_t> pred2)
1670 {
1671 size_t np1, np2, i1, i2;
1672
1673 np1 = pred1.length ();
1674 np2 = pred2.length ();
1675
1676 for (i2 = 0; i2 < np2; i2++)
1677 {
1678 bool found = false;
1679 use_pred_info_t info2
1680 = pred2[i2];
1681 for (i1 = 0; i1 < np1; i1++)
1682 {
1683 use_pred_info_t info1
1684 = pred1[i1];
1685 if (is_pred_expr_subset_of (info1, info2))
1686 {
1687 found = true;
1688 break;
1689 }
1690 }
1691 if (!found)
1692 return false;
1693 }
1694 return true;
1695 }
1696
1697 /* Returns true if the domain defined by
1698 one pred chain ONE_PRED is a subset of the domain
1699 of *PREDS. It returns false if ONE_PRED's domain is
1700 not a subset of any of the sub-domains of PREDS
1701 (corresponding to each individual chain in it), even
1702 though it may still be a subset of the whole domain
1703 of PREDS, which is the union (ORed) of all its subdomains.
1704 In other words, the result is conservative. */
1705
1706 static bool
1707 is_included_in (vec<use_pred_info_t> one_pred,
1708 vec<use_pred_info_t> *preds,
1709 size_t n)
1710 {
1711 size_t i;
1712
1713 for (i = 0; i < n; i++)
1714 {
1715 if (is_pred_chain_subset_of (one_pred, preds[i]))
1716 return true;
1717 }
1718
1719 return false;
1720 }
1721
1722 /* Compares two predicate sets PREDS1 and PREDS2 and returns
1723 true if the domain defined by PREDS1 is a superset
1724 of PREDS2's domain. N1 and N2 are array sizes of PREDS1 and
1725 PREDS2 respectively. The implementation chooses not to build
1726 generic trees (and rely on the folding capability of the
1727 compiler), but instead performs brute force comparison of
1728 individual predicate chains (won't be a compile time problem
1729 as the chains are pretty short). When the function returns
1730 false, it does not necessarily mean *PREDS1 is not a superset
1731 of *PREDS2, but means it may not be so since the analysis can
1732 not prove it. In such cases, false warnings may still be
1733 emitted. */
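/* For instance, a definition guarded by (n > 0) is a superset of a use
   guarded by (n > 0) .AND. (m < 2): every chain of the use predicate set
   is included in some chain of the definition predicate set, so the use
   is considered properly guarded.  */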
1734
1735 static bool
1736 is_superset_of (vec<use_pred_info_t> *preds1,
1737 size_t n1,
1738 vec<use_pred_info_t> *preds2,
1739 size_t n2)
1740 {
1741 size_t i;
1742 vec<use_pred_info_t> one_pred_chain;
1743
1744 for (i = 0; i < n2; i++)
1745 {
1746 one_pred_chain = preds2[i];
1747 if (!is_included_in (one_pred_chain, preds1, n1))
1748 return false;
1749 }
1750
1751 return true;
1752 }
1753
1754 /* Comparison function used by qsort. It is used to
1755 sort predicate chains to allow predicate
1756 simplification. */
1757
1758 static int
1759 pred_chain_length_cmp (const void *p1, const void *p2)
1760 {
1761 use_pred_info_t i1, i2;
1762 vec<use_pred_info_t> const *chain1
1763 = (vec<use_pred_info_t> const *)p1;
1764 vec<use_pred_info_t> const *chain2
1765 = (vec<use_pred_info_t> const *)p2;
1766
1767 if (chain1->length () != chain2->length ())
1768 return (chain1->length () - chain2->length ());
1769
1770 i1 = (*chain1)[0];
1771 i2 = (*chain2)[0];
1772
1773 /* Allow predicates with a similar prefix to come together. */
1774 if (!i1->invert && i2->invert)
1775 return -1;
1776 else if (i1->invert && !i2->invert)
1777 return 1;
1778
1779 return gimple_uid (i1->cond) - gimple_uid (i2->cond);
1780 }
1781
1782 /* x OR (!x AND y) is equivalent to x OR y.
1783 This function normalizes x1 OR (!x1 AND x2) OR (!x1 AND !x2 AND x3)
1784 into x1 OR x2 OR x3. PREDS is the predicate chains, and N is
1785 the number of chains. Returns true if normalization happens. */
1786
1787 static bool
1788 normalize_preds (vec<use_pred_info_t> *preds, size_t *n)
1789 {
1790 size_t i, j, ll;
1791 vec<use_pred_info_t> pred_chain;
1792 vec<use_pred_info_t> x = vNULL;
1793 use_pred_info_t xj = 0, nxj = 0;
1794
1795 if (*n < 2)
1796 return false;
1797
1798 /* First sort the chains in ascending order of lengths. */
1799 qsort (preds, *n, sizeof (void *), pred_chain_length_cmp);
1800 pred_chain = preds[0];
1801 ll = pred_chain.length ();
1802 if (ll != 1)
1803 {
1804 if (ll == 2)
1805 {
1806 use_pred_info_t xx, yy, xx2, nyy;
1807 vec<use_pred_info_t> pred_chain2 = preds[1];
1808 if (pred_chain2.length () != 2)
1809 return false;
1810
1811 /* See if the simplification (x AND y) OR (x AND !y) => x is possible. */
1812 xx = pred_chain[0];
1813 yy = pred_chain[1];
1814 xx2 = pred_chain2[0];
1815 nyy = pred_chain2[1];
1816 if (gimple_cond_lhs (xx->cond) != gimple_cond_lhs (xx2->cond)
1817 || gimple_cond_rhs (xx->cond) != gimple_cond_rhs (xx2->cond)
1818 || gimple_cond_code (xx->cond) != gimple_cond_code (xx2->cond)
1819 || (xx->invert != xx2->invert))
1820 return false;
1821 if (gimple_cond_lhs (yy->cond) != gimple_cond_lhs (nyy->cond)
1822 || gimple_cond_rhs (yy->cond) != gimple_cond_rhs (nyy->cond)
1823 || gimple_cond_code (yy->cond) != gimple_cond_code (nyy->cond)
1824 || (yy->invert == nyy->invert))
1825 return false;
1826
1827 /* Now merge the first two chains. */
1828 free (yy);
1829 free (nyy);
1830 free (xx2);
1831 pred_chain.release ();
1832 pred_chain2.release ();
1833 pred_chain.safe_push (xx);
1834 preds[0] = pred_chain;
1835 for (i = 1; i < *n - 1; i++)
1836 preds[i] = preds[i + 1];
1837
1838 preds[*n - 1].create (0);
1839 *n = *n - 1;
1840 }
1841 else
1842 return false;
1843 }
1844
1845 x.safe_push (pred_chain[0]);
1846
1847 /* The loop extracts x1, x2, x3, etc from chains
1848 x1 OR (!x1 AND x2) OR (!x1 AND !x2 AND x3) OR ... */
1849 for (i = 1; i < *n; i++)
1850 {
1851 pred_chain = preds[i];
1852 if (pred_chain.length () != i + 1)
1853 return false;
1854
1855 for (j = 0; j < i; j++)
1856 {
1857 xj = x[j];
1858 nxj = pred_chain[j];
1859
1860 /* Check if nxj is !xj */
1861 if (gimple_cond_lhs (xj->cond) != gimple_cond_lhs (nxj->cond)
1862 || gimple_cond_rhs (xj->cond) != gimple_cond_rhs (nxj->cond)
1863 || gimple_cond_code (xj->cond) != gimple_cond_code (nxj->cond)
1864 || (xj->invert == nxj->invert))
1865 return false;
1866 }
1867
1868 x.safe_push (pred_chain[i]);
1869 }
1870
1871 /* Now normalize the pred chains using the extracted x1, x2, x3, etc. */
1872 for (j = 0; j < *n; j++)
1873 {
1874 use_pred_info_t t;
1875 xj = x[j];
1876
1877 t = XNEW (struct use_pred_info);
1878 *t = *xj;
1879
1880 x[j] = t;
1881 }
1882
1883 for (i = 0; i < *n; i++)
1884 {
1885 pred_chain = preds[i];
1886 for (j = 0; j < pred_chain.length (); j++)
1887 free (pred_chain[j]);
1888 pred_chain.release ();
1889 /* A new chain. */
1890 pred_chain.safe_push (x[i]);
1891 preds[i] = pred_chain;
1892 }
1893 return true;
1894 }
1895
1896
1897
1898 /* Computes the predicates that guard the use and checks
1899 if the incoming paths that have empty (or possibly
1900 empty) definition can be pruned/filtered. The function returns
1901 true if it can be determined that the use of PHI's def in
1902 USE_STMT is guarded with a predicate set not overlapping with
1903 predicate sets of all runtime paths that do not have a definition.
1904 Returns false if it is not or it can not be determined. USE_BB is
1905 the bb of the use (for phi operand use, the bb is not the bb of
1906 the phi stmt, but the src bb of the operand edge). UNINIT_OPNDS
1907 is a bit vector. If an operand of PHI is uninitialized, the
1908 corresponding bit in the vector is 1. VISIED_PHIS is a pointer
1909 set of phis being visted. */
1910
1911 static bool
1912 is_use_properly_guarded (gimple use_stmt,
1913 basic_block use_bb,
1914 gimple phi,
1915 unsigned uninit_opnds,
1916 struct pointer_set_t *visited_phis)
1917 {
1918 basic_block phi_bb;
1919 vec<use_pred_info_t> *preds = 0;
1920 vec<use_pred_info_t> *def_preds = 0;
1921 size_t num_preds = 0, num_def_preds = 0;
1922 bool has_valid_preds = false;
1923 bool is_properly_guarded = false;
1924
1925 if (pointer_set_insert (visited_phis, phi))
1926 return false;
1927
1928 phi_bb = gimple_bb (phi);
1929
1930 if (is_non_loop_exit_postdominating (use_bb, phi_bb))
1931 return false;
1932
1933 has_valid_preds = find_predicates (&preds, &num_preds,
1934 phi_bb, use_bb);
1935
1936 if (!has_valid_preds)
1937 {
1938 destroy_predicate_vecs (num_preds, preds);
1939 return false;
1940 }
1941
1942 if (dump_file)
1943 dump_predicates (use_stmt, num_preds, preds,
1944 "\nUse in stmt ");
1945
1946 has_valid_preds = find_def_preds (&def_preds,
1947 &num_def_preds, phi);
1948
1949 if (has_valid_preds)
1950 {
1951 bool normed;
1952 if (dump_file)
1953 dump_predicates (phi, num_def_preds, def_preds,
1954 "Operand defs of phi ");
1955
1956 normed = normalize_preds (def_preds, &num_def_preds);
1957 if (normed && dump_file)
1958 {
1959 fprintf (dump_file, "\nNormalized to\n");
1960 dump_predicates (phi, num_def_preds, def_preds,
1961 "Operand defs of phi ");
1962 }
1963 is_properly_guarded =
1964 is_superset_of (def_preds, num_def_preds,
1965 preds, num_preds);
1966 }
1967
1968 /* Further prune the dead incoming phi edges. */
1969 if (!is_properly_guarded)
1970 is_properly_guarded
1971 = use_pred_not_overlap_with_undef_path_pred (
1972 num_preds, preds, phi, uninit_opnds, visited_phis);
1973
1974 destroy_predicate_vecs (num_preds, preds);
1975 destroy_predicate_vecs (num_def_preds, def_preds);
1976 return is_properly_guarded;
1977 }
1978
1979 /* Searches through all uses of a potentially
1980 uninitialized variable defined by PHI and returns a use
1981 statement if the use is not properly guarded. Returns
1982 NULL if all uses are properly guarded. UNINIT_OPNDS is a bit
1983 vector holding the position(s) of uninit PHI operands. WORKLIST
1984 is the vector of candidate phis that may be updated by this
1985 function. ADDED_TO_WORKLIST is the pointer set tracking
1986 whether the new phi is already in the worklist. */
1987
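/* Illustrative sketch (added for exposition): when the unguarded use is
   itself a phi, e.g.

       x_2 = PHI <x_1(D), 1>     phi with an uninitialized operand
       ...
       x_3 = PHI <x_2, 5>        unguarded use of x_2 in another phi

   the using phi (here the definition of x_3) is pushed onto WORKLIST and
   x_2 is recorded in possibly_undefined_names, so that the real (non-phi)
   uses of x_3 are analyzed later with the same machinery.  */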
1988 static gimple
1989 find_uninit_use (gimple phi, unsigned uninit_opnds,
1990 vec<gimple> *worklist,
1991 struct pointer_set_t *added_to_worklist)
1992 {
1993 tree phi_result;
1994 use_operand_p use_p;
1995 gimple use_stmt;
1996 imm_use_iterator iter;
1997
1998 phi_result = gimple_phi_result (phi);
1999
2000 FOR_EACH_IMM_USE_FAST (use_p, iter, phi_result)
2001 {
2002 struct pointer_set_t *visited_phis;
2003 basic_block use_bb;
2004
2005 use_stmt = USE_STMT (use_p);
2006 if (is_gimple_debug (use_stmt))
2007 continue;
2008
2009 visited_phis = pointer_set_create ();
2010
2011 if (gimple_code (use_stmt) == GIMPLE_PHI)
2012 use_bb = gimple_phi_arg_edge (use_stmt,
2013 PHI_ARG_INDEX_FROM_USE (use_p))->src;
2014 else
2015 use_bb = gimple_bb (use_stmt);
2016
2017 if (is_use_properly_guarded (use_stmt,
2018 use_bb,
2019 phi,
2020 uninit_opnds,
2021 visited_phis))
2022 {
2023 pointer_set_destroy (visited_phis);
2024 continue;
2025 }
2026 pointer_set_destroy (visited_phis);
2027
2028 if (dump_file && (dump_flags & TDF_DETAILS))
2029 {
2030 fprintf (dump_file, "[CHECK]: Found unguarded use: ");
2031 print_gimple_stmt (dump_file, use_stmt, 0, 0);
2032 }
2033 /* Found one real use, return. */
2034 if (gimple_code (use_stmt) != GIMPLE_PHI)
2035 return use_stmt;
2036
2037 /* Found a phi use that is not guarded,
2038 add the phi to the worklist. */
2039 if (!pointer_set_insert (added_to_worklist,
2040 use_stmt))
2041 {
2042 if (dump_file && (dump_flags & TDF_DETAILS))
2043 {
2044 fprintf (dump_file, "[WORKLIST]: Update worklist with phi: ");
2045 print_gimple_stmt (dump_file, use_stmt, 0, 0);
2046 }
2047
2048 worklist->safe_push (use_stmt);
2049 pointer_set_insert (possibly_undefined_names, phi_result);
2050 }
2051 }
2052
2053 return NULL;
2054 }
2055
2056 /* Look for inputs to PHI that are SSA_NAMEs with empty definitions
2057 and give a warning if there exists a runtime path from the entry to a
2058 use of the PHI def that does not contain a definition. In other words,
2059 the warning is issued on the real use. The more dead paths the compiler
2060 can prune, the fewer false positives the warning produces. WORKLIST
2061 is a vector of candidate phis to be examined. ADDED_TO_WORKLIST is
2062 a pointer set tracking whether a new phi has already been added to the worklist. */
2063
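/* Illustrative sketch (added for exposition; 'bar' is hypothetical): with
   optimization and -Wmaybe-uninitialized enabled, a function such as

       int bar (int c)
       {
         int x;
         if (c)
           x = 1;
         return x;    <- the phi merging the uninitialized value of 'x'
                         with 1 reaches this unguarded use, so this pass
                         typically reports "'x' may be used uninitialized
                         in this function".
       }

   is flagged here, unless the predicate analysis can prove that the path
   without a definition is unreachable.  */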
2064 static void
2065 warn_uninitialized_phi (gimple phi, vec<gimple> *worklist,
2066 struct pointer_set_t *added_to_worklist)
2067 {
2068 unsigned uninit_opnds;
2069 gimple uninit_use_stmt = 0;
2070 tree uninit_op;
2071
2072 /* Don't look at virtual operands. */
2073 if (virtual_operand_p (gimple_phi_result (phi)))
2074 return;
2075
2076 uninit_opnds = compute_uninit_opnds_pos (phi);
2077
2078 if (MASK_EMPTY (uninit_opnds))
2079 return;
2080
2081 if (dump_file && (dump_flags & TDF_DETAILS))
2082 {
2083 fprintf (dump_file, "[CHECK]: examining phi: ");
2084 print_gimple_stmt (dump_file, phi, 0, 0);
2085 }
2086
2087 /* Now check if we have any use of the value without proper guard. */
2088 uninit_use_stmt = find_uninit_use (phi, uninit_opnds,
2089 worklist, added_to_worklist);
2090
2091 /* All uses are properly guarded. */
2092 if (!uninit_use_stmt)
2093 return;
2094
2095 uninit_op = gimple_phi_arg_def (phi, MASK_FIRST_SET_BIT (uninit_opnds));
2096 if (SSA_NAME_VAR (uninit_op) == NULL_TREE)
2097 return;
2098 warn_uninit (OPT_Wmaybe_uninitialized, uninit_op, SSA_NAME_VAR (uninit_op),
2099 SSA_NAME_VAR (uninit_op),
2100 "%qD may be used uninitialized in this function",
2101 uninit_use_stmt);
2102
2103 }
2104
2105
2106 /* Entry point to the late uninitialized warning pass. */
2107
2108 static unsigned int
2109 execute_late_warn_uninitialized (void)
2110 {
2111 basic_block bb;
2112 gimple_stmt_iterator gsi;
2113 vec<gimple> worklist = vNULL;
2114 struct pointer_set_t *added_to_worklist;
2115
2116 calculate_dominance_info (CDI_DOMINATORS);
2117 calculate_dominance_info (CDI_POST_DOMINATORS);
2118 /* Re-do the plain uninitialized variable check, as optimization may have
2119 straightened control flow. Do this first so that we don't accidentally
2120 get a "may be" warning when we'd have seen an "is" warning later. */
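     /* Illustrative sketch (added for exposition; 'never_true' is
        hypothetical): if the guard of the only definition is folded away,

            int x;
            if (never_true)    <- proven false and removed by optimization
              x = 1;
            return x;

        the use of 'x' becomes unconditionally reached with no definition,
        and the re-check below reports the stronger "is used uninitialized"
        warning instead of "may be used uninitialized".  */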
2121 warn_uninitialized_vars (/*warn_possibly_uninitialized=*/1);
2122
2123 timevar_push (TV_TREE_UNINIT);
2124
2125 possibly_undefined_names = pointer_set_create ();
2126 added_to_worklist = pointer_set_create ();
2127
2128 /* Initialize the worklist. */
2129 FOR_EACH_BB (bb)
2130 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2131 {
2132 gimple phi = gsi_stmt (gsi);
2133 size_t n, i;
2134
2135 n = gimple_phi_num_args (phi);
2136
2137 /* Don't look at virtual operands. */
2138 if (virtual_operand_p (gimple_phi_result (phi)))
2139 continue;
2140
2141 for (i = 0; i < n; ++i)
2142 {
2143 tree op = gimple_phi_arg_def (phi, i);
2144 if (TREE_CODE (op) == SSA_NAME
2145 && uninit_undefined_value_p (op))
2146 {
2147 worklist.safe_push (phi);
2148 pointer_set_insert (added_to_worklist, phi);
2149 if (dump_file && (dump_flags & TDF_DETAILS))
2150 {
2151 fprintf (dump_file, "[WORKLIST]: add to initial list: ");
2152 print_gimple_stmt (dump_file, phi, 0, 0);
2153 }
2154 break;
2155 }
2156 }
2157 }
2158
2159 while (worklist.length () != 0)
2160 {
2161 gimple cur_phi = 0;
2162 cur_phi = worklist.pop ();
2163 warn_uninitialized_phi (cur_phi, &worklist, added_to_worklist);
2164 }
2165
2166 worklist.release ();
2167 pointer_set_destroy (added_to_worklist);
2168 pointer_set_destroy (possibly_undefined_names);
2169 possibly_undefined_names = NULL;
2170 free_dominance_info (CDI_POST_DOMINATORS);
2171 timevar_pop (TV_TREE_UNINIT);
2172 return 0;
2173 }
2174
2175 static bool
2176 gate_warn_uninitialized (void)
2177 {
2178 return warn_uninitialized != 0;
2179 }
2180
2181 namespace {
2182
2183 const pass_data pass_data_late_warn_uninitialized =
2184 {
2185 GIMPLE_PASS, /* type */
2186 "uninit", /* name */
2187 OPTGROUP_NONE, /* optinfo_flags */
2188 true, /* has_gate */
2189 true, /* has_execute */
2190 TV_NONE, /* tv_id */
2191 PROP_ssa, /* properties_required */
2192 0, /* properties_provided */
2193 0, /* properties_destroyed */
2194 0, /* todo_flags_start */
2195 0, /* todo_flags_finish */
2196 };
2197
2198 class pass_late_warn_uninitialized : public gimple_opt_pass
2199 {
2200 public:
2201 pass_late_warn_uninitialized (gcc::context *ctxt)
2202 : gimple_opt_pass (pass_data_late_warn_uninitialized, ctxt)
2203 {}
2204
2205 /* opt_pass methods: */
2206 opt_pass * clone () { return new pass_late_warn_uninitialized (m_ctxt); }
2207 bool gate () { return gate_warn_uninitialized (); }
2208 unsigned int execute () { return execute_late_warn_uninitialized (); }
2209
2210 }; // class pass_late_warn_uninitialized
2211
2212 } // anon namespace
2213
2214 gimple_opt_pass *
2215 make_pass_late_warn_uninitialized (gcc::context *ctxt)
2216 {
2217 return new pass_late_warn_uninitialized (ctxt);
2218 }
2219
2220
2221 static unsigned int
2222 execute_early_warn_uninitialized (void)
2223 {
2224 /* Currently, this pass always runs, but
2225 execute_late_warn_uninitialized only runs with optimization. With
2226 optimization we want to warn about possibly uninitialized uses as late
2227 as possible, so don't do it here. However, without
2228 optimization we need to warn here about "may be uninitialized".
2229 */
2230 calculate_dominance_info (CDI_POST_DOMINATORS);
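  /* Illustrative sketch (added for exposition; 'f' and 'g' are
     hypothetical):

         int f (void) { int x; return x; }
           -> "'x' is used uninitialized in this function", reported
              regardless of the optimization level.

         int g (int c) { int x; if (c) return x; return 0; }
           -> the use of 'x' is reached only when 'c' is true, so the
              "may be used uninitialized" warning is issued by this early
              pass only when not optimizing; with optimization it is left
              to the late pass.  */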
2231
2232 warn_uninitialized_vars (/*warn_possibly_uninitialized=*/!optimize);
2233
2234 /* Post-dominator information cannot be reliably updated. Free it
2235 after use. */
2236
2237 free_dominance_info (CDI_POST_DOMINATORS);
2238 return 0;
2239 }
2240
2241
2242 namespace {
2243
2244 const pass_data pass_data_early_warn_uninitialized =
2245 {
2246 GIMPLE_PASS, /* type */
2247 "*early_warn_uninitialized", /* name */
2248 OPTGROUP_NONE, /* optinfo_flags */
2249 true, /* has_gate */
2250 true, /* has_execute */
2251 TV_TREE_UNINIT, /* tv_id */
2252 PROP_ssa, /* properties_required */
2253 0, /* properties_provided */
2254 0, /* properties_destroyed */
2255 0, /* todo_flags_start */
2256 0, /* todo_flags_finish */
2257 };
2258
2259 class pass_early_warn_uninitialized : public gimple_opt_pass
2260 {
2261 public:
2262 pass_early_warn_uninitialized (gcc::context *ctxt)
2263 : gimple_opt_pass (pass_data_early_warn_uninitialized, ctxt)
2264 {}
2265
2266 /* opt_pass methods: */
2267 bool gate () { return gate_warn_uninitialized (); }
2268 unsigned int execute () { return execute_early_warn_uninitialized (); }
2269
2270 }; // class pass_early_warn_uninitialized
2271
2272 } // anon namespace
2273
2274 gimple_opt_pass *
2275 make_pass_early_warn_uninitialized (gcc::context *ctxt)
2276 {
2277 return new pass_early_warn_uninitialized (ctxt);
2278 }
2279
2280