1 /* Exception handling semantics and decomposition for trees.
2 Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
3 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "tree.h"
26 #include "flags.h"
27 #include "function.h"
28 #include "except.h"
29 #include "pointer-set.h"
30 #include "tree-flow.h"
31 #include "tree-dump.h"
32 #include "tree-inline.h"
33 #include "tree-iterator.h"
34 #include "tree-pass.h"
35 #include "timevar.h"
36 #include "langhooks.h"
37 #include "ggc.h"
38 #include "diagnostic-core.h"
39 #include "gimple.h"
40 #include "target.h"
41
42 /* In some instances a tree and a gimple need to be stored in the same
43 table, e.g. in a hash table. This union makes that possible. */
44 typedef union {tree *tp; tree t; gimple g;} treemple;
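
/* Illustrative sketch: with a treemple, one hash table can be keyed
   off either a label or a statement, e.g.

     treemple key;
     key.t = label;     <- key off a LABEL_DECL (a tree)
     key.g = try_stmt;  <- or off a GIMPLE_TRY (a gimple)

   Which member is active is implied by context, as in the
   finally_tree code below.  */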
45
46 /* Nonzero if we are using EH to handle cleanups. */
47 static int using_eh_for_cleanups_p = 0;
48
49 void
50 using_eh_for_cleanups (void)
51 {
52 using_eh_for_cleanups_p = 1;
53 }
54
55 /* Misc functions used in this file. */
56
57 /* Compare and hash for any structure which begins with a canonical
58 pointer. Assumes all pointers are interchangeable, an assumption
59 gcc already makes elsewhere. */
60
61 static int
62 struct_ptr_eq (const void *a, const void *b)
63 {
64 const void * const * x = (const void * const *) a;
65 const void * const * y = (const void * const *) b;
66 return *x == *y;
67 }
68
69 static hashval_t
70 struct_ptr_hash (const void *a)
71 {
72 const void * const * x = (const void * const *) a;
73 return (size_t)*x >> 4;
74 }
75
76
77 /* Remember and lookup EH landing pad data for arbitrary statements.
78 Really this means any statement that could_throw_p. We could
79 stuff this information into the stmt_ann data structure, but:
80
81 (1) We absolutely rely on this information being kept until
82 we get to rtl. Once we're done with lowering here, if we lose
83 the information there's no way to recover it!
84
85 (2) There are many more statements that *cannot* throw as
86 compared to those that can. We should be saving some amount
87 of space by only allocating memory for those that can throw. */
88
89 /* Add statement T in function IFUN to landing pad NUM. */
90
91 void
92 add_stmt_to_eh_lp_fn (struct function *ifun, gimple t, int num)
93 {
94 struct throw_stmt_node *n;
95 void **slot;
96
97 gcc_assert (num != 0);
98
99 n = ggc_alloc_throw_stmt_node ();
100 n->stmt = t;
101 n->lp_nr = num;
102
103 if (!get_eh_throw_stmt_table (ifun))
104 set_eh_throw_stmt_table (ifun, htab_create_ggc (31, struct_ptr_hash,
105 struct_ptr_eq,
106 ggc_free));
107
108 slot = htab_find_slot (get_eh_throw_stmt_table (ifun), n, INSERT);
109 gcc_assert (!*slot);
110 *slot = n;
111 }
112
113 /* Add statement T in the current function (cfun) to EH landing pad NUM. */
114
115 void
116 add_stmt_to_eh_lp (gimple t, int num)
117 {
118 add_stmt_to_eh_lp_fn (cfun, t, num);
119 }
120
121 /* Add statement T to the single EH landing pad in REGION. */
122
123 static void
124 record_stmt_eh_region (eh_region region, gimple t)
125 {
126 if (region == NULL)
127 return;
128 if (region->type == ERT_MUST_NOT_THROW)
129 add_stmt_to_eh_lp_fn (cfun, t, -region->index);
130 else
131 {
132 eh_landing_pad lp = region->landing_pads;
133 if (lp == NULL)
134 lp = gen_eh_landing_pad (region);
135 else
136 gcc_assert (lp->next_lp == NULL);
137 add_stmt_to_eh_lp_fn (cfun, t, lp->index);
138 }
139 }
140
141
142 /* Remove statement T in function IFUN from its EH landing pad. */
143
144 bool
145 remove_stmt_from_eh_lp_fn (struct function *ifun, gimple t)
146 {
147 struct throw_stmt_node dummy;
148 void **slot;
149
150 if (!get_eh_throw_stmt_table (ifun))
151 return false;
152
153 dummy.stmt = t;
154 slot = htab_find_slot (get_eh_throw_stmt_table (ifun), &dummy,
155 NO_INSERT);
156 if (slot)
157 {
158 htab_clear_slot (get_eh_throw_stmt_table (ifun), slot);
159 return true;
160 }
161 else
162 return false;
163 }
164
165
166 /* Remove statement T in the current function (cfun) from its
167 EH landing pad. */
168
169 bool
170 remove_stmt_from_eh_lp (gimple t)
171 {
172 return remove_stmt_from_eh_lp_fn (cfun, t);
173 }
174
175 /* Determine if statement T is inside an EH region in function IFUN.
176 Positive numbers indicate a landing pad index; negative numbers
177 indicate a MUST_NOT_THROW region index; zero indicates that the
178 statement is not recorded in the region table. */
179
180 int
181 lookup_stmt_eh_lp_fn (struct function *ifun, gimple t)
182 {
183 struct throw_stmt_node *p, n;
184
185 if (ifun->eh->throw_stmt_table == NULL)
186 return 0;
187
188 n.stmt = t;
189 p = (struct throw_stmt_node *) htab_find (ifun->eh->throw_stmt_table, &n);
190 return p ? p->lp_nr : 0;
191 }
192
193 /* Likewise, but always use the current function. */
194
195 int
196 lookup_stmt_eh_lp (gimple t)
197 {
198 /* We can get called from initialized data when -fnon-call-exceptions
199 is on; prevent crash. */
200 if (!cfun)
201 return 0;
202 return lookup_stmt_eh_lp_fn (cfun, t);
203 }
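
/* Illustrative use of the statement-to-landing-pad API above, for a
   hypothetical throwing statement STMT recorded against landing pad 1:

     add_stmt_to_eh_lp (stmt, 1);
     gcc_assert (lookup_stmt_eh_lp (stmt) == 1);
     remove_stmt_from_eh_lp (stmt);
     gcc_assert (lookup_stmt_eh_lp (stmt) == 0);

   A negative number passed to add_stmt_to_eh_lp would likewise denote
   a MUST_NOT_THROW region rather than a landing pad.  */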
204
205 /* First pass of EH node decomposition. Build up a tree of GIMPLE_TRY_FINALLY
206 nodes and LABEL_DECL nodes. We will use this during the second phase to
207 determine if a goto leaves the body of a TRY_FINALLY_EXPR node. */
208
209 struct finally_tree_node
210 {
211 /* When storing a GIMPLE_TRY, we have to record a gimple. However
212 when deciding whether a GOTO to a certain LABEL_DECL (which is a
213 tree) leaves the TRY block, it's necessary to record a tree in
214 this field. Thus a treemple is used. */
215 treemple child;
216 gimple parent;
217 };
218
219 /* Note that this table is *not* marked GTY. It is short-lived. */
220 static htab_t finally_tree;
221
222 static void
223 record_in_finally_tree (treemple child, gimple parent)
224 {
225 struct finally_tree_node *n;
226 void **slot;
227
228 n = XNEW (struct finally_tree_node);
229 n->child = child;
230 n->parent = parent;
231
232 slot = htab_find_slot (finally_tree, n, INSERT);
233 gcc_assert (!*slot);
234 *slot = n;
235 }
236
237 static void
238 collect_finally_tree (gimple stmt, gimple region);
239
240 /* Walk the gimple sequence SEQ, calling collect_finally_tree on each
241 statement to record all GIMPLE_LABEL and GIMPLE_TRY statements. */
242
243 static void
244 collect_finally_tree_1 (gimple_seq seq, gimple region)
245 {
246 gimple_stmt_iterator gsi;
247
248 for (gsi = gsi_start (seq); !gsi_end_p (gsi); gsi_next (&gsi))
249 collect_finally_tree (gsi_stmt (gsi), region);
250 }
251
252 static void
253 collect_finally_tree (gimple stmt, gimple region)
254 {
255 treemple temp;
256
257 switch (gimple_code (stmt))
258 {
259 case GIMPLE_LABEL:
260 temp.t = gimple_label_label (stmt);
261 record_in_finally_tree (temp, region);
262 break;
263
264 case GIMPLE_TRY:
265 if (gimple_try_kind (stmt) == GIMPLE_TRY_FINALLY)
266 {
267 temp.g = stmt;
268 record_in_finally_tree (temp, region);
269 collect_finally_tree_1 (gimple_try_eval (stmt), stmt);
270 collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
271 }
272 else if (gimple_try_kind (stmt) == GIMPLE_TRY_CATCH)
273 {
274 collect_finally_tree_1 (gimple_try_eval (stmt), region);
275 collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
276 }
277 break;
278
279 case GIMPLE_CATCH:
280 collect_finally_tree_1 (gimple_catch_handler (stmt), region);
281 break;
282
283 case GIMPLE_EH_FILTER:
284 collect_finally_tree_1 (gimple_eh_filter_failure (stmt), region);
285 break;
286
287 default:
288 /* A type, a decl, or some kind of statement that we're not
289 interested in. Don't walk them. */
290 break;
291 }
292 }
293
294
295 /* Use the finally tree to determine if a jump from START to TARGET
296 would leave the try_finally node that START lives in. */
297
298 static bool
299 outside_finally_tree (treemple start, gimple target)
300 {
301 struct finally_tree_node n, *p;
302
303 do
304 {
305 n.child = start;
306 p = (struct finally_tree_node *) htab_find (finally_tree, &n);
307 if (!p)
308 return true;
309 start.g = p->parent;
310 }
311 while (start.g != target);
312
313 return false;
314 }
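
/* For example, given the illustrative pseudo-GIMPLE

     try {
       L1: ...
       goto L2;
     } finally {
       ...
     }
     L2: ...

   collect_finally_tree records L1 with the GIMPLE_TRY as its parent,
   so outside_finally_tree (L1, try) walks up to the try and returns
   false, whereas the walk from L2 never reaches the try and the
   function returns true.  */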
315
316 /* Second pass of EH node decomposition. Actually transform the GIMPLE_TRY
317 nodes into a set of gotos, magic labels, and eh regions.
318 The eh region creation is straightforward, but frobbing all the gotos
319 and such into shape isn't. */
320
321 /* The sequence into which we record all EH stuff. This will be
322 placed at the end of the function when we're all done. */
323 static gimple_seq eh_seq;
324
325 /* Record whether an EH region contains something that can throw,
326 indexed by EH region number. */
327 static bitmap eh_region_may_contain_throw_map;
328
329 /* The GOTO_QUEUE is an array of GIMPLE_GOTO and GIMPLE_RETURN
330 statements that are seen to escape this GIMPLE_TRY_FINALLY node.
331 The idea is to record a gimple statement for everything except for
332 the conditionals, which get their labels recorded. Since labels are
333 of type 'tree', we need this node to store both gimple and tree
334 objects. REPL_STMT is the sequence used to replace the goto/return
335 statement. CONT_STMT is used to store the statement that allows
336 the return/goto to jump to the original destination. */
337
338 struct goto_queue_node
339 {
340 treemple stmt;
341 gimple_seq repl_stmt;
342 gimple cont_stmt;
343 int index;
344 /* This is used when index >= 0 to indicate that stmt is a label (as
345 opposed to a goto stmt). */
346 int is_label;
347 };
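
/* As an illustrative sketch, for

     try { if (p) goto out; else return; } finally { ... }

   the queue receives one entry for the escaping goto (is_label true,
   index pointing into dest_array for "out") and one for the return
   (is_label false, index -1).  */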
348
349 /* State of the world while lowering. */
350
351 struct leh_state
352 {
353 /* What's "current" while constructing the eh region tree. These
354 correspond to variables of the same name in cfun->eh, which we
355 don't have easy access to. */
356 eh_region cur_region;
357
358 /* What's "current" for the purposes of __builtin_eh_pointer. For
359 a CATCH, this is the associated TRY. For an EH_FILTER, this is
360 the associated ALLOWED_EXCEPTIONS, etc. */
361 eh_region ehp_region;
362
363 /* Processing of TRY_FINALLY requires a bit more state. This is
364 split out into a separate structure so that we don't have to
365 copy so much when processing other nodes. */
366 struct leh_tf_state *tf;
367 };
368
369 struct leh_tf_state
370 {
371 /* Pointer to the GIMPLE_TRY_FINALLY node under discussion. The
372 try_finally_expr is the original GIMPLE_TRY_FINALLY. We need to retain
373 this so that outside_finally_tree can reliably reference the tree used
374 in the collect_finally_tree data structures. */
375 gimple try_finally_expr;
376 gimple top_p;
377
378 /* While lowering, top_p is usually expanded into multiple statements;
379 this field stores the resulting sequence. */
380 gimple_seq top_p_seq;
381
382 /* The state outside this try_finally node. */
383 struct leh_state *outer;
384
385 /* The exception region created for it. */
386 eh_region region;
387
388 /* The goto queue. */
389 struct goto_queue_node *goto_queue;
390 size_t goto_queue_size;
391 size_t goto_queue_active;
392
393 /* Pointer map to help in searching goto_queue when it is large. */
394 struct pointer_map_t *goto_queue_map;
395
396 /* The set of unique labels seen as entries in the goto queue. */
397 VEC(tree,heap) *dest_array;
398
399 /* A label to be added at the end of the completed transformed
400 sequence. It will be set if may_fallthru was true *at one time*,
401 though subsequent transformations may have cleared that flag. */
402 tree fallthru_label;
403
404 /* True if it is possible to fall out the bottom of the try block.
405 Cleared if the fallthru is converted to a goto. */
406 bool may_fallthru;
407
408 /* True if any entry in goto_queue is a GIMPLE_RETURN. */
409 bool may_return;
410
411 /* True if the finally block can receive an exception edge.
412 Cleared if the exception case is handled by code duplication. */
413 bool may_throw;
414 };
415
416 static gimple_seq lower_eh_must_not_throw (struct leh_state *, gimple);
417
418 /* Search for STMT in the goto queue. Return the replacement,
419 or null if the statement isn't in the queue. */
420
421 #define LARGE_GOTO_QUEUE 20
422
423 static void lower_eh_constructs_1 (struct leh_state *state, gimple_seq seq);
424
425 static gimple_seq
426 find_goto_replacement (struct leh_tf_state *tf, treemple stmt)
427 {
428 unsigned int i;
429 void **slot;
430
431 if (tf->goto_queue_active < LARGE_GOTO_QUEUE)
432 {
433 for (i = 0; i < tf->goto_queue_active; i++)
434 if (tf->goto_queue[i].stmt.g == stmt.g)
435 return tf->goto_queue[i].repl_stmt;
436 return NULL;
437 }
438
439 /* If we have a large number of entries in the goto_queue, create a
440 pointer map and use that for searching. */
441
442 if (!tf->goto_queue_map)
443 {
444 tf->goto_queue_map = pointer_map_create ();
445 for (i = 0; i < tf->goto_queue_active; i++)
446 {
447 slot = pointer_map_insert (tf->goto_queue_map,
448 tf->goto_queue[i].stmt.g);
449 gcc_assert (*slot == NULL);
450 *slot = &tf->goto_queue[i];
451 }
452 }
453
454 slot = pointer_map_contains (tf->goto_queue_map, stmt.g);
455 if (slot != NULL)
456 return (((struct goto_queue_node *) *slot)->repl_stmt);
457
458 return NULL;
459 }
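
/* Design note: below LARGE_GOTO_QUEUE entries a linear scan is cheaper
   than building a map; past that, the pointer map is built lazily on
   the first lookup and reused thereafter. This is why
   record_in_goto_queue asserts the map does not yet exist: entries
   added after the map was built would be missed by lookups.  */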
460
461 /* A subroutine of replace_goto_queue_1. Handles the sub-clauses of a
462 lowered GIMPLE_COND. If, by chance, the replacement is a simple goto,
463 then we can just splat it in, otherwise we add the new stmts immediately
464 after the GIMPLE_COND and redirect. */
465
466 static void
467 replace_goto_queue_cond_clause (tree *tp, struct leh_tf_state *tf,
468 gimple_stmt_iterator *gsi)
469 {
470 tree label;
471 gimple_seq new_seq;
472 treemple temp;
473 location_t loc = gimple_location (gsi_stmt (*gsi));
474
475 temp.tp = tp;
476 new_seq = find_goto_replacement (tf, temp);
477 if (!new_seq)
478 return;
479
480 if (gimple_seq_singleton_p (new_seq)
481 && gimple_code (gimple_seq_first_stmt (new_seq)) == GIMPLE_GOTO)
482 {
483 *tp = gimple_goto_dest (gimple_seq_first_stmt (new_seq));
484 return;
485 }
486
487 label = create_artificial_label (loc);
488 /* Set the new label for the GIMPLE_COND.  */
489 *tp = label;
490
491 gsi_insert_after (gsi, gimple_build_label (label), GSI_CONTINUE_LINKING);
492 gsi_insert_seq_after (gsi, gimple_seq_copy (new_seq), GSI_CONTINUE_LINKING);
493 }
494
495 /* The real work of replace_goto_queue. Returns with GSI updated to
496 point to the next statement. */
497
498 static void replace_goto_queue_stmt_list (gimple_seq, struct leh_tf_state *);
499
500 static void
501 replace_goto_queue_1 (gimple stmt, struct leh_tf_state *tf,
502 gimple_stmt_iterator *gsi)
503 {
504 gimple_seq seq;
505 treemple temp;
506 temp.g = NULL;
507
508 switch (gimple_code (stmt))
509 {
510 case GIMPLE_GOTO:
511 case GIMPLE_RETURN:
512 temp.g = stmt;
513 seq = find_goto_replacement (tf, temp);
514 if (seq)
515 {
516 gsi_insert_seq_before (gsi, gimple_seq_copy (seq), GSI_SAME_STMT);
517 gsi_remove (gsi, false);
518 return;
519 }
520 break;
521
522 case GIMPLE_COND:
523 replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 2), tf, gsi);
524 replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 3), tf, gsi);
525 break;
526
527 case GIMPLE_TRY:
528 replace_goto_queue_stmt_list (gimple_try_eval (stmt), tf);
529 replace_goto_queue_stmt_list (gimple_try_cleanup (stmt), tf);
530 break;
531 case GIMPLE_CATCH:
532 replace_goto_queue_stmt_list (gimple_catch_handler (stmt), tf);
533 break;
534 case GIMPLE_EH_FILTER:
535 replace_goto_queue_stmt_list (gimple_eh_filter_failure (stmt), tf);
536 break;
537
538 default:
539 /* These won't have gotos in them. */
540 break;
541 }
542
543 gsi_next (gsi);
544 }
545
546 /* A subroutine of replace_goto_queue. Handles GIMPLE_SEQ. */
547
548 static void
549 replace_goto_queue_stmt_list (gimple_seq seq, struct leh_tf_state *tf)
550 {
551 gimple_stmt_iterator gsi = gsi_start (seq);
552
553 while (!gsi_end_p (gsi))
554 replace_goto_queue_1 (gsi_stmt (gsi), tf, &gsi);
555 }
556
557 /* Replace all goto queue members. */
558
559 static void
560 replace_goto_queue (struct leh_tf_state *tf)
561 {
562 if (tf->goto_queue_active == 0)
563 return;
564 replace_goto_queue_stmt_list (tf->top_p_seq, tf);
565 replace_goto_queue_stmt_list (eh_seq, tf);
566 }
567
568 /* Add a new record to the goto queue contained in TF. NEW_STMT is the
569 data to be added, IS_LABEL indicates whether NEW_STMT is a label or
570 a gimple return. */
571
572 static void
573 record_in_goto_queue (struct leh_tf_state *tf,
574 treemple new_stmt,
575 int index,
576 bool is_label)
577 {
578 size_t active, size;
579 struct goto_queue_node *q;
580
581 gcc_assert (!tf->goto_queue_map);
582
583 active = tf->goto_queue_active;
584 size = tf->goto_queue_size;
585 if (active >= size)
586 {
587 size = (size ? size * 2 : 32);
588 tf->goto_queue_size = size;
589 tf->goto_queue
590 = XRESIZEVEC (struct goto_queue_node, tf->goto_queue, size);
591 }
592
593 q = &tf->goto_queue[active];
594 tf->goto_queue_active = active + 1;
595
596 memset (q, 0, sizeof (*q));
597 q->stmt = new_stmt;
598 q->index = index;
599 q->is_label = is_label;
600 }
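
/* Growth is geometric, so N insertions cost amortized O(N). E.g.
   (illustrative) pushing 100 nodes allocates 32 slots up front and
   then doubles twice:

     active:  0..31   32..63   64..99
     size:       32       64      128  */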
601
602 /* Record LABEL in the goto queue contained in TF.
603 TF is non-null. */
604
605 static void
606 record_in_goto_queue_label (struct leh_tf_state *tf, treemple stmt, tree label)
607 {
608 int index;
609 treemple temp, new_stmt;
610
611 if (!label)
612 return;
613
614 /* Computed and non-local gotos do not get processed. Given
615 their nature we can neither tell whether we've escaped the
616 finally block nor redirect them if we knew. */
617 if (TREE_CODE (label) != LABEL_DECL)
618 return;
619
620 /* No need to record gotos that don't leave the try block. */
621 temp.t = label;
622 if (!outside_finally_tree (temp, tf->try_finally_expr))
623 return;
624
625 if (! tf->dest_array)
626 {
627 tf->dest_array = VEC_alloc (tree, heap, 10);
628 VEC_quick_push (tree, tf->dest_array, label);
629 index = 0;
630 }
631 else
632 {
633 int n = VEC_length (tree, tf->dest_array);
634 for (index = 0; index < n; ++index)
635 if (VEC_index (tree, tf->dest_array, index) == label)
636 break;
637 if (index == n)
638 VEC_safe_push (tree, heap, tf->dest_array, label);
639 }
640
641 /* In the case of a GOTO we want to record the destination label,
642 since with a GIMPLE_COND we have easy access to the then/else
643 labels. */
644 new_stmt = stmt;
645 record_in_goto_queue (tf, new_stmt, index, true);
646 }
647
648 /* For any GIMPLE_GOTO or GIMPLE_RETURN, decide whether it leaves a try_finally
649 node, and if so record that fact in the goto queue associated with that
650 try_finally node. */
651
652 static void
653 maybe_record_in_goto_queue (struct leh_state *state, gimple stmt)
654 {
655 struct leh_tf_state *tf = state->tf;
656 treemple new_stmt;
657
658 if (!tf)
659 return;
660
661 switch (gimple_code (stmt))
662 {
663 case GIMPLE_COND:
664 new_stmt.tp = gimple_op_ptr (stmt, 2);
665 record_in_goto_queue_label (tf, new_stmt, gimple_cond_true_label (stmt));
666 new_stmt.tp = gimple_op_ptr (stmt, 3);
667 record_in_goto_queue_label (tf, new_stmt, gimple_cond_false_label (stmt));
668 break;
669 case GIMPLE_GOTO:
670 new_stmt.g = stmt;
671 record_in_goto_queue_label (tf, new_stmt, gimple_goto_dest (stmt));
672 break;
673
674 case GIMPLE_RETURN:
675 tf->may_return = true;
676 new_stmt.g = stmt;
677 record_in_goto_queue (tf, new_stmt, -1, false);
678 break;
679
680 default:
681 gcc_unreachable ();
682 }
683 }
684
685
686 #ifdef ENABLE_CHECKING
687 /* We do not process GIMPLE_SWITCHes for now. As long as the original source
688 was in fact structured, and we've not yet done jump threading, then none
689 of the labels will leave outer GIMPLE_TRY_FINALLY nodes. Verify this. */
690
691 static void
692 verify_norecord_switch_expr (struct leh_state *state, gimple switch_expr)
693 {
694 struct leh_tf_state *tf = state->tf;
695 size_t i, n;
696
697 if (!tf)
698 return;
699
700 n = gimple_switch_num_labels (switch_expr);
701
702 for (i = 0; i < n; ++i)
703 {
704 treemple temp;
705 tree lab = CASE_LABEL (gimple_switch_label (switch_expr, i));
706 temp.t = lab;
707 gcc_assert (!outside_finally_tree (temp, tf->try_finally_expr));
708 }
709 }
710 #else
711 #define verify_norecord_switch_expr(state, switch_expr)
712 #endif
713
714 /* Redirect the return statement recorded in queue node Q to FINLAB.
715 Place in Q->CONT_STMT whatever is needed to finish the return. If MOD
716 is non-null, insert it before the new branch. RETURN_VALUE_P is a cache
717 containing a temporary variable used to manipulate the return value. */
718
719 static void
720 do_return_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod,
721 tree *return_value_p)
722 {
723 tree ret_expr;
724 gimple x;
725
726 /* In the case of a return, the queue node must be a gimple statement. */
727 gcc_assert (!q->is_label);
728
729 ret_expr = gimple_return_retval (q->stmt.g);
730
731 if (ret_expr)
732 {
733 if (!*return_value_p)
734 *return_value_p = ret_expr;
735 else
736 gcc_assert (*return_value_p == ret_expr);
737 q->cont_stmt = q->stmt.g;
738 /* The nasty part about redirecting the return value is that the
739 return value itself is to be computed before the FINALLY block
740 is executed. e.g.
741
742 int x;
743 int foo (void)
744 {
745 x = 0;
746 try {
747 return x;
748 } finally {
749 x++;
750 }
751 }
752
753 should return 0, not 1. Arrange for this to happen by copying
754 the computed return value into a local temporary. This also
755 allows us to redirect multiple return statements through the
756 same destination block; whether this is a net win or not really
757 depends, I guess, but it does make generation of the switch in
758 lower_try_finally_switch easier. */
759
760 if (TREE_CODE (ret_expr) == RESULT_DECL)
761 {
762 if (!*return_value_p)
763 *return_value_p = ret_expr;
764 else
765 gcc_assert (*return_value_p == ret_expr);
766 q->cont_stmt = q->stmt.g;
767 }
768 else
769 gcc_unreachable ();
770 }
771 else
772 /* If we don't return a value, all return statements are the same. */
773 q->cont_stmt = q->stmt.g;
774
775 if (!q->repl_stmt)
776 q->repl_stmt = gimple_seq_alloc ();
777
778 if (mod)
779 gimple_seq_add_seq (&q->repl_stmt, mod);
780
781 x = gimple_build_goto (finlab);
782 gimple_seq_add_stmt (&q->repl_stmt, x);
783 }
784
785 /* Similar, but easier, for GIMPLE_GOTO. */
786
787 static void
788 do_goto_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod,
789 struct leh_tf_state *tf)
790 {
791 gimple x;
792
793 gcc_assert (q->is_label);
794 if (!q->repl_stmt)
795 q->repl_stmt = gimple_seq_alloc ();
796
797 q->cont_stmt = gimple_build_goto (VEC_index (tree, tf->dest_array, q->index));
798
799 if (mod)
800 gimple_seq_add_seq (&q->repl_stmt, mod);
801
802 x = gimple_build_goto (finlab);
803 gimple_seq_add_stmt (&q->repl_stmt, x);
804 }
805
806 /* Emit a standard landing pad sequence into SEQ for REGION. */
807
808 static void
809 emit_post_landing_pad (gimple_seq *seq, eh_region region)
810 {
811 eh_landing_pad lp = region->landing_pads;
812 gimple x;
813
814 if (lp == NULL)
815 lp = gen_eh_landing_pad (region);
816
817 lp->post_landing_pad = create_artificial_label (UNKNOWN_LOCATION);
818 EH_LANDING_PAD_NR (lp->post_landing_pad) = lp->index;
819
820 x = gimple_build_label (lp->post_landing_pad);
821 gimple_seq_add_stmt (seq, x);
822 }
823
824 /* Emit a RESX statement into SEQ for REGION. */
825
826 static void
827 emit_resx (gimple_seq *seq, eh_region region)
828 {
829 gimple x = gimple_build_resx (region->index);
830 gimple_seq_add_stmt (seq, x);
831 if (region->outer)
832 record_stmt_eh_region (region->outer, x);
833 }
834
835 /* Emit an EH_DISPATCH statement into SEQ for REGION. */
836
837 static void
838 emit_eh_dispatch (gimple_seq *seq, eh_region region)
839 {
840 gimple x = gimple_build_eh_dispatch (region->index);
841 gimple_seq_add_stmt (seq, x);
842 }
843
844 /* Note that the current EH region may contain a throw, or a
845 call to a function which itself may contain a throw. */
846
847 static void
848 note_eh_region_may_contain_throw (eh_region region)
849 {
850 while (bitmap_set_bit (eh_region_may_contain_throw_map, region->index))
851 {
852 if (region->type == ERT_MUST_NOT_THROW)
853 break;
854 region = region->outer;
855 if (region == NULL)
856 break;
857 }
858 }
859
860 /* Check if REGION has been marked as containing a throw. If REGION is
861 NULL, this predicate is false. */
862
863 static inline bool
864 eh_region_may_contain_throw (eh_region r)
865 {
866 return r && bitmap_bit_p (eh_region_may_contain_throw_map, r->index);
867 }
868
869 /* We want to transform
870 try { body; } catch { stuff; }
871 to
872 normal_sequence:
873 body;
874 over:
875 eh_sequence:
876 landing_pad:
877 stuff;
878 goto over;
879
880 TP is a GIMPLE_TRY node. REGION is the region whose post_landing_pad
881 should be placed before the second operand, or NULL. OVER is
882 an existing label that should be put at the exit, or NULL. */
883
884 static gimple_seq
885 frob_into_branch_around (gimple tp, eh_region region, tree over)
886 {
887 gimple x;
888 gimple_seq cleanup, result;
889 location_t loc = gimple_location (tp);
890
891 cleanup = gimple_try_cleanup (tp);
892 result = gimple_try_eval (tp);
893
894 if (region)
895 emit_post_landing_pad (&eh_seq, region);
896
897 if (gimple_seq_may_fallthru (cleanup))
898 {
899 if (!over)
900 over = create_artificial_label (loc);
901 x = gimple_build_goto (over);
902 gimple_seq_add_stmt (&cleanup, x);
903 }
904 gimple_seq_add_seq (&eh_seq, cleanup);
905
906 if (over)
907 {
908 x = gimple_build_label (over);
909 gimple_seq_add_stmt (&result, x);
910 }
911 return result;
912 }
913
914 /* A subroutine of lower_try_finally. Duplicate the gimple sequence SEQ.
915 Make sure to record all new labels found. */
916
917 static gimple_seq
918 lower_try_finally_dup_block (gimple_seq seq, struct leh_state *outer_state)
919 {
920 gimple region = NULL;
921 gimple_seq new_seq;
922
923 new_seq = copy_gimple_seq_and_replace_locals (seq);
924
925 if (outer_state->tf)
926 region = outer_state->tf->try_finally_expr;
927 collect_finally_tree_1 (new_seq, region);
928
929 return new_seq;
930 }
931
932 /* A subroutine of lower_try_finally. Create a fallthru label for
933 the given try_finally state. The only tricky bit here is that
934 we have to make sure to record the label in our outer context. */
935
936 static tree
937 lower_try_finally_fallthru_label (struct leh_tf_state *tf)
938 {
939 tree label = tf->fallthru_label;
940 treemple temp;
941
942 if (!label)
943 {
944 label = create_artificial_label (gimple_location (tf->try_finally_expr));
945 tf->fallthru_label = label;
946 if (tf->outer->tf)
947 {
948 temp.t = label;
949 record_in_finally_tree (temp, tf->outer->tf->try_finally_expr);
950 }
951 }
952 return label;
953 }
954
955 /* A subroutine of lower_try_finally. If the eh_protect_cleanup_actions
956 langhook returns non-null, then the language requires that the exception
957 path out of a try_finally be treated specially. To wit: the code within
958 the finally block may not itself throw an exception. We have two choices
959 here. First we can duplicate the finally block and wrap it in a
960 must_not_throw region. Second, we can generate code like
961
962 try {
963 finally_block;
964 } catch {
965 if (fintmp == eh_edge)
966 protect_cleanup_actions;
967 }
968
969 where "fintmp" is the temporary used in the switch statement generation
970 alternative considered below. For the nonce, we always choose the first
971 option.
972
973 THIS_STATE may be null if this is a try-cleanup, not a try-finally. */
974
975 static void
976 honor_protect_cleanup_actions (struct leh_state *outer_state,
977 struct leh_state *this_state,
978 struct leh_tf_state *tf)
979 {
980 tree protect_cleanup_actions;
981 gimple_stmt_iterator gsi;
982 bool finally_may_fallthru;
983 gimple_seq finally;
984 gimple x;
985
986 /* First check for nothing to do. */
987 if (lang_hooks.eh_protect_cleanup_actions == NULL)
988 return;
989 protect_cleanup_actions = lang_hooks.eh_protect_cleanup_actions ();
990 if (protect_cleanup_actions == NULL)
991 return;
992
993 finally = gimple_try_cleanup (tf->top_p);
994 finally_may_fallthru = gimple_seq_may_fallthru (finally);
995
996 /* Duplicate the FINALLY block. Only need to do this for try-finally,
997 and not for cleanups. */
998 if (this_state)
999 finally = lower_try_finally_dup_block (finally, outer_state);
1000
1001 /* If this cleanup consists of a TRY_CATCH_EXPR with TRY_CATCH_IS_CLEANUP
1002 set, the handler of the TRY_CATCH_EXPR is another cleanup which ought
1003 to be in an enclosing scope, but needs to be implemented at this level
1004 to avoid a nesting violation (see wrap_temporary_cleanups in
1005 cp/decl.c). Since it's logically at an outer level, we should call
1006 terminate before we get to it, so strip it away before adding the
1007 MUST_NOT_THROW filter. */
1008 gsi = gsi_start (finally);
1009 x = gsi_stmt (gsi);
1010 if (gimple_code (x) == GIMPLE_TRY
1011 && gimple_try_kind (x) == GIMPLE_TRY_CATCH
1012 && gimple_try_catch_is_cleanup (x))
1013 {
1014 gsi_insert_seq_before (&gsi, gimple_try_eval (x), GSI_SAME_STMT);
1015 gsi_remove (&gsi, false);
1016 }
1017
1018 /* Wrap the block with protect_cleanup_actions as the action. */
1019 x = gimple_build_eh_must_not_throw (protect_cleanup_actions);
1020 x = gimple_build_try (finally, gimple_seq_alloc_with_stmt (x),
1021 GIMPLE_TRY_CATCH);
1022 finally = lower_eh_must_not_throw (outer_state, x);
1023
1024 /* Drop all of this into the exception sequence. */
1025 emit_post_landing_pad (&eh_seq, tf->region);
1026 gimple_seq_add_seq (&eh_seq, finally);
1027 if (finally_may_fallthru)
1028 emit_resx (&eh_seq, tf->region);
1029
1030 /* Having now been handled, EH isn't to be considered with
1031 the rest of the outgoing edges. */
1032 tf->may_throw = false;
1033 }
1034
1035 /* A subroutine of lower_try_finally. We have determined that there is
1036 no fallthru edge out of the finally block. This means that there is
1037 no outgoing edge corresponding to any incoming edge. Restructure the
1038 try_finally node for this special case. */
1039
1040 static void
1041 lower_try_finally_nofallthru (struct leh_state *state,
1042 struct leh_tf_state *tf)
1043 {
1044 tree lab, return_val;
1045 gimple x;
1046 gimple_seq finally;
1047 struct goto_queue_node *q, *qe;
1048
1049 lab = create_artificial_label (gimple_location (tf->try_finally_expr));
1050
1051 /* We expect that tf->top_p is a GIMPLE_TRY. */
1052 finally = gimple_try_cleanup (tf->top_p);
1053 tf->top_p_seq = gimple_try_eval (tf->top_p);
1054
1055 x = gimple_build_label (lab);
1056 gimple_seq_add_stmt (&tf->top_p_seq, x);
1057
1058 return_val = NULL;
1059 q = tf->goto_queue;
1060 qe = q + tf->goto_queue_active;
1061 for (; q < qe; ++q)
1062 if (q->index < 0)
1063 do_return_redirection (q, lab, NULL, &return_val);
1064 else
1065 do_goto_redirection (q, lab, NULL, tf);
1066
1067 replace_goto_queue (tf);
1068
1069 lower_eh_constructs_1 (state, finally);
1070 gimple_seq_add_seq (&tf->top_p_seq, finally);
1071
1072 if (tf->may_throw)
1073 {
1074 emit_post_landing_pad (&eh_seq, tf->region);
1075
1076 x = gimple_build_goto (lab);
1077 gimple_seq_add_stmt (&eh_seq, x);
1078 }
1079 }
1080
1081 /* A subroutine of lower_try_finally. We have determined that there is
1082 exactly one destination of the finally block. Restructure the
1083 try_finally node for this special case. */
1084
1085 static void
1086 lower_try_finally_onedest (struct leh_state *state, struct leh_tf_state *tf)
1087 {
1088 struct goto_queue_node *q, *qe;
1089 gimple x;
1090 gimple_seq finally;
1091 tree finally_label;
1092 location_t loc = gimple_location (tf->try_finally_expr);
1093
1094 finally = gimple_try_cleanup (tf->top_p);
1095 tf->top_p_seq = gimple_try_eval (tf->top_p);
1096
1097 lower_eh_constructs_1 (state, finally);
1098
1099 if (tf->may_throw)
1100 {
1101 /* Only reachable via the exception edge. Add the given label to
1102 the head of the FINALLY block. Append a RESX at the end. */
1103 emit_post_landing_pad (&eh_seq, tf->region);
1104 gimple_seq_add_seq (&eh_seq, finally);
1105 emit_resx (&eh_seq, tf->region);
1106 return;
1107 }
1108
1109 if (tf->may_fallthru)
1110 {
1111 /* Only reachable via the fallthru edge. Do nothing but let
1112 the two blocks run together; we'll fall out the bottom. */
1113 gimple_seq_add_seq (&tf->top_p_seq, finally);
1114 return;
1115 }
1116
1117 finally_label = create_artificial_label (loc);
1118 x = gimple_build_label (finally_label);
1119 gimple_seq_add_stmt (&tf->top_p_seq, x);
1120
1121 gimple_seq_add_seq (&tf->top_p_seq, finally);
1122
1123 q = tf->goto_queue;
1124 qe = q + tf->goto_queue_active;
1125
1126 if (tf->may_return)
1127 {
1128 /* Reachable by return expressions only. Redirect them. */
1129 tree return_val = NULL;
1130 for (; q < qe; ++q)
1131 do_return_redirection (q, finally_label, NULL, &return_val);
1132 replace_goto_queue (tf);
1133 }
1134 else
1135 {
1136 /* Reachable by goto expressions only. Redirect them. */
1137 for (; q < qe; ++q)
1138 do_goto_redirection (q, finally_label, NULL, tf);
1139 replace_goto_queue (tf);
1140
1141 if (VEC_index (tree, tf->dest_array, 0) == tf->fallthru_label)
1142 {
1143 /* Reachable by goto to fallthru label only. Redirect it
1144 to the new label (already created, sadly), and do not
1145 emit the final branch out, or the fallthru label. */
1146 tf->fallthru_label = NULL;
1147 return;
1148 }
1149 }
1150
1151 /* Place the original return/goto to the original destination
1152 immediately after the finally block. */
1153 x = tf->goto_queue[0].cont_stmt;
1154 gimple_seq_add_stmt (&tf->top_p_seq, x);
1155 maybe_record_in_goto_queue (state, x);
1156 }
1157
1158 /* A subroutine of lower_try_finally. There are multiple edges incoming
1159 and outgoing from the finally block. Implement this by duplicating the
1160 finally block for every destination. */
1161
1162 static void
1163 lower_try_finally_copy (struct leh_state *state, struct leh_tf_state *tf)
1164 {
1165 gimple_seq finally;
1166 gimple_seq new_stmt;
1167 gimple_seq seq;
1168 gimple x;
1169 tree tmp;
1170 location_t tf_loc = gimple_location (tf->try_finally_expr);
1171
1172 finally = gimple_try_cleanup (tf->top_p);
1173 tf->top_p_seq = gimple_try_eval (tf->top_p);
1174 new_stmt = NULL;
1175
1176 if (tf->may_fallthru)
1177 {
1178 seq = lower_try_finally_dup_block (finally, state);
1179 lower_eh_constructs_1 (state, seq);
1180 gimple_seq_add_seq (&new_stmt, seq);
1181
1182 tmp = lower_try_finally_fallthru_label (tf);
1183 x = gimple_build_goto (tmp);
1184 gimple_seq_add_stmt (&new_stmt, x);
1185 }
1186
1187 if (tf->may_throw)
1188 {
1189 seq = lower_try_finally_dup_block (finally, state);
1190 lower_eh_constructs_1 (state, seq);
1191
1192 emit_post_landing_pad (&eh_seq, tf->region);
1193 gimple_seq_add_seq (&eh_seq, seq);
1194 emit_resx (&eh_seq, tf->region);
1195 }
1196
1197 if (tf->goto_queue)
1198 {
1199 struct goto_queue_node *q, *qe;
1200 tree return_val = NULL;
1201 int return_index, index;
1202 struct labels_s
1203 {
1204 struct goto_queue_node *q;
1205 tree label;
1206 } *labels;
1207
1208 return_index = VEC_length (tree, tf->dest_array);
1209 labels = XCNEWVEC (struct labels_s, return_index + 1);
1210
1211 q = tf->goto_queue;
1212 qe = q + tf->goto_queue_active;
1213 for (; q < qe; q++)
1214 {
1215 index = q->index < 0 ? return_index : q->index;
1216
1217 if (!labels[index].q)
1218 labels[index].q = q;
1219 }
1220
1221 for (index = 0; index < return_index + 1; index++)
1222 {
1223 tree lab;
1224
1225 q = labels[index].q;
1226 if (! q)
1227 continue;
1228
1229 lab = labels[index].label
1230 = create_artificial_label (tf_loc);
1231
1232 if (index == return_index)
1233 do_return_redirection (q, lab, NULL, &return_val);
1234 else
1235 do_goto_redirection (q, lab, NULL, tf);
1236
1237 x = gimple_build_label (lab);
1238 gimple_seq_add_stmt (&new_stmt, x);
1239
1240 seq = lower_try_finally_dup_block (finally, state);
1241 lower_eh_constructs_1 (state, seq);
1242 gimple_seq_add_seq (&new_stmt, seq);
1243
1244 gimple_seq_add_stmt (&new_stmt, q->cont_stmt);
1245 maybe_record_in_goto_queue (state, q->cont_stmt);
1246 }
1247
1248 for (q = tf->goto_queue; q < qe; q++)
1249 {
1250 tree lab;
1251
1252 index = q->index < 0 ? return_index : q->index;
1253
1254 if (labels[index].q == q)
1255 continue;
1256
1257 lab = labels[index].label;
1258
1259 if (index == return_index)
1260 do_return_redirection (q, lab, NULL, &return_val);
1261 else
1262 do_goto_redirection (q, lab, NULL, tf);
1263 }
1264
1265 replace_goto_queue (tf);
1266 free (labels);
1267 }
1268
1269 /* Link the new stmts in only after running replace_goto_queue,
1270 so as not to process the same goto stmts twice. */
1271 gimple_seq_add_seq (&tf->top_p_seq, new_stmt);
1272 }
1273
1274 /* A subroutine of lower_try_finally. There are multiple edges incoming
1275 and outgoing from the finally block. Implement this by instrumenting
1276 each incoming edge and creating a switch statement at the end of the
1277 finally block that branches to the appropriate destination. */
1278
1279 static void
1280 lower_try_finally_switch (struct leh_state *state, struct leh_tf_state *tf)
1281 {
1282 struct goto_queue_node *q, *qe;
1283 tree return_val = NULL;
1284 tree finally_tmp, finally_label;
1285 int return_index, eh_index, fallthru_index;
1286 int nlabels, ndests, j, last_case_index;
1287 tree last_case;
1288 VEC (tree,heap) *case_label_vec;
1289 gimple_seq switch_body;
1290 gimple x;
1291 tree tmp;
1292 gimple switch_stmt;
1293 gimple_seq finally;
1294 struct pointer_map_t *cont_map = NULL;
1295 /* The location of the TRY_FINALLY stmt. */
1296 location_t tf_loc = gimple_location (tf->try_finally_expr);
1297 /* The location of the finally block. */
1298 location_t finally_loc;
1299
1300 switch_body = gimple_seq_alloc ();
1301
1302 /* Mash the TRY block to the head of the chain. */
1303 finally = gimple_try_cleanup (tf->top_p);
1304 tf->top_p_seq = gimple_try_eval (tf->top_p);
1305
1306 /* The location of the finally is either the last stmt in the finally
1307 block or the location of the TRY_FINALLY itself. */
1308 finally_loc = gimple_seq_last_stmt (tf->top_p_seq) != NULL ?
1309 gimple_location (gimple_seq_last_stmt (tf->top_p_seq))
1310 : tf_loc;
1311
1312 /* Lower the finally block itself. */
1313 lower_eh_constructs_1 (state, finally);
1314
1315 /* Prepare for switch statement generation. */
1316 nlabels = VEC_length (tree, tf->dest_array);
1317 return_index = nlabels;
1318 eh_index = return_index + tf->may_return;
1319 fallthru_index = eh_index + tf->may_throw;
1320 ndests = fallthru_index + tf->may_fallthru;
1321
1322 finally_tmp = create_tmp_var (integer_type_node, "finally_tmp");
1323 finally_label = create_artificial_label (finally_loc);
1324
1325 /* We use VEC_quick_push on case_label_vec throughout this function,
1326 since we know the size in advance and allocate precisely as much
1327 space as needed. */
1328 case_label_vec = VEC_alloc (tree, heap, ndests);
1329 last_case = NULL;
1330 last_case_index = 0;
1331
1332 /* Begin inserting code for getting to the finally block. Things
1333 are done in this order to correspond to the order in which the
1334 code is laid out. */
1335
1336 if (tf->may_fallthru)
1337 {
1338 x = gimple_build_assign (finally_tmp,
1339 build_int_cst (integer_type_node,
1340 fallthru_index));
1341 gimple_seq_add_stmt (&tf->top_p_seq, x);
1342
1343 tmp = build_int_cst (integer_type_node, fallthru_index);
1344 last_case = build_case_label (tmp, NULL,
1345 create_artificial_label (tf_loc));
1346 VEC_quick_push (tree, case_label_vec, last_case);
1347 last_case_index++;
1348
1349 x = gimple_build_label (CASE_LABEL (last_case));
1350 gimple_seq_add_stmt (&switch_body, x);
1351
1352 tmp = lower_try_finally_fallthru_label (tf);
1353 x = gimple_build_goto (tmp);
1354 gimple_seq_add_stmt (&switch_body, x);
1355 }
1356
1357 if (tf->may_throw)
1358 {
1359 emit_post_landing_pad (&eh_seq, tf->region);
1360
1361 x = gimple_build_assign (finally_tmp,
1362 build_int_cst (integer_type_node, eh_index));
1363 gimple_seq_add_stmt (&eh_seq, x);
1364
1365 x = gimple_build_goto (finally_label);
1366 gimple_seq_add_stmt (&eh_seq, x);
1367
1368 tmp = build_int_cst (integer_type_node, eh_index);
1369 last_case = build_case_label (tmp, NULL,
1370 create_artificial_label (tf_loc));
1371 VEC_quick_push (tree, case_label_vec, last_case);
1372 last_case_index++;
1373
1374 x = gimple_build_label (CASE_LABEL (last_case));
1375 gimple_seq_add_stmt (&eh_seq, x);
1376 emit_resx (&eh_seq, tf->region);
1377 }
1378
1379 x = gimple_build_label (finally_label);
1380 gimple_seq_add_stmt (&tf->top_p_seq, x);
1381
1382 gimple_seq_add_seq (&tf->top_p_seq, finally);
1383
1384 /* Redirect each incoming goto edge. */
1385 q = tf->goto_queue;
1386 qe = q + tf->goto_queue_active;
1387 j = last_case_index + tf->may_return;
1388 /* Prepare the assignments to finally_tmp that are executed upon the
1389 entrance through a particular edge. */
1390 for (; q < qe; ++q)
1391 {
1392 gimple_seq mod;
1393 int switch_id;
1394 unsigned int case_index;
1395
1396 mod = gimple_seq_alloc ();
1397
1398 if (q->index < 0)
1399 {
1400 x = gimple_build_assign (finally_tmp,
1401 build_int_cst (integer_type_node,
1402 return_index));
1403 gimple_seq_add_stmt (&mod, x);
1404 do_return_redirection (q, finally_label, mod, &return_val);
1405 switch_id = return_index;
1406 }
1407 else
1408 {
1409 x = gimple_build_assign (finally_tmp,
1410 build_int_cst (integer_type_node, q->index));
1411 gimple_seq_add_stmt (&mod, x);
1412 do_goto_redirection (q, finally_label, mod, tf);
1413 switch_id = q->index;
1414 }
1415
1416 case_index = j + q->index;
1417 if (VEC_length (tree, case_label_vec) <= case_index
1418 || !VEC_index (tree, case_label_vec, case_index))
1419 {
1420 tree case_lab;
1421 void **slot;
1422 tmp = build_int_cst (integer_type_node, switch_id);
1423 case_lab = build_case_label (tmp, NULL,
1424 create_artificial_label (tf_loc));
1425 /* We store the cont_stmt in the pointer map, so that we can recover
1426 it in the loop below. */
1427 if (!cont_map)
1428 cont_map = pointer_map_create ();
1429 slot = pointer_map_insert (cont_map, case_lab);
1430 *slot = q->cont_stmt;
1431 VEC_quick_push (tree, case_label_vec, case_lab);
1432 }
1433 }
1434 for (j = last_case_index; j < last_case_index + nlabels; j++)
1435 {
1436 gimple cont_stmt;
1437 void **slot;
1438
1439 last_case = VEC_index (tree, case_label_vec, j);
1440
1441 gcc_assert (last_case);
1442 gcc_assert (cont_map);
1443
1444 slot = pointer_map_contains (cont_map, last_case);
1445 gcc_assert (slot);
1446 cont_stmt = *(gimple *) slot;
1447
1448 x = gimple_build_label (CASE_LABEL (last_case));
1449 gimple_seq_add_stmt (&switch_body, x);
1450 gimple_seq_add_stmt (&switch_body, cont_stmt);
1451 maybe_record_in_goto_queue (state, cont_stmt);
1452 }
1453 if (cont_map)
1454 pointer_map_destroy (cont_map);
1455
1456 replace_goto_queue (tf);
1457
1458 /* Make sure that the last case is the default label, as one is required.
1459 Then sort the labels, which is also required in GIMPLE. */
1460 CASE_LOW (last_case) = NULL;
1461 sort_case_labels (case_label_vec);
1462
1463 /* Build the switch statement, setting last_case to be the default
1464 label. */
1465 switch_stmt = gimple_build_switch_vec (finally_tmp, last_case,
1466 case_label_vec);
1467 gimple_set_location (switch_stmt, finally_loc);
1468
1469 /* Need to link SWITCH_STMT after running replace_goto_queue
1470 due to not wanting to process the same goto stmts twice. */
1471 gimple_seq_add_stmt (&tf->top_p_seq, switch_stmt);
1472 gimple_seq_add_seq (&tf->top_p_seq, switch_body);
1473 }
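
/* Net effect, as an illustrative sketch: for a finally block with a
   fallthru edge and one goto destination D (so ndests == 2), the
   lowered form is roughly

       finally_tmp = 1; goto finally_label;   <- from the fallthru
       ...
       finally_tmp = 0; goto finally_label;   <- replacing "goto D"
     finally_label:
       <finally block>
       switch (finally_tmp)
         {
         case 0: goto D;
         case 1: goto fallthru_label;
         }

   with one case turned into the default label, as GIMPLE requires.  */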
1474
1475 /* Decide whether or not we are going to duplicate the finally block.
1476 There are several considerations.
1477
1478 First, if this is Java, then the finally block contains code
1479 written by the user. It has line numbers associated with it,
1480 so duplicating the block means it's difficult to set a breakpoint.
1481 Since controlling code generation via -g is verboten, we simply
1482 never duplicate code without optimization.
1483
1484 Second, we'd like to prevent egregious code growth. One way to
1485 do this is to estimate the size of the finally block, multiply
1486 that by the number of copies we'd need to make, and compare against
1487 the estimate of the size of the switch machinery we'd have to add. */
1488
1489 static bool
1490 decide_copy_try_finally (int ndests, gimple_seq finally)
1491 {
1492 int f_estimate, sw_estimate;
1493
1494 if (!optimize)
1495 return false;
1496
1497 /* Finally estimate N times, plus N gotos. */
1498 f_estimate = count_insns_seq (finally, &eni_size_weights);
1499 f_estimate = (f_estimate + 1) * ndests;
1500
1501 /* Switch statement (cost 10), N variable assignments, N gotos. */
1502 sw_estimate = 10 + 2 * ndests;
1503
1504 /* Optimize for size clearly wants our best guess. */
1505 if (optimize_function_for_size_p (cfun))
1506 return f_estimate < sw_estimate;
1507
1508 /* ??? These numbers are completely made up so far. */
1509 if (optimize > 1)
1510 return f_estimate < 100 || f_estimate < sw_estimate * 2;
1511 else
1512 return f_estimate < 40 || f_estimate * 2 < sw_estimate * 3;
1513 }
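
/* Worked example (illustrative): a finally block estimated at 9 insns
   with 3 destinations gives

     f_estimate  = (9 + 1) * 3 = 30
     sw_estimate = 10 + 2 * 3  = 16

   so when optimizing for size the switch wins (30 >= 16), while at
   -O2 the finally block is still copied because f_estimate < 100.  */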
1514
1515 /* REG is the enclosing region for a possible cleanup region, or the region
1516 itself. Returns TRUE if such a region would be unreachable.
1517
1518 Cleanup regions within a must-not-throw region aren't actually reachable
1519 even if there are throwing stmts within them, because the personality
1520 routine will call terminate before unwinding. */
1521
1522 static bool
1523 cleanup_is_dead_in (eh_region reg)
1524 {
1525 while (reg && reg->type == ERT_CLEANUP)
1526 reg = reg->outer;
1527 return (reg && reg->type == ERT_MUST_NOT_THROW);
1528 }
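
/* E.g., in the illustrative region tree

     MUST_NOT_THROW
       CLEANUP          <- cleanup_is_dead_in returns true
     TRY
       CLEANUP          <- cleanup_is_dead_in returns false

   the first cleanup is dead because the personality routine calls
   terminate before any unwinding would run it.  */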
1529
1530 /* A subroutine of lower_eh_constructs_1. Lower a GIMPLE_TRY_FINALLY node
1531 to a sequence of labels and blocks, plus the exception region trees
1532 that record all the magic. This is complicated by the need to
1533 arrange for the FINALLY block to be executed on all exits. */
1534
1535 static gimple_seq
1536 lower_try_finally (struct leh_state *state, gimple tp)
1537 {
1538 struct leh_tf_state this_tf;
1539 struct leh_state this_state;
1540 int ndests;
1541 gimple_seq old_eh_seq;
1542
1543 /* Process the try block. */
1544
1545 memset (&this_tf, 0, sizeof (this_tf));
1546 this_tf.try_finally_expr = tp;
1547 this_tf.top_p = tp;
1548 this_tf.outer = state;
1549 if (using_eh_for_cleanups_p && !cleanup_is_dead_in (state->cur_region))
1550 {
1551 this_tf.region = gen_eh_region_cleanup (state->cur_region);
1552 this_state.cur_region = this_tf.region;
1553 }
1554 else
1555 {
1556 this_tf.region = NULL;
1557 this_state.cur_region = state->cur_region;
1558 }
1559
1560 this_state.ehp_region = state->ehp_region;
1561 this_state.tf = &this_tf;
1562
1563 old_eh_seq = eh_seq;
1564 eh_seq = NULL;
1565
1566 lower_eh_constructs_1 (&this_state, gimple_try_eval (tp));
1567
1568 /* Determine if the try block is escaped through the bottom. */
1569 this_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp));
1570
1571 /* Determine if any exceptions are possible within the try block. */
1572 if (this_tf.region)
1573 this_tf.may_throw = eh_region_may_contain_throw (this_tf.region);
1574 if (this_tf.may_throw)
1575 honor_protect_cleanup_actions (state, &this_state, &this_tf);
1576
1577 /* Determine how many edges (still) reach the finally block. Or rather,
1578 how many destinations are reached by the finally block. Use this to
1579 determine how we process the finally block itself. */
1580
1581 ndests = VEC_length (tree, this_tf.dest_array);
1582 ndests += this_tf.may_fallthru;
1583 ndests += this_tf.may_return;
1584 ndests += this_tf.may_throw;
1585
1586 /* If the FINALLY block is not reachable, dike it out. */
1587 if (ndests == 0)
1588 {
1589 gimple_seq_add_seq (&this_tf.top_p_seq, gimple_try_eval (tp));
1590 gimple_try_set_cleanup (tp, NULL);
1591 }
1592 /* If the finally block doesn't fall through, then any destination
1593 we might try to impose there isn't reached either. There may be
1594 some minor amount of cleanup and redirection still needed. */
1595 else if (!gimple_seq_may_fallthru (gimple_try_cleanup (tp)))
1596 lower_try_finally_nofallthru (state, &this_tf);
1597
1598 /* We can easily special-case redirection to a single destination. */
1599 else if (ndests == 1)
1600 lower_try_finally_onedest (state, &this_tf);
1601 else if (decide_copy_try_finally (ndests, gimple_try_cleanup (tp)))
1602 lower_try_finally_copy (state, &this_tf);
1603 else
1604 lower_try_finally_switch (state, &this_tf);
1605
1606 /* If someone requested we add a label at the end of the transformed
1607 block, do so. */
1608 if (this_tf.fallthru_label)
1609 {
1610 /* This must be reached only if ndests == 0. */
1611 gimple x = gimple_build_label (this_tf.fallthru_label);
1612 gimple_seq_add_stmt (&this_tf.top_p_seq, x);
1613 }
1614
1615 VEC_free (tree, heap, this_tf.dest_array);
1616 free (this_tf.goto_queue);
1617 if (this_tf.goto_queue_map)
1618 pointer_map_destroy (this_tf.goto_queue_map);
1619
1620 /* If there was an old (aka outer) eh_seq, append the current eh_seq.
1621 If there was no old eh_seq, then the append is trivially already done. */
1622 if (old_eh_seq)
1623 {
1624 if (eh_seq == NULL)
1625 eh_seq = old_eh_seq;
1626 else
1627 {
1628 gimple_seq new_eh_seq = eh_seq;
1629 eh_seq = old_eh_seq;
1630 gimple_seq_add_seq (&eh_seq, new_eh_seq);
1631 }
1632 }
1633
1634 return this_tf.top_p_seq;
1635 }
1636
1637 /* A subroutine of lower_eh_constructs_1. Lower a GIMPLE_TRY_CATCH with a
1638 list of GIMPLE_CATCH to a sequence of labels and blocks, plus the
1639 exception region trees that record all the magic. */
1640
1641 static gimple_seq
1642 lower_catch (struct leh_state *state, gimple tp)
1643 {
1644 eh_region try_region = NULL;
1645 struct leh_state this_state = *state;
1646 gimple_stmt_iterator gsi;
1647 tree out_label;
1648 gimple_seq new_seq;
1649 gimple x;
1650 location_t try_catch_loc = gimple_location (tp);
1651
1652 if (flag_exceptions)
1653 {
1654 try_region = gen_eh_region_try (state->cur_region);
1655 this_state.cur_region = try_region;
1656 }
1657
1658 lower_eh_constructs_1 (&this_state, gimple_try_eval (tp));
1659
1660 if (!eh_region_may_contain_throw (try_region))
1661 return gimple_try_eval (tp);
1662
1663 new_seq = NULL;
1664 emit_eh_dispatch (&new_seq, try_region);
1665 emit_resx (&new_seq, try_region);
1666
1667 this_state.cur_region = state->cur_region;
1668 this_state.ehp_region = try_region;
1669
1670 out_label = NULL;
1671 for (gsi = gsi_start (gimple_try_cleanup (tp));
1672 !gsi_end_p (gsi);
1673 gsi_next (&gsi))
1674 {
1675 eh_catch c;
1676 gimple gcatch;
1677 gimple_seq handler;
1678
1679 gcatch = gsi_stmt (gsi);
1680 c = gen_eh_region_catch (try_region, gimple_catch_types (gcatch));
1681
1682 handler = gimple_catch_handler (gcatch);
1683 lower_eh_constructs_1 (&this_state, handler);
1684
1685 c->label = create_artificial_label (UNKNOWN_LOCATION);
1686 x = gimple_build_label (c->label);
1687 gimple_seq_add_stmt (&new_seq, x);
1688
1689 gimple_seq_add_seq (&new_seq, handler);
1690
1691 if (gimple_seq_may_fallthru (new_seq))
1692 {
1693 if (!out_label)
1694 out_label = create_artificial_label (try_catch_loc);
1695
1696 x = gimple_build_goto (out_label);
1697 gimple_seq_add_stmt (&new_seq, x);
1698 }
1699 if (!c->type_list)
1700 break;
1701 }
1702
1703 gimple_try_set_cleanup (tp, new_seq);
1704
1705 return frob_into_branch_around (tp, try_region, out_label);
1706 }
1707
1708 /* A subroutine of lower_eh_constructs_1. Lower a GIMPLE_TRY with a
1709 GIMPLE_EH_FILTER to a sequence of labels and blocks, plus the exception
1710 region trees that record all the magic. */
1711
1712 static gimple_seq
1713 lower_eh_filter (struct leh_state *state, gimple tp)
1714 {
1715 struct leh_state this_state = *state;
1716 eh_region this_region = NULL;
1717 gimple inner, x;
1718 gimple_seq new_seq;
1719
1720 inner = gimple_seq_first_stmt (gimple_try_cleanup (tp));
1721
1722 if (flag_exceptions)
1723 {
1724 this_region = gen_eh_region_allowed (state->cur_region,
1725 gimple_eh_filter_types (inner));
1726 this_state.cur_region = this_region;
1727 }
1728
1729 lower_eh_constructs_1 (&this_state, gimple_try_eval (tp));
1730
1731 if (!eh_region_may_contain_throw (this_region))
1732 return gimple_try_eval (tp);
1733
1734 new_seq = NULL;
1735 this_state.cur_region = state->cur_region;
1736 this_state.ehp_region = this_region;
1737
1738 emit_eh_dispatch (&new_seq, this_region);
1739 emit_resx (&new_seq, this_region);
1740
1741 this_region->u.allowed.label = create_artificial_label (UNKNOWN_LOCATION);
1742 x = gimple_build_label (this_region->u.allowed.label);
1743 gimple_seq_add_stmt (&new_seq, x);
1744
1745 lower_eh_constructs_1 (&this_state, gimple_eh_filter_failure (inner));
1746 gimple_seq_add_seq (&new_seq, gimple_eh_filter_failure (inner));
1747
1748 gimple_try_set_cleanup (tp, new_seq);
1749
1750 return frob_into_branch_around (tp, this_region, NULL);
1751 }
1752
1753 /* A subroutine of lower_eh_constructs_1. Lower a GIMPLE_TRY with
1754 a GIMPLE_EH_MUST_NOT_THROW to a sequence of labels and blocks,
1755 plus the exception region trees that record all the magic. */
1756
1757 static gimple_seq
1758 lower_eh_must_not_throw (struct leh_state *state, gimple tp)
1759 {
1760 struct leh_state this_state = *state;
1761
1762 if (flag_exceptions)
1763 {
1764 gimple inner = gimple_seq_first_stmt (gimple_try_cleanup (tp));
1765 eh_region this_region;
1766
1767 this_region = gen_eh_region_must_not_throw (state->cur_region);
1768 this_region->u.must_not_throw.failure_decl
1769 = gimple_eh_must_not_throw_fndecl (inner);
1770 this_region->u.must_not_throw.failure_loc = gimple_location (tp);
1771
1772 /* In order to get mangling applied to this decl, we must mark it
1773 used now. Otherwise, pass_ipa_free_lang_data won't think it
1774 needs to happen. */
1775 TREE_USED (this_region->u.must_not_throw.failure_decl) = 1;
1776
1777 this_state.cur_region = this_region;
1778 }
1779
1780 lower_eh_constructs_1 (&this_state, gimple_try_eval (tp));
1781
1782 return gimple_try_eval (tp);
1783 }
1784
1785 /* Implement a cleanup expression. This is similar to try-finally,
1786 except that we only execute the cleanup block for exception edges. */
1787
1788 static gimple_seq
1789 lower_cleanup (struct leh_state *state, gimple tp)
1790 {
1791 struct leh_state this_state = *state;
1792 eh_region this_region = NULL;
1793 struct leh_tf_state fake_tf;
1794 gimple_seq result;
1795 bool cleanup_dead = cleanup_is_dead_in (state->cur_region);
1796
1797 if (flag_exceptions && !cleanup_dead)
1798 {
1799 this_region = gen_eh_region_cleanup (state->cur_region);
1800 this_state.cur_region = this_region;
1801 }
1802
1803 lower_eh_constructs_1 (&this_state, gimple_try_eval (tp));
1804
1805 if (cleanup_dead || !eh_region_may_contain_throw (this_region))
1806 return gimple_try_eval (tp);
1807
1808 /* Build enough of a try-finally state so that we can reuse
1809 honor_protect_cleanup_actions. */
1810 memset (&fake_tf, 0, sizeof (fake_tf));
1811 fake_tf.top_p = fake_tf.try_finally_expr = tp;
1812 fake_tf.outer = state;
1813 fake_tf.region = this_region;
1814 fake_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp));
1815 fake_tf.may_throw = true;
1816
1817 honor_protect_cleanup_actions (state, NULL, &fake_tf);
1818
1819 if (fake_tf.may_throw)
1820 {
1821 /* In this case honor_protect_cleanup_actions had nothing to do,
1822 and we should process this normally. */
1823 lower_eh_constructs_1 (state, gimple_try_cleanup (tp));
1824 result = frob_into_branch_around (tp, this_region,
1825 fake_tf.fallthru_label);
1826 }
1827 else
1828 {
1829 /* In this case honor_protect_cleanup_actions did nearly all of
1830 the work. All we have left is to append the fallthru_label. */
1831
1832 result = gimple_try_eval (tp);
1833 if (fake_tf.fallthru_label)
1834 {
1835 gimple x = gimple_build_label (fake_tf.fallthru_label);
1836 gimple_seq_add_stmt (&result, x);
1837 }
1838 }
1839 return result;
1840 }
1841
1842 /* Main loop for lowering eh constructs. Also moves gsi to the next
1843 statement. */
1844
1845 static void
1846 lower_eh_constructs_2 (struct leh_state *state, gimple_stmt_iterator *gsi)
1847 {
1848 gimple_seq replace;
1849 gimple x;
1850 gimple stmt = gsi_stmt (*gsi);
1851
1852 switch (gimple_code (stmt))
1853 {
1854 case GIMPLE_CALL:
1855 {
1856 tree fndecl = gimple_call_fndecl (stmt);
1857 tree rhs, lhs;
1858
1859 if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
1860 switch (DECL_FUNCTION_CODE (fndecl))
1861 {
1862 case BUILT_IN_EH_POINTER:
1863 /* The front end may have generated a call to
1864 __builtin_eh_pointer (0) within a catch region. Replace
1865 this zero argument with the current catch region number. */
1866 if (state->ehp_region)
1867 {
1868 tree nr = build_int_cst (integer_type_node,
1869 state->ehp_region->index);
1870 gimple_call_set_arg (stmt, 0, nr);
1871 }
1872 else
1873 {
1874 /* The user has done something silly. Remove it. */
1875 rhs = null_pointer_node;
1876 goto do_replace;
1877 }
1878 break;
1879
1880 case BUILT_IN_EH_FILTER:
1881 /* ??? This should never appear, but since it's a builtin it
1882 is open to abuse by users. Just remove it and
1883 replace the use with the arbitrary value zero. */
1884 rhs = build_int_cst (TREE_TYPE (TREE_TYPE (fndecl)), 0);
1885 do_replace:
1886 lhs = gimple_call_lhs (stmt);
1887 x = gimple_build_assign (lhs, rhs);
1888 gsi_insert_before (gsi, x, GSI_SAME_STMT);
1889 /* FALLTHRU */
1890
1891 case BUILT_IN_EH_COPY_VALUES:
1892 /* Likewise this should not appear. Remove it. */
1893 gsi_remove (gsi, true);
1894 return;
1895
1896 default:
1897 break;
1898 }
1899 }
1900 /* FALLTHRU */
1901
1902 case GIMPLE_ASSIGN:
1903 /* If the stmt can throw use a new temporary for the assignment
1904 to a LHS. This makes sure the old value of the LHS is
1905 available on the EH edge. Only do so for statements that
1906 potentially fall thru (e.g. no noreturn calls), otherwise
1907 this new assignment might create fake fallthru regions. */
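/* As a sketch, a throwing assignment such as

     lhs = call ();

   becomes

     tmp = call ();   <-- the EH edge leaves here, LHS still intact
     lhs = tmp;

   with TMP standing for the temporary created below.  */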
1908 if (stmt_could_throw_p (stmt)
1909 && gimple_has_lhs (stmt)
1910 && gimple_stmt_may_fallthru (stmt)
1911 && !tree_could_throw_p (gimple_get_lhs (stmt))
1912 && is_gimple_reg_type (TREE_TYPE (gimple_get_lhs (stmt))))
1913 {
1914 tree lhs = gimple_get_lhs (stmt);
1915 tree tmp = create_tmp_var (TREE_TYPE (lhs), NULL);
1916 gimple s = gimple_build_assign (lhs, tmp);
1917 gimple_set_location (s, gimple_location (stmt));
1918 gimple_set_block (s, gimple_block (stmt));
1919 gimple_set_lhs (stmt, tmp);
1920 if (TREE_CODE (TREE_TYPE (tmp)) == COMPLEX_TYPE
1921 || TREE_CODE (TREE_TYPE (tmp)) == VECTOR_TYPE)
1922 DECL_GIMPLE_REG_P (tmp) = 1;
1923 gsi_insert_after (gsi, s, GSI_SAME_STMT);
1924 }
1925 /* Look for things that can throw exceptions, and record them. */
1926 if (state->cur_region && stmt_could_throw_p (stmt))
1927 {
1928 record_stmt_eh_region (state->cur_region, stmt);
1929 note_eh_region_may_contain_throw (state->cur_region);
1930 }
1931 break;
1932
1933 case GIMPLE_COND:
1934 case GIMPLE_GOTO:
1935 case GIMPLE_RETURN:
1936 maybe_record_in_goto_queue (state, stmt);
1937 break;
1938
1939 case GIMPLE_SWITCH:
1940 verify_norecord_switch_expr (state, stmt);
1941 break;
1942
1943 case GIMPLE_TRY:
1944 if (gimple_try_kind (stmt) == GIMPLE_TRY_FINALLY)
1945 replace = lower_try_finally (state, stmt);
1946 else
1947 {
1948 x = gimple_seq_first_stmt (gimple_try_cleanup (stmt));
1949 if (!x)
1950 {
1951 replace = gimple_try_eval (stmt);
1952 lower_eh_constructs_1 (state, replace);
1953 }
1954 else
1955 switch (gimple_code (x))
1956 {
1957 case GIMPLE_CATCH:
1958 replace = lower_catch (state, stmt);
1959 break;
1960 case GIMPLE_EH_FILTER:
1961 replace = lower_eh_filter (state, stmt);
1962 break;
1963 case GIMPLE_EH_MUST_NOT_THROW:
1964 replace = lower_eh_must_not_throw (state, stmt);
1965 break;
1966 default:
1967 replace = lower_cleanup (state, stmt);
1968 break;
1969 }
1970 }
1971
1972 /* Remove the old stmt and insert the transformed sequence
1973 instead. */
1974 gsi_insert_seq_before (gsi, replace, GSI_SAME_STMT);
1975 gsi_remove (gsi, true);
1976
1977 /* Return since we don't want gsi_next (). */
1978 return;
1979
1980 default:
1981 /* A type, a decl, or some kind of statement that we're not
1982 interested in. Don't walk them. */
1983 break;
1984 }
1985
1986 gsi_next (gsi);
1987 }
1988
1989 /* A helper to unwrap a gimple_seq and feed stmts to lower_eh_constructs_2. */
1990
1991 static void
1992 lower_eh_constructs_1 (struct leh_state *state, gimple_seq seq)
1993 {
1994 gimple_stmt_iterator gsi;
1995 for (gsi = gsi_start (seq); !gsi_end_p (gsi);)
1996 lower_eh_constructs_2 (state, &gsi);
1997 }
1998
1999 static unsigned int
2000 lower_eh_constructs (void)
2001 {
2002 struct leh_state null_state;
2003 gimple_seq bodyp;
2004
2005 bodyp = gimple_body (current_function_decl);
2006 if (bodyp == NULL)
2007 return 0;
2008
2009 finally_tree = htab_create (31, struct_ptr_hash, struct_ptr_eq, free);
2010 eh_region_may_contain_throw_map = BITMAP_ALLOC (NULL);
2011 memset (&null_state, 0, sizeof (null_state));
2012
2013 collect_finally_tree_1 (bodyp, NULL);
2014 lower_eh_constructs_1 (&null_state, bodyp);
2015
2016 /* We assume there's a return statement, or something, at the end of
2017 the function, and thus plopping the EH sequence afterward won't
2018 change anything. */
2019 gcc_assert (!gimple_seq_may_fallthru (bodyp));
2020 gimple_seq_add_seq (&bodyp, eh_seq);
2021
2022 /* We assume that since BODYP already existed, adding EH_SEQ to it
2023 didn't change its value, and we don't have to re-set the function. */
2024 gcc_assert (bodyp == gimple_body (current_function_decl));
2025
2026 htab_delete (finally_tree);
2027 BITMAP_FREE (eh_region_may_contain_throw_map);
2028 eh_seq = NULL;
2029
2030 /* If this function needs a language specific EH personality routine
2031 and the frontend didn't already set one do so now. */
2032 if (function_needs_eh_personality (cfun) == eh_personality_lang
2033 && !DECL_FUNCTION_PERSONALITY (current_function_decl))
2034 DECL_FUNCTION_PERSONALITY (current_function_decl)
2035 = lang_hooks.eh_personality ();
2036
2037 return 0;
2038 }
2039
2040 struct gimple_opt_pass pass_lower_eh =
2041 {
2042 {
2043 GIMPLE_PASS,
2044 "eh", /* name */
2045 NULL, /* gate */
2046 lower_eh_constructs, /* execute */
2047 NULL, /* sub */
2048 NULL, /* next */
2049 0, /* static_pass_number */
2050 TV_TREE_EH, /* tv_id */
2051 PROP_gimple_lcf, /* properties_required */
2052 PROP_gimple_leh, /* properties_provided */
2053 0, /* properties_destroyed */
2054 0, /* todo_flags_start */
2055 0 /* todo_flags_finish */
2056 }
2057 };
2058 \f
2059 /* Create the multiple edges from an EH_DISPATCH statement to all of
2060 the possible handlers for its EH region. Return true if there is
2061 a fallthru edge; false if there is none. */
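/* E.g. for a C++ try { ... } catch (A &) { ... } catch (...) { ... }
   the dispatch gets one edge per handler; the trailing catch-all
   guarantees no exception runs past the dispatch, so we return false
   and the caller creates no fallthru edge (an illustrative sketch).  */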
2062
2063 bool
2064 make_eh_dispatch_edges (gimple stmt)
2065 {
2066 eh_region r;
2067 eh_catch c;
2068 basic_block src, dst;
2069
2070 r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
2071 src = gimple_bb (stmt);
2072
2073 switch (r->type)
2074 {
2075 case ERT_TRY:
2076 for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
2077 {
2078 dst = label_to_block (c->label);
2079 make_edge (src, dst, 0);
2080
2081 /* A catch-all handler doesn't have a fallthru. */
2082 if (c->type_list == NULL)
2083 return false;
2084 }
2085 break;
2086
2087 case ERT_ALLOWED_EXCEPTIONS:
2088 dst = label_to_block (r->u.allowed.label);
2089 make_edge (src, dst, 0);
2090 break;
2091
2092 default:
2093 gcc_unreachable ();
2094 }
2095
2096 return true;
2097 }
2098
2099 /* Create the single EH edge from STMT to its nearest landing pad,
2100 if there is such a landing pad within the current function. */
2101
2102 void
2103 make_eh_edges (gimple stmt)
2104 {
2105 basic_block src, dst;
2106 eh_landing_pad lp;
2107 int lp_nr;
2108
2109 lp_nr = lookup_stmt_eh_lp (stmt);
2110 if (lp_nr <= 0)
2111 return;
2112
2113 lp = get_eh_landing_pad_from_number (lp_nr);
2114 gcc_assert (lp != NULL);
2115
2116 src = gimple_bb (stmt);
2117 dst = label_to_block (lp->post_landing_pad);
2118 make_edge (src, dst, EDGE_EH);
2119 }
2120
2121 /* Do the work in redirecting EDGE_IN to NEW_BB within the EH region tree;
2122 do not actually perform the final edge redirection.
2123
2124 CHANGE_REGION is true when we're being called from cleanup_empty_eh and
2125 we intend to change the destination EH region as well; this means
2126 EH_LANDING_PAD_NR must already be set on the destination block label.
2127 If false, we're being called from generic cfg manipulation code and we
2128 should preserve our place within the region tree. */
2129
2130 static void
2131 redirect_eh_edge_1 (edge edge_in, basic_block new_bb, bool change_region)
2132 {
2133 eh_landing_pad old_lp, new_lp;
2134 basic_block old_bb;
2135 gimple throw_stmt;
2136 int old_lp_nr, new_lp_nr;
2137 tree old_label, new_label;
2138 edge_iterator ei;
2139 edge e;
2140
2141 old_bb = edge_in->dest;
2142 old_label = gimple_block_label (old_bb);
2143 old_lp_nr = EH_LANDING_PAD_NR (old_label);
2144 gcc_assert (old_lp_nr > 0);
2145 old_lp = get_eh_landing_pad_from_number (old_lp_nr);
2146
2147 throw_stmt = last_stmt (edge_in->src);
2148 gcc_assert (lookup_stmt_eh_lp (throw_stmt) == old_lp_nr);
2149
2150 new_label = gimple_block_label (new_bb);
2151
2152 /* Look for an existing region that might be using NEW_BB already. */
2153 new_lp_nr = EH_LANDING_PAD_NR (new_label);
2154 if (new_lp_nr)
2155 {
2156 new_lp = get_eh_landing_pad_from_number (new_lp_nr);
2157 gcc_assert (new_lp);
2158
2159 /* Unless CHANGE_REGION is true, the new and old landing pad
2160 had better be associated with the same EH region. */
2161 gcc_assert (change_region || new_lp->region == old_lp->region);
2162 }
2163 else
2164 {
2165 new_lp = NULL;
2166 gcc_assert (!change_region);
2167 }
2168
2169 /* Notice when we redirect the last EH edge away from OLD_BB. */
2170 FOR_EACH_EDGE (e, ei, old_bb->preds)
2171 if (e != edge_in && (e->flags & EDGE_EH))
2172 break;
2173
2174 if (new_lp)
2175 {
2176 /* NEW_LP already exists. If there are still edges into OLD_LP,
2177 there's nothing to do with the EH tree. If there are no more
2178 edges into OLD_LP, then we want to remove OLD_LP as it is unused.
2179 If CHANGE_REGION is true, then our caller is expecting to remove
2180 the landing pad. */
2181 if (e == NULL && !change_region)
2182 remove_eh_landing_pad (old_lp);
2183 }
2184 else
2185 {
2186 /* No correct landing pad exists. If there are no more edges
2187 into OLD_LP, then we can simply re-use the existing landing pad.
2188 Otherwise, we have to create a new landing pad. */
2189 if (e == NULL)
2190 {
2191 EH_LANDING_PAD_NR (old_lp->post_landing_pad) = 0;
2192 new_lp = old_lp;
2193 }
2194 else
2195 new_lp = gen_eh_landing_pad (old_lp->region);
2196 new_lp->post_landing_pad = new_label;
2197 EH_LANDING_PAD_NR (new_label) = new_lp->index;
2198 }
2199
2200 /* Maybe move the throwing statement to the new region. */
2201 if (old_lp != new_lp)
2202 {
2203 remove_stmt_from_eh_lp (throw_stmt);
2204 add_stmt_to_eh_lp (throw_stmt, new_lp->index);
2205 }
2206 }
2207
2208 /* Redirect EH edge EDGE_IN to NEW_BB. */
2209
2210 edge
2211 redirect_eh_edge (edge edge_in, basic_block new_bb)
2212 {
2213 redirect_eh_edge_1 (edge_in, new_bb, false);
2214 return ssa_redirect_edge (edge_in, new_bb);
2215 }
2216
2217 /* This is a subroutine of gimple_redirect_edge_and_branch. Update the
2218 labels for redirecting a non-fallthru EH_DISPATCH edge E to NEW_BB.
2219 The actual edge update will happen in the caller. */
2220
2221 void
2222 redirect_eh_dispatch_edge (gimple stmt, edge e, basic_block new_bb)
2223 {
2224 tree new_lab = gimple_block_label (new_bb);
2225 bool any_changed = false;
2226 basic_block old_bb;
2227 eh_region r;
2228 eh_catch c;
2229
2230 r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
2231 switch (r->type)
2232 {
2233 case ERT_TRY:
2234 for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
2235 {
2236 old_bb = label_to_block (c->label);
2237 if (old_bb == e->dest)
2238 {
2239 c->label = new_lab;
2240 any_changed = true;
2241 }
2242 }
2243 break;
2244
2245 case ERT_ALLOWED_EXCEPTIONS:
2246 old_bb = label_to_block (r->u.allowed.label);
2247 gcc_assert (old_bb == e->dest);
2248 r->u.allowed.label = new_lab;
2249 any_changed = true;
2250 break;
2251
2252 default:
2253 gcc_unreachable ();
2254 }
2255
2256 gcc_assert (any_changed);
2257 }
2258 \f
2259 /* Helper function for operation_could_trap_p and stmt_could_throw_p. */
2260
2261 bool
2262 operation_could_trap_helper_p (enum tree_code op,
2263 bool fp_operation,
2264 bool honor_trapv,
2265 bool honor_nans,
2266 bool honor_snans,
2267 tree divisor,
2268 bool *handled)
2269 {
2270 *handled = true;
2271 switch (op)
2272 {
2273 case TRUNC_DIV_EXPR:
2274 case CEIL_DIV_EXPR:
2275 case FLOOR_DIV_EXPR:
2276 case ROUND_DIV_EXPR:
2277 case EXACT_DIV_EXPR:
2278 case CEIL_MOD_EXPR:
2279 case FLOOR_MOD_EXPR:
2280 case ROUND_MOD_EXPR:
2281 case TRUNC_MOD_EXPR:
2282 case RDIV_EXPR:
2283 if (honor_snans || honor_trapv)
2284 return true;
2285 if (fp_operation)
2286 return flag_trapping_math;
2287 if (!TREE_CONSTANT (divisor) || integer_zerop (divisor))
2288 return true;
2289 return false;
2290
2291 case LT_EXPR:
2292 case LE_EXPR:
2293 case GT_EXPR:
2294 case GE_EXPR:
2295 case LTGT_EXPR:
2296 /* Some floating point comparisons may trap. */
2297 return honor_nans;
2298
2299 case EQ_EXPR:
2300 case NE_EXPR:
2301 case UNORDERED_EXPR:
2302 case ORDERED_EXPR:
2303 case UNLT_EXPR:
2304 case UNLE_EXPR:
2305 case UNGT_EXPR:
2306 case UNGE_EXPR:
2307 case UNEQ_EXPR:
2308 return honor_snans;
2309
2310 case CONVERT_EXPR:
2311 case FIX_TRUNC_EXPR:
2312 /* Conversion of floating point might trap. */
2313 return honor_nans;
2314
2315 case NEGATE_EXPR:
2316 case ABS_EXPR:
2317 case CONJ_EXPR:
2318 /* These operations don't trap with floating point. */
2319 if (honor_trapv)
2320 return true;
2321 return false;
2322
2323 case PLUS_EXPR:
2324 case MINUS_EXPR:
2325 case MULT_EXPR:
2326 /* Any floating arithmetic may trap. */
2327 if (fp_operation && flag_trapping_math)
2328 return true;
2329 if (honor_trapv)
2330 return true;
2331 return false;
2332
2333 case COMPLEX_EXPR:
2334 case CONSTRUCTOR:
2335 /* Constructing an object cannot trap. */
2336 return false;
2337
2338 default:
2339 /* Any floating arithmetic may trap. */
2340 if (fp_operation && flag_trapping_math)
2341 return true;
2342
2343 *handled = false;
2344 return false;
2345 }
2346 }
2347
2348 /* Return true if operation OP may trap. FP_OPERATION is true if OP is applied
2349 on floating-point values. HONOR_TRAPV is true if OP is applied on integer
2350 type operands that may trap. If OP is a division operator, DIVISOR contains
2351 the value of the divisor. */
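/* For example, with the default -ftrapping-math a floating-point
   division may trap, while plain integer addition without -ftrapv
   cannot; illustrative calls (exact answers depend on the flags):

     operation_could_trap_p (RDIV_EXPR, true, false, div)         -> true
     operation_could_trap_p (PLUS_EXPR, false, false, NULL_TREE)  -> false
*/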
2352
2353 bool
2354 operation_could_trap_p (enum tree_code op, bool fp_operation, bool honor_trapv,
2355 tree divisor)
2356 {
2357 bool honor_nans = (fp_operation && flag_trapping_math
2358 && !flag_finite_math_only);
2359 bool honor_snans = fp_operation && flag_signaling_nans != 0;
2360 bool handled;
2361
2362 if (TREE_CODE_CLASS (op) != tcc_comparison
2363 && TREE_CODE_CLASS (op) != tcc_unary
2364 && TREE_CODE_CLASS (op) != tcc_binary)
2365 return false;
2366
2367 return operation_could_trap_helper_p (op, fp_operation, honor_trapv,
2368 honor_nans, honor_snans, divisor,
2369 &handled);
2370 }
2371
2372 /* Return true if EXPR can trap, as in dereferencing an invalid pointer
2373 location or floating point arithmetic. Cf. the RTL version, may_trap_p.
2374 This routine expects only GIMPLE lhs or rhs input. */
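/* For instance, an INDIRECT_REF or MEM_REF through an arbitrary
   pointer is considered trapping unless TREE_THIS_NOTRAP has been
   set on it, whereas a reference to an ordinary non-weak VAR_DECL
   never is (a rough summary of the cases below).  */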
2375
2376 bool
2377 tree_could_trap_p (tree expr)
2378 {
2379 enum tree_code code;
2380 bool fp_operation = false;
2381 bool honor_trapv = false;
2382 tree t, base, div = NULL_TREE;
2383
2384 if (!expr)
2385 return false;
2386
2387 code = TREE_CODE (expr);
2388 t = TREE_TYPE (expr);
2389
2390 if (t)
2391 {
2392 if (COMPARISON_CLASS_P (expr))
2393 fp_operation = FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 0)));
2394 else
2395 fp_operation = FLOAT_TYPE_P (t);
2396 honor_trapv = INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t);
2397 }
2398
2399 if (TREE_CODE_CLASS (code) == tcc_binary)
2400 div = TREE_OPERAND (expr, 1);
2401 if (operation_could_trap_p (code, fp_operation, honor_trapv, div))
2402 return true;
2403
2404 restart:
2405 switch (code)
2406 {
2407 case TARGET_MEM_REF:
2408 if (TREE_CODE (TMR_BASE (expr)) == ADDR_EXPR
2409 && !TMR_INDEX (expr) && !TMR_INDEX2 (expr))
2410 return false;
2411 return !TREE_THIS_NOTRAP (expr);
2412
2413 case COMPONENT_REF:
2414 case REALPART_EXPR:
2415 case IMAGPART_EXPR:
2416 case BIT_FIELD_REF:
2417 case VIEW_CONVERT_EXPR:
2418 case WITH_SIZE_EXPR:
2419 expr = TREE_OPERAND (expr, 0);
2420 code = TREE_CODE (expr);
2421 goto restart;
2422
2423 case ARRAY_RANGE_REF:
2424 base = TREE_OPERAND (expr, 0);
2425 if (tree_could_trap_p (base))
2426 return true;
2427 if (TREE_THIS_NOTRAP (expr))
2428 return false;
2429 return !range_in_array_bounds_p (expr);
2430
2431 case ARRAY_REF:
2432 base = TREE_OPERAND (expr, 0);
2433 if (tree_could_trap_p (base))
2434 return true;
2435 if (TREE_THIS_NOTRAP (expr))
2436 return false;
2437 return !in_array_bounds_p (expr);
2438
2439 case MEM_REF:
2440 if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR)
2441 return false;
2442 /* Fallthru. */
2443 case INDIRECT_REF:
2444 return !TREE_THIS_NOTRAP (expr);
2445
2446 case ASM_EXPR:
2447 return TREE_THIS_VOLATILE (expr);
2448
2449 case CALL_EXPR:
2450 t = get_callee_fndecl (expr);
2451 /* Assume that calls to weak functions may trap. */
2452 if (!t || !DECL_P (t))
2453 return true;
2454 if (DECL_WEAK (t))
2455 return tree_could_trap_p (t);
2456 return false;
2457
2458 case FUNCTION_DECL:
2459 /* Assume that accesses to weak functions may trap, unless we know
2460 they are certainly defined in the current TU or in some other
2461 LTO partition. */
2462 if (DECL_WEAK (expr))
2463 {
2464 struct cgraph_node *node;
2465 if (!DECL_EXTERNAL (expr))
2466 return false;
2467 node = cgraph_function_node (cgraph_get_node (expr), NULL);
2468 if (node && node->in_other_partition)
2469 return false;
2470 return true;
2471 }
2472 return false;
2473
2474 case VAR_DECL:
2475 /* Assume that accesses to weak vars may trap, unless we know
2476 they are certainly defined in the current TU or in some other
2477 LTO partition. */
2478 if (DECL_WEAK (expr))
2479 {
2480 struct varpool_node *node;
2481 if (!DECL_EXTERNAL (expr))
2482 return false;
2483 node = varpool_variable_node (varpool_get_node (expr), NULL);
2484 if (node && node->in_other_partition)
2485 return false;
2486 return true;
2487 }
2488 return false;
2489
2490 default:
2491 return false;
2492 }
2493 }
2494
2495
2496 /* Helper for stmt_could_throw_p. Return true if STMT (assumed to be
2497 an assignment or a conditional) may throw. */
2498
2499 static bool
2500 stmt_could_throw_1_p (gimple stmt)
2501 {
2502 enum tree_code code = gimple_expr_code (stmt);
2503 bool honor_nans = false;
2504 bool honor_snans = false;
2505 bool fp_operation = false;
2506 bool honor_trapv = false;
2507 tree t;
2508 size_t i;
2509 bool handled, ret;
2510
2511 if (TREE_CODE_CLASS (code) == tcc_comparison
2512 || TREE_CODE_CLASS (code) == tcc_unary
2513 || TREE_CODE_CLASS (code) == tcc_binary)
2514 {
2515 t = gimple_expr_type (stmt);
2516 fp_operation = FLOAT_TYPE_P (t);
2517 if (fp_operation)
2518 {
2519 honor_nans = flag_trapping_math && !flag_finite_math_only;
2520 honor_snans = flag_signaling_nans != 0;
2521 }
2522 else if (INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t))
2523 honor_trapv = true;
2524 }
2525
2526 /* Check if the main expression may trap. */
2527 t = is_gimple_assign (stmt) ? gimple_assign_rhs2 (stmt) : NULL;
2528 ret = operation_could_trap_helper_p (code, fp_operation, honor_trapv,
2529 honor_nans, honor_snans, t,
2530 &handled);
2531 if (handled)
2532 return ret;
2533
2534 /* If the expression does not trap, see if any of the individual operands may
2535 trap. */
2536 for (i = 0; i < gimple_num_ops (stmt); i++)
2537 if (tree_could_trap_p (gimple_op (stmt, i)))
2538 return true;
2539
2540 return false;
2541 }
2542
2543
2544 /* Return true if statement STMT could throw an exception. */
2545
2546 bool
2547 stmt_could_throw_p (gimple stmt)
2548 {
2549 if (!flag_exceptions)
2550 return false;
2551
2552 /* The only statements that can throw an exception are assignments,
2553 conditionals, calls, resx, and asms. */
2554 switch (gimple_code (stmt))
2555 {
2556 case GIMPLE_RESX:
2557 return true;
2558
2559 case GIMPLE_CALL:
2560 return !gimple_call_nothrow_p (stmt);
2561
2562 case GIMPLE_ASSIGN:
2563 case GIMPLE_COND:
2564 if (!cfun->can_throw_non_call_exceptions)
2565 return false;
2566 return stmt_could_throw_1_p (stmt);
2567
2568 case GIMPLE_ASM:
2569 if (!cfun->can_throw_non_call_exceptions)
2570 return false;
2571 return gimple_asm_volatile_p (stmt);
2572
2573 default:
2574 return false;
2575 }
2576 }
2577
2578
2579 /* Return true if expression T could throw an exception. */
2580
2581 bool
2582 tree_could_throw_p (tree t)
2583 {
2584 if (!flag_exceptions)
2585 return false;
2586 if (TREE_CODE (t) == MODIFY_EXPR)
2587 {
2588 if (cfun->can_throw_non_call_exceptions
2589 && tree_could_trap_p (TREE_OPERAND (t, 0)))
2590 return true;
2591 t = TREE_OPERAND (t, 1);
2592 }
2593
2594 if (TREE_CODE (t) == WITH_SIZE_EXPR)
2595 t = TREE_OPERAND (t, 0);
2596 if (TREE_CODE (t) == CALL_EXPR)
2597 return (call_expr_flags (t) & ECF_NOTHROW) == 0;
2598 if (cfun->can_throw_non_call_exceptions)
2599 return tree_could_trap_p (t);
2600 return false;
2601 }
2602
2603 /* Return true if STMT can throw an exception that is not caught within
2604 the current function (CFUN). */
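/* (Landing-pad numbers, as used here and below: zero means the
   exception escapes the function, a positive number names a real
   landing pad, and a negative number denotes a MUST_NOT_THROW
   region.)  */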
2605
2606 bool
2607 stmt_can_throw_external (gimple stmt)
2608 {
2609 int lp_nr;
2610
2611 if (!stmt_could_throw_p (stmt))
2612 return false;
2613
2614 lp_nr = lookup_stmt_eh_lp (stmt);
2615 return lp_nr == 0;
2616 }
2617
2618 /* Return true if STMT can throw an exception that is caught within
2619 the current function (CFUN). */
2620
2621 bool
2622 stmt_can_throw_internal (gimple stmt)
2623 {
2624 int lp_nr;
2625
2626 if (!stmt_could_throw_p (stmt))
2627 return false;
2628
2629 lp_nr = lookup_stmt_eh_lp (stmt);
2630 return lp_nr > 0;
2631 }
2632
2633 /* Given a statement STMT in IFUN, if STMT can no longer throw, then
2634 remove any entry it might have from the EH table. Return true if
2635 any change was made. */
2636
2637 bool
2638 maybe_clean_eh_stmt_fn (struct function *ifun, gimple stmt)
2639 {
2640 if (stmt_could_throw_p (stmt))
2641 return false;
2642 return remove_stmt_from_eh_lp_fn (ifun, stmt);
2643 }
2644
2645 /* Likewise, but always use the current function. */
2646
2647 bool
2648 maybe_clean_eh_stmt (gimple stmt)
2649 {
2650 return maybe_clean_eh_stmt_fn (cfun, stmt);
2651 }
2652
2653 /* Given a statement OLD_STMT and a new statement NEW_STMT that has replaced
2654 OLD_STMT in the function, remove OLD_STMT from the EH table and put NEW_STMT
2655 in the table if it should be in there. Return TRUE if a replacement was
2656 done that may require an EH edge purge. */
2657
2658 bool
2659 maybe_clean_or_replace_eh_stmt (gimple old_stmt, gimple new_stmt)
2660 {
2661 int lp_nr = lookup_stmt_eh_lp (old_stmt);
2662
2663 if (lp_nr != 0)
2664 {
2665 bool new_stmt_could_throw = stmt_could_throw_p (new_stmt);
2666
2667 if (new_stmt == old_stmt && new_stmt_could_throw)
2668 return false;
2669
2670 remove_stmt_from_eh_lp (old_stmt);
2671 if (new_stmt_could_throw)
2672 {
2673 add_stmt_to_eh_lp (new_stmt, lp_nr);
2674 return false;
2675 }
2676 else
2677 return true;
2678 }
2679
2680 return false;
2681 }
2682
2683 /* Given a statement OLD_STMT in OLD_FUN and a duplicate statement NEW_STMT
2684 in NEW_FUN, copy the EH table data from OLD_STMT to NEW_STMT. The MAP
2685 operand is the return value of duplicate_eh_regions. */
2686
2687 bool
2688 maybe_duplicate_eh_stmt_fn (struct function *new_fun, gimple new_stmt,
2689 struct function *old_fun, gimple old_stmt,
2690 struct pointer_map_t *map, int default_lp_nr)
2691 {
2692 int old_lp_nr, new_lp_nr;
2693 void **slot;
2694
2695 if (!stmt_could_throw_p (new_stmt))
2696 return false;
2697
2698 old_lp_nr = lookup_stmt_eh_lp_fn (old_fun, old_stmt);
2699 if (old_lp_nr == 0)
2700 {
2701 if (default_lp_nr == 0)
2702 return false;
2703 new_lp_nr = default_lp_nr;
2704 }
2705 else if (old_lp_nr > 0)
2706 {
2707 eh_landing_pad old_lp, new_lp;
2708
2709 old_lp = VEC_index (eh_landing_pad, old_fun->eh->lp_array, old_lp_nr);
2710 slot = pointer_map_contains (map, old_lp);
2711 new_lp = (eh_landing_pad) *slot;
2712 new_lp_nr = new_lp->index;
2713 }
2714 else
2715 {
2716 eh_region old_r, new_r;
2717
2718 old_r = VEC_index (eh_region, old_fun->eh->region_array, -old_lp_nr);
2719 slot = pointer_map_contains (map, old_r);
2720 new_r = (eh_region) *slot;
2721 new_lp_nr = -new_r->index;
2722 }
2723
2724 add_stmt_to_eh_lp_fn (new_fun, new_stmt, new_lp_nr);
2725 return true;
2726 }
2727
2728 /* Similar, but both OLD_STMT and NEW_STMT are within the current function,
2729 and thus no remapping is required. */
2730
2731 bool
2732 maybe_duplicate_eh_stmt (gimple new_stmt, gimple old_stmt)
2733 {
2734 int lp_nr;
2735
2736 if (!stmt_could_throw_p (new_stmt))
2737 return false;
2738
2739 lp_nr = lookup_stmt_eh_lp (old_stmt);
2740 if (lp_nr == 0)
2741 return false;
2742
2743 add_stmt_to_eh_lp (new_stmt, lp_nr);
2744 return true;
2745 }
2746 \f
2747 /* Returns TRUE if oneh and twoh are exception handlers (gimple_try_cleanup of
2748 GIMPLE_TRY) that are similar enough to be considered the same. Currently
2749 this only handles handlers consisting of a single call, as that's the
2750 important case for C++: a destructor call for a particular object showing
2751 up in multiple handlers. */
2752
2753 static bool
2754 same_handler_p (gimple_seq oneh, gimple_seq twoh)
2755 {
2756 gimple_stmt_iterator gsi;
2757 gimple ones, twos;
2758 unsigned int ai;
2759
2760 gsi = gsi_start (oneh);
2761 if (!gsi_one_before_end_p (gsi))
2762 return false;
2763 ones = gsi_stmt (gsi);
2764
2765 gsi = gsi_start (twoh);
2766 if (!gsi_one_before_end_p (gsi))
2767 return false;
2768 twos = gsi_stmt (gsi);
2769
2770 if (!is_gimple_call (ones)
2771 || !is_gimple_call (twos)
2772 || gimple_call_lhs (ones)
2773 || gimple_call_lhs (twos)
2774 || gimple_call_chain (ones)
2775 || gimple_call_chain (twos)
2776 || !gimple_call_same_target_p (ones, twos)
2777 || gimple_call_num_args (ones) != gimple_call_num_args (twos))
2778 return false;
2779
2780 for (ai = 0; ai < gimple_call_num_args (ones); ++ai)
2781 if (!operand_equal_p (gimple_call_arg (ones, ai),
2782 gimple_call_arg (twos, ai), 0))
2783 return false;
2784
2785 return true;
2786 }
2787
2788 /* Optimize
2789 try { A() } finally { try { ~B() } catch { ~A() } }
2790 try { ... } finally { ~A() }
2791 into
2792 try { A() } catch { ~B() }
2793 try { ~B() ... } finally { ~A() }
2794
2795 This occurs frequently in C++, where A is a local variable and B is a
2796 temporary used in the initializer for A. */
2797
2798 static void
2799 optimize_double_finally (gimple one, gimple two)
2800 {
2801 gimple oneh;
2802 gimple_stmt_iterator gsi;
2803
2804 gsi = gsi_start (gimple_try_cleanup (one));
2805 if (!gsi_one_before_end_p (gsi))
2806 return;
2807
2808 oneh = gsi_stmt (gsi);
2809 if (gimple_code (oneh) != GIMPLE_TRY
2810 || gimple_try_kind (oneh) != GIMPLE_TRY_CATCH)
2811 return;
2812
2813 if (same_handler_p (gimple_try_cleanup (oneh), gimple_try_cleanup (two)))
2814 {
2815 gimple_seq seq = gimple_try_eval (oneh);
2816
2817 gimple_try_set_cleanup (one, seq);
2818 gimple_try_set_kind (one, GIMPLE_TRY_CATCH);
2819 seq = copy_gimple_seq_and_replace_locals (seq);
2820 gimple_seq_add_seq (&seq, gimple_try_eval (two));
2821 gimple_try_set_eval (two, seq);
2822 }
2823 }
2824
2825 /* Perform EH refactoring optimizations that are simpler to do when code
2826 flow has been lowered but EH structures haven't. */
2827
2828 static void
2829 refactor_eh_r (gimple_seq seq)
2830 {
2831 gimple_stmt_iterator gsi;
2832 gimple one, two;
2833
2834 one = NULL;
2835 two = NULL;
2836 gsi = gsi_start (seq);
2837 while (1)
2838 {
2839 one = two;
2840 if (gsi_end_p (gsi))
2841 two = NULL;
2842 else
2843 two = gsi_stmt (gsi);
2844 if (one
2845 && two
2846 && gimple_code (one) == GIMPLE_TRY
2847 && gimple_code (two) == GIMPLE_TRY
2848 && gimple_try_kind (one) == GIMPLE_TRY_FINALLY
2849 && gimple_try_kind (two) == GIMPLE_TRY_FINALLY)
2850 optimize_double_finally (one, two);
2851 if (one)
2852 switch (gimple_code (one))
2853 {
2854 case GIMPLE_TRY:
2855 refactor_eh_r (gimple_try_eval (one));
2856 refactor_eh_r (gimple_try_cleanup (one));
2857 break;
2858 case GIMPLE_CATCH:
2859 refactor_eh_r (gimple_catch_handler (one));
2860 break;
2861 case GIMPLE_EH_FILTER:
2862 refactor_eh_r (gimple_eh_filter_failure (one));
2863 break;
2864 default:
2865 break;
2866 }
2867 if (two)
2868 gsi_next (&gsi);
2869 else
2870 break;
2871 }
2872 }
2873
2874 static unsigned
2875 refactor_eh (void)
2876 {
2877 refactor_eh_r (gimple_body (current_function_decl));
2878 return 0;
2879 }
2880
2881 static bool
2882 gate_refactor_eh (void)
2883 {
2884 return flag_exceptions != 0;
2885 }
2886
2887 struct gimple_opt_pass pass_refactor_eh =
2888 {
2889 {
2890 GIMPLE_PASS,
2891 "ehopt", /* name */
2892 gate_refactor_eh, /* gate */
2893 refactor_eh, /* execute */
2894 NULL, /* sub */
2895 NULL, /* next */
2896 0, /* static_pass_number */
2897 TV_TREE_EH, /* tv_id */
2898 PROP_gimple_lcf, /* properties_required */
2899 0, /* properties_provided */
2900 0, /* properties_destroyed */
2901 0, /* todo_flags_start */
2902 0 /* todo_flags_finish */
2903 }
2904 };
2905 \f
2906 /* At the end of gimple optimization, we can lower RESX. */
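/* As a rough sketch (illustrative GIMPLE, names invented), a
   statement "resx N" ends up as one of:

     __builtin_trap ();                       <-- no source region left

     __builtin_eh_copy_values (dst, src);     <-- resolved within the
     goto <post-landing-pad>;                     function

     ptr = __builtin_eh_pointer (src);        <-- escapes the function
     _Unwind_Resume (ptr);

   matching the three cases handled in lower_resx below.  */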
2907
2908 static bool
2909 lower_resx (basic_block bb, gimple stmt, struct pointer_map_t *mnt_map)
2910 {
2911 int lp_nr;
2912 eh_region src_r, dst_r;
2913 gimple_stmt_iterator gsi;
2914 gimple x;
2915 tree fn, src_nr;
2916 bool ret = false;
2917
2918 lp_nr = lookup_stmt_eh_lp (stmt);
2919 if (lp_nr != 0)
2920 dst_r = get_eh_region_from_lp_number (lp_nr);
2921 else
2922 dst_r = NULL;
2923
2924 src_r = get_eh_region_from_number (gimple_resx_region (stmt));
2925 gsi = gsi_last_bb (bb);
2926
2927 if (src_r == NULL)
2928 {
2929 /* We can wind up with no source region when pass_cleanup_eh shows
2930 that there are no entries into an eh region and deletes it, but
2931 then the block that contains the resx isn't removed. This can
2932 happen without optimization when the switch statement created by
2933 lower_try_finally_switch isn't simplified to remove the eh case.
2934
2935 Resolve this by expanding the resx node to an abort. */
2936
2937 fn = implicit_built_in_decls[BUILT_IN_TRAP];
2938 x = gimple_build_call (fn, 0);
2939 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
2940
2941 while (EDGE_COUNT (bb->succs) > 0)
2942 remove_edge (EDGE_SUCC (bb, 0));
2943 }
2944 else if (dst_r)
2945 {
2946 /* When we have a destination region, we resolve this by copying
2947 the excptr and filter values into place, and changing the edge
2948 to immediately after the landing pad. */
2949 edge e;
2950
2951 if (lp_nr < 0)
2952 {
2953 basic_block new_bb;
2954 void **slot;
2955 tree lab;
2956
2957 /* We are resuming into a MUST_NOT_THROW region. Expand a call to
2958 the failure decl into a new block, if needed. */
2959 gcc_assert (dst_r->type == ERT_MUST_NOT_THROW);
2960
2961 slot = pointer_map_contains (mnt_map, dst_r);
2962 if (slot == NULL)
2963 {
2964 gimple_stmt_iterator gsi2;
2965
2966 new_bb = create_empty_bb (bb);
2967 lab = gimple_block_label (new_bb);
2968 gsi2 = gsi_start_bb (new_bb);
2969
2970 fn = dst_r->u.must_not_throw.failure_decl;
2971 x = gimple_build_call (fn, 0);
2972 gimple_set_location (x, dst_r->u.must_not_throw.failure_loc);
2973 gsi_insert_after (&gsi2, x, GSI_CONTINUE_LINKING);
2974
2975 slot = pointer_map_insert (mnt_map, dst_r);
2976 *slot = lab;
2977 }
2978 else
2979 {
2980 lab = (tree) *slot;
2981 new_bb = label_to_block (lab);
2982 }
2983
2984 gcc_assert (EDGE_COUNT (bb->succs) == 0);
2985 e = make_edge (bb, new_bb, EDGE_FALLTHRU);
2986 e->count = bb->count;
2987 e->probability = REG_BR_PROB_BASE;
2988 }
2989 else
2990 {
2991 edge_iterator ei;
2992 tree dst_nr = build_int_cst (integer_type_node, dst_r->index);
2993
2994 fn = implicit_built_in_decls[BUILT_IN_EH_COPY_VALUES];
2995 src_nr = build_int_cst (integer_type_node, src_r->index);
2996 x = gimple_build_call (fn, 2, dst_nr, src_nr);
2997 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
2998
2999 /* Update the flags for the outgoing edge. */
3000 e = single_succ_edge (bb);
3001 gcc_assert (e->flags & EDGE_EH);
3002 e->flags = (e->flags & ~EDGE_EH) | EDGE_FALLTHRU;
3003
3004 /* If there are no more EH users of the landing pad, delete it. */
3005 FOR_EACH_EDGE (e, ei, e->dest->preds)
3006 if (e->flags & EDGE_EH)
3007 break;
3008 if (e == NULL)
3009 {
3010 eh_landing_pad lp = get_eh_landing_pad_from_number (lp_nr);
3011 remove_eh_landing_pad (lp);
3012 }
3013 }
3014
3015 ret = true;
3016 }
3017 else
3018 {
3019 tree var;
3020
3021 /* When we don't have a destination region, this exception escapes
3022 up the call chain. We resolve this by generating a call to the
3023 _Unwind_Resume library function. */
3024
3025 /* The ARM EABI redefines _Unwind_Resume as __cxa_end_cleanup
3026 with no arguments for C++ and Java. Check for that. */
3027 if (src_r->use_cxa_end_cleanup)
3028 {
3029 fn = implicit_built_in_decls[BUILT_IN_CXA_END_CLEANUP];
3030 x = gimple_build_call (fn, 0);
3031 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3032 }
3033 else
3034 {
3035 fn = implicit_built_in_decls[BUILT_IN_EH_POINTER];
3036 src_nr = build_int_cst (integer_type_node, src_r->index);
3037 x = gimple_build_call (fn, 1, src_nr);
3038 var = create_tmp_var (ptr_type_node, NULL);
3039 var = make_ssa_name (var, x);
3040 gimple_call_set_lhs (x, var);
3041 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3042
3043 fn = implicit_built_in_decls[BUILT_IN_UNWIND_RESUME];
3044 x = gimple_build_call (fn, 1, var);
3045 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3046 }
3047
3048 gcc_assert (EDGE_COUNT (bb->succs) == 0);
3049 }
3050
3051 gsi_remove (&gsi, true);
3052
3053 return ret;
3054 }
3055
3056 static unsigned
3057 execute_lower_resx (void)
3058 {
3059 basic_block bb;
3060 struct pointer_map_t *mnt_map;
3061 bool dominance_invalidated = false;
3062 bool any_rewritten = false;
3063
3064 mnt_map = pointer_map_create ();
3065
3066 FOR_EACH_BB (bb)
3067 {
3068 gimple last = last_stmt (bb);
3069 if (last && is_gimple_resx (last))
3070 {
3071 dominance_invalidated |= lower_resx (bb, last, mnt_map);
3072 any_rewritten = true;
3073 }
3074 }
3075
3076 pointer_map_destroy (mnt_map);
3077
3078 if (dominance_invalidated)
3079 {
3080 free_dominance_info (CDI_DOMINATORS);
3081 free_dominance_info (CDI_POST_DOMINATORS);
3082 }
3083
3084 return any_rewritten ? TODO_update_ssa_only_virtuals : 0;
3085 }
3086
3087 static bool
3088 gate_lower_resx (void)
3089 {
3090 return flag_exceptions != 0;
3091 }
3092
3093 struct gimple_opt_pass pass_lower_resx =
3094 {
3095 {
3096 GIMPLE_PASS,
3097 "resx", /* name */
3098 gate_lower_resx, /* gate */
3099 execute_lower_resx, /* execute */
3100 NULL, /* sub */
3101 NULL, /* next */
3102 0, /* static_pass_number */
3103 TV_TREE_EH, /* tv_id */
3104 PROP_gimple_lcf, /* properties_required */
3105 0, /* properties_provided */
3106 0, /* properties_destroyed */
3107 0, /* todo_flags_start */
3108 TODO_verify_flow /* todo_flags_finish */
3109 }
3110 };
3111
3112
3113 /* At the end of inlining, we can lower EH_DISPATCH. Return true when
3114 we have found some duplicate labels and removed some edges. */
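/* As a sketch, for an ERT_TRY region with two typed handlers the
   statement "eh_dispatch N" becomes, roughly,

     filter = __builtin_eh_filter (N);
     switch (filter) { case 1: goto L1; case 2: goto L2;
                       default: goto Ldef; }

   (labels and filter values illustrative; the values were assigned
   by assign_filter_values).  For ERT_ALLOWED_EXCEPTIONS a simple
   equality test on the filter is emitted instead.  */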
3115
3116 static bool
3117 lower_eh_dispatch (basic_block src, gimple stmt)
3118 {
3119 gimple_stmt_iterator gsi;
3120 int region_nr;
3121 eh_region r;
3122 tree filter, fn;
3123 gimple x;
3124 bool redirected = false;
3125
3126 region_nr = gimple_eh_dispatch_region (stmt);
3127 r = get_eh_region_from_number (region_nr);
3128
3129 gsi = gsi_last_bb (src);
3130
3131 switch (r->type)
3132 {
3133 case ERT_TRY:
3134 {
3135 VEC (tree, heap) *labels = NULL;
3136 tree default_label = NULL;
3137 eh_catch c;
3138 edge_iterator ei;
3139 edge e;
3140 struct pointer_set_t *seen_values = pointer_set_create ();
3141
3142 /* Collect the labels for a switch. Zero the post_landing_pad
3143 field because we'll no longer have anything keeping these labels
3144 in existence and the optimizer will be free to merge these
3145 blocks at will. */
3146 for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
3147 {
3148 tree tp_node, flt_node, lab = c->label;
3149 bool have_label = false;
3150
3151 c->label = NULL;
3152 tp_node = c->type_list;
3153 flt_node = c->filter_list;
3154
3155 if (tp_node == NULL)
3156 {
3157 default_label = lab;
3158 break;
3159 }
3160 do
3161 {
3162 /* Filter out duplicate labels that arise when this handler
3163 is shadowed by an earlier one. When no labels are
3164 attached to the handler anymore, we remove
3165 the corresponding edge and then we delete unreachable
3166 blocks at the end of this pass. */
3167 if (! pointer_set_contains (seen_values, TREE_VALUE (flt_node)))
3168 {
3169 tree t = build_case_label (TREE_VALUE (flt_node),
3170 NULL, lab);
3171 VEC_safe_push (tree, heap, labels, t);
3172 pointer_set_insert (seen_values, TREE_VALUE (flt_node));
3173 have_label = true;
3174 }
3175
3176 tp_node = TREE_CHAIN (tp_node);
3177 flt_node = TREE_CHAIN (flt_node);
3178 }
3179 while (tp_node);
3180 if (! have_label)
3181 {
3182 remove_edge (find_edge (src, label_to_block (lab)));
3183 redirected = true;
3184 }
3185 }
3186
3187 /* Clean up the edge flags. */
3188 FOR_EACH_EDGE (e, ei, src->succs)
3189 {
3190 if (e->flags & EDGE_FALLTHRU)
3191 {
3192 /* If there was no catch-all, use the fallthru edge. */
3193 if (default_label == NULL)
3194 default_label = gimple_block_label (e->dest);
3195 e->flags &= ~EDGE_FALLTHRU;
3196 }
3197 }
3198 gcc_assert (default_label != NULL);
3199
3200 /* Don't generate a switch if there's only a default case.
3201 This is common in the form of try { A; } catch (...) { B; }. */
3202 if (labels == NULL)
3203 {
3204 e = single_succ_edge (src);
3205 e->flags |= EDGE_FALLTHRU;
3206 }
3207 else
3208 {
3209 fn = implicit_built_in_decls[BUILT_IN_EH_FILTER];
3210 x = gimple_build_call (fn, 1, build_int_cst (integer_type_node,
3211 region_nr));
3212 filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)), NULL);
3213 filter = make_ssa_name (filter, x);
3214 gimple_call_set_lhs (x, filter);
3215 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3216
3217 /* Turn the default label into a default case. */
3218 default_label = build_case_label (NULL, NULL, default_label);
3219 sort_case_labels (labels);
3220
3221 x = gimple_build_switch_vec (filter, default_label, labels);
3222 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3223
3224 VEC_free (tree, heap, labels);
3225 }
3226 pointer_set_destroy (seen_values);
3227 }
3228 break;
3229
3230 case ERT_ALLOWED_EXCEPTIONS:
3231 {
3232 edge b_e = BRANCH_EDGE (src);
3233 edge f_e = FALLTHRU_EDGE (src);
3234
3235 fn = implicit_built_in_decls[BUILT_IN_EH_FILTER];
3236 x = gimple_build_call (fn, 1, build_int_cst (integer_type_node,
3237 region_nr));
3238 filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)), NULL);
3239 filter = make_ssa_name (filter, x);
3240 gimple_call_set_lhs (x, filter);
3241 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3242
3243 r->u.allowed.label = NULL;
3244 x = gimple_build_cond (EQ_EXPR, filter,
3245 build_int_cst (TREE_TYPE (filter),
3246 r->u.allowed.filter),
3247 NULL_TREE, NULL_TREE);
3248 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3249
3250 b_e->flags = b_e->flags | EDGE_TRUE_VALUE;
3251 f_e->flags = (f_e->flags & ~EDGE_FALLTHRU) | EDGE_FALSE_VALUE;
3252 }
3253 break;
3254
3255 default:
3256 gcc_unreachable ();
3257 }
3258
3259 /* Replace the EH_DISPATCH with the SWITCH or COND generated above. */
3260 gsi_remove (&gsi, true);
3261 return redirected;
3262 }
3263
3264 static unsigned
3265 execute_lower_eh_dispatch (void)
3266 {
3267 basic_block bb;
3268 bool any_rewritten = false;
3269 bool redirected = false;
3270
3271 assign_filter_values ();
3272
3273 FOR_EACH_BB (bb)
3274 {
3275 gimple last = last_stmt (bb);
3276 if (last && gimple_code (last) == GIMPLE_EH_DISPATCH)
3277 {
3278 redirected |= lower_eh_dispatch (bb, last);
3279 any_rewritten = true;
3280 }
3281 }
3282
3283 if (redirected)
3284 delete_unreachable_blocks ();
3285 return any_rewritten ? TODO_update_ssa_only_virtuals : 0;
3286 }
3287
3288 static bool
3289 gate_lower_eh_dispatch (void)
3290 {
3291 return cfun->eh->region_tree != NULL;
3292 }
3293
3294 struct gimple_opt_pass pass_lower_eh_dispatch =
3295 {
3296 {
3297 GIMPLE_PASS,
3298 "ehdisp", /* name */
3299 gate_lower_eh_dispatch, /* gate */
3300 execute_lower_eh_dispatch, /* execute */
3301 NULL, /* sub */
3302 NULL, /* next */
3303 0, /* static_pass_number */
3304 TV_TREE_EH, /* tv_id */
3305 PROP_gimple_lcf, /* properties_required */
3306 0, /* properties_provided */
3307 0, /* properties_destroyed */
3308 0, /* todo_flags_start */
3309 TODO_verify_flow /* todo_flags_finish */
3310 }
3311 };
3312 \f
3313 /* Walk statements, see what regions are really referenced and remove
3314 those that are unused. */
3315
3316 static void
3317 remove_unreachable_handlers (void)
3318 {
3319 sbitmap r_reachable, lp_reachable;
3320 eh_region region;
3321 eh_landing_pad lp;
3322 basic_block bb;
3323 int lp_nr, r_nr;
3324
3325 r_reachable = sbitmap_alloc (VEC_length (eh_region, cfun->eh->region_array));
3326 lp_reachable
3327 = sbitmap_alloc (VEC_length (eh_landing_pad, cfun->eh->lp_array));
3328 sbitmap_zero (r_reachable);
3329 sbitmap_zero (lp_reachable);
3330
3331 FOR_EACH_BB (bb)
3332 {
3333 gimple_stmt_iterator gsi;
3334
3335 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3336 {
3337 gimple stmt = gsi_stmt (gsi);
3338 lp_nr = lookup_stmt_eh_lp (stmt);
3339
3340 /* Negative LP numbers are MUST_NOT_THROW regions which
3341 are not considered BB enders. */
3342 if (lp_nr < 0)
3343 SET_BIT (r_reachable, -lp_nr);
3344
3345 /* Positive LP numbers are real landing pads, and are BB enders. */
3346 else if (lp_nr > 0)
3347 {
3348 gcc_assert (gsi_one_before_end_p (gsi));
3349 region = get_eh_region_from_lp_number (lp_nr);
3350 SET_BIT (r_reachable, region->index);
3351 SET_BIT (lp_reachable, lp_nr);
3352 }
3353
3354 /* Avoid removing regions referenced from RESX/EH_DISPATCH. */
3355 switch (gimple_code (stmt))
3356 {
3357 case GIMPLE_RESX:
3358 SET_BIT (r_reachable, gimple_resx_region (stmt));
3359 break;
3360 case GIMPLE_EH_DISPATCH:
3361 SET_BIT (r_reachable, gimple_eh_dispatch_region (stmt));
3362 break;
3363 default:
3364 break;
3365 }
3366 }
3367 }
3368
3369 if (dump_file)
3370 {
3371 fprintf (dump_file, "Before removal of unreachable regions:\n");
3372 dump_eh_tree (dump_file, cfun);
3373 fprintf (dump_file, "Reachable regions: ");
3374 dump_sbitmap_file (dump_file, r_reachable);
3375 fprintf (dump_file, "Reachable landing pads: ");
3376 dump_sbitmap_file (dump_file, lp_reachable);
3377 }
3378
3379 for (r_nr = 1;
3380 VEC_iterate (eh_region, cfun->eh->region_array, r_nr, region); ++r_nr)
3381 if (region && !TEST_BIT (r_reachable, r_nr))
3382 {
3383 if (dump_file)
3384 fprintf (dump_file, "Removing unreachable region %d\n", r_nr);
3385 remove_eh_handler (region);
3386 }
3387
3388 for (lp_nr = 1;
3389 VEC_iterate (eh_landing_pad, cfun->eh->lp_array, lp_nr, lp); ++lp_nr)
3390 if (lp && !TEST_BIT (lp_reachable, lp_nr))
3391 {
3392 if (dump_file)
3393 fprintf (dump_file, "Removing unreachable landing pad %d\n", lp_nr);
3394 remove_eh_landing_pad (lp);
3395 }
3396
3397 if (dump_file)
3398 {
3399 fprintf (dump_file, "\n\nAfter removal of unreachable regions:\n");
3400 dump_eh_tree (dump_file, cfun);
3401 fprintf (dump_file, "\n\n");
3402 }
3403
3404 sbitmap_free (r_reachable);
3405 sbitmap_free (lp_reachable);
3406
3407 #ifdef ENABLE_CHECKING
3408 verify_eh_tree (cfun);
3409 #endif
3410 }
3411
3412 /* Remove regions that do not have landing pads. This assumes
3413 that remove_unreachable_handlers has already been run, and
3414 that we've just manipulated the landing pads since then. */
3415
3416 static void
3417 remove_unreachable_handlers_no_lp (void)
3418 {
3419 eh_region r;
3420 int i;
3421
3422 for (i = 1; VEC_iterate (eh_region, cfun->eh->region_array, i, r); ++i)
3423 if (r && r->landing_pads == NULL && r->type != ERT_MUST_NOT_THROW)
3424 {
3425 if (dump_file)
3426 fprintf (dump_file, "Removing unreachable region %d\n", i);
3427 remove_eh_handler (r);
3428 }
3429 }
3430
3431 /* Undo critical edge splitting on an EH landing pad. Earlier, we
3432 optimistically split all sorts of edges, including EH edges. The
3433 optimization passes in between may not have needed them; if not,
3434 we should undo the split.
3435
3436 Recognize this case by having one EH edge incoming to the BB and
3437 one normal edge outgoing; BB should be empty apart from the
3438 post_landing_pad label.
3439
3440 Note that this is slightly different from the empty handler case
3441 handled by cleanup_empty_eh, in that the actual handler may yet
3442 have actual code but the landing pad has been separated from the
3443 handler. As such, cleanup_empty_eh relies on this transformation
3444 having been done first. */
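/* Schematically (a sketch):

     throw_bb --EH--> lp_bb (only the post_landing_pad label) --> handler_bb

   is turned back into

     throw_bb --EH--> handler_bb
*/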
3445
3446 static bool
3447 unsplit_eh (eh_landing_pad lp)
3448 {
3449 basic_block bb = label_to_block (lp->post_landing_pad);
3450 gimple_stmt_iterator gsi;
3451 edge e_in, e_out;
3452
3453 /* Quickly check the edge counts on BB for singularity. */
3454 if (EDGE_COUNT (bb->preds) != 1 || EDGE_COUNT (bb->succs) != 1)
3455 return false;
3456 e_in = EDGE_PRED (bb, 0);
3457 e_out = EDGE_SUCC (bb, 0);
3458
3459 /* Input edge must be EH and output edge must be normal. */
3460 if ((e_in->flags & EDGE_EH) == 0 || (e_out->flags & EDGE_EH) != 0)
3461 return false;
3462
3463 /* The block must be empty except for the labels and debug insns. */
3464 gsi = gsi_after_labels (bb);
3465 if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
3466 gsi_next_nondebug (&gsi);
3467 if (!gsi_end_p (gsi))
3468 return false;
3469
3470 /* The destination block must not already have a landing pad
3471 for a different region. */
3472 for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
3473 {
3474 gimple stmt = gsi_stmt (gsi);
3475 tree lab;
3476 int lp_nr;
3477
3478 if (gimple_code (stmt) != GIMPLE_LABEL)
3479 break;
3480 lab = gimple_label_label (stmt);
3481 lp_nr = EH_LANDING_PAD_NR (lab);
3482 if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region)
3483 return false;
3484 }
3485
3486 /* The new destination block must not already be a destination of
3487 the source block, lest we merge fallthru and eh edges and get
3488 all sorts of confused. */
3489 if (find_edge (e_in->src, e_out->dest))
3490 return false;
3491
3492 /* ??? We can get degenerate phis due to cfg cleanups. I would have
3493 thought this should have been cleaned up by a phicprop pass, but
3494 that doesn't appear to handle virtuals. Propagate by hand. */
3495 if (!gimple_seq_empty_p (phi_nodes (bb)))
3496 {
3497 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); )
3498 {
3499 gimple use_stmt, phi = gsi_stmt (gsi);
3500 tree lhs = gimple_phi_result (phi);
3501 tree rhs = gimple_phi_arg_def (phi, 0);
3502 use_operand_p use_p;
3503 imm_use_iterator iter;
3504
3505 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
3506 {
3507 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
3508 SET_USE (use_p, rhs);
3509 }
3510
3511 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
3512 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1;
3513
3514 remove_phi_node (&gsi, true);
3515 }
3516 }
3517
3518 if (dump_file && (dump_flags & TDF_DETAILS))
3519 fprintf (dump_file, "Unsplit EH landing pad %d to block %i.\n",
3520 lp->index, e_out->dest->index);
3521
3522 /* Redirect the edge. Since redirect_eh_edge_1 expects to be moving
3523 a successor edge, humor it. But do the real CFG change with the
3524 predecessor of E_OUT in order to preserve the ordering of arguments
3525 to the PHI nodes in E_OUT->DEST. */
3526 redirect_eh_edge_1 (e_in, e_out->dest, false);
3527 redirect_edge_pred (e_out, e_in->src);
3528 e_out->flags = e_in->flags;
3529 e_out->probability = e_in->probability;
3530 e_out->count = e_in->count;
3531 remove_edge (e_in);
3532
3533 return true;
3534 }
3535
3536 /* Examine each landing pad block and see if it matches unsplit_eh. */
3537
3538 static bool
3539 unsplit_all_eh (void)
3540 {
3541 bool changed = false;
3542 eh_landing_pad lp;
3543 int i;
3544
3545 for (i = 1; VEC_iterate (eh_landing_pad, cfun->eh->lp_array, i, lp); ++i)
3546 if (lp)
3547 changed |= unsplit_eh (lp);
3548
3549 return changed;
3550 }
3551
3552 /* A subroutine of cleanup_empty_eh. Redirect all EH edges incoming
3553 to OLD_BB to NEW_BB; return true on success, false on failure.
3554
3555 OLD_BB_OUT is the edge into NEW_BB from OLD_BB, so if we miss any
3556 PHI variables from OLD_BB we can pick them up from OLD_BB_OUT.
3557 Virtual PHIs may be deleted and marked for renaming. */
3558
3559 static bool
3560 cleanup_empty_eh_merge_phis (basic_block new_bb, basic_block old_bb,
3561 edge old_bb_out, bool change_region)
3562 {
3563 gimple_stmt_iterator ngsi, ogsi;
3564 edge_iterator ei;
3565 edge e;
3566 bitmap rename_virts;
3567 bitmap ophi_handled;
3568
3569 FOR_EACH_EDGE (e, ei, old_bb->preds)
3570 redirect_edge_var_map_clear (e);
3571
3572 ophi_handled = BITMAP_ALLOC (NULL);
3573 rename_virts = BITMAP_ALLOC (NULL);
3574
3575 /* First, iterate through the PHIs on NEW_BB and set up the edge_var_map
3576 for the edges we're going to move. */
3577 for (ngsi = gsi_start_phis (new_bb); !gsi_end_p (ngsi); gsi_next (&ngsi))
3578 {
3579 gimple ophi, nphi = gsi_stmt (ngsi);
3580 tree nresult, nop;
3581
3582 nresult = gimple_phi_result (nphi);
3583 nop = gimple_phi_arg_def (nphi, old_bb_out->dest_idx);
3584
3585 /* Find the corresponding PHI in OLD_BB so we can forward-propagate
3586 the source ssa_name. */
3587 ophi = NULL;
3588 for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi))
3589 {
3590 ophi = gsi_stmt (ogsi);
3591 if (gimple_phi_result (ophi) == nop)
3592 break;
3593 ophi = NULL;
3594 }
3595
3596 /* If we did find the corresponding PHI, copy those inputs. */
3597 if (ophi)
3598 {
3599 /* If NOP is used somewhere else beyond phis in new_bb, give up. */
3600 if (!has_single_use (nop))
3601 {
3602 imm_use_iterator imm_iter;
3603 use_operand_p use_p;
3604
3605 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, nop)
3606 {
3607 if (!gimple_debug_bind_p (USE_STMT (use_p))
3608 && (gimple_code (USE_STMT (use_p)) != GIMPLE_PHI
3609 || gimple_bb (USE_STMT (use_p)) != new_bb))
3610 goto fail;
3611 }
3612 }
3613 bitmap_set_bit (ophi_handled, SSA_NAME_VERSION (nop));
3614 FOR_EACH_EDGE (e, ei, old_bb->preds)
3615 {
3616 location_t oloc;
3617 tree oop;
3618
3619 if ((e->flags & EDGE_EH) == 0)
3620 continue;
3621 oop = gimple_phi_arg_def (ophi, e->dest_idx);
3622 oloc = gimple_phi_arg_location (ophi, e->dest_idx);
3623 redirect_edge_var_map_add (e, nresult, oop, oloc);
3624 }
3625 }
3626 /* If we didn't find the PHI, but it's a VOP, remember to rename
3627 it later, assuming all other tests succeed. */
3628 else if (!is_gimple_reg (nresult))
3629 bitmap_set_bit (rename_virts, SSA_NAME_VERSION (nresult));
3630 /* If we didn't find the PHI, and it's a real variable, we know
3631 from the fact that OLD_BB is tree_empty_eh_handler_p that the
3632 variable is unchanged from input to the block and we can simply
3633 re-use the input to NEW_BB from the OLD_BB_OUT edge. */
3634 else
3635 {
3636 location_t nloc
3637 = gimple_phi_arg_location (nphi, old_bb_out->dest_idx);
3638 FOR_EACH_EDGE (e, ei, old_bb->preds)
3639 redirect_edge_var_map_add (e, nresult, nop, nloc);
3640 }
3641 }
3642
3643 /* Second, verify that all PHIs from OLD_BB have been handled. If not,
3644 we don't know what values from the other edges into NEW_BB to use. */
3645 for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi))
3646 {
3647 gimple ophi = gsi_stmt (ogsi);
3648 tree oresult = gimple_phi_result (ophi);
3649 if (!bitmap_bit_p (ophi_handled, SSA_NAME_VERSION (oresult)))
3650 goto fail;
3651 }
3652
3653 /* At this point we know that the merge will succeed. Remove the PHI
3654 nodes for the virtuals that we want to rename. */
3655 if (!bitmap_empty_p (rename_virts))
3656 {
3657 for (ngsi = gsi_start_phis (new_bb); !gsi_end_p (ngsi); )
3658 {
3659 gimple nphi = gsi_stmt (ngsi);
3660 tree nresult = gimple_phi_result (nphi);
3661 if (bitmap_bit_p (rename_virts, SSA_NAME_VERSION (nresult)))
3662 {
3663 mark_virtual_phi_result_for_renaming (nphi);
3664 remove_phi_node (&ngsi, true);
3665 }
3666 else
3667 gsi_next (&ngsi);
3668 }
3669 }
3670
3671 /* Finally, move the edges and update the PHIs. */
3672 for (ei = ei_start (old_bb->preds); (e = ei_safe_edge (ei)); )
3673 if (e->flags & EDGE_EH)
3674 {
3675 redirect_eh_edge_1 (e, new_bb, change_region);
3676 redirect_edge_succ (e, new_bb);
3677 flush_pending_stmts (e);
3678 }
3679 else
3680 ei_next (&ei);
3681
3682 BITMAP_FREE (ophi_handled);
3683 BITMAP_FREE (rename_virts);
3684 return true;
3685
3686 fail:
3687 FOR_EACH_EDGE (e, ei, old_bb->preds)
3688 redirect_edge_var_map_clear (e);
3689 BITMAP_FREE (ophi_handled);
3690 BITMAP_FREE (rename_virts);
3691 return false;
3692 }
3693
3694 /* A subroutine of cleanup_empty_eh. Move a landing pad LP from its
3695 old region to NEW_REGION at BB. */
3696
3697 static void
3698 cleanup_empty_eh_move_lp (basic_block bb, edge e_out,
3699 eh_landing_pad lp, eh_region new_region)
3700 {
3701 gimple_stmt_iterator gsi;
3702 eh_landing_pad *pp;
3703
3704 for (pp = &lp->region->landing_pads; *pp != lp; pp = &(*pp)->next_lp)
3705 continue;
3706 *pp = lp->next_lp;
3707
3708 lp->region = new_region;
3709 lp->next_lp = new_region->landing_pads;
3710 new_region->landing_pads = lp;
3711
3712 /* Delete the RESX that was matched within the empty handler block. */
3713 gsi = gsi_last_bb (bb);
3714 mark_virtual_ops_for_renaming (gsi_stmt (gsi));
3715 gsi_remove (&gsi, true);
3716
3717 /* Clean up E_OUT for the fallthru. */
3718 e_out->flags = (e_out->flags & ~EDGE_EH) | EDGE_FALLTHRU;
3719 e_out->probability = REG_BR_PROB_BASE;
3720 }
3721
3722 /* A subroutine of cleanup_empty_eh. Handle more complex cases of
3723 unsplitting than unsplit_eh was prepared to handle, e.g. when
3724 multiple incoming edges and phis are involved. */
3725
3726 static bool
3727 cleanup_empty_eh_unsplit (basic_block bb, edge e_out, eh_landing_pad lp)
3728 {
3729 gimple_stmt_iterator gsi;
3730 tree lab;
3731 edge_iterator ei;
3732 edge e;
3733
3734 /* We really ought not have totally lost everything following
3735 a landing pad label. Given that BB is empty, there had better
3736 be a successor. */
3737 gcc_assert (e_out != NULL);
3738
3739 /* The destination block must not already have a landing pad
3740 for a different region. */
3741 lab = NULL;
3742 for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
3743 {
3744 gimple stmt = gsi_stmt (gsi);
3745 int lp_nr;
3746
3747 if (gimple_code (stmt) != GIMPLE_LABEL)
3748 break;
3749 lab = gimple_label_label (stmt);
3750 lp_nr = EH_LANDING_PAD_NR (lab);
3751 if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region)
3752 return false;
3753 }
3754
3755 /* The destination block must not be a regular successor for any
3756 of the preds of the landing pad. Thus, avoid turning
3757 <..>
3758 | \ EH
3759 | <..>
3760 | /
3761 <..>
3762 into
3763 <..>
3764 | | EH
3765 <..>
3766 which CFG verification would choke on. See PR45172. */
3767 FOR_EACH_EDGE (e, ei, bb->preds)
3768 if (find_edge (e->src, e_out->dest))
3769 return false;
3770
3771 /* Attempt to move the PHIs into the successor block. */
3772 if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, false))
3773 {
3774 if (dump_file && (dump_flags & TDF_DETAILS))
3775 fprintf (dump_file,
3776 "Unsplit EH landing pad %d to block %i "
3777 "(via cleanup_empty_eh).\n",
3778 lp->index, e_out->dest->index);
3779 return true;
3780 }
3781
3782 return false;
3783 }
3784
3785 /* Return true if edge E_FIRST is part of an empty infinite loop
3786 or leads to such a loop through a series of single successor
3787 empty bbs. */
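/* E.g. a self loop on E_FIRST->dest, or a chain like
   bb1 -> bb2 -> bb1 where both blocks contain nothing but labels
   and debug statements.  */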
3788
3789 static bool
3790 infinite_empty_loop_p (edge e_first)
3791 {
3792 bool inf_loop = false;
3793 edge e;
3794
3795 if (e_first->dest == e_first->src)
3796 return true;
3797
  e_first->src->aux = (void *) 1;
  for (e = e_first; single_succ_p (e->dest); e = single_succ_edge (e->dest))
    {
      gimple_stmt_iterator gsi;
      if (e->dest->aux)
	{
	  inf_loop = true;
	  break;
	}
      e->dest->aux = (void *) 1;
      gsi = gsi_after_labels (e->dest);
      if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
	gsi_next_nondebug (&gsi);
      if (!gsi_end_p (gsi))
	break;
    }
  e_first->src->aux = NULL;
  for (e = e_first; e->dest->aux; e = single_succ_edge (e->dest))
    e->dest->aux = NULL;

  return inf_loop;
}

/* Examine the block associated with LP to determine if it's an empty
   handler for its EH region.  If so, attempt to redirect EH edges to
   an outer region.  Return true if the CFG was updated in any way.
   This is similar to jump forwarding, just across EH edges.  */

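/* For example (a hypothetical C++ sketch; may_throw is an assumed
   function), the empty destructor in

     struct S { ~S () { } };
     void f () { S s; may_throw (); }

   leaves behind a cleanup region whose post-landing-pad block contains
   nothing but a RESX once the destructor call has been discarded; the
   EH edges into it can then bypass the region entirely.  */
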
static bool
cleanup_empty_eh (eh_landing_pad lp)
{
  basic_block bb = label_to_block (lp->post_landing_pad);
  gimple_stmt_iterator gsi;
  gimple resx;
  eh_region new_region;
  edge_iterator ei;
  edge e, e_out;
  bool has_non_eh_pred;
  int new_lp_nr;

  /* There can be zero or one edge out of BB.  This is the quickest test.  */
  switch (EDGE_COUNT (bb->succs))
    {
    case 0:
      e_out = NULL;
      break;
    case 1:
      e_out = EDGE_SUCC (bb, 0);
      break;
    default:
      return false;
    }
  gsi = gsi_after_labels (bb);

  /* Make sure to skip debug statements.  */
  if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
    gsi_next_nondebug (&gsi);

  /* If the block is totally empty, look for more unsplitting cases.  */
  if (gsi_end_p (gsi))
    {
      /* Bail out for the degenerate case of an infinite loop.  */
      if (infinite_empty_loop_p (e_out))
	return false;

      return cleanup_empty_eh_unsplit (bb, e_out, lp);
    }

  /* The block should consist only of a single RESX statement, modulo a
     preceding call to __builtin_stack_restore if there is no outgoing
     edge, since the call can be eliminated in this case.  */
  resx = gsi_stmt (gsi);
  if (!e_out && gimple_call_builtin_p (resx, BUILT_IN_STACK_RESTORE))
    {
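      /* Skip the __builtin_stack_restore and inspect the statement
	 after it, which should be the RESX.  */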
      gsi_next (&gsi);
      resx = gsi_stmt (gsi);
    }
  if (!is_gimple_resx (resx))
    return false;
  gcc_assert (gsi_one_before_end_p (gsi));

  /* Determine if there are non-EH edges, or resx edges into the handler.  */
  has_non_eh_pred = false;
  FOR_EACH_EDGE (e, ei, bb->preds)
    if (!(e->flags & EDGE_EH))
      has_non_eh_pred = true;

  /* Find the handler that is outer to the empty handler by looking at
     where the RESX instruction was vectored.  */
  new_lp_nr = lookup_stmt_eh_lp (resx);
  new_region = get_eh_region_from_lp_number (new_lp_nr);

  /* If there's no destination region within the current function,
     redirection is trivial via removing the throwing statements from
     the EH region, removing the EH edges, and allowing the block
     to go unreachable.  */
  if (new_region == NULL)
    {
      gcc_assert (e_out == NULL);
      for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
	if (e->flags & EDGE_EH)
	  {
	    gimple stmt = last_stmt (e->src);
	    remove_stmt_from_eh_lp (stmt);
	    remove_edge (e);
	  }
	else
	  ei_next (&ei);
      goto succeed;
    }

  /* If the destination region is a MUST_NOT_THROW, allow the runtime
     to handle the abort and allow the blocks to go unreachable.  */
  if (new_region->type == ERT_MUST_NOT_THROW)
    {
      for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
	if (e->flags & EDGE_EH)
	  {
	    gimple stmt = last_stmt (e->src);
	    remove_stmt_from_eh_lp (stmt);
	    add_stmt_to_eh_lp (stmt, new_lp_nr);
	    remove_edge (e);
	  }
	else
	  ei_next (&ei);
      goto succeed;
    }

  /* Try to redirect the EH edges and merge the PHIs into the destination
     landing pad block.  If the merge succeeds, we'll already have redirected
     all the EH edges.  The handler itself will go unreachable if there were
     no normal edges.  */
  if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, true))
    goto succeed;

  /* Finally, if all input edges are EH edges, then we can (potentially)
     reduce the number of transfers from the runtime by moving the landing
     pad from the original region to the new region.  This is a win when
     we remove the last CLEANUP region along a particular exception
     propagation path.  Since nothing changes except for the region with
     which the landing pad is associated, the PHI nodes do not need to be
     adjusted at all.  */
  if (!has_non_eh_pred)
    {
      cleanup_empty_eh_move_lp (bb, e_out, lp, new_region);
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Empty EH handler %i moved to EH region %i.\n",
		 lp->index, new_region->index);

      /* ??? The CFG didn't change, but we may have rendered the
	 old EH region unreachable.  Trigger a cleanup there.  */
      return true;
    }

  return false;

 succeed:
  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Empty EH handler %i removed.\n", lp->index);
  remove_eh_landing_pad (lp);
  return true;
}

/* Do a post-order traversal of the EH region tree.  Examine each
   post_landing_pad block and see if we can eliminate it as empty.  */

static bool
cleanup_all_empty_eh (void)
{
  bool changed = false;
  eh_landing_pad lp;
  int i;

  for (i = 1; VEC_iterate (eh_landing_pad, cfun->eh->lp_array, i, lp); ++i)
    if (lp)
      changed |= cleanup_empty_eh (lp);

  return changed;
}

/* Perform cleanups and lowering of exception handling:
     1) Cleanup regions whose handlers do nothing are optimized out.
     2) MUST_NOT_THROW regions that became dead because of 1) are
	optimized out.
     3) Info about regions containing instructions, and about regions
	reachable via local EH edges, is collected.
     4) The EH tree is pruned of regions that are no longer necessary.

   TODO: Push MUST_NOT_THROW regions to the root of the EH tree.
	 Unify those that have the same failure decl and locus.  */

static unsigned int
execute_cleanup_eh_1 (void)
{
  /* Do this first: unsplit_all_eh and cleanup_all_empty_eh can die
     looking up unreachable landing pads.  */
  remove_unreachable_handlers ();

  /* Watch out for the region tree vanishing because all of it
     was unreachable.  */
  if (cfun->eh->region_tree && optimize)
    {
      bool changed = false;

      changed |= unsplit_all_eh ();
      changed |= cleanup_all_empty_eh ();

      if (changed)
	{
	  free_dominance_info (CDI_DOMINATORS);
	  free_dominance_info (CDI_POST_DOMINATORS);

	  /* We delayed all basic block deletion, as we may have performed
	     cleanups on EH edges while non-EH edges were still present.  */
	  delete_unreachable_blocks ();

	  /* We manipulated the landing pads.  Remove any region that no
	     longer has a landing pad.  */
	  remove_unreachable_handlers_no_lp ();

	  return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
	}
    }

  return 0;
}

static unsigned int
execute_cleanup_eh (void)
{
  unsigned int ret = execute_cleanup_eh_1 ();

  /* If the function no longer needs an EH personality routine,
     clear it.  This exposes cross-language inlining opportunities
     and avoids references to a never defined personality routine.  */
  if (DECL_FUNCTION_PERSONALITY (current_function_decl)
      && function_needs_eh_personality (cfun) != eh_personality_lang)
    DECL_FUNCTION_PERSONALITY (current_function_decl) = NULL_TREE;

  return ret;
}

static bool
gate_cleanup_eh (void)
{
  return cfun->eh != NULL && cfun->eh->region_tree != NULL;
}

struct gimple_opt_pass pass_cleanup_eh = {
  {
    GIMPLE_PASS,
    "ehcleanup",		/* name */
    gate_cleanup_eh,		/* gate */
    execute_cleanup_eh,		/* execute */
    NULL,			/* sub */
    NULL,			/* next */
    0,				/* static_pass_number */
    TV_TREE_EH,			/* tv_id */
    PROP_gimple_lcf,		/* properties_required */
    0,				/* properties_provided */
    0,				/* properties_destroyed */
    0,				/* todo_flags_start */
    0				/* todo_flags_finish */
  }
};
\f
/* Verify that BB, which contains STMT as its last statement, has
   precisely the edges that make_eh_edges would create.  */

DEBUG_FUNCTION bool
verify_eh_edges (gimple stmt)
{
  basic_block bb = gimple_bb (stmt);
  eh_landing_pad lp = NULL;
  int lp_nr;
  edge_iterator ei;
  edge e, eh_edge;

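  /* A positive landing pad number selects a landing pad; zero means the
     statement is not in the EH table, and a negative number marks a
     statement inside a MUST_NOT_THROW region, which likewise gets no
     EH edge.  */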
  lp_nr = lookup_stmt_eh_lp (stmt);
  if (lp_nr > 0)
    lp = get_eh_landing_pad_from_number (lp_nr);

  eh_edge = NULL;
  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      if (e->flags & EDGE_EH)
	{
	  if (eh_edge)
	    {
	      error ("BB %i has multiple EH edges", bb->index);
	      return true;
	    }
	  else
	    eh_edge = e;
	}
    }

  if (lp == NULL)
    {
      if (eh_edge)
	{
	  error ("BB %i cannot throw but has an EH edge", bb->index);
	  return true;
	}
      return false;
    }

  if (!stmt_could_throw_p (stmt))
    {
      error ("BB %i last statement has incorrectly set lp", bb->index);
      return true;
    }

  if (eh_edge == NULL)
    {
      error ("BB %i is missing an EH edge", bb->index);
      return true;
    }

  if (eh_edge->dest != label_to_block (lp->post_landing_pad))
    {
      error ("Incorrect EH edge %i->%i", bb->index, eh_edge->dest->index);
      return true;
    }

  return false;
}

/* Similarly, but handle GIMPLE_EH_DISPATCH specifically.  */

DEBUG_FUNCTION bool
verify_eh_dispatch_edge (gimple stmt)
{
  eh_region r;
  eh_catch c;
  basic_block src, dst;
  bool want_fallthru = true;
  edge_iterator ei;
  edge e, fall_edge;

  r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
  src = gimple_bb (stmt);

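  /* Each outgoing edge's aux field serves as a scratch marker: edges
     must start untagged, legitimate dispatch targets are tagged in the
     switch below, and the final walk verifies that every edge is
     either tagged or the single permitted fallthru.  */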
  FOR_EACH_EDGE (e, ei, src->succs)
    gcc_assert (e->aux == NULL);

  switch (r->type)
    {
    case ERT_TRY:
      for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
	{
	  dst = label_to_block (c->label);
	  e = find_edge (src, dst);
	  if (e == NULL)
	    {
	      error ("BB %i is missing an edge", src->index);
	      return true;
	    }
	  e->aux = (void *)e;

	  /* A catch-all handler doesn't have a fallthru.  */
	  if (c->type_list == NULL)
	    {
	      want_fallthru = false;
	      break;
	    }
	}
      break;

    case ERT_ALLOWED_EXCEPTIONS:
      dst = label_to_block (r->u.allowed.label);
      e = find_edge (src, dst);
      if (e == NULL)
	{
	  error ("BB %i is missing an edge", src->index);
	  return true;
	}
      e->aux = (void *)e;
      break;

    default:
      gcc_unreachable ();
    }

  fall_edge = NULL;
  FOR_EACH_EDGE (e, ei, src->succs)
    {
      if (e->flags & EDGE_FALLTHRU)
	{
	  if (fall_edge != NULL)
	    {
	      error ("BB %i has too many fallthru edges", src->index);
	      return true;
	    }
	  fall_edge = e;
	}
      else if (e->aux)
	e->aux = NULL;
      else
	{
	  error ("BB %i has incorrect edge", src->index);
	  return true;
	}
    }
  if ((fall_edge != NULL) ^ want_fallthru)
    {
      error ("BB %i has incorrect fallthru edge", src->index);
      return true;
    }

  return false;
}