re PR middle-end/41573 (segfault in trunk related to strings)
[gcc.git] / gcc / tree-eh.c
1 /* Exception handling semantics and decomposition for trees.
2 Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009
3 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "tree.h"
26 #include "rtl.h"
27 #include "tm_p.h"
28 #include "flags.h"
29 #include "function.h"
30 #include "except.h"
31 #include "tree-flow.h"
32 #include "tree-dump.h"
33 #include "tree-inline.h"
34 #include "tree-iterator.h"
35 #include "tree-pass.h"
36 #include "timevar.h"
37 #include "langhooks.h"
38 #include "ggc.h"
39 #include "toplev.h"
40 #include "gimple.h"
41 #include "target.h"
42
43 /* In some instances a tree and a gimple need to be stored in the same table,
44 i.e. in hash tables. This is a structure to do this. */
45 typedef union {tree *tp; tree t; gimple g;} treemple;
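/* Illustrative sketch (not part of the original file): the finally tree
   built below keys entries on either kind of member, e.g.

     treemple tmp;
     tmp.t = gimple_label_label (label_stmt);    (key is a tree)
     tmp.g = try_stmt;                           (key is a gimple)

   so tree and gimple keys can share one hash table keyed on the leading
   pointer.  */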
46
47 /* Nonzero if we are using EH to handle cleanups. */
48 static int using_eh_for_cleanups_p = 0;
49
50 void
51 using_eh_for_cleanups (void)
52 {
53 using_eh_for_cleanups_p = 1;
54 }
55
56 /* Misc functions used in this file. */
57
58 /* Compare and hash for any structure which begins with a canonical
59 pointer. Assumes all pointers are interchangeable, which is sort
60 of already assumed by gcc elsewhere IIRC. */
61
62 static int
63 struct_ptr_eq (const void *a, const void *b)
64 {
65 const void * const * x = (const void * const *) a;
66 const void * const * y = (const void * const *) b;
67 return *x == *y;
68 }
69
70 static hashval_t
71 struct_ptr_hash (const void *a)
72 {
73 const void * const * x = (const void * const *) a;
74 return (size_t)*x >> 4;
75 }
76
77
78 /* Remember and look up EH landing pad data for arbitrary statements.
79 Really this means any statement that could_throw_p. We could
80 stuff this information into the stmt_ann data structure, but:
81
82 (1) We absolutely rely on this information being kept until
83 we get to rtl. Once we're done with lowering here, if we lose
84 the information there's no way to recover it!
85
86 (2) There are many more statements that *cannot* throw as
87 compared to those that can. We should be saving some amount
88 of space by only allocating memory for those that can throw. */
89
90 /* Add statement T in function IFUN to landing pad NUM. */
91
92 void
93 add_stmt_to_eh_lp_fn (struct function *ifun, gimple t, int num)
94 {
95 struct throw_stmt_node *n;
96 void **slot;
97
98 gcc_assert (num != 0);
99
100 n = GGC_NEW (struct throw_stmt_node);
101 n->stmt = t;
102 n->lp_nr = num;
103
104 if (!get_eh_throw_stmt_table (ifun))
105 set_eh_throw_stmt_table (ifun, htab_create_ggc (31, struct_ptr_hash,
106 struct_ptr_eq,
107 ggc_free));
108
109 slot = htab_find_slot (get_eh_throw_stmt_table (ifun), n, INSERT);
110 gcc_assert (!*slot);
111 *slot = n;
112 }
113
114 /* Add statement T in the current function (cfun) to EH landing pad NUM. */
115
116 void
117 add_stmt_to_eh_lp (gimple t, int num)
118 {
119 add_stmt_to_eh_lp_fn (cfun, t, num);
120 }
121
122 /* Add statement T to the single EH landing pad in REGION. */
123
124 static void
125 record_stmt_eh_region (eh_region region, gimple t)
126 {
127 if (region == NULL)
128 return;
129 if (region->type == ERT_MUST_NOT_THROW)
130 add_stmt_to_eh_lp_fn (cfun, t, -region->index);
131 else
132 {
133 eh_landing_pad lp = region->landing_pads;
134 if (lp == NULL)
135 lp = gen_eh_landing_pad (region);
136 else
137 gcc_assert (lp->next_lp == NULL);
138 add_stmt_to_eh_lp_fn (cfun, t, lp->index);
139 }
140 }
141
142
143 /* Remove statement T in function IFUN from its EH landing pad. */
144
145 bool
146 remove_stmt_from_eh_lp_fn (struct function *ifun, gimple t)
147 {
148 struct throw_stmt_node dummy;
149 void **slot;
150
151 if (!get_eh_throw_stmt_table (ifun))
152 return false;
153
154 dummy.stmt = t;
155 slot = htab_find_slot (get_eh_throw_stmt_table (ifun), &dummy,
156 NO_INSERT);
157 if (slot)
158 {
159 htab_clear_slot (get_eh_throw_stmt_table (ifun), slot);
160 return true;
161 }
162 else
163 return false;
164 }
165
166
167 /* Remove statement T in the current function (cfun) from its
168 EH landing pad. */
169
170 bool
171 remove_stmt_from_eh_lp (gimple t)
172 {
173 return remove_stmt_from_eh_lp_fn (cfun, t);
174 }
175
176 /* Determine if statement T is inside an EH region in function IFUN.
177 Positive numbers indicate a landing pad index; negative numbers
178 indicate a MUST_NOT_THROW region index; zero indicates that the
179 statement is not recorded in the region table. */
180
181 int
182 lookup_stmt_eh_lp_fn (struct function *ifun, gimple t)
183 {
184 struct throw_stmt_node *p, n;
185
186 if (ifun->eh->throw_stmt_table == NULL)
187 return 0;
188
189 n.stmt = t;
190 p = (struct throw_stmt_node *) htab_find (ifun->eh->throw_stmt_table, &n);
191 return p ? p->lp_nr : 0;
192 }
193
194 /* Likewise, but always use the current function. */
195
196 int
197 lookup_stmt_eh_lp (gimple t)
198 {
199 /* We can get called from initialized data when -fnon-call-exceptions
200 is on; prevent crash. */
201 if (!cfun)
202 return 0;
203 return lookup_stmt_eh_lp_fn (cfun, t);
204 }
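/* Illustrative use of the lookup above (a sketch, not part of the original
   file; "stmt" stands for any gimple statement):

     int lp_nr = lookup_stmt_eh_lp (stmt);

   lp_nr > 0 means stmt transfers to landing pad number lp_nr on a throw,
   lp_nr < 0 means stmt sits inside MUST_NOT_THROW region number -lp_nr,
   and lp_nr == 0 means stmt is not recorded in the throw-stmt table.  */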
205
206 /* Likewise, but reference a tree expression instead. */
207
208 int
209 lookup_expr_eh_lp (tree t)
210 {
211 if (cfun && cfun->eh->throw_stmt_table && t && EXPR_P (t))
212 {
213 tree_ann_common_t ann = tree_common_ann (t);
214 if (ann)
215 return ann->lp_nr;
216 }
217 return 0;
218 }
219
220
221 /* First pass of EH node decomposition. Build up a tree of GIMPLE_TRY_FINALLY
222 nodes and LABEL_DECL nodes. We will use this during the second phase to
223 determine if a goto leaves the body of a TRY_FINALLY_EXPR node. */
224
225 struct finally_tree_node
226 {
227 /* When storing a GIMPLE_TRY, we have to record a gimple. However
228 when deciding whether a GOTO to a certain LABEL_DECL (which is a
229 tree) leaves the TRY block, it's necessary to record a tree in
230 this field. Thus a treemple is used. */
231 treemple child;
232 gimple parent;
233 };
234
235 /* Note that this table is *not* marked GTY. It is short-lived. */
236 static htab_t finally_tree;
237
238 static void
239 record_in_finally_tree (treemple child, gimple parent)
240 {
241 struct finally_tree_node *n;
242 void **slot;
243
244 n = XNEW (struct finally_tree_node);
245 n->child = child;
246 n->parent = parent;
247
248 slot = htab_find_slot (finally_tree, n, INSERT);
249 gcc_assert (!*slot);
250 *slot = n;
251 }
252
253 static void
254 collect_finally_tree (gimple stmt, gimple region);
255
256 /* Go through the gimple sequence. Works with collect_finally_tree to
257 record all GIMPLE_LABEL and GIMPLE_TRY statements. */
258
259 static void
260 collect_finally_tree_1 (gimple_seq seq, gimple region)
261 {
262 gimple_stmt_iterator gsi;
263
264 for (gsi = gsi_start (seq); !gsi_end_p (gsi); gsi_next (&gsi))
265 collect_finally_tree (gsi_stmt (gsi), region);
266 }
267
268 static void
269 collect_finally_tree (gimple stmt, gimple region)
270 {
271 treemple temp;
272
273 switch (gimple_code (stmt))
274 {
275 case GIMPLE_LABEL:
276 temp.t = gimple_label_label (stmt);
277 record_in_finally_tree (temp, region);
278 break;
279
280 case GIMPLE_TRY:
281 if (gimple_try_kind (stmt) == GIMPLE_TRY_FINALLY)
282 {
283 temp.g = stmt;
284 record_in_finally_tree (temp, region);
285 collect_finally_tree_1 (gimple_try_eval (stmt), stmt);
286 collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
287 }
288 else if (gimple_try_kind (stmt) == GIMPLE_TRY_CATCH)
289 {
290 collect_finally_tree_1 (gimple_try_eval (stmt), region);
291 collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
292 }
293 break;
294
295 case GIMPLE_CATCH:
296 collect_finally_tree_1 (gimple_catch_handler (stmt), region);
297 break;
298
299 case GIMPLE_EH_FILTER:
300 collect_finally_tree_1 (gimple_eh_filter_failure (stmt), region);
301 break;
302
303 default:
304 /* A type, a decl, or some kind of statement that we're not
305 interested in. Don't walk them. */
306 break;
307 }
308 }
309
310
311 /* Use the finally tree to determine if a jump from START to TARGET
312 would leave the try_finally node that START lives in. */
313
314 static bool
315 outside_finally_tree (treemple start, gimple target)
316 {
317 struct finally_tree_node n, *p;
318
319 do
320 {
321 n.child = start;
322 p = (struct finally_tree_node *) htab_find (finally_tree, &n);
323 if (!p)
324 return true;
325 start.g = p->parent;
326 }
327 while (start.g != target);
328
329 return false;
330 }
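/* Illustrative example (not in the original source): given

     try {                               <- TARGET, a GIMPLE_TRY_FINALLY
       L1: ...
       goto L2;
     } finally {
       ...
     }
     L2: ...

   outside_finally_tree returns false for L1, whose recorded parent chain
   reaches TARGET, and true for L2, which is not recorded under TARGET at
   all; only the goto to L2 therefore needs to be routed through the
   finally block.  */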
331
332 /* Second pass of EH node decomposition. Actually transform the GIMPLE_TRY
333 nodes into a set of gotos, magic labels, and eh regions.
334 The eh region creation is straight-forward, but frobbing all the gotos
335 and such into shape isn't. */
336
337 /* The sequence into which we record all EH stuff. This will be
338 placed at the end of the function when we're all done. */
339 static gimple_seq eh_seq;
340
341 /* Record whether an EH region contains something that can throw,
342 indexed by EH region number. */
343 static bitmap eh_region_may_contain_throw;
344
345 /* The GOTO_QUEUE is an array of GIMPLE_GOTO and GIMPLE_RETURN
346 statements that are seen to escape this GIMPLE_TRY_FINALLY node.
347 The idea is to record a gimple statement for everything except for
348 the conditionals, which get their labels recorded. Since labels are
349 of type 'tree', we need this node to store both gimple and tree
350 objects. REPL_STMT is the sequence used to replace the goto/return
351 statement. CONT_STMT is used to store the statement that allows
352 the return/goto to jump to the original destination. */
353
354 struct goto_queue_node
355 {
356 treemple stmt;
357 gimple_seq repl_stmt;
358 gimple cont_stmt;
359 int index;
360 /* This is used when index >= 0 to indicate that stmt is a label (as
361 opposed to a goto stmt). */
362 int is_label;
363 };
364
365 /* State of the world while lowering. */
366
367 struct leh_state
368 {
369 /* What's "current" while constructing the eh region tree. These
370 correspond to variables of the same name in cfun->eh, which we
371 don't have easy access to. */
372 eh_region cur_region;
373
374 /* What's "current" for the purposes of __builtin_eh_pointer. For
375 a CATCH, this is the associated TRY. For an EH_FILTER, this is
376 the associated ALLOWED_EXCEPTIONS, etc. */
377 eh_region ehp_region;
378
379 /* Processing of TRY_FINALLY requires a bit more state. This is
380 split out into a separate structure so that we don't have to
381 copy so much when processing other nodes. */
382 struct leh_tf_state *tf;
383 };
384
385 struct leh_tf_state
386 {
387 /* Pointer to the GIMPLE_TRY_FINALLY node under discussion. The
388 try_finally_expr is the original GIMPLE_TRY_FINALLY. We need to retain
389 this so that outside_finally_tree can reliably reference the tree used
390 in the collect_finally_tree data structures. */
391 gimple try_finally_expr;
392 gimple top_p;
393
394 /* While lowering a top_p, it is usually expanded into multiple statements;
395 thus we need the following field to store them. */
396 gimple_seq top_p_seq;
397
398 /* The state outside this try_finally node. */
399 struct leh_state *outer;
400
401 /* The exception region created for it. */
402 eh_region region;
403
404 /* The goto queue. */
405 struct goto_queue_node *goto_queue;
406 size_t goto_queue_size;
407 size_t goto_queue_active;
408
409 /* Pointer map to help in searching goto_queue when it is large. */
410 struct pointer_map_t *goto_queue_map;
411
412 /* The set of unique labels seen as entries in the goto queue. */
413 VEC(tree,heap) *dest_array;
414
415 /* A label to be added at the end of the completed transformed
416 sequence. It will be set if may_fallthru was true *at one time*,
417 though subsequent transformations may have cleared that flag. */
418 tree fallthru_label;
419
420 /* True if it is possible to fall out the bottom of the try block.
421 Cleared if the fallthru is converted to a goto. */
422 bool may_fallthru;
423
424 /* True if any entry in goto_queue is a GIMPLE_RETURN. */
425 bool may_return;
426
427 /* True if the finally block can receive an exception edge.
428 Cleared if the exception case is handled by code duplication. */
429 bool may_throw;
430 };
431
432 static gimple_seq lower_eh_must_not_throw (struct leh_state *, gimple);
433
434 /* Search for STMT in the goto queue. Return the replacement,
435 or null if the statement isn't in the queue. */
436
437 #define LARGE_GOTO_QUEUE 20
438
439 static void lower_eh_constructs_1 (struct leh_state *state, gimple_seq seq);
440
441 static gimple_seq
442 find_goto_replacement (struct leh_tf_state *tf, treemple stmt)
443 {
444 unsigned int i;
445 void **slot;
446
447 if (tf->goto_queue_active < LARGE_GOTO_QUEUE)
448 {
449 for (i = 0; i < tf->goto_queue_active; i++)
450 if ( tf->goto_queue[i].stmt.g == stmt.g)
451 return tf->goto_queue[i].repl_stmt;
452 return NULL;
453 }
454
455 /* If we have a large number of entries in the goto_queue, create a
456 pointer map and use that for searching. */
457
458 if (!tf->goto_queue_map)
459 {
460 tf->goto_queue_map = pointer_map_create ();
461 for (i = 0; i < tf->goto_queue_active; i++)
462 {
463 slot = pointer_map_insert (tf->goto_queue_map,
464 tf->goto_queue[i].stmt.g);
465 gcc_assert (*slot == NULL);
466 *slot = &tf->goto_queue[i];
467 }
468 }
469
470 slot = pointer_map_contains (tf->goto_queue_map, stmt.g);
471 if (slot != NULL)
472 return (((struct goto_queue_node *) *slot)->repl_stmt);
473
474 return NULL;
475 }
476
477 /* A subroutine of replace_goto_queue_1. Handles the sub-clauses of a
478 lowered GIMPLE_COND. If, by chance, the replacement is a simple goto,
479 then we can just splat it in, otherwise we add the new stmts immediately
480 after the GIMPLE_COND and redirect. */
481
482 static void
483 replace_goto_queue_cond_clause (tree *tp, struct leh_tf_state *tf,
484 gimple_stmt_iterator *gsi)
485 {
486 tree label;
487 gimple_seq new_seq;
488 treemple temp;
489 location_t loc = gimple_location (gsi_stmt (*gsi));
490
491 temp.tp = tp;
492 new_seq = find_goto_replacement (tf, temp);
493 if (!new_seq)
494 return;
495
496 if (gimple_seq_singleton_p (new_seq)
497 && gimple_code (gimple_seq_first_stmt (new_seq)) == GIMPLE_GOTO)
498 {
499 *tp = gimple_goto_dest (gimple_seq_first_stmt (new_seq));
500 return;
501 }
502
503 label = create_artificial_label (loc);
504 /* Set the new label for the GIMPLE_COND */
505 *tp = label;
506
507 gsi_insert_after (gsi, gimple_build_label (label), GSI_CONTINUE_LINKING);
508 gsi_insert_seq_after (gsi, gimple_seq_copy (new_seq), GSI_CONTINUE_LINKING);
509 }
510
511 /* The real work of replace_goto_queue. Returns with GSI updated to
512 point to the next statement. */
513
514 static void replace_goto_queue_stmt_list (gimple_seq, struct leh_tf_state *);
515
516 static void
517 replace_goto_queue_1 (gimple stmt, struct leh_tf_state *tf,
518 gimple_stmt_iterator *gsi)
519 {
520 gimple_seq seq;
521 treemple temp;
522 temp.g = NULL;
523
524 switch (gimple_code (stmt))
525 {
526 case GIMPLE_GOTO:
527 case GIMPLE_RETURN:
528 temp.g = stmt;
529 seq = find_goto_replacement (tf, temp);
530 if (seq)
531 {
532 gsi_insert_seq_before (gsi, gimple_seq_copy (seq), GSI_SAME_STMT);
533 gsi_remove (gsi, false);
534 return;
535 }
536 break;
537
538 case GIMPLE_COND:
539 replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 2), tf, gsi);
540 replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 3), tf, gsi);
541 break;
542
543 case GIMPLE_TRY:
544 replace_goto_queue_stmt_list (gimple_try_eval (stmt), tf);
545 replace_goto_queue_stmt_list (gimple_try_cleanup (stmt), tf);
546 break;
547 case GIMPLE_CATCH:
548 replace_goto_queue_stmt_list (gimple_catch_handler (stmt), tf);
549 break;
550 case GIMPLE_EH_FILTER:
551 replace_goto_queue_stmt_list (gimple_eh_filter_failure (stmt), tf);
552 break;
553
554 default:
555 /* These won't have gotos in them. */
556 break;
557 }
558
559 gsi_next (gsi);
560 }
561
562 /* A subroutine of replace_goto_queue. Handles GIMPLE_SEQ. */
563
564 static void
565 replace_goto_queue_stmt_list (gimple_seq seq, struct leh_tf_state *tf)
566 {
567 gimple_stmt_iterator gsi = gsi_start (seq);
568
569 while (!gsi_end_p (gsi))
570 replace_goto_queue_1 (gsi_stmt (gsi), tf, &gsi);
571 }
572
573 /* Replace all goto queue members. */
574
575 static void
576 replace_goto_queue (struct leh_tf_state *tf)
577 {
578 if (tf->goto_queue_active == 0)
579 return;
580 replace_goto_queue_stmt_list (tf->top_p_seq, tf);
581 }
582
583 /* Add a new record to the goto queue contained in TF. NEW_STMT is the
584 data to be added, IS_LABEL indicates whether NEW_STMT is a label or
585 a gimple return. */
586
587 static void
588 record_in_goto_queue (struct leh_tf_state *tf,
589 treemple new_stmt,
590 int index,
591 bool is_label)
592 {
593 size_t active, size;
594 struct goto_queue_node *q;
595
596 gcc_assert (!tf->goto_queue_map);
597
598 active = tf->goto_queue_active;
599 size = tf->goto_queue_size;
600 if (active >= size)
601 {
602 size = (size ? size * 2 : 32);
603 tf->goto_queue_size = size;
604 tf->goto_queue
605 = XRESIZEVEC (struct goto_queue_node, tf->goto_queue, size);
606 }
607
608 q = &tf->goto_queue[active];
609 tf->goto_queue_active = active + 1;
610
611 memset (q, 0, sizeof (*q));
612 q->stmt = new_stmt;
613 q->index = index;
614 q->is_label = is_label;
615 }
616
617 /* Record the LABEL label in the goto queue contained in TF.
618 TF is not null. */
619
620 static void
621 record_in_goto_queue_label (struct leh_tf_state *tf, treemple stmt, tree label)
622 {
623 int index;
624 treemple temp, new_stmt;
625
626 if (!label)
627 return;
628
629 /* Computed and non-local gotos do not get processed. Given
630 their nature we can neither tell whether we've escaped the
631 finally block nor redirect them if we knew. */
632 if (TREE_CODE (label) != LABEL_DECL)
633 return;
634
635 /* No need to record gotos that don't leave the try block. */
636 temp.t = label;
637 if (!outside_finally_tree (temp, tf->try_finally_expr))
638 return;
639
640 if (! tf->dest_array)
641 {
642 tf->dest_array = VEC_alloc (tree, heap, 10);
643 VEC_quick_push (tree, tf->dest_array, label);
644 index = 0;
645 }
646 else
647 {
648 int n = VEC_length (tree, tf->dest_array);
649 for (index = 0; index < n; ++index)
650 if (VEC_index (tree, tf->dest_array, index) == label)
651 break;
652 if (index == n)
653 VEC_safe_push (tree, heap, tf->dest_array, label);
654 }
655
656 /* In the case of a GOTO we want to record the destination label,
657 since with a GIMPLE_COND we have easy access to the then/else
658 labels. */
659 new_stmt = stmt;
660 record_in_goto_queue (tf, new_stmt, index, true);
661
662 }
663
664 /* For any GIMPLE_GOTO or GIMPLE_RETURN, decide whether it leaves a try_finally
665 node, and if so record that fact in the goto queue associated with that
666 try_finally node. */
667
668 static void
669 maybe_record_in_goto_queue (struct leh_state *state, gimple stmt)
670 {
671 struct leh_tf_state *tf = state->tf;
672 treemple new_stmt;
673
674 if (!tf)
675 return;
676
677 switch (gimple_code (stmt))
678 {
679 case GIMPLE_COND:
680 new_stmt.tp = gimple_op_ptr (stmt, 2);
681 record_in_goto_queue_label (tf, new_stmt, gimple_cond_true_label (stmt));
682 new_stmt.tp = gimple_op_ptr (stmt, 3);
683 record_in_goto_queue_label (tf, new_stmt, gimple_cond_false_label (stmt));
684 break;
685 case GIMPLE_GOTO:
686 new_stmt.g = stmt;
687 record_in_goto_queue_label (tf, new_stmt, gimple_goto_dest (stmt));
688 break;
689
690 case GIMPLE_RETURN:
691 tf->may_return = true;
692 new_stmt.g = stmt;
693 record_in_goto_queue (tf, new_stmt, -1, false);
694 break;
695
696 default:
697 gcc_unreachable ();
698 }
699 }
700
701
702 #ifdef ENABLE_CHECKING
703 /* We do not process GIMPLE_SWITCHes for now. As long as the original source
704 was in fact structured, and we've not yet done jump threading, then none
705 of the labels will leave outer GIMPLE_TRY_FINALLY nodes. Verify this. */
706
707 static void
708 verify_norecord_switch_expr (struct leh_state *state, gimple switch_expr)
709 {
710 struct leh_tf_state *tf = state->tf;
711 size_t i, n;
712
713 if (!tf)
714 return;
715
716 n = gimple_switch_num_labels (switch_expr);
717
718 for (i = 0; i < n; ++i)
719 {
720 treemple temp;
721 tree lab = CASE_LABEL (gimple_switch_label (switch_expr, i));
722 temp.t = lab;
723 gcc_assert (!outside_finally_tree (temp, tf->try_finally_expr));
724 }
725 }
726 #else
727 #define verify_norecord_switch_expr(state, switch_expr)
728 #endif
729
730 /* Redirect a RETURN_EXPR pointed to by queue node Q to FINLAB. Place in
731 Q->CONT_STMT whatever is needed to finish the return. If MOD is non-null, insert it
732 before the new branch. RETURN_VALUE_P is a cache containing a temporary
733 variable to be used in manipulating the value returned from the function. */
734
735 static void
736 do_return_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod,
737 tree *return_value_p)
738 {
739 tree ret_expr;
740 gimple x;
741
742 /* In the case of a return, the queue node must be a gimple statement. */
743 gcc_assert (!q->is_label);
744
745 ret_expr = gimple_return_retval (q->stmt.g);
746
747 if (ret_expr)
748 {
749 if (!*return_value_p)
750 *return_value_p = ret_expr;
751 else
752 gcc_assert (*return_value_p == ret_expr);
753 q->cont_stmt = q->stmt.g;
754 /* The nasty part about redirecting the return value is that the
755 return value itself is to be computed before the FINALLY block
756 is executed. e.g.
757
758 int x;
759 int foo (void)
760 {
761 x = 0;
762 try {
763 return x;
764 } finally {
765 x++;
766 }
767 }
768
769 should return 0, not 1. Arrange for this to happen by copying
770 the computed return value into a local temporary. This also
771 allows us to redirect multiple return statements through the
772 same destination block; whether this is a net win or not really
773 depends, I guess, but it does make generation of the switch in
774 lower_try_finally_switch easier. */
775
776 if (TREE_CODE (ret_expr) == RESULT_DECL)
777 {
778 if (!*return_value_p)
779 *return_value_p = ret_expr;
780 else
781 gcc_assert (*return_value_p == ret_expr);
782 q->cont_stmt = q->stmt.g;
783 }
784 else
785 gcc_unreachable ();
786 }
787 else
788 /* If we don't return a value, all return statements are the same. */
789 q->cont_stmt = q->stmt.g;
790
791 if (!q->repl_stmt)
792 q->repl_stmt = gimple_seq_alloc ();
793
794 if (mod)
795 gimple_seq_add_seq (&q->repl_stmt, mod);
796
797 x = gimple_build_goto (finlab);
798 gimple_seq_add_stmt (&q->repl_stmt, x);
799 }
800
801 /* Similar, but easier, for GIMPLE_GOTO. */
802
803 static void
804 do_goto_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod,
805 struct leh_tf_state *tf)
806 {
807 gimple x;
808
809 gcc_assert (q->is_label);
810 if (!q->repl_stmt)
811 q->repl_stmt = gimple_seq_alloc ();
812
813 q->cont_stmt = gimple_build_goto (VEC_index (tree, tf->dest_array, q->index));
814
815 if (mod)
816 gimple_seq_add_seq (&q->repl_stmt, mod);
817
818 x = gimple_build_goto (finlab);
819 gimple_seq_add_stmt (&q->repl_stmt, x);
820 }
821
822 /* Emit a standard landing pad sequence into SEQ for REGION. */
823
824 static void
825 emit_post_landing_pad (gimple_seq *seq, eh_region region)
826 {
827 eh_landing_pad lp = region->landing_pads;
828 gimple x;
829
830 if (lp == NULL)
831 lp = gen_eh_landing_pad (region);
832
833 lp->post_landing_pad = create_artificial_label (UNKNOWN_LOCATION);
834 EH_LANDING_PAD_NR (lp->post_landing_pad) = lp->index;
835
836 x = gimple_build_label (lp->post_landing_pad);
837 gimple_seq_add_stmt (seq, x);
838 }
839
840 /* Emit a RESX statement into SEQ for REGION. */
841
842 static void
843 emit_resx (gimple_seq *seq, eh_region region)
844 {
845 gimple x = gimple_build_resx (region->index);
846 gimple_seq_add_stmt (seq, x);
847 if (region->outer)
848 record_stmt_eh_region (region->outer, x);
849 }
850
851 /* Emit an EH_DISPATCH statement into SEQ for REGION. */
852
853 static void
854 emit_eh_dispatch (gimple_seq *seq, eh_region region)
855 {
856 gimple x = gimple_build_eh_dispatch (region->index);
857 gimple_seq_add_stmt (seq, x);
858 }
859
860 /* Note that the current EH region may contain a throw, or a
861 call to a function which itself may contain a throw. */
862
863 static void
864 note_eh_region_may_contain_throw (eh_region region)
865 {
866 while (!bitmap_bit_p (eh_region_may_contain_throw, region->index))
867 {
868 bitmap_set_bit (eh_region_may_contain_throw, region->index);
869 region = region->outer;
870 if (region == NULL)
871 break;
872 }
873 }
874
875 /* We want to transform
876 try { body; } catch { stuff; }
877 to
878 normal_sequence:
879 body;
880 over:
881 eh_sequence:
882 landing_pad:
883 stuff;
884 goto over;
885
886 TP is a GIMPLE_TRY node. REGION is the region whose post_landing_pad
887 should be placed before the second operand, or NULL. OVER is
888 an existing label that should be put at the exit, or NULL. */
889
890 static gimple_seq
891 frob_into_branch_around (gimple tp, eh_region region, tree over)
892 {
893 gimple x;
894 gimple_seq cleanup, result;
895 location_t loc = gimple_location (tp);
896
897 cleanup = gimple_try_cleanup (tp);
898 result = gimple_try_eval (tp);
899
900 if (region)
901 emit_post_landing_pad (&eh_seq, region);
902
903 if (gimple_seq_may_fallthru (cleanup))
904 {
905 if (!over)
906 over = create_artificial_label (loc);
907 x = gimple_build_goto (over);
908 gimple_seq_add_stmt (&cleanup, x);
909 }
910 gimple_seq_add_seq (&eh_seq, cleanup);
911
912 if (over)
913 {
914 x = gimple_build_label (over);
915 gimple_seq_add_stmt (&result, x);
916 }
917 return result;
918 }
919
920 /* A subroutine of lower_try_finally. Duplicate the sequence SEQ.
921 Make sure to record all new labels found. */
922
923 static gimple_seq
924 lower_try_finally_dup_block (gimple_seq seq, struct leh_state *outer_state)
925 {
926 gimple region = NULL;
927 gimple_seq new_seq;
928
929 new_seq = copy_gimple_seq_and_replace_locals (seq);
930
931 if (outer_state->tf)
932 region = outer_state->tf->try_finally_expr;
933 collect_finally_tree_1 (new_seq, region);
934
935 return new_seq;
936 }
937
938 /* A subroutine of lower_try_finally. Create a fallthru label for
939 the given try_finally state. The only tricky bit here is that
940 we have to make sure to record the label in our outer context. */
941
942 static tree
943 lower_try_finally_fallthru_label (struct leh_tf_state *tf)
944 {
945 tree label = tf->fallthru_label;
946 treemple temp;
947
948 if (!label)
949 {
950 label = create_artificial_label (gimple_location (tf->try_finally_expr));
951 tf->fallthru_label = label;
952 if (tf->outer->tf)
953 {
954 temp.t = label;
955 record_in_finally_tree (temp, tf->outer->tf->try_finally_expr);
956 }
957 }
958 return label;
959 }
960
961 /* A subroutine of lower_try_finally. If lang_protect_cleanup_actions
962 returns non-null, then the language requires that the exception path out
963 of a try_finally be treated specially. To wit: the code within the
964 finally block may not itself throw an exception. We have two choices here.
965 First we can duplicate the finally block and wrap it in a must_not_throw
966 region. Second, we can generate code like
967
968 try {
969 finally_block;
970 } catch {
971 if (fintmp == eh_edge)
972 protect_cleanup_actions;
973 }
974
975 where "fintmp" is the temporary used in the switch statement generation
976 alternative considered below. For the nonce, we always choose the first
977 option.
978
979 THIS_STATE may be null if this is a try-cleanup, not a try-finally. */
980
981 static void
982 honor_protect_cleanup_actions (struct leh_state *outer_state,
983 struct leh_state *this_state,
984 struct leh_tf_state *tf)
985 {
986 tree protect_cleanup_actions;
987 gimple_stmt_iterator gsi;
988 bool finally_may_fallthru;
989 gimple_seq finally;
990 gimple x;
991
992 /* First check for nothing to do. */
993 if (lang_protect_cleanup_actions == NULL)
994 return;
995 protect_cleanup_actions = lang_protect_cleanup_actions ();
996 if (protect_cleanup_actions == NULL)
997 return;
998
999 finally = gimple_try_cleanup (tf->top_p);
1000 finally_may_fallthru = gimple_seq_may_fallthru (finally);
1001
1002 /* Duplicate the FINALLY block. Only need to do this for try-finally,
1003 and not for cleanups. */
1004 if (this_state)
1005 finally = lower_try_finally_dup_block (finally, outer_state);
1006
1007 /* If this cleanup consists of a TRY_CATCH_EXPR with TRY_CATCH_IS_CLEANUP
1008 set, the handler of the TRY_CATCH_EXPR is another cleanup which ought
1009 to be in an enclosing scope, but needs to be implemented at this level
1010 to avoid a nesting violation (see wrap_temporary_cleanups in
1011 cp/decl.c). Since it's logically at an outer level, we should call
1012 terminate before we get to it, so strip it away before adding the
1013 MUST_NOT_THROW filter. */
1014 gsi = gsi_start (finally);
1015 x = gsi_stmt (gsi);
1016 if (gimple_code (x) == GIMPLE_TRY
1017 && gimple_try_kind (x) == GIMPLE_TRY_CATCH
1018 && gimple_try_catch_is_cleanup (x))
1019 {
1020 gsi_insert_seq_before (&gsi, gimple_try_eval (x), GSI_SAME_STMT);
1021 gsi_remove (&gsi, false);
1022 }
1023
1024 /* Wrap the block with protect_cleanup_actions as the action. */
1025 x = gimple_build_eh_must_not_throw (protect_cleanup_actions);
1026 x = gimple_build_try (finally, gimple_seq_alloc_with_stmt (x),
1027 GIMPLE_TRY_CATCH);
1028 finally = lower_eh_must_not_throw (outer_state, x);
1029
1030 /* Drop all of this into the exception sequence. */
1031 emit_post_landing_pad (&eh_seq, tf->region);
1032 gimple_seq_add_seq (&eh_seq, finally);
1033 if (finally_may_fallthru)
1034 emit_resx (&eh_seq, tf->region);
1035
1036 /* Having now been handled, EH isn't to be considered with
1037 the rest of the outgoing edges. */
1038 tf->may_throw = false;
1039 }
1040
1041 /* A subroutine of lower_try_finally. We have determined that there is
1042 no fallthru edge out of the finally block. This means that there is
1043 no outgoing edge corresponding to any incoming edge. Restructure the
1044 try_finally node for this special case. */
1045
1046 static void
1047 lower_try_finally_nofallthru (struct leh_state *state,
1048 struct leh_tf_state *tf)
1049 {
1050 tree lab, return_val;
1051 gimple x;
1052 gimple_seq finally;
1053 struct goto_queue_node *q, *qe;
1054
1055 lab = create_artificial_label (gimple_location (tf->try_finally_expr));
1056
1057 /* We expect that tf->top_p is a GIMPLE_TRY. */
1058 finally = gimple_try_cleanup (tf->top_p);
1059 tf->top_p_seq = gimple_try_eval (tf->top_p);
1060
1061 x = gimple_build_label (lab);
1062 gimple_seq_add_stmt (&tf->top_p_seq, x);
1063
1064 return_val = NULL;
1065 q = tf->goto_queue;
1066 qe = q + tf->goto_queue_active;
1067 for (; q < qe; ++q)
1068 if (q->index < 0)
1069 do_return_redirection (q, lab, NULL, &return_val);
1070 else
1071 do_goto_redirection (q, lab, NULL, tf);
1072
1073 replace_goto_queue (tf);
1074
1075 lower_eh_constructs_1 (state, finally);
1076 gimple_seq_add_seq (&tf->top_p_seq, finally);
1077
1078 if (tf->may_throw)
1079 {
1080 emit_post_landing_pad (&eh_seq, tf->region);
1081
1082 x = gimple_build_goto (lab);
1083 gimple_seq_add_stmt (&eh_seq, x);
1084 }
1085 }
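/* Sketch of lower_try_finally_nofallthru's output (illustrative only; LAB
   is the artificial label created above):

     <TRY body, with each goto/return redirected to LAB>
     LAB:
     <FINALLY block, which by assumption does not fall through>

   and, when the region may throw, in eh_seq:

     post_landing_pad:  goto LAB;  */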
1086
1087 /* A subroutine of lower_try_finally. We have determined that there is
1088 exactly one destination of the finally block. Restructure the
1089 try_finally node for this special case. */
1090
1091 static void
1092 lower_try_finally_onedest (struct leh_state *state, struct leh_tf_state *tf)
1093 {
1094 struct goto_queue_node *q, *qe;
1095 gimple x;
1096 gimple_seq finally;
1097 tree finally_label;
1098 location_t loc = gimple_location (tf->try_finally_expr);
1099
1100 finally = gimple_try_cleanup (tf->top_p);
1101 tf->top_p_seq = gimple_try_eval (tf->top_p);
1102
1103 lower_eh_constructs_1 (state, finally);
1104
1105 if (tf->may_throw)
1106 {
1107 /* Only reachable via the exception edge. Add the given label to
1108 the head of the FINALLY block. Append a RESX at the end. */
1109 emit_post_landing_pad (&eh_seq, tf->region);
1110 gimple_seq_add_seq (&eh_seq, finally);
1111 emit_resx (&eh_seq, tf->region);
1112 return;
1113 }
1114
1115 if (tf->may_fallthru)
1116 {
1117 /* Only reachable via the fallthru edge. Do nothing but let
1118 the two blocks run together; we'll fall out the bottom. */
1119 gimple_seq_add_seq (&tf->top_p_seq, finally);
1120 return;
1121 }
1122
1123 finally_label = create_artificial_label (loc);
1124 x = gimple_build_label (finally_label);
1125 gimple_seq_add_stmt (&tf->top_p_seq, x);
1126
1127 gimple_seq_add_seq (&tf->top_p_seq, finally);
1128
1129 q = tf->goto_queue;
1130 qe = q + tf->goto_queue_active;
1131
1132 if (tf->may_return)
1133 {
1134 /* Reachable by return expressions only. Redirect them. */
1135 tree return_val = NULL;
1136 for (; q < qe; ++q)
1137 do_return_redirection (q, finally_label, NULL, &return_val);
1138 replace_goto_queue (tf);
1139 }
1140 else
1141 {
1142 /* Reachable by goto expressions only. Redirect them. */
1143 for (; q < qe; ++q)
1144 do_goto_redirection (q, finally_label, NULL, tf);
1145 replace_goto_queue (tf);
1146
1147 if (VEC_index (tree, tf->dest_array, 0) == tf->fallthru_label)
1148 {
1149 /* Reachable by goto to fallthru label only. Redirect it
1150 to the new label (already created, sadly), and do not
1151 emit the final branch out, or the fallthru label. */
1152 tf->fallthru_label = NULL;
1153 return;
1154 }
1155 }
1156
1157 /* Place the original return/goto to the original destination
1158 immediately after the finally block. */
1159 x = tf->goto_queue[0].cont_stmt;
1160 gimple_seq_add_stmt (&tf->top_p_seq, x);
1161 maybe_record_in_goto_queue (state, x);
1162 }
1163
1164 /* A subroutine of lower_try_finally. There are multiple edges incoming
1165 and outgoing from the finally block. Implement this by duplicating the
1166 finally block for every destination. */
1167
1168 static void
1169 lower_try_finally_copy (struct leh_state *state, struct leh_tf_state *tf)
1170 {
1171 gimple_seq finally;
1172 gimple_seq new_stmt;
1173 gimple_seq seq;
1174 gimple x;
1175 tree tmp;
1176 location_t tf_loc = gimple_location (tf->try_finally_expr);
1177
1178 finally = gimple_try_cleanup (tf->top_p);
1179 tf->top_p_seq = gimple_try_eval (tf->top_p);
1180 new_stmt = NULL;
1181
1182 if (tf->may_fallthru)
1183 {
1184 seq = lower_try_finally_dup_block (finally, state);
1185 lower_eh_constructs_1 (state, seq);
1186 gimple_seq_add_seq (&new_stmt, seq);
1187
1188 tmp = lower_try_finally_fallthru_label (tf);
1189 x = gimple_build_goto (tmp);
1190 gimple_seq_add_stmt (&new_stmt, x);
1191 }
1192
1193 if (tf->may_throw)
1194 {
1195 emit_post_landing_pad (&eh_seq, tf->region);
1196
1197 seq = lower_try_finally_dup_block (finally, state);
1198 lower_eh_constructs_1 (state, seq);
1199 gimple_seq_add_seq (&eh_seq, seq);
1200
1201 emit_resx (&eh_seq, tf->region);
1202 }
1203
1204 if (tf->goto_queue)
1205 {
1206 struct goto_queue_node *q, *qe;
1207 tree return_val = NULL;
1208 int return_index, index;
1209 struct labels_s
1210 {
1211 struct goto_queue_node *q;
1212 tree label;
1213 } *labels;
1214
1215 return_index = VEC_length (tree, tf->dest_array);
1216 labels = XCNEWVEC (struct labels_s, return_index + 1);
1217
1218 q = tf->goto_queue;
1219 qe = q + tf->goto_queue_active;
1220 for (; q < qe; q++)
1221 {
1222 index = q->index < 0 ? return_index : q->index;
1223
1224 if (!labels[index].q)
1225 labels[index].q = q;
1226 }
1227
1228 for (index = 0; index < return_index + 1; index++)
1229 {
1230 tree lab;
1231
1232 q = labels[index].q;
1233 if (! q)
1234 continue;
1235
1236 lab = labels[index].label
1237 = create_artificial_label (tf_loc);
1238
1239 if (index == return_index)
1240 do_return_redirection (q, lab, NULL, &return_val);
1241 else
1242 do_goto_redirection (q, lab, NULL, tf);
1243
1244 x = gimple_build_label (lab);
1245 gimple_seq_add_stmt (&new_stmt, x);
1246
1247 seq = lower_try_finally_dup_block (finally, state);
1248 lower_eh_constructs_1 (state, seq);
1249 gimple_seq_add_seq (&new_stmt, seq);
1250
1251 gimple_seq_add_stmt (&new_stmt, q->cont_stmt);
1252 maybe_record_in_goto_queue (state, q->cont_stmt);
1253 }
1254
1255 for (q = tf->goto_queue; q < qe; q++)
1256 {
1257 tree lab;
1258
1259 index = q->index < 0 ? return_index : q->index;
1260
1261 if (labels[index].q == q)
1262 continue;
1263
1264 lab = labels[index].label;
1265
1266 if (index == return_index)
1267 do_return_redirection (q, lab, NULL, &return_val);
1268 else
1269 do_goto_redirection (q, lab, NULL, tf);
1270 }
1271
1272 replace_goto_queue (tf);
1273 free (labels);
1274 }
1275
1276 /* Need to link new stmts after running replace_goto_queue due
1277 to not wanting to process the same goto stmts twice. */
1278 gimple_seq_add_seq (&tf->top_p_seq, new_stmt);
1279 }
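/* Sketch of the shape lower_try_finally_copy produces (illustrative only;
   label names invented):

     <TRY body, goto/return sites redirected to per-destination labels>
     <copy of FINALLY>   goto fallthru_label;       (if may_fallthru)
     L_d0: <copy of FINALLY>  <original goto/return for destination 0>
     L_d1: <copy of FINALLY>  <original goto/return for destination 1>
     ...

   and, on the exception path in eh_seq (if may_throw):

     post_landing_pad:  <copy of FINALLY>  resx;  */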
1280
1281 /* A subroutine of lower_try_finally. There are multiple edges incoming
1282 and outgoing from the finally block. Implement this by instrumenting
1283 each incoming edge and creating a switch statement at the end of the
1284 finally block that branches to the appropriate destination. */
1285
1286 static void
1287 lower_try_finally_switch (struct leh_state *state, struct leh_tf_state *tf)
1288 {
1289 struct goto_queue_node *q, *qe;
1290 tree return_val = NULL;
1291 tree finally_tmp, finally_label;
1292 int return_index, eh_index, fallthru_index;
1293 int nlabels, ndests, j, last_case_index;
1294 tree last_case;
1295 VEC (tree,heap) *case_label_vec;
1296 gimple_seq switch_body;
1297 gimple x;
1298 tree tmp;
1299 gimple switch_stmt;
1300 gimple_seq finally;
1301 struct pointer_map_t *cont_map = NULL;
1302 /* The location of the TRY_FINALLY stmt. */
1303 location_t tf_loc = gimple_location (tf->try_finally_expr);
1304 /* The location of the finally block. */
1305 location_t finally_loc;
1306
1307 switch_body = gimple_seq_alloc ();
1308
1309 /* Mash the TRY block to the head of the chain. */
1310 finally = gimple_try_cleanup (tf->top_p);
1311 tf->top_p_seq = gimple_try_eval (tf->top_p);
1312
1313 /* The location of the finally is either the last stmt in the finally
1314 block or the location of the TRY_FINALLY itself. */
1315 finally_loc = gimple_seq_last_stmt (tf->top_p_seq) != NULL ?
1316 gimple_location (gimple_seq_last_stmt (tf->top_p_seq))
1317 : tf_loc;
1318
1319 /* Lower the finally block itself. */
1320 lower_eh_constructs_1 (state, finally);
1321
1322 /* Prepare for switch statement generation. */
1323 nlabels = VEC_length (tree, tf->dest_array);
1324 return_index = nlabels;
1325 eh_index = return_index + tf->may_return;
1326 fallthru_index = eh_index + tf->may_throw;
1327 ndests = fallthru_index + tf->may_fallthru;
1328
1329 finally_tmp = create_tmp_var (integer_type_node, "finally_tmp");
1330 finally_label = create_artificial_label (finally_loc);
1331
1332 /* We use VEC_quick_push on case_label_vec throughout this function,
1333 since we know the size in advance and allocate precisely as much
1334 space as needed. */
1335 case_label_vec = VEC_alloc (tree, heap, ndests);
1336 last_case = NULL;
1337 last_case_index = 0;
1338
1339 /* Begin inserting code for getting to the finally block. Things
1340 are done in this order to correspond to the sequence in which the code
1341 is laid out. */
1342
1343 if (tf->may_fallthru)
1344 {
1345 x = gimple_build_assign (finally_tmp,
1346 build_int_cst (NULL, fallthru_index));
1347 gimple_seq_add_stmt (&tf->top_p_seq, x);
1348
1349 last_case = build3 (CASE_LABEL_EXPR, void_type_node,
1350 build_int_cst (NULL, fallthru_index),
1351 NULL, create_artificial_label (tf_loc));
1352 VEC_quick_push (tree, case_label_vec, last_case);
1353 last_case_index++;
1354
1355 x = gimple_build_label (CASE_LABEL (last_case));
1356 gimple_seq_add_stmt (&switch_body, x);
1357
1358 tmp = lower_try_finally_fallthru_label (tf);
1359 x = gimple_build_goto (tmp);
1360 gimple_seq_add_stmt (&switch_body, x);
1361 }
1362
1363 if (tf->may_throw)
1364 {
1365 emit_post_landing_pad (&eh_seq, tf->region);
1366
1367 x = gimple_build_assign (finally_tmp,
1368 build_int_cst (NULL, eh_index));
1369 gimple_seq_add_stmt (&eh_seq, x);
1370
1371 x = gimple_build_goto (finally_label);
1372 gimple_seq_add_stmt (&eh_seq, x);
1373
1374 last_case = build3 (CASE_LABEL_EXPR, void_type_node,
1375 build_int_cst (NULL, eh_index),
1376 NULL, create_artificial_label (tf_loc));
1377 VEC_quick_push (tree, case_label_vec, last_case);
1378 last_case_index++;
1379
1380 x = gimple_build_label (CASE_LABEL (last_case));
1381 gimple_seq_add_stmt (&eh_seq, x);
1382 emit_resx (&eh_seq, tf->region);
1383 }
1384
1385 x = gimple_build_label (finally_label);
1386 gimple_seq_add_stmt (&tf->top_p_seq, x);
1387
1388 gimple_seq_add_seq (&tf->top_p_seq, finally);
1389
1390 /* Redirect each incoming goto edge. */
1391 q = tf->goto_queue;
1392 qe = q + tf->goto_queue_active;
1393 j = last_case_index + tf->may_return;
1394 /* Prepare the assignments to finally_tmp that are executed upon the
1395 entrance through a particular edge. */
1396 for (; q < qe; ++q)
1397 {
1398 gimple_seq mod;
1399 int switch_id;
1400 unsigned int case_index;
1401
1402 mod = gimple_seq_alloc ();
1403
1404 if (q->index < 0)
1405 {
1406 x = gimple_build_assign (finally_tmp,
1407 build_int_cst (NULL, return_index));
1408 gimple_seq_add_stmt (&mod, x);
1409 do_return_redirection (q, finally_label, mod, &return_val);
1410 switch_id = return_index;
1411 }
1412 else
1413 {
1414 x = gimple_build_assign (finally_tmp,
1415 build_int_cst (NULL, q->index));
1416 gimple_seq_add_stmt (&mod, x);
1417 do_goto_redirection (q, finally_label, mod, tf);
1418 switch_id = q->index;
1419 }
1420
1421 case_index = j + q->index;
1422 if (VEC_length (tree, case_label_vec) <= case_index
1423 || !VEC_index (tree, case_label_vec, case_index))
1424 {
1425 tree case_lab;
1426 void **slot;
1427 case_lab = build3 (CASE_LABEL_EXPR, void_type_node,
1428 build_int_cst (NULL, switch_id),
1429 NULL, NULL);
1430 /* We store the cont_stmt in the pointer map, so that we can recover
1431 it in the loop below. We don't create the new label while
1432 walking the goto_queue because pointers don't offer a stable
1433 order. */
1434 if (!cont_map)
1435 cont_map = pointer_map_create ();
1436 slot = pointer_map_insert (cont_map, case_lab);
1437 *slot = q->cont_stmt;
1438 VEC_quick_push (tree, case_label_vec, case_lab);
1439 }
1440 }
1441 for (j = last_case_index; j < last_case_index + nlabels; j++)
1442 {
1443 tree label;
1444 gimple cont_stmt;
1445 void **slot;
1446
1447 last_case = VEC_index (tree, case_label_vec, j);
1448
1449 gcc_assert (last_case);
1450 gcc_assert (cont_map);
1451
1452 slot = pointer_map_contains (cont_map, last_case);
1453 /* As the comment above suggests, CASE_LABEL (last_case) was just a
1454 placeholder; it does not store an actual label yet. */
1455 gcc_assert (slot);
1456 cont_stmt = *(gimple *) slot;
1457
1458 label = create_artificial_label (tf_loc);
1459 CASE_LABEL (last_case) = label;
1460
1461 x = gimple_build_label (label);
1462 gimple_seq_add_stmt (&switch_body, x);
1463 gimple_seq_add_stmt (&switch_body, cont_stmt);
1464 maybe_record_in_goto_queue (state, cont_stmt);
1465 }
1466 if (cont_map)
1467 pointer_map_destroy (cont_map);
1468
1469 replace_goto_queue (tf);
1470
1471 /* Make sure that the last case is the default label, as one is required.
1472 Then sort the labels, which is also required in GIMPLE. */
1473 CASE_LOW (last_case) = NULL;
1474 sort_case_labels (case_label_vec);
1475
1476 /* Build the switch statement, setting last_case to be the default
1477 label. */
1478 switch_stmt = gimple_build_switch_vec (finally_tmp, last_case,
1479 case_label_vec);
1480 gimple_set_location (switch_stmt, finally_loc);
1481
1482 /* Need to link SWITCH_STMT after running replace_goto_queue
1483 due to not wanting to process the same goto stmts twice. */
1484 gimple_seq_add_stmt (&tf->top_p_seq, switch_stmt);
1485 gimple_seq_add_seq (&tf->top_p_seq, switch_body);
1486 }
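/* Sketch of lower_try_finally_switch's output (illustrative only; case
   numbers correspond to the indices computed above):

     <TRY body, each escaping edge rewritten as
      "finally_tmp = <case id>; goto finally_label;">
     finally_label:
     <FINALLY block, emitted exactly once>
     switch (finally_tmp)
       {
         case <fallthru id>:  goto fallthru_label;
         case <dest 0 id>:    <original goto/return for destination 0>
         ...
         default:             <last case>
       }

   Compared with lower_try_finally_copy, the finally block is shared at the
   cost of the temporary and the switch.  */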
1487
1488 /* Decide whether or not we are going to duplicate the finally block.
1489 There are several considerations.
1490
1491 First, if this is Java, then the finally block contains code
1492 written by the user. It has line numbers associated with it,
1493 so duplicating the block means it's difficult to set a breakpoint.
1494 Since controlling code generation via -g is verboten, we simply
1495 never duplicate code without optimization.
1496
1497 Second, we'd like to prevent egregious code growth. One way to
1498 do this is to estimate the size of the finally block, multiply
1499 that by the number of copies we'd need to make, and compare against
1500 the estimate of the size of the switch machinery we'd have to add. */
1501
1502 static bool
1503 decide_copy_try_finally (int ndests, gimple_seq finally)
1504 {
1505 int f_estimate, sw_estimate;
1506
1507 if (!optimize)
1508 return false;
1509
1510 /* Finally estimate N times, plus N gotos. */
1511 f_estimate = count_insns_seq (finally, &eni_size_weights);
1512 f_estimate = (f_estimate + 1) * ndests;
1513
1514 /* Switch statement (cost 10), N variable assignments, N gotos. */
1515 sw_estimate = 10 + 2 * ndests;
1516
1517 /* Optimize for size clearly wants our best guess. */
1518 if (optimize_function_for_size_p (cfun))
1519 return f_estimate < sw_estimate;
1520
1521 /* ??? These numbers are completely made up so far. */
1522 if (optimize > 1)
1523 return f_estimate < 100 || f_estimate < sw_estimate * 2;
1524 else
1525 return f_estimate < 40 || f_estimate * 2 < sw_estimate * 3;
1526 }
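/* Worked example of the heuristic above (illustrative numbers only): with
   ndests == 3 and a finally block whose size estimate is 10,
   f_estimate = (10 + 1) * 3 = 33 and sw_estimate = 10 + 2 * 3 = 16.
   When optimizing for size the switch form wins (33 >= 16), while at -O2
   the copies still win because f_estimate < 100.  */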
1527
1528
1529 /* A subroutine of lower_eh_constructs_1. Lower a GIMPLE_TRY_FINALLY node
1530 to a sequence of labels and blocks, plus the exception region trees
1531 that record all the magic. This is complicated by the need to
1532 arrange for the FINALLY block to be executed on all exits. */
1533
1534 static gimple_seq
1535 lower_try_finally (struct leh_state *state, gimple tp)
1536 {
1537 struct leh_tf_state this_tf;
1538 struct leh_state this_state;
1539 int ndests;
1540
1541 /* Process the try block. */
1542
1543 memset (&this_tf, 0, sizeof (this_tf));
1544 this_tf.try_finally_expr = tp;
1545 this_tf.top_p = tp;
1546 this_tf.outer = state;
1547 if (using_eh_for_cleanups_p)
1548 this_tf.region = gen_eh_region_cleanup (state->cur_region);
1549 else
1550 this_tf.region = NULL;
1551
1552 this_state.cur_region = this_tf.region;
1553 this_state.ehp_region = state->ehp_region;
1554 this_state.tf = &this_tf;
1555
1556 lower_eh_constructs_1 (&this_state, gimple_try_eval(tp));
1557
1558 /* Determine if the try block is escaped through the bottom. */
1559 this_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp));
1560
1561 /* Determine if any exceptions are possible within the try block. */
1562 if (using_eh_for_cleanups_p)
1563 this_tf.may_throw = bitmap_bit_p (eh_region_may_contain_throw,
1564 this_tf.region->index);
1565 if (this_tf.may_throw)
1566 honor_protect_cleanup_actions (state, &this_state, &this_tf);
1567
1568 /* Determine how many edges (still) reach the finally block. Or rather,
1569 how many destinations are reached by the finally block. Use this to
1570 determine how we process the finally block itself. */
1571
1572 ndests = VEC_length (tree, this_tf.dest_array);
1573 ndests += this_tf.may_fallthru;
1574 ndests += this_tf.may_return;
1575 ndests += this_tf.may_throw;
1576
1577 /* If the FINALLY block is not reachable, dike it out. */
1578 if (ndests == 0)
1579 {
1580 gimple_seq_add_seq (&this_tf.top_p_seq, gimple_try_eval (tp));
1581 gimple_try_set_cleanup (tp, NULL);
1582 }
1583 /* If the finally block doesn't fall through, then any destination
1584 we might try to impose there isn't reached either. There may be
1585 some minor amount of cleanup and redirection still needed. */
1586 else if (!gimple_seq_may_fallthru (gimple_try_cleanup (tp)))
1587 lower_try_finally_nofallthru (state, &this_tf);
1588
1589 /* We can easily special-case redirection to a single destination. */
1590 else if (ndests == 1)
1591 lower_try_finally_onedest (state, &this_tf);
1592 else if (decide_copy_try_finally (ndests, gimple_try_cleanup (tp)))
1593 lower_try_finally_copy (state, &this_tf);
1594 else
1595 lower_try_finally_switch (state, &this_tf);
1596
1597 /* If someone requested we add a label at the end of the transformed
1598 block, do so. */
1599 if (this_tf.fallthru_label)
1600 {
1601 /* This must be reached only if ndests == 0. */
1602 gimple x = gimple_build_label (this_tf.fallthru_label);
1603 gimple_seq_add_stmt (&this_tf.top_p_seq, x);
1604 }
1605
1606 VEC_free (tree, heap, this_tf.dest_array);
1607 if (this_tf.goto_queue)
1608 free (this_tf.goto_queue);
1609 if (this_tf.goto_queue_map)
1610 pointer_map_destroy (this_tf.goto_queue_map);
1611
1612 return this_tf.top_p_seq;
1613 }
1614
1615 /* A subroutine of lower_eh_constructs_1. Lower a GIMPLE_TRY_CATCH with a
1616 list of GIMPLE_CATCH to a sequence of labels and blocks, plus the
1617 exception region trees that record all the magic. */
1618
1619 static gimple_seq
1620 lower_catch (struct leh_state *state, gimple tp)
1621 {
1622 eh_region try_region;
1623 struct leh_state this_state;
1624 gimple_stmt_iterator gsi;
1625 tree out_label;
1626 gimple_seq new_seq;
1627 gimple x;
1628 location_t try_catch_loc = gimple_location (tp);
1629
1630 try_region = gen_eh_region_try (state->cur_region);
1631
1632 this_state = *state;
1633 this_state.cur_region = try_region;
1634
1635 lower_eh_constructs_1 (&this_state, gimple_try_eval (tp));
1636
1637 if (!bitmap_bit_p (eh_region_may_contain_throw, try_region->index))
1638 return gimple_try_eval (tp);
1639
1640 new_seq = NULL;
1641 emit_eh_dispatch (&new_seq, try_region);
1642 emit_resx (&new_seq, try_region);
1643
1644 this_state.cur_region = state->cur_region;
1645 this_state.ehp_region = try_region;
1646
1647 out_label = NULL;
1648 for (gsi = gsi_start (gimple_try_cleanup (tp));
1649 !gsi_end_p (gsi);
1650 gsi_next (&gsi))
1651 {
1652 eh_catch c;
1653 gimple gcatch;
1654 gimple_seq handler;
1655
1656 gcatch = gsi_stmt (gsi);
1657 c = gen_eh_region_catch (try_region, gimple_catch_types (gcatch));
1658
1659 handler = gimple_catch_handler (gcatch);
1660 lower_eh_constructs_1 (&this_state, handler);
1661
1662 c->label = create_artificial_label (UNKNOWN_LOCATION);
1663 x = gimple_build_label (c->label);
1664 gimple_seq_add_stmt (&new_seq, x);
1665
1666 gimple_seq_add_seq (&new_seq, handler);
1667
1668 if (gimple_seq_may_fallthru (new_seq))
1669 {
1670 if (!out_label)
1671 out_label = create_artificial_label (try_catch_loc);
1672
1673 x = gimple_build_goto (out_label);
1674 gimple_seq_add_stmt (&new_seq, x);
1675 }
1676 }
1677
1678 gimple_try_set_cleanup (tp, new_seq);
1679
1680 return frob_into_branch_around (tp, try_region, out_label);
1681 }
1682
1683 /* A subroutine of lower_eh_constructs_1. Lower a GIMPLE_TRY with a
1684 GIMPLE_EH_FILTER to a sequence of labels and blocks, plus the exception
1685 region trees that record all the magic. */
1686
1687 static gimple_seq
1688 lower_eh_filter (struct leh_state *state, gimple tp)
1689 {
1690 struct leh_state this_state;
1691 eh_region this_region;
1692 gimple inner, x;
1693 gimple_seq new_seq;
1694
1695 inner = gimple_seq_first_stmt (gimple_try_cleanup (tp));
1696
1697 this_region = gen_eh_region_allowed (state->cur_region,
1698 gimple_eh_filter_types (inner));
1699 this_state = *state;
1700 this_state.cur_region = this_region;
1701
1702 lower_eh_constructs_1 (&this_state, gimple_try_eval (tp));
1703
1704 if (!bitmap_bit_p (eh_region_may_contain_throw, this_region->index))
1705 return gimple_try_eval (tp);
1706
1707 new_seq = NULL;
1708 this_state.cur_region = state->cur_region;
1709 this_state.ehp_region = this_region;
1710
1711 emit_eh_dispatch (&new_seq, this_region);
1712 emit_resx (&new_seq, this_region);
1713
1714 this_region->u.allowed.label = create_artificial_label (UNKNOWN_LOCATION);
1715 x = gimple_build_label (this_region->u.allowed.label);
1716 gimple_seq_add_stmt (&new_seq, x);
1717
1718 lower_eh_constructs_1 (&this_state, gimple_eh_filter_failure (inner));
1719 gimple_seq_add_seq (&new_seq, gimple_eh_filter_failure (inner));
1720
1721 gimple_try_set_cleanup (tp, new_seq);
1722
1723 return frob_into_branch_around (tp, this_region, NULL);
1724 }
1725
1726 /* A subroutine of lower_eh_constructs_1. Lower a GIMPLE_TRY with
1727 a GIMPLE_EH_MUST_NOT_THROW to a sequence of labels and blocks,
1728 plus the exception region trees that record all the magic. */
1729
1730 static gimple_seq
1731 lower_eh_must_not_throw (struct leh_state *state, gimple tp)
1732 {
1733 struct leh_state this_state;
1734 eh_region this_region;
1735 gimple inner;
1736
1737 inner = gimple_seq_first_stmt (gimple_try_cleanup (tp));
1738
1739 this_region = gen_eh_region_must_not_throw (state->cur_region);
1740 this_region->u.must_not_throw.failure_decl
1741 = gimple_eh_must_not_throw_fndecl (inner);
1742 this_region->u.must_not_throw.failure_loc = gimple_location (tp);
1743
1744 /* In order to get mangling applied to this decl, we must mark it
1745 used now. Otherwise, pass_ipa_free_lang_data won't think it
1746 needs to happen. */
1747 TREE_USED (this_region->u.must_not_throw.failure_decl) = 1;
1748
1749 this_state = *state;
1750 this_state.cur_region = this_region;
1751
1752 lower_eh_constructs_1 (&this_state, gimple_try_eval (tp));
1753
1754 return gimple_try_eval (tp);
1755 }
1756
1757 /* Implement a cleanup expression. This is similar to try-finally,
1758 except that we only execute the cleanup block for exception edges. */
1759
1760 static gimple_seq
1761 lower_cleanup (struct leh_state *state, gimple tp)
1762 {
1763 struct leh_state this_state;
1764 eh_region this_region;
1765 struct leh_tf_state fake_tf;
1766 gimple_seq result;
1767
1768 /* If not using eh, then exception-only cleanups are no-ops. */
1769 if (!flag_exceptions)
1770 {
1771 result = gimple_try_eval (tp);
1772 lower_eh_constructs_1 (state, result);
1773 return result;
1774 }
1775
1776 this_region = gen_eh_region_cleanup (state->cur_region);
1777 this_state = *state;
1778 this_state.cur_region = this_region;
1779
1780 lower_eh_constructs_1 (&this_state, gimple_try_eval (tp));
1781
1782 if (!bitmap_bit_p (eh_region_may_contain_throw, this_region->index))
1783 return gimple_try_eval (tp);
1784
1785 /* Build enough of a try-finally state so that we can reuse
1786 honor_protect_cleanup_actions. */
1787 memset (&fake_tf, 0, sizeof (fake_tf));
1788 fake_tf.top_p = fake_tf.try_finally_expr = tp;
1789 fake_tf.outer = state;
1790 fake_tf.region = this_region;
1791 fake_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp));
1792 fake_tf.may_throw = true;
1793
1794 honor_protect_cleanup_actions (state, NULL, &fake_tf);
1795
1796 if (fake_tf.may_throw)
1797 {
1798 /* In this case honor_protect_cleanup_actions had nothing to do,
1799 and we should process this normally. */
1800 lower_eh_constructs_1 (state, gimple_try_cleanup (tp));
1801 result = frob_into_branch_around (tp, this_region,
1802 fake_tf.fallthru_label);
1803 }
1804 else
1805 {
1806 /* In this case honor_protect_cleanup_actions did nearly all of
1807 the work. All we have left is to append the fallthru_label. */
1808
1809 result = gimple_try_eval (tp);
1810 if (fake_tf.fallthru_label)
1811 {
1812 gimple x = gimple_build_label (fake_tf.fallthru_label);
1813 gimple_seq_add_stmt (&result, x);
1814 }
1815 }
1816 return result;
1817 }
1818
1819 /* Main loop for lowering eh constructs. Also moves gsi to the next
1820 statement. */
1821
1822 static void
1823 lower_eh_constructs_2 (struct leh_state *state, gimple_stmt_iterator *gsi)
1824 {
1825 gimple_seq replace;
1826 gimple x;
1827 gimple stmt = gsi_stmt (*gsi);
1828
1829 switch (gimple_code (stmt))
1830 {
1831 case GIMPLE_CALL:
1832 {
1833 tree fndecl = gimple_call_fndecl (stmt);
1834 tree rhs, lhs;
1835
1836 if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
1837 switch (DECL_FUNCTION_CODE (fndecl))
1838 {
1839 case BUILT_IN_EH_POINTER:
1840 /* The front end may have generated a call to
1841 __builtin_eh_pointer (0) within a catch region. Replace
1842 this zero argument with the current catch region number. */
1843 if (state->ehp_region)
1844 {
1845 tree nr = build_int_cst (NULL, state->ehp_region->index);
1846 gimple_call_set_arg (stmt, 0, nr);
1847 }
1848 else
1849 {
1850 /* The user has done something silly. Remove it. */
1851 rhs = build_int_cst (ptr_type_node, 0);
1852 goto do_replace;
1853 }
1854 break;
1855
1856 case BUILT_IN_EH_FILTER:
1857 /* ??? This should never appear, but since it's a builtin it
1858 is open to abuse by users. Just remove it and
1859 replace the use with the arbitrary value zero. */
1860 rhs = build_int_cst (TREE_TYPE (TREE_TYPE (fndecl)), 0);
1861 do_replace:
1862 lhs = gimple_call_lhs (stmt);
1863 x = gimple_build_assign (lhs, rhs);
1864 gsi_insert_before (gsi, x, GSI_SAME_STMT);
1865 /* FALLTHRU */
1866
1867 case BUILT_IN_EH_COPY_VALUES:
1868 /* Likewise this should not appear. Remove it. */
1869 gsi_remove (gsi, true);
1870 return;
1871
1872 default:
1873 break;
1874 }
1875 }
1876 /* FALLTHRU */
1877
1878 case GIMPLE_ASSIGN:
1879 /* If the stmt can throw, use a new temporary for the assignment
1880 to its LHS. This makes sure the old value of the LHS is
1881 available on the EH edge. */
1882 if (stmt_could_throw_p (stmt)
1883 && gimple_has_lhs (stmt)
1884 && !tree_could_throw_p (gimple_get_lhs (stmt))
1885 && is_gimple_reg_type (TREE_TYPE (gimple_get_lhs (stmt))))
1886 {
1887 tree lhs = gimple_get_lhs (stmt);
1888 tree tmp = create_tmp_var (TREE_TYPE (lhs), NULL);
1889 gimple s = gimple_build_assign (lhs, tmp);
1890 gimple_set_location (s, gimple_location (stmt));
1891 gimple_set_block (s, gimple_block (stmt));
1892 gimple_set_lhs (stmt, tmp);
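/* Complex and vector temporaries must have DECL_GIMPLE_REG_P set in
order to be treated as gimple registers rather than memory. */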
1893 if (TREE_CODE (TREE_TYPE (tmp)) == COMPLEX_TYPE
1894 || TREE_CODE (TREE_TYPE (tmp)) == VECTOR_TYPE)
1895 DECL_GIMPLE_REG_P (tmp) = 1;
1896 gsi_insert_after (gsi, s, GSI_SAME_STMT);
1897 }
1898 /* Look for things that can throw exceptions, and record them. */
1899 if (state->cur_region && stmt_could_throw_p (stmt))
1900 {
1901 record_stmt_eh_region (state->cur_region, stmt);
1902 note_eh_region_may_contain_throw (state->cur_region);
1903 }
1904 break;
1905
1906 case GIMPLE_COND:
1907 case GIMPLE_GOTO:
1908 case GIMPLE_RETURN:
1909 maybe_record_in_goto_queue (state, stmt);
1910 break;
1911
1912 case GIMPLE_SWITCH:
1913 verify_norecord_switch_expr (state, stmt);
1914 break;
1915
1916 case GIMPLE_TRY:
1917 if (gimple_try_kind (stmt) == GIMPLE_TRY_FINALLY)
1918 replace = lower_try_finally (state, stmt);
1919 else
1920 {
1921 x = gimple_seq_first_stmt (gimple_try_cleanup (stmt));
1922 if (!x)
1923 {
1924 replace = gimple_try_eval (stmt);
1925 lower_eh_constructs_1 (state, replace);
1926 }
1927 else
1928 switch (gimple_code (x))
1929 {
1930 case GIMPLE_CATCH:
1931 replace = lower_catch (state, stmt);
1932 break;
1933 case GIMPLE_EH_FILTER:
1934 replace = lower_eh_filter (state, stmt);
1935 break;
1936 case GIMPLE_EH_MUST_NOT_THROW:
1937 replace = lower_eh_must_not_throw (state, stmt);
1938 break;
1939 default:
1940 replace = lower_cleanup (state, stmt);
1941 break;
1942 }
1943 }
1944
1945 /* Remove the old stmt and insert the transformed sequence
1946 instead. */
1947 gsi_insert_seq_before (gsi, replace, GSI_SAME_STMT);
1948 gsi_remove (gsi, true);
1949
1950 /* Return since we don't want gsi_next () */
1951 return;
1952
1953 default:
1954 /* A type, a decl, or some kind of statement that we're not
1955 interested in. Don't walk them. */
1956 break;
1957 }
1958
1959 gsi_next (gsi);
1960 }
1961
1962 /* A helper to unwrap a gimple_seq and feed stmts to lower_eh_constructs_2. */
1963
1964 static void
1965 lower_eh_constructs_1 (struct leh_state *state, gimple_seq seq)
1966 {
1967 gimple_stmt_iterator gsi;
1968 for (gsi = gsi_start (seq); !gsi_end_p (gsi);)
1969 lower_eh_constructs_2 (state, &gsi);
1970 }
1971
1972 static unsigned int
1973 lower_eh_constructs (void)
1974 {
1975 struct leh_state null_state;
1976 gimple_seq bodyp;
1977
1978 bodyp = gimple_body (current_function_decl);
1979 if (bodyp == NULL)
1980 return 0;
1981
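/* Set up the lowering state: the FINALLY_TREE map filled in by
collect_finally_tree_1, the bitmap recording which EH regions may
contain a throw, and an outermost lowering state with no enclosing
region. */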
1982 finally_tree = htab_create (31, struct_ptr_hash, struct_ptr_eq, free);
1983 eh_region_may_contain_throw = BITMAP_ALLOC (NULL);
1984 memset (&null_state, 0, sizeof (null_state));
1985
1986 collect_finally_tree_1 (bodyp, NULL);
1987 lower_eh_constructs_1 (&null_state, bodyp);
1988
1989 /* We assume there's a return statement, or something, at the end of
1990 the function, and thus plopping the EH sequence afterward won't
1991 change anything. */
1992 gcc_assert (!gimple_seq_may_fallthru (bodyp));
1993 gimple_seq_add_seq (&bodyp, eh_seq);
1994
1995 /* We assume that since BODYP already existed, adding EH_SEQ to it
1996 didn't change its value, and we don't have to re-set the function body. */
1997 gcc_assert (bodyp == gimple_body (current_function_decl));
1998
1999 htab_delete (finally_tree);
2000 BITMAP_FREE (eh_region_may_contain_throw);
2001 eh_seq = NULL;
2002
2003 /* If this function needs a language-specific EH personality routine
2004 and the front end didn't already set one, do so now. */
2005 if (function_needs_eh_personality (cfun) == eh_personality_lang
2006 && !DECL_FUNCTION_PERSONALITY (current_function_decl))
2007 DECL_FUNCTION_PERSONALITY (current_function_decl)
2008 = lang_hooks.eh_personality ();
2009
2010 return 0;
2011 }
2012
2013 struct gimple_opt_pass pass_lower_eh =
2014 {
2015 {
2016 GIMPLE_PASS,
2017 "eh", /* name */
2018 NULL, /* gate */
2019 lower_eh_constructs, /* execute */
2020 NULL, /* sub */
2021 NULL, /* next */
2022 0, /* static_pass_number */
2023 TV_TREE_EH, /* tv_id */
2024 PROP_gimple_lcf, /* properties_required */
2025 PROP_gimple_leh, /* properties_provided */
2026 0, /* properties_destroyed */
2027 0, /* todo_flags_start */
2028 TODO_dump_func /* todo_flags_finish */
2029 }
2030 };
2031 \f
2032 /* Create the multiple edges from an EH_DISPATCH statement to all of
2033 the possible handlers for its EH region. Return true if there's
2034 no fallthru edge; false if there is. */
2035
2036 bool
2037 make_eh_dispatch_edges (gimple stmt)
2038 {
2039 eh_region r;
2040 eh_catch c;
2041 basic_block src, dst;
2042
2043 r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
2044 src = gimple_bb (stmt);
2045
2046 switch (r->type)
2047 {
2048 case ERT_TRY:
2049 for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
2050 {
2051 dst = label_to_block (c->label);
2052 make_edge (src, dst, 0);
2053
2054 /* A catch-all handler doesn't have a fallthru. */
2055 if (c->type_list == NULL)
2056 return false;
2057 }
2058 break;
2059
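/* An allowed-exceptions (exception specification) region has a single
handler label, reached when the thrown exception does not match the
specification. */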
2060 case ERT_ALLOWED_EXCEPTIONS:
2061 dst = label_to_block (r->u.allowed.label);
2062 make_edge (src, dst, 0);
2063 break;
2064
2065 default:
2066 gcc_unreachable ();
2067 }
2068
2069 return true;
2070 }
2071
2072 /* Create the single EH edge from STMT to its nearest landing pad,
2073 if there is such a landing pad within the current function. */
2074
2075 void
2076 make_eh_edges (gimple stmt)
2077 {
2078 basic_block src, dst;
2079 eh_landing_pad lp;
2080 int lp_nr;
2081
2082 lp_nr = lookup_stmt_eh_lp (stmt);
2083 if (lp_nr <= 0)
2084 return;
2085
2086 lp = get_eh_landing_pad_from_number (lp_nr);
2087 gcc_assert (lp != NULL);
2088
2089 src = gimple_bb (stmt);
2090 dst = label_to_block (lp->post_landing_pad);
2091 make_edge (src, dst, EDGE_EH);
2092 }
2093
2094 /* Do the work in redirecting EDGE_IN to NEW_BB within the EH region tree;
2095 do not actually perform the final edge redirection.
2096
2097 CHANGE_REGION is true when we're being called from cleanup_empty_eh and
2098 we intend to change the destination EH region as well; this means
2099 EH_LANDING_PAD_NR must already be set on the destination block label.
2100 If false, we're being called from generic cfg manipulation code and we
2101 should preserve our place within the region tree. */
2102
2103 static void
2104 redirect_eh_edge_1 (edge edge_in, basic_block new_bb, bool change_region)
2105 {
2106 eh_landing_pad old_lp, new_lp;
2107 basic_block old_bb;
2108 gimple throw_stmt;
2109 int old_lp_nr, new_lp_nr;
2110 tree old_label, new_label;
2111 edge_iterator ei;
2112 edge e;
2113
2114 old_bb = edge_in->dest;
2115 old_label = gimple_block_label (old_bb);
2116 old_lp_nr = EH_LANDING_PAD_NR (old_label);
2117 gcc_assert (old_lp_nr > 0);
2118 old_lp = get_eh_landing_pad_from_number (old_lp_nr);
2119
2120 throw_stmt = last_stmt (edge_in->src);
2121 gcc_assert (lookup_stmt_eh_lp (throw_stmt) == old_lp_nr);
2122
2123 new_label = gimple_block_label (new_bb);
2124
2125 /* Look for an existing region that might be using NEW_BB already. */
2126 new_lp_nr = EH_LANDING_PAD_NR (new_label);
2127 if (new_lp_nr)
2128 {
2129 new_lp = get_eh_landing_pad_from_number (new_lp_nr);
2130 gcc_assert (new_lp);
2131
2132 /* Unless CHANGE_REGION is true, the new and old landing pad
2133 had better be associated with the same EH region. */
2134 gcc_assert (change_region || new_lp->region == old_lp->region);
2135 }
2136 else
2137 {
2138 new_lp = NULL;
2139 gcc_assert (!change_region);
2140 }
2141
2142 /* Notice when we redirect the last EH edge away from OLD_BB. */
2143 FOR_EACH_EDGE (e, ei, old_bb->preds)
2144 if (e != edge_in && (e->flags & EDGE_EH))
2145 break;
2146
2147 if (new_lp)
2148 {
2149 /* NEW_LP already exists. If there are still edges into OLD_LP,
2150 there's nothing to do with the EH tree. If there are no more
2151 edges into OLD_LP, then we want to remove OLD_LP as it is unused.
2152 If CHANGE_REGION is true, then our caller is expecting to remove
2153 the landing pad. */
2154 if (e == NULL && !change_region)
2155 remove_eh_landing_pad (old_lp);
2156 }
2157 else
2158 {
2159 /* No correct landing pad exists. If there are no more edges
2160 into OLD_LP, then we can simply re-use the existing landing pad.
2161 Otherwise, we have to create a new landing pad. */
2162 if (e == NULL)
2163 {
2164 EH_LANDING_PAD_NR (old_lp->post_landing_pad) = 0;
2165 new_lp = old_lp;
2166 }
2167 else
2168 new_lp = gen_eh_landing_pad (old_lp->region);
2169 new_lp->post_landing_pad = new_label;
2170 EH_LANDING_PAD_NR (new_label) = new_lp->index;
2171 }
2172
2173 /* Maybe move the throwing statement to the new region. */
2174 if (old_lp != new_lp)
2175 {
2176 remove_stmt_from_eh_lp (throw_stmt);
2177 add_stmt_to_eh_lp (throw_stmt, new_lp->index);
2178 }
2179 }
2180
2181 /* Redirect EH edge EDGE_IN to NEW_BB. */
2182
2183 edge
2184 redirect_eh_edge (edge edge_in, basic_block new_bb)
2185 {
2186 redirect_eh_edge_1 (edge_in, new_bb, false);
2187 return ssa_redirect_edge (edge_in, new_bb);
2188 }
2189
2190 /* This is a subroutine of gimple_redirect_edge_and_branch. Update the
2191 labels for redirecting a non-fallthru EH_DISPATCH edge E to NEW_BB.
2192 The actual edge update will happen in the caller. */
2193
2194 void
2195 redirect_eh_dispatch_edge (gimple stmt, edge e, basic_block new_bb)
2196 {
2197 tree new_lab = gimple_block_label (new_bb);
2198 bool any_changed = false;
2199 basic_block old_bb;
2200 eh_region r;
2201 eh_catch c;
2202
2203 r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
2204 switch (r->type)
2205 {
2206 case ERT_TRY:
2207 for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
2208 {
2209 old_bb = label_to_block (c->label);
2210 if (old_bb == e->dest)
2211 {
2212 c->label = new_lab;
2213 any_changed = true;
2214 }
2215 }
2216 break;
2217
2218 case ERT_ALLOWED_EXCEPTIONS:
2219 old_bb = label_to_block (r->u.allowed.label);
2220 gcc_assert (old_bb == e->dest);
2221 r->u.allowed.label = new_lab;
2222 any_changed = true;
2223 break;
2224
2225 default:
2226 gcc_unreachable ();
2227 }
2228
2229 gcc_assert (any_changed);
2230 }
2231 \f
2232 /* Helper function for operation_could_trap_p and stmt_could_throw_p. */
2233
2234 bool
2235 operation_could_trap_helper_p (enum tree_code op,
2236 bool fp_operation,
2237 bool honor_trapv,
2238 bool honor_nans,
2239 bool honor_snans,
2240 tree divisor,
2241 bool *handled)
2242 {
2243 *handled = true;
2244 switch (op)
2245 {
2246 case TRUNC_DIV_EXPR:
2247 case CEIL_DIV_EXPR:
2248 case FLOOR_DIV_EXPR:
2249 case ROUND_DIV_EXPR:
2250 case EXACT_DIV_EXPR:
2251 case CEIL_MOD_EXPR:
2252 case FLOOR_MOD_EXPR:
2253 case ROUND_MOD_EXPR:
2254 case TRUNC_MOD_EXPR:
2255 case RDIV_EXPR:
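/* Division and modulus may trap on signaling NaNs or trapping overflow,
on any floating-point operation when trapping math is enabled, and
whenever the divisor is not known to be a nonzero constant. */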
2256 if (honor_snans || honor_trapv)
2257 return true;
2258 if (fp_operation)
2259 return flag_trapping_math;
2260 if (!TREE_CONSTANT (divisor) || integer_zerop (divisor))
2261 return true;
2262 return false;
2263
2264 case LT_EXPR:
2265 case LE_EXPR:
2266 case GT_EXPR:
2267 case GE_EXPR:
2268 case LTGT_EXPR:
2269 /* Some floating point comparisons may trap. */
2270 return honor_nans;
2271
2272 case EQ_EXPR:
2273 case NE_EXPR:
2274 case UNORDERED_EXPR:
2275 case ORDERED_EXPR:
2276 case UNLT_EXPR:
2277 case UNLE_EXPR:
2278 case UNGT_EXPR:
2279 case UNGE_EXPR:
2280 case UNEQ_EXPR:
2281 return honor_snans;
2282
2283 case CONVERT_EXPR:
2284 case FIX_TRUNC_EXPR:
2285 /* Conversion of floating point might trap. */
2286 return honor_nans;
2287
2288 case NEGATE_EXPR:
2289 case ABS_EXPR:
2290 case CONJ_EXPR:
2291 /* These operations don't trap with floating point. */
2292 if (honor_trapv)
2293 return true;
2294 return false;
2295
2296 case PLUS_EXPR:
2297 case MINUS_EXPR:
2298 case MULT_EXPR:
2299 /* Any floating arithmetic may trap. */
2300 if (fp_operation && flag_trapping_math)
2301 return true;
2302 if (honor_trapv)
2303 return true;
2304 return false;
2305
2306 default:
2307 /* Any floating arithmetic may trap. */
2308 if (fp_operation && flag_trapping_math)
2309 return true;
2310
2311 *handled = false;
2312 return false;
2313 }
2314 }
2315
2316 /* Return true if operation OP may trap. FP_OPERATION is true if OP is applied
2317 to floating-point values. HONOR_TRAPV is true if OP is applied to integer
2318 operands whose overflow traps. If OP is a division operator, DIVISOR contains
2319 the value of the divisor. */
2320
2321 bool
2322 operation_could_trap_p (enum tree_code op, bool fp_operation, bool honor_trapv,
2323 tree divisor)
2324 {
2325 bool honor_nans = (fp_operation && flag_trapping_math
2326 && !flag_finite_math_only);
2327 bool honor_snans = fp_operation && flag_signaling_nans != 0;
2328 bool handled;
2329
2330 if (TREE_CODE_CLASS (op) != tcc_comparison
2331 && TREE_CODE_CLASS (op) != tcc_unary
2332 && TREE_CODE_CLASS (op) != tcc_binary)
2333 return false;
2334
2335 return operation_could_trap_helper_p (op, fp_operation, honor_trapv,
2336 honor_nans, honor_snans, divisor,
2337 &handled);
2338 }
2339
2340 /* Return true if EXPR can trap, as in dereferencing an invalid pointer
2341 location or floating-point arithmetic. Cf. the RTL version, may_trap_p.
2342 This routine expects only GIMPLE lhs or rhs input. */
2343
2344 bool
2345 tree_could_trap_p (tree expr)
2346 {
2347 enum tree_code code;
2348 bool fp_operation = false;
2349 bool honor_trapv = false;
2350 tree t, base, div = NULL_TREE;
2351
2352 if (!expr)
2353 return false;
2354
2355 code = TREE_CODE (expr);
2356 t = TREE_TYPE (expr);
2357
2358 if (t)
2359 {
2360 if (COMPARISON_CLASS_P (expr))
2361 fp_operation = FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 0)));
2362 else
2363 fp_operation = FLOAT_TYPE_P (t);
2364 honor_trapv = INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t);
2365 }
2366
2367 if (TREE_CODE_CLASS (code) == tcc_binary)
2368 div = TREE_OPERAND (expr, 1);
2369 if (operation_could_trap_p (code, fp_operation, honor_trapv, div))
2370 return true;
2371
2372 restart:
2373 switch (code)
2374 {
2375 case TARGET_MEM_REF:
2376 /* For TARGET_MEM_REFs use the information based on the original
2377 reference. */
2378 expr = TMR_ORIGINAL (expr);
2379 code = TREE_CODE (expr);
2380 goto restart;
2381
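/* These wrappers never trap on their own; whether the reference traps
depends only on the base they wrap, so strip them and retry. */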
2382 case COMPONENT_REF:
2383 case REALPART_EXPR:
2384 case IMAGPART_EXPR:
2385 case BIT_FIELD_REF:
2386 case VIEW_CONVERT_EXPR:
2387 case WITH_SIZE_EXPR:
2388 expr = TREE_OPERAND (expr, 0);
2389 code = TREE_CODE (expr);
2390 goto restart;
2391
2392 case ARRAY_RANGE_REF:
2393 base = TREE_OPERAND (expr, 0);
2394 if (tree_could_trap_p (base))
2395 return true;
2396 if (TREE_THIS_NOTRAP (expr))
2397 return false;
2398 return !range_in_array_bounds_p (expr);
2399
2400 case ARRAY_REF:
2401 base = TREE_OPERAND (expr, 0);
2402 if (tree_could_trap_p (base))
2403 return true;
2404 if (TREE_THIS_NOTRAP (expr))
2405 return false;
2406 return !in_array_bounds_p (expr);
2407
2408 case INDIRECT_REF:
2409 case ALIGN_INDIRECT_REF:
2410 case MISALIGNED_INDIRECT_REF:
2411 return !TREE_THIS_NOTRAP (expr);
2412
2413 case ASM_EXPR:
2414 return TREE_THIS_VOLATILE (expr);
2415
2416 case CALL_EXPR:
2417 t = get_callee_fndecl (expr);
2418 /* Assume that calls to weak functions may trap. */
2419 if (!t || !DECL_P (t) || DECL_WEAK (t))
2420 return true;
2421 return false;
2422
2423 default:
2424 return false;
2425 }
2426 }
2427
2428
2429 /* Helper for stmt_could_throw_p. Return true if STMT (assumed to be
2430 an assignment or a conditional) may throw. */
2431
2432 static bool
2433 stmt_could_throw_1_p (gimple stmt)
2434 {
2435 enum tree_code code = gimple_expr_code (stmt);
2436 bool honor_nans = false;
2437 bool honor_snans = false;
2438 bool fp_operation = false;
2439 bool honor_trapv = false;
2440 tree t;
2441 size_t i;
2442 bool handled, ret;
2443
2444 if (TREE_CODE_CLASS (code) == tcc_comparison
2445 || TREE_CODE_CLASS (code) == tcc_unary
2446 || TREE_CODE_CLASS (code) == tcc_binary)
2447 {
2448 t = gimple_expr_type (stmt);
2449 fp_operation = FLOAT_TYPE_P (t);
2450 if (fp_operation)
2451 {
2452 honor_nans = flag_trapping_math && !flag_finite_math_only;
2453 honor_snans = flag_signaling_nans != 0;
2454 }
2455 else if (INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t))
2456 honor_trapv = true;
2457 }
2458
2459 /* Check if the main expression may trap. */
2460 t = is_gimple_assign (stmt) ? gimple_assign_rhs2 (stmt) : NULL;
2461 ret = operation_could_trap_helper_p (code, fp_operation, honor_trapv,
2462 honor_nans, honor_snans, t,
2463 &handled);
2464 if (handled)
2465 return ret;
2466
2467 /* If the expression does not trap, see if any of the individual operands may
2468 trap. */
2469 for (i = 0; i < gimple_num_ops (stmt); i++)
2470 if (tree_could_trap_p (gimple_op (stmt, i)))
2471 return true;
2472
2473 return false;
2474 }
2475
2476
2477 /* Return true if statement STMT could throw an exception. */
2478
2479 bool
2480 stmt_could_throw_p (gimple stmt)
2481 {
2482 if (!flag_exceptions)
2483 return false;
2484
2485 /* The only statements that can throw an exception are assignments,
2486 conditionals, calls, resx, and asms. */
2487 switch (gimple_code (stmt))
2488 {
2489 case GIMPLE_RESX:
2490 return true;
2491
2492 case GIMPLE_CALL:
2493 return !gimple_call_nothrow_p (stmt);
2494
2495 case GIMPLE_ASSIGN:
2496 case GIMPLE_COND:
2497 if (!flag_non_call_exceptions)
2498 return false;
2499 return stmt_could_throw_1_p (stmt);
2500
2501 case GIMPLE_ASM:
2502 if (!flag_non_call_exceptions)
2503 return false;
2504 return gimple_asm_volatile_p (stmt);
2505
2506 default:
2507 return false;
2508 }
2509 }
2510
2511
2512 /* Return true if expression T could throw an exception. */
2513
2514 bool
2515 tree_could_throw_p (tree t)
2516 {
2517 if (!flag_exceptions)
2518 return false;
2519 if (TREE_CODE (t) == MODIFY_EXPR)
2520 {
2521 if (flag_non_call_exceptions
2522 && tree_could_trap_p (TREE_OPERAND (t, 0)))
2523 return true;
2524 t = TREE_OPERAND (t, 1);
2525 }
2526
2527 if (TREE_CODE (t) == WITH_SIZE_EXPR)
2528 t = TREE_OPERAND (t, 0);
2529 if (TREE_CODE (t) == CALL_EXPR)
2530 return (call_expr_flags (t) & ECF_NOTHROW) == 0;
2531 if (flag_non_call_exceptions)
2532 return tree_could_trap_p (t);
2533 return false;
2534 }
2535
2536 /* Return true if STMT can throw an exception that is not caught within
2537 the current function (CFUN). */
2538
2539 bool
2540 stmt_can_throw_external (gimple stmt)
2541 {
2542 int lp_nr;
2543
2544 if (!stmt_could_throw_p (stmt))
2545 return false;
2546
2547 lp_nr = lookup_stmt_eh_lp (stmt);
2548 return lp_nr == 0;
2549 }
2550
2551 /* Return true if STMT can throw an exception that is caught within
2552 the current function (CFUN). */
2553
2554 bool
2555 stmt_can_throw_internal (gimple stmt)
2556 {
2557 int lp_nr;
2558
2559 if (!stmt_could_throw_p (stmt))
2560 return false;
2561
2562 lp_nr = lookup_stmt_eh_lp (stmt);
2563 return lp_nr > 0;
2564 }
2565
2566 /* Given a statement STMT in IFUN, if STMT can no longer throw, then
2567 remove any entry it might have from the EH table. Return true if
2568 any change was made. */
2569
2570 bool
2571 maybe_clean_eh_stmt_fn (struct function *ifun, gimple stmt)
2572 {
2573 if (stmt_could_throw_p (stmt))
2574 return false;
2575 return remove_stmt_from_eh_lp_fn (ifun, stmt);
2576 }
2577
2578 /* Likewise, but always use the current function. */
2579
2580 bool
2581 maybe_clean_eh_stmt (gimple stmt)
2582 {
2583 return maybe_clean_eh_stmt_fn (cfun, stmt);
2584 }
2585
2586 /* Given a statement OLD_STMT and a new statement NEW_STMT that has replaced
2587 OLD_STMT in the function, remove OLD_STMT from the EH table and put NEW_STMT
2588 in the table if it should be in there. Return TRUE if a replacement was
2589 done that may require an EH edge purge. */
2590
2591 bool
2592 maybe_clean_or_replace_eh_stmt (gimple old_stmt, gimple new_stmt)
2593 {
2594 int lp_nr = lookup_stmt_eh_lp (old_stmt);
2595
2596 if (lp_nr != 0)
2597 {
2598 bool new_stmt_could_throw = stmt_could_throw_p (new_stmt);
2599
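/* If the statement was not actually replaced and can still throw,
its existing EH table entry remains valid. */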
2600 if (new_stmt == old_stmt && new_stmt_could_throw)
2601 return false;
2602
2603 remove_stmt_from_eh_lp (old_stmt);
2604 if (new_stmt_could_throw)
2605 {
2606 add_stmt_to_eh_lp (new_stmt, lp_nr);
2607 return false;
2608 }
2609 else
2610 return true;
2611 }
2612
2613 return false;
2614 }
2615
2616 /* Given a statement OLD_STMT in OLD_FUN and a duplicate statement NEW_STMT
2617 in NEW_FUN, copy the EH table data from OLD_STMT to NEW_STMT. The MAP
2618 operand is the return value of duplicate_eh_regions. */
2619
2620 bool
2621 maybe_duplicate_eh_stmt_fn (struct function *new_fun, gimple new_stmt,
2622 struct function *old_fun, gimple old_stmt,
2623 struct pointer_map_t *map, int default_lp_nr)
2624 {
2625 int old_lp_nr, new_lp_nr;
2626 void **slot;
2627
2628 if (!stmt_could_throw_p (new_stmt))
2629 return false;
2630
2631 old_lp_nr = lookup_stmt_eh_lp_fn (old_fun, old_stmt);
2632 if (old_lp_nr == 0)
2633 {
2634 if (default_lp_nr == 0)
2635 return false;
2636 new_lp_nr = default_lp_nr;
2637 }
2638 else if (old_lp_nr > 0)
2639 {
2640 eh_landing_pad old_lp, new_lp;
2641
2642 old_lp = VEC_index (eh_landing_pad, old_fun->eh->lp_array, old_lp_nr);
2643 slot = pointer_map_contains (map, old_lp);
2644 new_lp = (eh_landing_pad) *slot;
2645 new_lp_nr = new_lp->index;
2646 }
2647 else
2648 {
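/* A negative LP number denotes a MUST_NOT_THROW region rather than a
landing pad; remap the region itself through MAP. */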
2649 eh_region old_r, new_r;
2650
2651 old_r = VEC_index (eh_region, old_fun->eh->region_array, -old_lp_nr);
2652 slot = pointer_map_contains (map, old_r);
2653 new_r = (eh_region) *slot;
2654 new_lp_nr = -new_r->index;
2655 }
2656
2657 add_stmt_to_eh_lp_fn (new_fun, new_stmt, new_lp_nr);
2658 return true;
2659 }
2660
2661 /* Similar, but both OLD_STMT and NEW_STMT are within the current function,
2662 and thus no remapping is required. */
2663
2664 bool
2665 maybe_duplicate_eh_stmt (gimple new_stmt, gimple old_stmt)
2666 {
2667 int lp_nr;
2668
2669 if (!stmt_could_throw_p (new_stmt))
2670 return false;
2671
2672 lp_nr = lookup_stmt_eh_lp (old_stmt);
2673 if (lp_nr == 0)
2674 return false;
2675
2676 add_stmt_to_eh_lp (new_stmt, lp_nr);
2677 return true;
2678 }
2679 \f
2680 /* Returns TRUE if oneh and twoh are exception handlers (gimple_try_cleanup of
2681 GIMPLE_TRY) that are similar enough to be considered the same. Currently
2682 this only handles handlers consisting of a single call, as that's the
2683 important case for C++: a destructor call for a particular object showing
2684 up in multiple handlers. */
2685
2686 static bool
2687 same_handler_p (gimple_seq oneh, gimple_seq twoh)
2688 {
2689 gimple_stmt_iterator gsi;
2690 gimple ones, twos;
2691 unsigned int ai;
2692
2693 gsi = gsi_start (oneh);
2694 if (!gsi_one_before_end_p (gsi))
2695 return false;
2696 ones = gsi_stmt (gsi);
2697
2698 gsi = gsi_start (twoh);
2699 if (!gsi_one_before_end_p (gsi))
2700 return false;
2701 twos = gsi_stmt (gsi);
2702
2703 if (!is_gimple_call (ones)
2704 || !is_gimple_call (twos)
2705 || gimple_call_lhs (ones)
2706 || gimple_call_lhs (twos)
2707 || gimple_call_chain (ones)
2708 || gimple_call_chain (twos)
2709 || !operand_equal_p (gimple_call_fn (ones), gimple_call_fn (twos), 0)
2710 || gimple_call_num_args (ones) != gimple_call_num_args (twos))
2711 return false;
2712
2713 for (ai = 0; ai < gimple_call_num_args (ones); ++ai)
2714 if (!operand_equal_p (gimple_call_arg (ones, ai),
2715 gimple_call_arg (twos, ai), 0))
2716 return false;
2717
2718 return true;
2719 }
2720
2721 /* Optimize
2722 try { A() } finally { try { ~B() } catch { ~A() } }
2723 try { ... } finally { ~A() }
2724 into
2725 try { A() } catch { ~B() }
2726 try { ~B() ... } finally { ~A() }
2727
2728 This occurs frequently in C++, where A is a local variable and B is a
2729 temporary used in the initializer for A. */
2730
2731 static void
2732 optimize_double_finally (gimple one, gimple two)
2733 {
2734 gimple oneh;
2735 gimple_stmt_iterator gsi;
2736
2737 gsi = gsi_start (gimple_try_cleanup (one));
2738 if (!gsi_one_before_end_p (gsi))
2739 return;
2740
2741 oneh = gsi_stmt (gsi);
2742 if (gimple_code (oneh) != GIMPLE_TRY
2743 || gimple_try_kind (oneh) != GIMPLE_TRY_CATCH)
2744 return;
2745
2746 if (same_handler_p (gimple_try_cleanup (oneh), gimple_try_cleanup (two)))
2747 {
2748 gimple_seq seq = gimple_try_eval (oneh);
2749
2750 gimple_try_set_cleanup (one, seq);
2751 gimple_try_set_kind (one, GIMPLE_TRY_CATCH);
2752 seq = copy_gimple_seq_and_replace_locals (seq);
2753 gimple_seq_add_seq (&seq, gimple_try_eval (two));
2754 gimple_try_set_eval (two, seq);
2755 }
2756 }
2757
2758 /* Perform EH refactoring optimizations that are simpler to do when code
2759 flow has been lowered but EH structures haven't. */
2760
2761 static void
2762 refactor_eh_r (gimple_seq seq)
2763 {
2764 gimple_stmt_iterator gsi;
2765 gimple one, two;
2766
2767 one = NULL;
2768 two = NULL;
2769 gsi = gsi_start (seq);
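/* Walk the sequence with a one-statement lookbehind: ONE is the previous
statement and TWO the current one, so that adjacent GIMPLE_TRY_FINALLY
pairs can be handed to optimize_double_finally. */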
2770 while (1)
2771 {
2772 one = two;
2773 if (gsi_end_p (gsi))
2774 two = NULL;
2775 else
2776 two = gsi_stmt (gsi);
2777 if (one
2778 && two
2779 && gimple_code (one) == GIMPLE_TRY
2780 && gimple_code (two) == GIMPLE_TRY
2781 && gimple_try_kind (one) == GIMPLE_TRY_FINALLY
2782 && gimple_try_kind (two) == GIMPLE_TRY_FINALLY)
2783 optimize_double_finally (one, two);
2784 if (one)
2785 switch (gimple_code (one))
2786 {
2787 case GIMPLE_TRY:
2788 refactor_eh_r (gimple_try_eval (one));
2789 refactor_eh_r (gimple_try_cleanup (one));
2790 break;
2791 case GIMPLE_CATCH:
2792 refactor_eh_r (gimple_catch_handler (one));
2793 break;
2794 case GIMPLE_EH_FILTER:
2795 refactor_eh_r (gimple_eh_filter_failure (one));
2796 break;
2797 default:
2798 break;
2799 }
2800 if (two)
2801 gsi_next (&gsi);
2802 else
2803 break;
2804 }
2805 }
2806
2807 static unsigned
2808 refactor_eh (void)
2809 {
2810 refactor_eh_r (gimple_body (current_function_decl));
2811 return 0;
2812 }
2813
2814 static bool
2815 gate_refactor_eh (void)
2816 {
2817 return flag_exceptions != 0;
2818 }
2819
2820 struct gimple_opt_pass pass_refactor_eh =
2821 {
2822 {
2823 GIMPLE_PASS,
2824 "ehopt", /* name */
2825 gate_refactor_eh, /* gate */
2826 refactor_eh, /* execute */
2827 NULL, /* sub */
2828 NULL, /* next */
2829 0, /* static_pass_number */
2830 TV_TREE_EH, /* tv_id */
2831 PROP_gimple_lcf, /* properties_required */
2832 0, /* properties_provided */
2833 0, /* properties_destroyed */
2834 0, /* todo_flags_start */
2835 TODO_dump_func /* todo_flags_finish */
2836 }
2837 };
2838 \f
2839 /* At the end of gimple optimization, we can lower RESX. */
2840
2841 static bool
2842 lower_resx (basic_block bb, gimple stmt, struct pointer_map_t *mnt_map)
2843 {
2844 int lp_nr;
2845 eh_region src_r, dst_r;
2846 gimple_stmt_iterator gsi;
2847 gimple x;
2848 tree fn, src_nr;
2849 bool ret = false;
2850
2851 lp_nr = lookup_stmt_eh_lp (stmt);
2852 if (lp_nr != 0)
2853 dst_r = get_eh_region_from_lp_number (lp_nr);
2854 else
2855 dst_r = NULL;
2856
2857 src_r = get_eh_region_from_number (gimple_resx_region (stmt));
2858 gsi = gsi_last_bb (bb);
2859
2860 if (src_r == NULL)
2861 {
2862 /* We can wind up with no source region when pass_cleanup_eh shows
2863 that there are no entries into an eh region and deletes it, but
2864 then the block that contains the resx isn't removed. This can
2865 happen without optimization when the switch statement created by
2866 lower_try_finally_switch isn't simplified to remove the eh case.
2867
2868 Resolve this by expanding the resx node to an abort. */
2869
2870 fn = implicit_built_in_decls[BUILT_IN_TRAP];
2871 x = gimple_build_call (fn, 0);
2872 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
2873
2874 while (EDGE_COUNT (bb->succs) > 0)
2875 remove_edge (EDGE_SUCC (bb, 0));
2876 }
2877 else if (dst_r)
2878 {
2879 /* When we have a destination region, we resolve this by copying
2880 the exception pointer and filter values into place, and changing the edge
2881 to immediately after the landing pad. */
2882 edge e;
2883
2884 if (lp_nr < 0)
2885 {
2886 basic_block new_bb;
2887 void **slot;
2888 tree lab;
2889
2890 /* We are resuming into a MUST_NOT_THROW region. Expand a call to
2891 the failure decl into a new block, if needed. */
2892 gcc_assert (dst_r->type == ERT_MUST_NOT_THROW);
2893
2894 slot = pointer_map_contains (mnt_map, dst_r);
2895 if (slot == NULL)
2896 {
2897 gimple_stmt_iterator gsi2;
2898
2899 new_bb = create_empty_bb (bb);
2900 lab = gimple_block_label (new_bb);
2901 gsi2 = gsi_start_bb (new_bb);
2902
2903 fn = dst_r->u.must_not_throw.failure_decl;
2904 x = gimple_build_call (fn, 0);
2905 gimple_set_location (x, dst_r->u.must_not_throw.failure_loc);
2906 gsi_insert_after (&gsi2, x, GSI_CONTINUE_LINKING);
2907
2908 slot = pointer_map_insert (mnt_map, dst_r);
2909 *slot = lab;
2910 }
2911 else
2912 {
2913 lab = (tree) *slot;
2914 new_bb = label_to_block (lab);
2915 }
2916
2917 gcc_assert (EDGE_COUNT (bb->succs) == 0);
2918 e = make_edge (bb, new_bb, EDGE_FALLTHRU);
2919 e->count = bb->count;
2920 e->probability = REG_BR_PROB_BASE;
2921 }
2922 else
2923 {
2924 edge_iterator ei;
2925 tree dst_nr = build_int_cst (NULL, dst_r->index);
2926
2927 fn = implicit_built_in_decls[BUILT_IN_EH_COPY_VALUES];
2928 src_nr = build_int_cst (NULL, src_r->index);
2929 x = gimple_build_call (fn, 2, dst_nr, src_nr);
2930 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
2931
2932 /* Update the flags for the outgoing edge. */
2933 e = single_succ_edge (bb);
2934 gcc_assert (e->flags & EDGE_EH);
2935 e->flags = (e->flags & ~EDGE_EH) | EDGE_FALLTHRU;
2936
2937 /* If there are no more EH users of the landing pad, delete it. */
2938 FOR_EACH_EDGE (e, ei, e->dest->preds)
2939 if (e->flags & EDGE_EH)
2940 break;
2941 if (e == NULL)
2942 {
2943 eh_landing_pad lp = get_eh_landing_pad_from_number (lp_nr);
2944 remove_eh_landing_pad (lp);
2945 }
2946 }
2947
2948 ret = true;
2949 }
2950 else
2951 {
2952 tree var;
2953
2954 /* When we don't have a destination region, this exception escapes
2955 up the call chain. We resolve this by generating a call to the
2956 _Unwind_Resume library function. */
2957
2958 /* The ARM EABI redefines _Unwind_Resume as __cxa_end_cleanup
2959 with no arguments for C++ and Java. Check for that. */
2960 if (src_r->use_cxa_end_cleanup)
2961 {
2962 fn = implicit_built_in_decls[BUILT_IN_CXA_END_CLEANUP];
2963 x = gimple_build_call (fn, 0);
2964 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
2965 }
2966 else
2967 {
2968 fn = implicit_built_in_decls[BUILT_IN_EH_POINTER];
2969 src_nr = build_int_cst (NULL, src_r->index);
2970 x = gimple_build_call (fn, 1, src_nr);
2971 var = create_tmp_var (ptr_type_node, NULL);
2972 var = make_ssa_name (var, x);
2973 gimple_call_set_lhs (x, var);
2974 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
2975
2976 fn = implicit_built_in_decls[BUILT_IN_UNWIND_RESUME];
2977 x = gimple_build_call (fn, 1, var);
2978 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
2979 }
2980
2981 gcc_assert (EDGE_COUNT (bb->succs) == 0);
2982 }
2983
2984 gsi_remove (&gsi, true);
2985
2986 return ret;
2987 }
2988
2989 static unsigned
2990 execute_lower_resx (void)
2991 {
2992 basic_block bb;
2993 struct pointer_map_t *mnt_map;
2994 bool dominance_invalidated = false;
2995 bool any_rewritten = false;
2996
2997 mnt_map = pointer_map_create ();
2998
2999 FOR_EACH_BB (bb)
3000 {
3001 gimple last = last_stmt (bb);
3002 if (last && is_gimple_resx (last))
3003 {
3004 dominance_invalidated |= lower_resx (bb, last, mnt_map);
3005 any_rewritten = true;
3006 }
3007 }
3008
3009 pointer_map_destroy (mnt_map);
3010
3011 if (dominance_invalidated)
3012 {
3013 free_dominance_info (CDI_DOMINATORS);
3014 free_dominance_info (CDI_POST_DOMINATORS);
3015 }
3016
3017 return any_rewritten ? TODO_update_ssa_only_virtuals : 0;
3018 }
3019
3020 static bool
3021 gate_lower_resx (void)
3022 {
3023 return flag_exceptions != 0;
3024 }
3025
3026 struct gimple_opt_pass pass_lower_resx =
3027 {
3028 {
3029 GIMPLE_PASS,
3030 "resx", /* name */
3031 gate_lower_resx, /* gate */
3032 execute_lower_resx, /* execute */
3033 NULL, /* sub */
3034 NULL, /* next */
3035 0, /* static_pass_number */
3036 TV_TREE_EH, /* tv_id */
3037 PROP_gimple_lcf, /* properties_required */
3038 0, /* properties_provided */
3039 0, /* properties_destroyed */
3040 0, /* todo_flags_start */
3041 TODO_dump_func | TODO_verify_flow /* todo_flags_finish */
3042 }
3043 };
3044
3045
3046 /* At the end of inlining, we can lower EH_DISPATCH. */
3047
3048 static void
3049 lower_eh_dispatch (basic_block src, gimple stmt)
3050 {
3051 gimple_stmt_iterator gsi;
3052 int region_nr;
3053 eh_region r;
3054 tree filter, fn;
3055 gimple x;
3056
3057 region_nr = gimple_eh_dispatch_region (stmt);
3058 r = get_eh_region_from_number (region_nr);
3059
3060 gsi = gsi_last_bb (src);
3061
3062 switch (r->type)
3063 {
3064 case ERT_TRY:
3065 {
3066 VEC (tree, heap) *labels = NULL;
3067 tree default_label = NULL;
3068 eh_catch c;
3069 edge_iterator ei;
3070 edge e;
3071
3072 /* Collect the labels for a switch. Zero the label
3073 field because we'll no longer have anything keeping these labels
3074 in existence and the optimizer will be free to merge these
3075 blocks at will. */
3076 for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
3077 {
3078 tree tp_node, flt_node, lab = c->label;
3079
3080 c->label = NULL;
3081 tp_node = c->type_list;
3082 flt_node = c->filter_list;
3083
3084 if (tp_node == NULL)
3085 {
3086 default_label = lab;
3087 break;
3088 }
3089 do
3090 {
3091 tree t = build3 (CASE_LABEL_EXPR, void_type_node,
3092 TREE_VALUE (flt_node), NULL, lab);
3093 VEC_safe_push (tree, heap, labels, t);
3094
3095 tp_node = TREE_CHAIN (tp_node);
3096 flt_node = TREE_CHAIN (flt_node);
3097 }
3098 while (tp_node);
3099 }
3100
3101 /* Clean up the edge flags. */
3102 FOR_EACH_EDGE (e, ei, src->succs)
3103 {
3104 if (e->flags & EDGE_FALLTHRU)
3105 {
3106 /* If there was no catch-all, use the fallthru edge. */
3107 if (default_label == NULL)
3108 default_label = gimple_block_label (e->dest);
3109 e->flags &= ~EDGE_FALLTHRU;
3110 }
3111 }
3112 gcc_assert (default_label != NULL);
3113
3114 /* Don't generate a switch if there's only a default case.
3115 This is common in the form of try { A; } catch (...) { B; }. */
3116 if (labels == NULL)
3117 {
3118 e = single_succ_edge (src);
3119 e->flags |= EDGE_FALLTHRU;
3120 }
3121 else
3122 {
3123 fn = implicit_built_in_decls[BUILT_IN_EH_FILTER];
3124 x = gimple_build_call (fn, 1, build_int_cst (NULL, region_nr));
3125 filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)), NULL);
3126 filter = make_ssa_name (filter, x);
3127 gimple_call_set_lhs (x, filter);
3128 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3129
3130 /* Turn the default label into a default case. */
3131 default_label = build3 (CASE_LABEL_EXPR, void_type_node,
3132 NULL, NULL, default_label);
3133 sort_case_labels (labels);
3134
3135 x = gimple_build_switch_vec (filter, default_label, labels);
3136 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3137
3138 VEC_free (tree, heap, labels);
3139 }
3140 }
3141 break;
3142
3143 case ERT_ALLOWED_EXCEPTIONS:
3144 {
3145 edge b_e = BRANCH_EDGE (src);
3146 edge f_e = FALLTHRU_EDGE (src);
3147
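/* Load the runtime filter value for this region via __builtin_eh_filter
and compare it against the region's allowed filter; the branch edge is
taken on a match, the fallthru edge otherwise. */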
3148 fn = implicit_built_in_decls[BUILT_IN_EH_FILTER];
3149 x = gimple_build_call (fn, 1, build_int_cst (NULL, region_nr));
3150 filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)), NULL);
3151 filter = make_ssa_name (filter, x);
3152 gimple_call_set_lhs (x, filter);
3153 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3154
3155 r->u.allowed.label = NULL;
3156 x = gimple_build_cond (EQ_EXPR, filter,
3157 build_int_cst (TREE_TYPE (filter),
3158 r->u.allowed.filter),
3159 NULL_TREE, NULL_TREE);
3160 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3161
3162 b_e->flags = b_e->flags | EDGE_TRUE_VALUE;
3163 f_e->flags = (f_e->flags & ~EDGE_FALLTHRU) | EDGE_FALSE_VALUE;
3164 }
3165 break;
3166
3167 default:
3168 gcc_unreachable ();
3169 }
3170
3171 /* Replace the EH_DISPATCH with the SWITCH or COND generated above. */
3172 gsi_remove (&gsi, true);
3173 }
3174
3175 static unsigned
3176 execute_lower_eh_dispatch (void)
3177 {
3178 basic_block bb;
3179 bool any_rewritten = false;
3180
3181 assign_filter_values ();
3182
3183 FOR_EACH_BB (bb)
3184 {
3185 gimple last = last_stmt (bb);
3186 if (last && gimple_code (last) == GIMPLE_EH_DISPATCH)
3187 {
3188 lower_eh_dispatch (bb, last);
3189 any_rewritten = true;
3190 }
3191 }
3192
3193 return any_rewritten ? TODO_update_ssa_only_virtuals : 0;
3194 }
3195
3196 static bool
3197 gate_lower_eh_dispatch (void)
3198 {
3199 return cfun->eh->region_tree != NULL;
3200 }
3201
3202 struct gimple_opt_pass pass_lower_eh_dispatch =
3203 {
3204 {
3205 GIMPLE_PASS,
3206 "ehdisp", /* name */
3207 gate_lower_eh_dispatch, /* gate */
3208 execute_lower_eh_dispatch, /* execute */
3209 NULL, /* sub */
3210 NULL, /* next */
3211 0, /* static_pass_number */
3212 TV_TREE_EH, /* tv_id */
3213 PROP_gimple_lcf, /* properties_required */
3214 0, /* properties_provided */
3215 0, /* properties_destroyed */
3216 0, /* todo_flags_start */
3217 TODO_dump_func | TODO_verify_flow /* todo_flags_finish */
3218 }
3219 };
3220 \f
3221 /* Walk statements, see what regions are really referenced and remove
3222 those that are unused. */
3223
3224 static void
3225 remove_unreachable_handlers (void)
3226 {
3227 sbitmap r_reachable, lp_reachable;
3228 eh_region region;
3229 eh_landing_pad lp;
3230 basic_block bb;
3231 int lp_nr, r_nr;
3232
3233 r_reachable = sbitmap_alloc (VEC_length (eh_region, cfun->eh->region_array));
3234 lp_reachable
3235 = sbitmap_alloc (VEC_length (eh_landing_pad, cfun->eh->lp_array));
3236 sbitmap_zero (r_reachable);
3237 sbitmap_zero (lp_reachable);
3238
3239 FOR_EACH_BB (bb)
3240 {
3241 gimple_stmt_iterator gsi = gsi_start_bb (bb);
3242
3243 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3244 {
3245 gimple stmt = gsi_stmt (gsi);
3246 lp_nr = lookup_stmt_eh_lp (stmt);
3247
3248 /* Negative LP numbers are MUST_NOT_THROW regions which
3249 are not considered BB enders. */
3250 if (lp_nr < 0)
3251 SET_BIT (r_reachable, -lp_nr);
3252
3253 /* Positive LP numbers are real landing pads, and are BB enders. */
3254 else if (lp_nr > 0)
3255 {
3256 gcc_assert (gsi_one_before_end_p (gsi));
3257 region = get_eh_region_from_lp_number (lp_nr);
3258 SET_BIT (r_reachable, region->index);
3259 SET_BIT (lp_reachable, lp_nr);
3260 }
3261 }
3262 }
3263
3264 if (dump_file)
3265 {
3266 fprintf (dump_file, "Before removal of unreachable regions:\n");
3267 dump_eh_tree (dump_file, cfun);
3268 fprintf (dump_file, "Reachable regions: ");
3269 dump_sbitmap_file (dump_file, r_reachable);
3270 fprintf (dump_file, "Reachable landing pads: ");
3271 dump_sbitmap_file (dump_file, lp_reachable);
3272 }
3273
3274 for (r_nr = 1;
3275 VEC_iterate (eh_region, cfun->eh->region_array, r_nr, region); ++r_nr)
3276 if (region && !TEST_BIT (r_reachable, r_nr))
3277 {
3278 if (dump_file)
3279 fprintf (dump_file, "Removing unreachable region %d\n", r_nr);
3280 remove_eh_handler (region);
3281 }
3282
3283 for (lp_nr = 1;
3284 VEC_iterate (eh_landing_pad, cfun->eh->lp_array, lp_nr, lp); ++lp_nr)
3285 if (lp && !TEST_BIT (lp_reachable, lp_nr))
3286 {
3287 if (dump_file)
3288 fprintf (dump_file, "Removing unreachable landing pad %d\n", lp_nr);
3289 remove_eh_landing_pad (lp);
3290 }
3291
3292 if (dump_file)
3293 {
3294 fprintf (dump_file, "\n\nAfter removal of unreachable regions:\n");
3295 dump_eh_tree (dump_file, cfun);
3296 fprintf (dump_file, "\n\n");
3297 }
3298
3299 sbitmap_free (r_reachable);
3300 sbitmap_free (lp_reachable);
3301
3302 #ifdef ENABLE_CHECKING
3303 verify_eh_tree (cfun);
3304 #endif
3305 }
3306
3307 /* Remove regions that do not have landing pads. This assumes
3308 that remove_unreachable_handlers has already been run, and
3309 that we've just manipulated the landing pads since then. */
3310
3311 static void
3312 remove_unreachable_handlers_no_lp (void)
3313 {
3314 eh_region r;
3315 int i;
3316
3317 for (i = 1; VEC_iterate (eh_region, cfun->eh->region_array, i, r); ++i)
3318 if (r && r->landing_pads == NULL && r->type != ERT_MUST_NOT_THROW)
3319 {
3320 if (dump_file)
3321 fprintf (dump_file, "Removing unreachable region %d\n", i);
3322 remove_eh_handler (r);
3323 }
3324 }
3325
3326 /* Undo critical edge splitting on an EH landing pad. Earlier, we
3327 optimistically split all sorts of edges, including EH edges. The
3328 optimization passes in between may not have needed them; if not,
3329 we should undo the split.
3330
3331 Recognize this case by having one EH edge incoming to the BB and
3332 one normal edge outgoing; BB should be empty apart from the
3333 post_landing_pad label.
3334
3335 Note that this is slightly different from the empty handler case
3336 handled by cleanup_empty_eh, in that the actual handler may yet
3337 have actual code but the landing pad has been separated from the
3338 handler. As such, cleanup_empty_eh relies on this transformation
3339 having been done first. */
3340
3341 static bool
3342 unsplit_eh (eh_landing_pad lp)
3343 {
3344 basic_block bb = label_to_block (lp->post_landing_pad);
3345 gimple_stmt_iterator gsi;
3346 edge e_in, e_out;
3347
3348 /* Quickly check the edge counts on BB for singularity. */
3349 if (EDGE_COUNT (bb->preds) != 1 || EDGE_COUNT (bb->succs) != 1)
3350 return false;
3351 e_in = EDGE_PRED (bb, 0);
3352 e_out = EDGE_SUCC (bb, 0);
3353
3354 /* Input edge must be EH and output edge must be normal. */
3355 if ((e_in->flags & EDGE_EH) == 0 || (e_out->flags & EDGE_EH) != 0)
3356 return false;
3357
3358 /* The block must be empty except for the labels. */
3359 if (!gsi_end_p (gsi_after_labels (bb)))
3360 return false;
3361
3362 /* The destination block must not already have a landing pad
3363 for a different region. */
3364 for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
3365 {
3366 gimple stmt = gsi_stmt (gsi);
3367 tree lab;
3368 int lp_nr;
3369
3370 if (gimple_code (stmt) != GIMPLE_LABEL)
3371 break;
3372 lab = gimple_label_label (stmt);
3373 lp_nr = EH_LANDING_PAD_NR (lab);
3374 if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region)
3375 return false;
3376 }
3377
3378 /* The new destination block must not already be a destination of
3379 the source block, lest we merge fallthru and eh edges and create
3380 all sorts of confusion. */
3381 if (find_edge (e_in->src, e_out->dest))
3382 return false;
3383
3384 /* ??? I can't imagine there would be PHI nodes, since by nature
3385 of critical edge splitting this block should never have been
3386 a dominance frontier. If cfg cleanups somehow confuse this,
3387 due to single edges in and out we ought to have degenerate PHIs
3388 and can easily propagate the PHI arguments. */
3389 gcc_assert (gimple_seq_empty_p (phi_nodes (bb)));
3390
3391 if (dump_file && (dump_flags & TDF_DETAILS))
3392 fprintf (dump_file, "Unsplit EH landing pad %d to block %i.\n",
3393 lp->index, e_out->dest->index);
3394
3395 /* Redirect the edge. Since redirect_eh_edge_1 expects to be moving
3396 a successor edge, humor it. But do the real CFG change with the
3397 predecessor of E_OUT in order to preserve the ordering of arguments
3398 to the PHI nodes in E_OUT->DEST. */
3399 redirect_eh_edge_1 (e_in, e_out->dest, false);
3400 redirect_edge_pred (e_out, e_in->src);
3401 e_out->flags = e_in->flags;
3402 e_out->probability = e_in->probability;
3403 e_out->count = e_in->count;
3404 remove_edge (e_in);
3405
3406 return true;
3407 }
3408
3409 /* Examine each landing pad block and see if it matches unsplit_eh. */
3410
3411 static bool
3412 unsplit_all_eh (void)
3413 {
3414 bool changed = false;
3415 eh_landing_pad lp;
3416 int i;
3417
3418 for (i = 1; VEC_iterate (eh_landing_pad, cfun->eh->lp_array, i, lp); ++i)
3419 if (lp)
3420 changed |= unsplit_eh (lp);
3421
3422 return changed;
3423 }
3424
3425 /* A subroutine of cleanup_empty_eh. Redirect all EH edges incoming
3426 to OLD_BB to NEW_BB; return true on success, false on failure.
3427
3428 OLD_BB_OUT is the edge into NEW_BB from OLD_BB, so if we miss any
3429 PHI variables from OLD_BB we can pick them up from OLD_BB_OUT.
3430 Virtual PHIs may be deleted and marked for renaming. */
3431
3432 static bool
3433 cleanup_empty_eh_merge_phis (basic_block new_bb, basic_block old_bb,
3434 edge old_bb_out)
3435 {
3436 gimple_stmt_iterator ngsi, ogsi;
3437 edge_iterator ei;
3438 edge e;
3439 bitmap rename_virts;
3440 bitmap ophi_handled;
3441
3442 FOR_EACH_EDGE (e, ei, old_bb->preds)
3443 redirect_edge_var_map_clear (e);
3444
3445 ophi_handled = BITMAP_ALLOC (NULL);
3446 rename_virts = BITMAP_ALLOC (NULL);
3447
3448 /* First, iterate through the PHIs on NEW_BB and set up the edge_var_map
3449 for the edges we're going to move. */
3450 for (ngsi = gsi_start_phis (new_bb); !gsi_end_p (ngsi); gsi_next (&ngsi))
3451 {
3452 gimple ophi, nphi = gsi_stmt (ngsi);
3453 tree nresult, nop;
3454
3455 nresult = gimple_phi_result (nphi);
3456 nop = gimple_phi_arg_def (nphi, old_bb_out->dest_idx);
3457
3458 /* Find the corresponding PHI in OLD_BB so we can forward-propagate
3459 the source ssa_name. */
3460 ophi = NULL;
3461 for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi))
3462 {
3463 ophi = gsi_stmt (ogsi);
3464 if (gimple_phi_result (ophi) == nop)
3465 break;
3466 ophi = NULL;
3467 }
3468
3469 /* If we did find the corresponding PHI, copy those inputs. */
3470 if (ophi)
3471 {
3472 bitmap_set_bit (ophi_handled, SSA_NAME_VERSION (nop));
3473 FOR_EACH_EDGE (e, ei, old_bb->preds)
3474 {
3475 location_t oloc;
3476 tree oop;
3477
3478 if ((e->flags & EDGE_EH) == 0)
3479 continue;
3480 oop = gimple_phi_arg_def (ophi, e->dest_idx);
3481 oloc = gimple_phi_arg_location (ophi, e->dest_idx);
3482 redirect_edge_var_map_add (e, nresult, oop, oloc);
3483 }
3484 }
3485 /* If we didn't find the PHI, but it's a VOP, remember to rename
3486 it later, assuming all other tests succeed. */
3487 else if (!is_gimple_reg (nresult))
3488 bitmap_set_bit (rename_virts, SSA_NAME_VERSION (nresult));
3489 /* If we didn't find the PHI, and it's a real variable, we know
3490 from the fact that OLD_BB is tree_empty_eh_handler_p that the
3491 variable is unchanged from input to the block and we can simply
3492 re-use the input to NEW_BB from the OLD_BB_OUT edge. */
3493 else
3494 {
3495 location_t nloc
3496 = gimple_phi_arg_location (nphi, old_bb_out->dest_idx);
3497 FOR_EACH_EDGE (e, ei, old_bb->preds)
3498 redirect_edge_var_map_add (e, nresult, nop, nloc);
3499 }
3500 }
3501
3502 /* Second, verify that all PHIs from OLD_BB have been handled. If not,
3503 we don't know what values from the other edges into NEW_BB to use. */
3504 for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi))
3505 {
3506 gimple ophi = gsi_stmt (ogsi);
3507 tree oresult = gimple_phi_result (ophi);
3508 if (!bitmap_bit_p (ophi_handled, SSA_NAME_VERSION (oresult)))
3509 goto fail;
3510 }
3511
3512 /* At this point we know that the merge will succeed. Remove the PHI
3513 nodes for the virtuals that we want to rename. */
3514 if (!bitmap_empty_p (rename_virts))
3515 {
3516 for (ngsi = gsi_start_phis (new_bb); !gsi_end_p (ngsi); )
3517 {
3518 gimple nphi = gsi_stmt (ngsi);
3519 tree nresult = gimple_phi_result (nphi);
3520 if (bitmap_bit_p (rename_virts, SSA_NAME_VERSION (nresult)))
3521 {
3522 mark_virtual_phi_result_for_renaming (nphi);
3523 remove_phi_node (&ngsi, true);
3524 }
3525 else
3526 gsi_next (&ngsi);
3527 }
3528 }
3529
3530 /* Finally, move the edges and update the PHIs. */
3531 for (ei = ei_start (old_bb->preds); (e = ei_safe_edge (ei)); )
3532 if (e->flags & EDGE_EH)
3533 {
3534 redirect_eh_edge_1 (e, new_bb, true);
3535 redirect_edge_succ (e, new_bb);
3536 flush_pending_stmts (e);
3537 }
3538 else
3539 ei_next (&ei);
3540
3541 BITMAP_FREE (ophi_handled);
3542 BITMAP_FREE (rename_virts);
3543 return true;
3544
3545 fail:
3546 FOR_EACH_EDGE (e, ei, old_bb->preds)
3547 redirect_edge_var_map_clear (e);
3548 BITMAP_FREE (ophi_handled);
3549 BITMAP_FREE (rename_virts);
3550 return false;
3551 }
3552
3553 /* A subroutine of cleanup_empty_eh. Move a landing pad LP from its
3554 old region to NEW_REGION at BB. */
3555
3556 static void
3557 cleanup_empty_eh_move_lp (basic_block bb, edge e_out,
3558 eh_landing_pad lp, eh_region new_region)
3559 {
3560 gimple_stmt_iterator gsi;
3561 eh_landing_pad *pp;
3562
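/* Unlink LP from its current region's list of landing pads, then push
it onto NEW_REGION's list. */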
3563 for (pp = &lp->region->landing_pads; *pp != lp; pp = &(*pp)->next_lp)
3564 continue;
3565 *pp = lp->next_lp;
3566
3567 lp->region = new_region;
3568 lp->next_lp = new_region->landing_pads;
3569 new_region->landing_pads = lp;
3570
3571 /* Delete the RESX that was matched within the empty handler block. */
3572 gsi = gsi_last_bb (bb);
3573 mark_virtual_ops_for_renaming (gsi_stmt (gsi));
3574 gsi_remove (&gsi, true);
3575
3576 /* Clean up E_OUT for the fallthru. */
3577 e_out->flags = (e_out->flags & ~EDGE_EH) | EDGE_FALLTHRU;
3578 e_out->probability = REG_BR_PROB_BASE;
3579 }
3580
3581 /* A subroutine of cleanup_empty_eh. Handle more complex cases of
3582 unsplitting than unsplit_eh was prepared to handle, e.g. when
3583 multiple incoming edges and phis are involved. */
3584
3585 static bool
3586 cleanup_empty_eh_unsplit (basic_block bb, edge e_out, eh_landing_pad olp)
3587 {
3588 gimple_stmt_iterator gsi;
3589 eh_landing_pad nlp;
3590 tree lab;
3591
3592 /* We really ought not have totally lost everything following
3593 a landing pad label. Given that BB is empty, there had better
3594 be a successor. */
3595 gcc_assert (e_out != NULL);
3596
3597 /* Look for an EH label in the successor block. */
3598 lab = NULL;
3599 for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
3600 {
3601 gimple stmt = gsi_stmt (gsi);
3602 if (gimple_code (stmt) != GIMPLE_LABEL)
3603 break;
3604 lab = gimple_label_label (stmt);
3605 if (EH_LANDING_PAD_NR (lab))
3606 goto found;
3607 }
3608 return false;
3609 found:
3610
3611 /* The other label had better be part of the same EH region. Given that
3612 we've not lowered RESX, there should be no way to have a totally empty
3613 landing pad that crosses to another EH region. */
3614 nlp = get_eh_landing_pad_from_number (EH_LANDING_PAD_NR (lab));
3615 gcc_assert (nlp->region == olp->region);
3616
3617 /* Attempt to move the PHIs into the successor block. */
3618 if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out))
3619 {
3620 if (dump_file && (dump_flags & TDF_DETAILS))
3621 fprintf (dump_file,
3622 "Unsplit EH landing pad %d to block %d via lp %d.\n",
3623 olp->index, e_out->dest->index, nlp->index);
3624
3625 remove_eh_landing_pad (olp);
3626 return true;
3627 }
3628
3629 return false;
3630 }
3631
3632 /* Examine the block associated with LP to determine if it's an empty
3633 handler for its EH region. If so, attempt to redirect EH edges to
3634 an outer region. Return true if the CFG was updated in any way. This
3635 is similar to jump forwarding, just across EH edges. */
3636
3637 static bool
3638 cleanup_empty_eh (eh_landing_pad lp)
3639 {
3640 basic_block bb = label_to_block (lp->post_landing_pad);
3641 gimple_stmt_iterator gsi;
3642 gimple resx;
3643 eh_region new_region;
3644 edge_iterator ei;
3645 edge e, e_out;
3646 bool has_non_eh_pred;
3647 int new_lp_nr;
3648
3649 /* There can be zero or one edges out of BB. This is the quickest test. */
3650 switch (EDGE_COUNT (bb->succs))
3651 {
3652 case 0:
3653 e_out = NULL;
3654 break;
3655 case 1:
3656 e_out = EDGE_SUCC (bb, 0);
3657 break;
3658 default:
3659 return false;
3660 }
3661 gsi = gsi_after_labels (bb);
3662
3663 /* Make sure to skip debug statements. */
3664 if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
3665 gsi_next_nondebug (&gsi);
3666
3667 /* If the block is totally empty, look for more unsplitting cases. */
3668 if (gsi_end_p (gsi))
3669 return cleanup_empty_eh_unsplit (bb, e_out, lp);
3670
3671 /* The block should consist only of a single RESX statement. */
3672 resx = gsi_stmt (gsi);
3673 if (!is_gimple_resx (resx))
3674 return false;
3675 gcc_assert (gsi_one_before_end_p (gsi));
3676
3677 /* Determine if there are non-EH edges, or resx edges into the handler. */
3678 has_non_eh_pred = false;
3679 FOR_EACH_EDGE (e, ei, bb->preds)
3680 if (!(e->flags & EDGE_EH))
3681 has_non_eh_pred = true;
3682
3683 /* Find the handler that's outer of the empty handler by looking at
3684 where the RESX instruction was vectored. */
3685 new_lp_nr = lookup_stmt_eh_lp (resx);
3686 new_region = get_eh_region_from_lp_number (new_lp_nr);
3687
3688 /* If there's no destination region within the current function,
3689 redirection is trivial via removing the throwing statements from
3690 the EH region, removing the EH edges, and allowing the block
3691 to go unreachable. */
3692 if (new_region == NULL)
3693 {
3694 gcc_assert (e_out == NULL);
3695 for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
3696 if (e->flags & EDGE_EH)
3697 {
3698 gimple stmt = last_stmt (e->src);
3699 remove_stmt_from_eh_lp (stmt);
3700 remove_edge (e);
3701 }
3702 else
3703 ei_next (&ei);
3704 goto succeed;
3705 }
3706
3707 /* If the destination region is a MUST_NOT_THROW, allow the runtime
3708 to handle the abort and allow the blocks to go unreachable. */
3709 if (new_region->type == ERT_MUST_NOT_THROW)
3710 {
3711 for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
3712 if (e->flags & EDGE_EH)
3713 {
3714 gimple stmt = last_stmt (e->src);
3715 remove_stmt_from_eh_lp (stmt);
3716 add_stmt_to_eh_lp (stmt, new_lp_nr);
3717 remove_edge (e);
3718 }
3719 else
3720 ei_next (&ei);
3721 goto succeed;
3722 }
3723
3724 /* Try to redirect the EH edges and merge the PHIs into the destination
3725 landing pad block. If the merge succeeds, we'll already have redirected
3726 all the EH edges. The handler itself will go unreachable if there were
3727 no normal edges. */
3728 if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out))
3729 goto succeed;
3730
3731 /* Finally, if all input edges are EH edges, then we can (potentially)
3732 reduce the number of transfers from the runtime by moving the landing
3733 pad from the original region to the new region. This is a win when
3734 we remove the last CLEANUP region along a particular exception
3735 propagation path. Since nothing changes except for the region with
3736 which the landing pad is associated, the PHI nodes do not need to be
3737 adjusted at all. */
3738 if (!has_non_eh_pred)
3739 {
3740 cleanup_empty_eh_move_lp (bb, e_out, lp, new_region);
3741 if (dump_file && (dump_flags & TDF_DETAILS))
3742 fprintf (dump_file, "Empty EH handler %i moved to EH region %i.\n",
3743 lp->index, new_region->index);
3744
3745 /* ??? The CFG didn't change, but we may have rendered the
3746 old EH region unreachable. Trigger a cleanup there. */
3747 return true;
3748 }
3749
3750 return false;
3751
3752 succeed:
3753 if (dump_file && (dump_flags & TDF_DETAILS))
3754 fprintf (dump_file, "Empty EH handler %i removed.\n", lp->index);
3755 remove_eh_landing_pad (lp);
3756 return true;
3757 }
3758
3759 /* Do a post-order traversal of the EH region tree. Examine each
3760 post_landing_pad block and see if we can eliminate it as empty. */
3761
3762 static bool
3763 cleanup_all_empty_eh (void)
3764 {
3765 bool changed = false;
3766 eh_landing_pad lp;
3767 int i;
3768
3769 for (i = 1; VEC_iterate (eh_landing_pad, cfun->eh->lp_array, i, lp); ++i)
3770 if (lp)
3771 changed |= cleanup_empty_eh (lp);
3772
3773 return changed;
3774 }
3775
3776 /* Perform cleanups and lowering of exception handling:
3777 1) Cleanup regions whose handlers do nothing are optimized out.
3778 2) MUST_NOT_THROW regions that became dead because of 1) are optimized out.
3779 3) Info about regions containing instructions, and regions reachable
3780 via local EH edges, is collected.
3781 4) The EH tree is pruned of regions that are no longer necessary.
3782
3783 TODO: Push MUST_NOT_THROW regions to the root of the EH tree.
3784 Unify those that have the same failure decl and locus.
3785 */
3786
3787 static unsigned int
3788 execute_cleanup_eh (void)
3789 {
3790 /* Do this first: unsplit_all_eh and cleanup_all_empty_eh can die
3791 looking up unreachable landing pads. */
3792 remove_unreachable_handlers ();
3793
3794 /* Watch out for the region tree vanishing because every region was unreachable. */
3795 if (cfun->eh->region_tree && optimize)
3796 {
3797 bool changed = false;
3798
3799 changed |= unsplit_all_eh ();
3800 changed |= cleanup_all_empty_eh ();
3801
3802 if (changed)
3803 {
3804 free_dominance_info (CDI_DOMINATORS);
3805 free_dominance_info (CDI_POST_DOMINATORS);
3806
3807 /* We delayed all basic block deletion, as we may have performed
3808 cleanups on EH edges while non-EH edges were still present. */
3809 delete_unreachable_blocks ();
3810
3811 /* We manipulated the landing pads. Remove any region that no
3812 longer has a landing pad. */
3813 remove_unreachable_handlers_no_lp ();
3814
3815 return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
3816 }
3817 }
3818
3819 return 0;
3820 }
3821
3822 static bool
3823 gate_cleanup_eh (void)
3824 {
3825 return cfun->eh != NULL && cfun->eh->region_tree != NULL;
3826 }
3827
3828 struct gimple_opt_pass pass_cleanup_eh = {
3829 {
3830 GIMPLE_PASS,
3831 "ehcleanup", /* name */
3832 gate_cleanup_eh, /* gate */
3833 execute_cleanup_eh, /* execute */
3834 NULL, /* sub */
3835 NULL, /* next */
3836 0, /* static_pass_number */
3837 TV_TREE_EH, /* tv_id */
3838 PROP_gimple_lcf, /* properties_required */
3839 0, /* properties_provided */
3840 0, /* properties_destroyed */
3841 0, /* todo_flags_start */
3842 TODO_dump_func /* todo_flags_finish */
3843 }
3844 };
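/* Editorial note, hedged: like other gimple_opt_pass objects, pass_cleanup_eh
   only takes effect once the pass manager knows about it.  The usual wiring,
   shown for illustration only (the authoritative declaration and pass-list
   entry live in tree-pass.h and passes.c respectively), is along the lines of:

     extern struct gimple_opt_pass pass_cleanup_eh;

   and, in the pass list:

     NEXT_PASS (pass_cleanup_eh);

   The gate hook keeps the pass from running when the function has no EH
   region tree at all, and TODO_dump_func requests the usual per-pass dump
   when dumping is enabled.  */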
3845 \f
3846 /* Verify that the BB containing STMT as its last statement has precisely
3847 the edges that make_eh_edges would create. */
3848
3849 bool
3850 verify_eh_edges (gimple stmt)
3851 {
3852 basic_block bb = gimple_bb (stmt);
3853 eh_landing_pad lp = NULL;
3854 int lp_nr;
3855 edge_iterator ei;
3856 edge e, eh_edge;
3857
3858 lp_nr = lookup_stmt_eh_lp (stmt);
3859 if (lp_nr > 0)
3860 lp = get_eh_landing_pad_from_number (lp_nr);
3861
3862 eh_edge = NULL;
3863 FOR_EACH_EDGE (e, ei, bb->succs)
3864 {
3865 if (e->flags & EDGE_EH)
3866 {
3867 if (eh_edge)
3868 {
3869 error ("BB %i has multiple EH edges", bb->index);
3870 return true;
3871 }
3872 else
3873 eh_edge = e;
3874 }
3875 }
3876
3877 if (lp == NULL)
3878 {
3879 if (eh_edge)
3880 {
3881 error ("BB %i can not throw but has an EH edge", bb->index);
3882 return true;
3883 }
3884 return false;
3885 }
3886
3887 if (!stmt_could_throw_p (stmt))
3888 {
3889 error ("BB %i last statement has incorrectly set lp", bb->index);
3890 return true;
3891 }
3892
3893 if (eh_edge == NULL)
3894 {
3895 error ("BB %i is missing an EH edge", bb->index);
3896 return true;
3897 }
3898
3899 if (eh_edge->dest != label_to_block (lp->post_landing_pad))
3900 {
3901 error ("Incorrect EH edge %i->%i", bb->index, eh_edge->dest->index);
3902 return true;
3903 }
3904
3905 return false;
3906 }
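/* Editorial sketch, as a comment only: verify_eh_edges reports problems with
   error () and returns true when it finds one; it does not abort on its own.
   A hypothetical checking loop using it (illustration only, not the actual
   caller):

     bool err = false;
     basic_block bb;
     gimple stmt;

     FOR_EACH_BB (bb)
       {
         stmt = last_stmt (bb);
         if (stmt)
           err |= verify_eh_edges (stmt);
       }
     if (err)
       internal_error ("EH edge verification failed");  */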
3907
3908 /* Similarly, but handle GIMPLE_EH_DISPATCH specifically. */
3909
3910 bool
3911 verify_eh_dispatch_edge (gimple stmt)
3912 {
3913 eh_region r;
3914 eh_catch c;
3915 basic_block src, dst;
3916 bool want_fallthru = true;
3917 edge_iterator ei;
3918 edge e, fall_edge;
3919
3920 r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
3921 src = gimple_bb (stmt);
3922
3923 FOR_EACH_EDGE (e, ei, src->succs)
3924 gcc_assert (e->aux == NULL);
3925
3926 switch (r->type)
3927 {
3928 case ERT_TRY:
3929 for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
3930 {
3931 dst = label_to_block (c->label);
3932 e = find_edge (src, dst);
3933 if (e == NULL)
3934 {
3935 error ("BB %i is missing an edge", src->index);
3936 return true;
3937 }
3938 e->aux = (void *)e;
3939
3940 /* A catch-all handler doesn't have a fallthru. */
3941 if (c->type_list == NULL)
3942 {
3943 want_fallthru = false;
3944 break;
3945 }
3946 }
3947 break;
3948
3949 case ERT_ALLOWED_EXCEPTIONS:
3950 dst = label_to_block (r->u.allowed.label);
3951 e = find_edge (src, dst);
3952 if (e == NULL)
3953 {
3954 error ("BB %i is missing an edge", src->index);
3955 return true;
3956 }
3957 e->aux = (void *)e;
3958 break;
3959
3960 default:
3961 gcc_unreachable ();
3962 }
3963
3964 fall_edge = NULL;
3965 FOR_EACH_EDGE (e, ei, src->succs)
3966 {
3967 if (e->flags & EDGE_FALLTHRU)
3968 {
3969 if (fall_edge != NULL)
3970 {
3971 error ("BB %i too many fallthru edges", src->index);
3972 return true;
3973 }
3974 fall_edge = e;
3975 }
3976 else if (e->aux)
3977 e->aux = NULL;
3978 else
3979 {
3980 error ("BB %i has incorrect edge", src->index);
3981 return true;
3982 }
3983 }
3984 if ((fall_edge != NULL) ^ want_fallthru)
3985 {
3986 error ("BB %i has incorrect fallthru edge", src->index);
3987 return true;
3988 }
3989
3990 return false;
3991 }
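/* Editorial sketch, as a comment only: the dispatch checker above relies on
   the e->aux marking idiom.  Every successor edge the EH_DISPATCH is expected
   to have is located with find_edge and tagged by storing a non-NULL value in
   e->aux; the final sweep clears the tags and treats any untagged non-fallthru
   edge as spurious.  The opening loop asserting e->aux == NULL documents that
   the field is free for this temporary use.  The same idiom restated with a
   hypothetical helper (which, like the checker above, does not bother
   clearing tags on the early-failure path):

     static bool
     src_has_exactly_these_succs (basic_block src, basic_block *dsts, int n)
     {
       edge e;
       edge_iterator ei;
       int i;
       bool extra = false;

       for (i = 0; i < n; ++i)
         {
           e = find_edge (src, dsts[i]);
           if (e == NULL)
             return false;
           e->aux = (void *) e;
         }
       FOR_EACH_EDGE (e, ei, src->succs)
         if (e->aux)
           e->aux = NULL;
         else
           extra = true;
       return !extra;
     }  */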