Squash commit of EH in gimple
gcc/tree-eh.c (gcc.git)
1 /* Exception handling semantics and decomposition for trees.
2 Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009
3 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "tree.h"
26 #include "rtl.h"
27 #include "tm_p.h"
28 #include "flags.h"
29 #include "function.h"
30 #include "except.h"
31 #include "tree-flow.h"
32 #include "tree-dump.h"
33 #include "tree-inline.h"
34 #include "tree-iterator.h"
35 #include "tree-pass.h"
36 #include "timevar.h"
37 #include "langhooks.h"
38 #include "ggc.h"
39 #include "toplev.h"
40 #include "gimple.h"
41 #include "target.h"
42
43 /* In some instances a tree and a gimple need to be stored in the same table,
44 i.e. in hash tables. This is a structure to do this. */
45 typedef union {tree *tp; tree t; gimple g;} treemple;
46
47 /* Nonzero if we are using EH to handle cleanups. */
48 static int using_eh_for_cleanups_p = 0;
49
50 void
51 using_eh_for_cleanups (void)
52 {
53 using_eh_for_cleanups_p = 1;
54 }
55
56 /* Misc functions used in this file. */
57
58 /* Compare and hash for any structure which begins with a canonical
59 pointer. Assumes all pointers are interchangeable, which is sort
60 of already assumed by gcc elsewhere IIRC. */
61
62 static int
63 struct_ptr_eq (const void *a, const void *b)
64 {
65 const void * const * x = (const void * const *) a;
66 const void * const * y = (const void * const *) b;
67 return *x == *y;
68 }
69
70 static hashval_t
71 struct_ptr_hash (const void *a)
72 {
73 const void * const * x = (const void * const *) a;
74 return (size_t)*x >> 4;
75 }
76
77
78 /* Remember and lookup EH landing pad data for arbitrary statements.
79 Really this means any statement that could_throw_p. We could
80 stuff this information into the stmt_ann data structure, but:
81
82 (1) We absolutely rely on this information being kept until
83 we get to rtl. Once we're done with lowering here, if we lose
84 the information there's no way to recover it!
85
86 (2) There are many more statements that *cannot* throw as
87 compared to those that can. We should be saving some amount
88 of space by only allocating memory for those that can throw. */
89
90 /* Add statement T in function IFUN to landing pad NUM. */
91
92 void
93 add_stmt_to_eh_lp_fn (struct function *ifun, gimple t, int num)
94 {
95 struct throw_stmt_node *n;
96 void **slot;
97
98 gcc_assert (num != 0);
99
100 n = GGC_NEW (struct throw_stmt_node);
101 n->stmt = t;
102 n->lp_nr = num;
103
104 if (!get_eh_throw_stmt_table (ifun))
105 set_eh_throw_stmt_table (ifun, htab_create_ggc (31, struct_ptr_hash,
106 struct_ptr_eq,
107 ggc_free));
108
109 slot = htab_find_slot (get_eh_throw_stmt_table (ifun), n, INSERT);
110 gcc_assert (!*slot);
111 *slot = n;
112 }
113
114 /* Add statement T in the current function (cfun) to EH landing pad NUM. */
115
116 void
117 add_stmt_to_eh_lp (gimple t, int num)
118 {
119 add_stmt_to_eh_lp_fn (cfun, t, num);
120 }
121
122 /* Add statement T to the single EH landing pad in REGION. */
123
124 static void
125 record_stmt_eh_region (eh_region region, gimple t)
126 {
127 if (region == NULL)
128 return;
129 if (region->type == ERT_MUST_NOT_THROW)
130 add_stmt_to_eh_lp_fn (cfun, t, -region->index);
131 else
132 {
133 eh_landing_pad lp = region->landing_pads;
134 if (lp == NULL)
135 lp = gen_eh_landing_pad (region);
136 else
137 gcc_assert (lp->next_lp == NULL);
138 add_stmt_to_eh_lp_fn (cfun, t, lp->index);
139 }
140 }
141
142
143 /* Remove statement T in function IFUN from its EH landing pad. */
144
145 bool
146 remove_stmt_from_eh_lp_fn (struct function *ifun, gimple t)
147 {
148 struct throw_stmt_node dummy;
149 void **slot;
150
151 if (!get_eh_throw_stmt_table (ifun))
152 return false;
153
154 dummy.stmt = t;
155 slot = htab_find_slot (get_eh_throw_stmt_table (ifun), &dummy,
156 NO_INSERT);
157 if (slot)
158 {
159 htab_clear_slot (get_eh_throw_stmt_table (ifun), slot);
160 return true;
161 }
162 else
163 return false;
164 }
165
166
167 /* Remove statement T in the current function (cfun) from its
168 EH landing pad. */
169
170 bool
171 remove_stmt_from_eh_lp (gimple t)
172 {
173 return remove_stmt_from_eh_lp_fn (cfun, t);
174 }
175
176 /* Determine if statement T is inside an EH region in function IFUN.
177 Positive numbers indicate a landing pad index; negative numbers
178 indicate a MUST_NOT_THROW region index; zero indicates that the
179 statement is not recorded in the region table. */
180
181 int
182 lookup_stmt_eh_lp_fn (struct function *ifun, gimple t)
183 {
184 struct throw_stmt_node *p, n;
185
186 if (ifun->eh->throw_stmt_table == NULL)
187 return 0;
188
189 n.stmt = t;
190 p = (struct throw_stmt_node *) htab_find (ifun->eh->throw_stmt_table, &n);
191 return p ? p->lp_nr : 0;
192 }
193
194 /* Likewise, but always use the current function. */
195
196 int
197 lookup_stmt_eh_lp (gimple t)
198 {
199 /* We can get called from initialized data when -fnon-call-exceptions
200 is on; prevent crash. */
201 if (!cfun)
202 return 0;
203 return lookup_stmt_eh_lp_fn (cfun, t);
204 }
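/* Illustrative usage sketch (hypothetical caller, not code from this file):

     int lp_nr = lookup_stmt_eh_lp (stmt);

   lp_nr > 0 means STMT may throw to landing pad number lp_nr, lp_nr < 0
   means STMT lives inside MUST_NOT_THROW region number -lp_nr, and zero
   means STMT is not recorded in the region table at all.  */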
205
206 /* Likewise, but reference a tree expression instead. */
207
208 int
209 lookup_expr_eh_lp (tree t)
210 {
211 if (cfun && cfun->eh->throw_stmt_table && t && EXPR_P (t))
212 {
213 tree_ann_common_t ann = tree_common_ann (t);
214 if (ann)
215 return ann->lp_nr;
216 }
217 return 0;
218 }
219
220
221 /* First pass of EH node decomposition. Build up a tree of GIMPLE_TRY_FINALLY
222 nodes and LABEL_DECL nodes. We will use this during the second phase to
223 determine if a goto leaves the body of a TRY_FINALLY_EXPR node. */
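/* For illustration (a source-level sketch, not code from this file):

     try { if (p) goto out; f (); } finally { cleanup (); }
   out:;

   The label "out" lives outside the TRY_FINALLY body, so the goto escapes
   the construct and must be made to execute the finally block first.  The
   finally tree built below is what lets outside_finally_tree answer that
   question for an arbitrary label or nested GIMPLE_TRY.  */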
224
225 struct finally_tree_node
226 {
227 /* When storing a GIMPLE_TRY, we have to record a gimple. However
228 when deciding whether a GOTO to a certain LABEL_DECL (which is a
229 tree) leaves the TRY block, it's necessary to record a tree in
230 this field. Thus a treemple is used. */
231 treemple child;
232 gimple parent;
233 };
234
235 /* Note that this table is *not* marked GTY. It is short-lived. */
236 static htab_t finally_tree;
237
238 static void
239 record_in_finally_tree (treemple child, gimple parent)
240 {
241 struct finally_tree_node *n;
242 void **slot;
243
244 n = XNEW (struct finally_tree_node);
245 n->child = child;
246 n->parent = parent;
247
248 slot = htab_find_slot (finally_tree, n, INSERT);
249 gcc_assert (!*slot);
250 *slot = n;
251 }
252
253 static void
254 collect_finally_tree (gimple stmt, gimple region);
255
256 /* Go through the gimple sequence. Works with collect_finally_tree to
257 record all GIMPLE_LABEL and GIMPLE_TRY statements. */
258
259 static void
260 collect_finally_tree_1 (gimple_seq seq, gimple region)
261 {
262 gimple_stmt_iterator gsi;
263
264 for (gsi = gsi_start (seq); !gsi_end_p (gsi); gsi_next (&gsi))
265 collect_finally_tree (gsi_stmt (gsi), region);
266 }
267
268 static void
269 collect_finally_tree (gimple stmt, gimple region)
270 {
271 treemple temp;
272
273 switch (gimple_code (stmt))
274 {
275 case GIMPLE_LABEL:
276 temp.t = gimple_label_label (stmt);
277 record_in_finally_tree (temp, region);
278 break;
279
280 case GIMPLE_TRY:
281 if (gimple_try_kind (stmt) == GIMPLE_TRY_FINALLY)
282 {
283 temp.g = stmt;
284 record_in_finally_tree (temp, region);
285 collect_finally_tree_1 (gimple_try_eval (stmt), stmt);
286 collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
287 }
288 else if (gimple_try_kind (stmt) == GIMPLE_TRY_CATCH)
289 {
290 collect_finally_tree_1 (gimple_try_eval (stmt), region);
291 collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
292 }
293 break;
294
295 case GIMPLE_CATCH:
296 collect_finally_tree_1 (gimple_catch_handler (stmt), region);
297 break;
298
299 case GIMPLE_EH_FILTER:
300 collect_finally_tree_1 (gimple_eh_filter_failure (stmt), region);
301 break;
302
303 default:
304 /* A type, a decl, or some kind of statement that we're not
305 interested in. Don't walk them. */
306 break;
307 }
308 }
309
310
311 /* Use the finally tree to determine if a jump from START to TARGET
312 would leave the try_finally node that START lives in. */
313
314 static bool
315 outside_finally_tree (treemple start, gimple target)
316 {
317 struct finally_tree_node n, *p;
318
319 do
320 {
321 n.child = start;
322 p = (struct finally_tree_node *) htab_find (finally_tree, &n);
323 if (!p)
324 return true;
325 start.g = p->parent;
326 }
327 while (start.g != target);
328
329 return false;
330 }
331
332 /* Second pass of EH node decomposition. Actually transform the GIMPLE_TRY
333 nodes into a set of gotos, magic labels, and eh regions.
334 The eh region creation is straight-forward, but frobbing all the gotos
335 and such into shape isn't. */
336
337 /* The sequence into which we record all EH stuff. This will be
338 placed at the end of the function when we're all done. */
339 static gimple_seq eh_seq;
340
341 /* Record whether an EH region contains something that can throw,
342 indexed by EH region number. */
343 static bitmap eh_region_may_contain_throw;
344
345 /* The GOTO_QUEUE is an array of GIMPLE_GOTO and GIMPLE_RETURN
346 statements that are seen to escape this GIMPLE_TRY_FINALLY node.
347 The idea is to record a gimple statement for everything except for
348 the conditionals, which get their labels recorded. Since labels are
349 of type 'tree', we need this node to store both gimple and tree
350 objects. REPL_STMT is the sequence used to replace the goto/return
351 statement. CONT_STMT is used to store the statement that allows
352 the return/goto to jump to the original destination. */
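/* For example (an illustrative sketch, assuming a C-like source form):

     try { if (c) goto out; return; } finally { f (); }

   Both the goto and the return escape the try body, so each gets a
   goto_queue entry: REPL_STMT becomes a branch into the finally code and
   CONT_STMT re-issues the original goto/return once the finally code
   has run.  */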
353
354 struct goto_queue_node
355 {
356 treemple stmt;
357 gimple_seq repl_stmt;
358 gimple cont_stmt;
359 int index;
360 /* This is used when index >= 0 to indicate that stmt is a label (as
361 opposed to a goto stmt). */
362 int is_label;
363 };
364
365 /* State of the world while lowering. */
366
367 struct leh_state
368 {
369 /* What's "current" while constructing the eh region tree. These
370 correspond to variables of the same name in cfun->eh, which we
371 don't have easy access to. */
372 eh_region cur_region;
373
374 /* What's "current" for the purposes of __builtin_eh_pointer. For
375 a CATCH, this is the associated TRY. For an EH_FILTER, this is
376 the associated ALLOWED_EXCEPTIONS, etc. */
377 eh_region ehp_region;
378
379 /* Processing of TRY_FINALLY requires a bit more state. This is
380 split out into a separate structure so that we don't have to
381 copy so much when processing other nodes. */
382 struct leh_tf_state *tf;
383 };
384
385 struct leh_tf_state
386 {
387 /* Pointer to the GIMPLE_TRY_FINALLY node under discussion. The
388 try_finally_expr is the original GIMPLE_TRY_FINALLY. We need to retain
389 this so that outside_finally_tree can reliably reference the tree used
390 in the collect_finally_tree data structures. */
391 gimple try_finally_expr;
392 gimple top_p;
393
394 /* While lowering, a top_p is usually expanded into multiple statements,
395 so we need the following field to store them. */
396 gimple_seq top_p_seq;
397
398 /* The state outside this try_finally node. */
399 struct leh_state *outer;
400
401 /* The exception region created for it. */
402 eh_region region;
403
404 /* The goto queue. */
405 struct goto_queue_node *goto_queue;
406 size_t goto_queue_size;
407 size_t goto_queue_active;
408
409 /* Pointer map to help in searching goto_queue when it is large. */
410 struct pointer_map_t *goto_queue_map;
411
412 /* The set of unique labels seen as entries in the goto queue. */
413 VEC(tree,heap) *dest_array;
414
415 /* A label to be added at the end of the completed transformed
416 sequence. It will be set if may_fallthru was true *at one time*,
417 though subsequent transformations may have cleared that flag. */
418 tree fallthru_label;
419
420 /* True if it is possible to fall out the bottom of the try block.
421 Cleared if the fallthru is converted to a goto. */
422 bool may_fallthru;
423
424 /* True if any entry in goto_queue is a GIMPLE_RETURN. */
425 bool may_return;
426
427 /* True if the finally block can receive an exception edge.
428 Cleared if the exception case is handled by code duplication. */
429 bool may_throw;
430 };
431
432 static gimple_seq lower_eh_must_not_throw (struct leh_state *, gimple);
433
434 /* Search for STMT in the goto queue. Return the replacement,
435 or null if the statement isn't in the queue. */
436
437 #define LARGE_GOTO_QUEUE 20
438
439 static void lower_eh_constructs_1 (struct leh_state *state, gimple_seq seq);
440
441 static gimple_seq
442 find_goto_replacement (struct leh_tf_state *tf, treemple stmt)
443 {
444 unsigned int i;
445 void **slot;
446
447 if (tf->goto_queue_active < LARGE_GOTO_QUEUE)
448 {
449 for (i = 0; i < tf->goto_queue_active; i++)
450 if ( tf->goto_queue[i].stmt.g == stmt.g)
451 return tf->goto_queue[i].repl_stmt;
452 return NULL;
453 }
454
455 /* If we have a large number of entries in the goto_queue, create a
456 pointer map and use that for searching. */
457
458 if (!tf->goto_queue_map)
459 {
460 tf->goto_queue_map = pointer_map_create ();
461 for (i = 0; i < tf->goto_queue_active; i++)
462 {
463 slot = pointer_map_insert (tf->goto_queue_map,
464 tf->goto_queue[i].stmt.g);
465 gcc_assert (*slot == NULL);
466 *slot = &tf->goto_queue[i];
467 }
468 }
469
470 slot = pointer_map_contains (tf->goto_queue_map, stmt.g);
471 if (slot != NULL)
472 return (((struct goto_queue_node *) *slot)->repl_stmt);
473
474 return NULL;
475 }
476
477 /* A subroutine of replace_goto_queue_1. Handles the sub-clauses of a
478 lowered GIMPLE_COND. If, by chance, the replacement is a simple goto,
479 then we can just splat it in, otherwise we add the new stmts immediately
480 after the GIMPLE_COND and redirect. */
481
482 static void
483 replace_goto_queue_cond_clause (tree *tp, struct leh_tf_state *tf,
484 gimple_stmt_iterator *gsi)
485 {
486 tree label;
487 gimple_seq new_seq;
488 treemple temp;
489 location_t loc = gimple_location (gsi_stmt (*gsi));
490
491 temp.tp = tp;
492 new_seq = find_goto_replacement (tf, temp);
493 if (!new_seq)
494 return;
495
496 if (gimple_seq_singleton_p (new_seq)
497 && gimple_code (gimple_seq_first_stmt (new_seq)) == GIMPLE_GOTO)
498 {
499 *tp = gimple_goto_dest (gimple_seq_first_stmt (new_seq));
500 return;
501 }
502
503 label = create_artificial_label (loc);
504 /* Set the new label for the GIMPLE_COND */
505 *tp = label;
506
507 gsi_insert_after (gsi, gimple_build_label (label), GSI_CONTINUE_LINKING);
508 gsi_insert_seq_after (gsi, gimple_seq_copy (new_seq), GSI_CONTINUE_LINKING);
509 }
510
511 /* The real work of replace_goto_queue. Returns with TSI updated to
512 point to the next statement. */
513
514 static void replace_goto_queue_stmt_list (gimple_seq, struct leh_tf_state *);
515
516 static void
517 replace_goto_queue_1 (gimple stmt, struct leh_tf_state *tf,
518 gimple_stmt_iterator *gsi)
519 {
520 gimple_seq seq;
521 treemple temp;
522 temp.g = NULL;
523
524 switch (gimple_code (stmt))
525 {
526 case GIMPLE_GOTO:
527 case GIMPLE_RETURN:
528 temp.g = stmt;
529 seq = find_goto_replacement (tf, temp);
530 if (seq)
531 {
532 gsi_insert_seq_before (gsi, gimple_seq_copy (seq), GSI_SAME_STMT);
533 gsi_remove (gsi, false);
534 return;
535 }
536 break;
537
538 case GIMPLE_COND:
539 replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 2), tf, gsi);
540 replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 3), tf, gsi);
541 break;
542
543 case GIMPLE_TRY:
544 replace_goto_queue_stmt_list (gimple_try_eval (stmt), tf);
545 replace_goto_queue_stmt_list (gimple_try_cleanup (stmt), tf);
546 break;
547 case GIMPLE_CATCH:
548 replace_goto_queue_stmt_list (gimple_catch_handler (stmt), tf);
549 break;
550 case GIMPLE_EH_FILTER:
551 replace_goto_queue_stmt_list (gimple_eh_filter_failure (stmt), tf);
552 break;
553
554 default:
555 /* These won't have gotos in them. */
556 break;
557 }
558
559 gsi_next (gsi);
560 }
561
562 /* A subroutine of replace_goto_queue. Handles GIMPLE_SEQ. */
563
564 static void
565 replace_goto_queue_stmt_list (gimple_seq seq, struct leh_tf_state *tf)
566 {
567 gimple_stmt_iterator gsi = gsi_start (seq);
568
569 while (!gsi_end_p (gsi))
570 replace_goto_queue_1 (gsi_stmt (gsi), tf, &gsi);
571 }
572
573 /* Replace all goto queue members. */
574
575 static void
576 replace_goto_queue (struct leh_tf_state *tf)
577 {
578 if (tf->goto_queue_active == 0)
579 return;
580 replace_goto_queue_stmt_list (tf->top_p_seq, tf);
581 }
582
583 /* Add a new record to the goto queue contained in TF. NEW_STMT is the
584 data to be added, IS_LABEL indicates whether NEW_STMT is a label or
585 a gimple return. */
586
587 static void
588 record_in_goto_queue (struct leh_tf_state *tf,
589 treemple new_stmt,
590 int index,
591 bool is_label)
592 {
593 size_t active, size;
594 struct goto_queue_node *q;
595
596 gcc_assert (!tf->goto_queue_map);
597
598 active = tf->goto_queue_active;
599 size = tf->goto_queue_size;
600 if (active >= size)
601 {
602 size = (size ? size * 2 : 32);
603 tf->goto_queue_size = size;
604 tf->goto_queue
605 = XRESIZEVEC (struct goto_queue_node, tf->goto_queue, size);
606 }
607
608 q = &tf->goto_queue[active];
609 tf->goto_queue_active = active + 1;
610
611 memset (q, 0, sizeof (*q));
612 q->stmt = new_stmt;
613 q->index = index;
614 q->is_label = is_label;
615 }
616
617 /* Record the LABEL label in the goto queue contained in TF.
618 TF is not null. */
619
620 static void
621 record_in_goto_queue_label (struct leh_tf_state *tf, treemple stmt, tree label)
622 {
623 int index;
624 treemple temp, new_stmt;
625
626 if (!label)
627 return;
628
629 /* Computed and non-local gotos do not get processed. Given
630 their nature we can neither tell whether we've escaped the
631 finally block nor redirect them if we knew. */
632 if (TREE_CODE (label) != LABEL_DECL)
633 return;
634
635 /* No need to record gotos that don't leave the try block. */
636 temp.t = label;
637 if (!outside_finally_tree (temp, tf->try_finally_expr))
638 return;
639
640 if (! tf->dest_array)
641 {
642 tf->dest_array = VEC_alloc (tree, heap, 10);
643 VEC_quick_push (tree, tf->dest_array, label);
644 index = 0;
645 }
646 else
647 {
648 int n = VEC_length (tree, tf->dest_array);
649 for (index = 0; index < n; ++index)
650 if (VEC_index (tree, tf->dest_array, index) == label)
651 break;
652 if (index == n)
653 VEC_safe_push (tree, heap, tf->dest_array, label);
654 }
655
656 /* In the case of a GOTO we want to record the destination label,
657 since with a GIMPLE_COND we have easy access to the then/else
658 labels. */
659 new_stmt = stmt;
660 record_in_goto_queue (tf, new_stmt, index, true);
661
662 }
663
664 /* For any GIMPLE_GOTO or GIMPLE_RETURN, decide whether it leaves a try_finally
665 node, and if so record that fact in the goto queue associated with that
666 try_finally node. */
667
668 static void
669 maybe_record_in_goto_queue (struct leh_state *state, gimple stmt)
670 {
671 struct leh_tf_state *tf = state->tf;
672 treemple new_stmt;
673
674 if (!tf)
675 return;
676
677 switch (gimple_code (stmt))
678 {
679 case GIMPLE_COND:
680 new_stmt.tp = gimple_op_ptr (stmt, 2);
681 record_in_goto_queue_label (tf, new_stmt, gimple_cond_true_label (stmt));
682 new_stmt.tp = gimple_op_ptr (stmt, 3);
683 record_in_goto_queue_label (tf, new_stmt, gimple_cond_false_label (stmt));
684 break;
685 case GIMPLE_GOTO:
686 new_stmt.g = stmt;
687 record_in_goto_queue_label (tf, new_stmt, gimple_goto_dest (stmt));
688 break;
689
690 case GIMPLE_RETURN:
691 tf->may_return = true;
692 new_stmt.g = stmt;
693 record_in_goto_queue (tf, new_stmt, -1, false);
694 break;
695
696 default:
697 gcc_unreachable ();
698 }
699 }
700
701
702 #ifdef ENABLE_CHECKING
703 /* We do not process GIMPLE_SWITCHes for now. As long as the original source
704 was in fact structured, and we've not yet done jump threading, none
705 of the labels will leave outer GIMPLE_TRY_FINALLY nodes. Verify this. */
706
707 static void
708 verify_norecord_switch_expr (struct leh_state *state, gimple switch_expr)
709 {
710 struct leh_tf_state *tf = state->tf;
711 size_t i, n;
712
713 if (!tf)
714 return;
715
716 n = gimple_switch_num_labels (switch_expr);
717
718 for (i = 0; i < n; ++i)
719 {
720 treemple temp;
721 tree lab = CASE_LABEL (gimple_switch_label (switch_expr, i));
722 temp.t = lab;
723 gcc_assert (!outside_finally_tree (temp, tf->try_finally_expr));
724 }
725 }
726 #else
727 #define verify_norecord_switch_expr(state, switch_expr)
728 #endif
729
730 /* Redirect the RETURN_EXPR in goto queue node Q to FINLAB. Place in
731 Q->cont_stmt whatever is needed to finish the return. If MOD is non-null,
732 insert it before the new branch. RETURN_VALUE_P is a cache containing a
733 temporary variable used in manipulating the value returned from the function. */
734
735 static void
736 do_return_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod,
737 tree *return_value_p)
738 {
739 tree ret_expr;
740 gimple x;
741
742 /* In the case of a return, the queue node must be a gimple statement. */
743 gcc_assert (!q->is_label);
744
745 ret_expr = gimple_return_retval (q->stmt.g);
746
747 if (ret_expr)
748 {
749 if (!*return_value_p)
750 *return_value_p = ret_expr;
751 else
752 gcc_assert (*return_value_p == ret_expr);
753 q->cont_stmt = q->stmt.g;
754 /* The nasty part about redirecting the return value is that the
755 return value itself is to be computed before the FINALLY block
756 is executed. e.g.
757
758 int x;
759 int foo (void)
760 {
761 x = 0;
762 try {
763 return x;
764 } finally {
765 x++;
766 }
767 }
768
769 should return 0, not 1. Arrange for this to happen by copying
770 the computed return value into a local temporary. This also
771 allows us to redirect multiple return statements through the
772 same destination block; whether this is a net win or not really
773 depends, I guess, but it does make generation of the switch in
774 lower_try_finally_switch easier. */
775
776 if (TREE_CODE (ret_expr) == RESULT_DECL)
777 {
778 if (!*return_value_p)
779 *return_value_p = ret_expr;
780 else
781 gcc_assert (*return_value_p == ret_expr);
782 q->cont_stmt = q->stmt.g;
783 }
784 else
785 gcc_unreachable ();
786 }
787 else
788 /* If we don't return a value, all return statements are the same. */
789 q->cont_stmt = q->stmt.g;
790
791 if (!q->repl_stmt)
792 q->repl_stmt = gimple_seq_alloc ();
793
794 if (mod)
795 gimple_seq_add_seq (&q->repl_stmt, mod);
796
797 x = gimple_build_goto (finlab);
798 gimple_seq_add_stmt (&q->repl_stmt, x);
799 }
800
801 /* Similar, but easier, for GIMPLE_GOTO. */
802
803 static void
804 do_goto_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod,
805 struct leh_tf_state *tf)
806 {
807 gimple x;
808
809 gcc_assert (q->is_label);
810 if (!q->repl_stmt)
811 q->repl_stmt = gimple_seq_alloc ();
812
813 q->cont_stmt = gimple_build_goto (VEC_index (tree, tf->dest_array, q->index));
814
815 if (mod)
816 gimple_seq_add_seq (&q->repl_stmt, mod);
817
818 x = gimple_build_goto (finlab);
819 gimple_seq_add_stmt (&q->repl_stmt, x);
820 }
821
822 /* Emit a standard landing pad sequence into SEQ for REGION. */
823
824 static void
825 emit_post_landing_pad (gimple_seq *seq, eh_region region)
826 {
827 eh_landing_pad lp = region->landing_pads;
828 gimple x;
829
830 if (lp == NULL)
831 lp = gen_eh_landing_pad (region);
832
833 lp->post_landing_pad = create_artificial_label (UNKNOWN_LOCATION);
834 EH_LANDING_PAD_NR (lp->post_landing_pad) = lp->index;
835
836 x = gimple_build_label (lp->post_landing_pad);
837 gimple_seq_add_stmt (seq, x);
838 }
839
840 /* Emit a RESX statement into SEQ for REGION. */
841
842 static void
843 emit_resx (gimple_seq *seq, eh_region region)
844 {
845 gimple x = gimple_build_resx (region->index);
846 gimple_seq_add_stmt (seq, x);
847 if (region->outer)
848 record_stmt_eh_region (region->outer, x);
849 }
850
851 /* Emit an EH_DISPATCH statement into SEQ for REGION. */
852
853 static void
854 emit_eh_dispatch (gimple_seq *seq, eh_region region)
855 {
856 gimple x = gimple_build_eh_dispatch (region->index);
857 gimple_seq_add_stmt (seq, x);
858 }
859
860 /* Note that the current EH region may contain a throw, or a
861 call to a function which itself may contain a throw. */
862
863 static void
864 note_eh_region_may_contain_throw (eh_region region)
865 {
866 while (!bitmap_bit_p (eh_region_may_contain_throw, region->index))
867 {
868 bitmap_set_bit (eh_region_may_contain_throw, region->index);
869 region = region->outer;
870 if (region == NULL)
871 break;
872 }
873 }
874
875 /* We want to transform
876 try { body; } catch { stuff; }
877 to
878 normal_sequence:
879 body;
880 over:
881 eh_sequence:
882 landing_pad:
883 stuff;
884 goto over;
885
886 TP is a GIMPLE_TRY node. REGION is the region whose post_landing_pad
887 should be placed before the second operand, or NULL. OVER is
888 an existing label that should be put at the exit, or NULL. */
889
890 static gimple_seq
891 frob_into_branch_around (gimple tp, eh_region region, tree over)
892 {
893 gimple x;
894 gimple_seq cleanup, result;
895 location_t loc = gimple_location (tp);
896
897 cleanup = gimple_try_cleanup (tp);
898 result = gimple_try_eval (tp);
899
900 if (region)
901 emit_post_landing_pad (&eh_seq, region);
902
903 if (gimple_seq_may_fallthru (cleanup))
904 {
905 if (!over)
906 over = create_artificial_label (loc);
907 x = gimple_build_goto (over);
908 gimple_seq_add_stmt (&cleanup, x);
909 }
910 gimple_seq_add_seq (&eh_seq, cleanup);
911
912 if (over)
913 {
914 x = gimple_build_label (over);
915 gimple_seq_add_stmt (&result, x);
916 }
917 return result;
918 }
919
920 /* A subroutine of lower_try_finally. Duplicate the tree rooted at T.
921 Make sure to record all new labels found. */
922
923 static gimple_seq
924 lower_try_finally_dup_block (gimple_seq seq, struct leh_state *outer_state)
925 {
926 gimple region = NULL;
927 gimple_seq new_seq;
928
929 new_seq = copy_gimple_seq_and_replace_locals (seq);
930
931 if (outer_state->tf)
932 region = outer_state->tf->try_finally_expr;
933 collect_finally_tree_1 (new_seq, region);
934
935 return new_seq;
936 }
937
938 /* A subroutine of lower_try_finally. Create a fallthru label for
939 the given try_finally state. The only tricky bit here is that
940 we have to make sure to record the label in our outer context. */
941
942 static tree
943 lower_try_finally_fallthru_label (struct leh_tf_state *tf)
944 {
945 tree label = tf->fallthru_label;
946 treemple temp;
947
948 if (!label)
949 {
950 label = create_artificial_label (gimple_location (tf->try_finally_expr));
951 tf->fallthru_label = label;
952 if (tf->outer->tf)
953 {
954 temp.t = label;
955 record_in_finally_tree (temp, tf->outer->tf->try_finally_expr);
956 }
957 }
958 return label;
959 }
960
961 /* A subroutine of lower_try_finally. If lang_protect_cleanup_actions
962 returns non-null, then the language requires that the exception path out
963 of a try_finally be treated specially. To wit: the code within the
964 finally block may not itself throw an exception. We have two choices here.
965 First we can duplicate the finally block and wrap it in a must_not_throw
966 region. Second, we can generate code like
967
968 try {
969 finally_block;
970 } catch {
971 if (fintmp == eh_edge)
972 protect_cleanup_actions;
973 }
974
975 where "fintmp" is the temporary used in the switch statement generation
976 alternative considered below. For the nonce, we always choose the first
977 option.
978
979 THIS_STATE may be null if this is a try-cleanup, not a try-finally. */
980
981 static void
982 honor_protect_cleanup_actions (struct leh_state *outer_state,
983 struct leh_state *this_state,
984 struct leh_tf_state *tf)
985 {
986 tree protect_cleanup_actions;
987 gimple_stmt_iterator gsi;
988 bool finally_may_fallthru;
989 gimple_seq finally;
990 gimple x;
991
992 /* First check for nothing to do. */
993 if (lang_protect_cleanup_actions == NULL)
994 return;
995 protect_cleanup_actions = lang_protect_cleanup_actions ();
996 if (protect_cleanup_actions == NULL)
997 return;
998
999 finally = gimple_try_cleanup (tf->top_p);
1000 finally_may_fallthru = gimple_seq_may_fallthru (finally);
1001
1002 /* Duplicate the FINALLY block. Only need to do this for try-finally,
1003 and not for cleanups. */
1004 if (this_state)
1005 finally = lower_try_finally_dup_block (finally, outer_state);
1006
1007 /* If this cleanup consists of a TRY_CATCH_EXPR with TRY_CATCH_IS_CLEANUP
1008 set, the handler of the TRY_CATCH_EXPR is another cleanup which ought
1009 to be in an enclosing scope, but needs to be implemented at this level
1010 to avoid a nesting violation (see wrap_temporary_cleanups in
1011 cp/decl.c). Since it's logically at an outer level, we should call
1012 terminate before we get to it, so strip it away before adding the
1013 MUST_NOT_THROW filter. */
1014 gsi = gsi_start (finally);
1015 x = gsi_stmt (gsi);
1016 if (gimple_code (x) == GIMPLE_TRY
1017 && gimple_try_kind (x) == GIMPLE_TRY_CATCH
1018 && gimple_try_catch_is_cleanup (x))
1019 {
1020 gsi_insert_seq_before (&gsi, gimple_try_eval (x), GSI_SAME_STMT);
1021 gsi_remove (&gsi, false);
1022 }
1023
1024 /* Wrap the block with protect_cleanup_actions as the action. */
1025 x = gimple_build_eh_must_not_throw (protect_cleanup_actions);
1026 x = gimple_build_try (finally, gimple_seq_alloc_with_stmt (x),
1027 GIMPLE_TRY_CATCH);
1028 finally = lower_eh_must_not_throw (outer_state, x);
1029
1030 /* Drop all of this into the exception sequence. */
1031 emit_post_landing_pad (&eh_seq, tf->region);
1032 gimple_seq_add_seq (&eh_seq, finally);
1033 if (finally_may_fallthru)
1034 emit_resx (&eh_seq, tf->region);
1035
1036 /* Having now been handled, EH isn't to be considered with
1037 the rest of the outgoing edges. */
1038 tf->may_throw = false;
1039 }
1040
1041 /* A subroutine of lower_try_finally. We have determined that there is
1042 no fallthru edge out of the finally block. This means that there is
1043 no outgoing edge corresponding to any incoming edge. Restructure the
1044 try_finally node for this special case. */
1045
1046 static void
1047 lower_try_finally_nofallthru (struct leh_state *state,
1048 struct leh_tf_state *tf)
1049 {
1050 tree lab, return_val;
1051 gimple x;
1052 gimple_seq finally;
1053 struct goto_queue_node *q, *qe;
1054
1055 lab = create_artificial_label (gimple_location (tf->try_finally_expr));
1056
1057 /* We expect that tf->top_p is a GIMPLE_TRY. */
1058 finally = gimple_try_cleanup (tf->top_p);
1059 tf->top_p_seq = gimple_try_eval (tf->top_p);
1060
1061 x = gimple_build_label (lab);
1062 gimple_seq_add_stmt (&tf->top_p_seq, x);
1063
1064 return_val = NULL;
1065 q = tf->goto_queue;
1066 qe = q + tf->goto_queue_active;
1067 for (; q < qe; ++q)
1068 if (q->index < 0)
1069 do_return_redirection (q, lab, NULL, &return_val);
1070 else
1071 do_goto_redirection (q, lab, NULL, tf);
1072
1073 replace_goto_queue (tf);
1074
1075 lower_eh_constructs_1 (state, finally);
1076 gimple_seq_add_seq (&tf->top_p_seq, finally);
1077
1078 if (tf->may_throw)
1079 {
1080 emit_post_landing_pad (&eh_seq, tf->region);
1081
1082 x = gimple_build_goto (lab);
1083 gimple_seq_add_stmt (&eh_seq, x);
1084 }
1085 }
1086
1087 /* A subroutine of lower_try_finally. We have determined that there is
1088 exactly one destination of the finally block. Restructure the
1089 try_finally node for this special case. */
1090
1091 static void
1092 lower_try_finally_onedest (struct leh_state *state, struct leh_tf_state *tf)
1093 {
1094 struct goto_queue_node *q, *qe;
1095 gimple x;
1096 gimple_seq finally;
1097 tree finally_label;
1098 location_t loc = gimple_location (tf->try_finally_expr);
1099
1100 finally = gimple_try_cleanup (tf->top_p);
1101 tf->top_p_seq = gimple_try_eval (tf->top_p);
1102
1103 lower_eh_constructs_1 (state, finally);
1104
1105 if (tf->may_throw)
1106 {
1107 /* Only reachable via the exception edge. Add the given label to
1108 the head of the FINALLY block. Append a RESX at the end. */
1109 emit_post_landing_pad (&eh_seq, tf->region);
1110 gimple_seq_add_seq (&eh_seq, finally);
1111 emit_resx (&eh_seq, tf->region);
1112 return;
1113 }
1114
1115 if (tf->may_fallthru)
1116 {
1117 /* Only reachable via the fallthru edge. Do nothing but let
1118 the two blocks run together; we'll fall out the bottom. */
1119 gimple_seq_add_seq (&tf->top_p_seq, finally);
1120 return;
1121 }
1122
1123 finally_label = create_artificial_label (loc);
1124 x = gimple_build_label (finally_label);
1125 gimple_seq_add_stmt (&tf->top_p_seq, x);
1126
1127 gimple_seq_add_seq (&tf->top_p_seq, finally);
1128
1129 q = tf->goto_queue;
1130 qe = q + tf->goto_queue_active;
1131
1132 if (tf->may_return)
1133 {
1134 /* Reachable by return expressions only. Redirect them. */
1135 tree return_val = NULL;
1136 for (; q < qe; ++q)
1137 do_return_redirection (q, finally_label, NULL, &return_val);
1138 replace_goto_queue (tf);
1139 }
1140 else
1141 {
1142 /* Reachable by goto expressions only. Redirect them. */
1143 for (; q < qe; ++q)
1144 do_goto_redirection (q, finally_label, NULL, tf);
1145 replace_goto_queue (tf);
1146
1147 if (VEC_index (tree, tf->dest_array, 0) == tf->fallthru_label)
1148 {
1149 /* Reachable by goto to fallthru label only. Redirect it
1150 to the new label (already created, sadly), and do not
1151 emit the final branch out, or the fallthru label. */
1152 tf->fallthru_label = NULL;
1153 return;
1154 }
1155 }
1156
1157 /* Place the original return/goto to the original destination
1158 immediately after the finally block. */
1159 x = tf->goto_queue[0].cont_stmt;
1160 gimple_seq_add_stmt (&tf->top_p_seq, x);
1161 maybe_record_in_goto_queue (state, x);
1162 }
1163
1164 /* A subroutine of lower_try_finally. There are multiple edges incoming
1165 and outgoing from the finally block. Implement this by duplicating the
1166 finally block for every destination. */
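/* Roughly, and only as an illustration (label names are invented):

     <try body>
     <copy of finally>  goto fallthru_label;
   L1:
     <copy of finally>  goto original_destination_1;
   ...

   and, if the region can throw, one more copy goes into eh_seq followed
   by a RESX.  Each escaping edge thus runs its own copy of the finally
   block.  */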
1167
1168 static void
1169 lower_try_finally_copy (struct leh_state *state, struct leh_tf_state *tf)
1170 {
1171 gimple_seq finally;
1172 gimple_seq new_stmt;
1173 gimple_seq seq;
1174 gimple x;
1175 tree tmp;
1176 location_t tf_loc = gimple_location (tf->try_finally_expr);
1177
1178 finally = gimple_try_cleanup (tf->top_p);
1179 tf->top_p_seq = gimple_try_eval (tf->top_p);
1180 new_stmt = NULL;
1181
1182 if (tf->may_fallthru)
1183 {
1184 seq = lower_try_finally_dup_block (finally, state);
1185 lower_eh_constructs_1 (state, seq);
1186 gimple_seq_add_seq (&new_stmt, seq);
1187
1188 tmp = lower_try_finally_fallthru_label (tf);
1189 x = gimple_build_goto (tmp);
1190 gimple_seq_add_stmt (&new_stmt, x);
1191 }
1192
1193 if (tf->may_throw)
1194 {
1195 emit_post_landing_pad (&eh_seq, tf->region);
1196
1197 seq = lower_try_finally_dup_block (finally, state);
1198 lower_eh_constructs_1 (state, seq);
1199 gimple_seq_add_seq (&eh_seq, seq);
1200
1201 emit_resx (&eh_seq, tf->region);
1202 }
1203
1204 if (tf->goto_queue)
1205 {
1206 struct goto_queue_node *q, *qe;
1207 tree return_val = NULL;
1208 int return_index, index;
1209 struct labels_s
1210 {
1211 struct goto_queue_node *q;
1212 tree label;
1213 } *labels;
1214
1215 return_index = VEC_length (tree, tf->dest_array);
1216 labels = XCNEWVEC (struct labels_s, return_index + 1);
1217
1218 q = tf->goto_queue;
1219 qe = q + tf->goto_queue_active;
1220 for (; q < qe; q++)
1221 {
1222 index = q->index < 0 ? return_index : q->index;
1223
1224 if (!labels[index].q)
1225 labels[index].q = q;
1226 }
1227
1228 for (index = 0; index < return_index + 1; index++)
1229 {
1230 tree lab;
1231
1232 q = labels[index].q;
1233 if (! q)
1234 continue;
1235
1236 lab = labels[index].label
1237 = create_artificial_label (tf_loc);
1238
1239 if (index == return_index)
1240 do_return_redirection (q, lab, NULL, &return_val);
1241 else
1242 do_goto_redirection (q, lab, NULL, tf);
1243
1244 x = gimple_build_label (lab);
1245 gimple_seq_add_stmt (&new_stmt, x);
1246
1247 seq = lower_try_finally_dup_block (finally, state);
1248 lower_eh_constructs_1 (state, seq);
1249 gimple_seq_add_seq (&new_stmt, seq);
1250
1251 gimple_seq_add_stmt (&new_stmt, q->cont_stmt);
1252 maybe_record_in_goto_queue (state, q->cont_stmt);
1253 }
1254
1255 for (q = tf->goto_queue; q < qe; q++)
1256 {
1257 tree lab;
1258
1259 index = q->index < 0 ? return_index : q->index;
1260
1261 if (labels[index].q == q)
1262 continue;
1263
1264 lab = labels[index].label;
1265
1266 if (index == return_index)
1267 do_return_redirection (q, lab, NULL, &return_val);
1268 else
1269 do_goto_redirection (q, lab, NULL, tf);
1270 }
1271
1272 replace_goto_queue (tf);
1273 free (labels);
1274 }
1275
1276 /* The new stmts must be linked in after running replace_goto_queue,
1277 so that the same goto stmts are not processed twice. */
1278 gimple_seq_add_seq (&tf->top_p_seq, new_stmt);
1279 }
1280
1281 /* A subroutine of lower_try_finally. There are multiple edges incoming
1282 and outgoing from the finally block. Implement this by instrumenting
1283 each incoming edge and creating a switch statement at the end of the
1284 finally block that branches to the appropriate destination. */
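/* Roughly, and only as an illustration (names and placeholders are
   invented): with a fallthru edge and one escaping goto, the result
   has the shape

     finally_tmp = <fallthru's index>;  goto finally_label;
     ...
     finally_tmp = <goto's index>;      goto finally_label;
   finally_label:
     <finally body>
     switch (finally_tmp) { case <fallthru's index>: goto fallthru;
                            case <goto's index>:     goto dest; }

   so the finally block is emitted exactly once.  */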
1285
1286 static void
1287 lower_try_finally_switch (struct leh_state *state, struct leh_tf_state *tf)
1288 {
1289 struct goto_queue_node *q, *qe;
1290 tree return_val = NULL;
1291 tree finally_tmp, finally_label;
1292 int return_index, eh_index, fallthru_index;
1293 int nlabels, ndests, j, last_case_index;
1294 tree last_case;
1295 VEC (tree,heap) *case_label_vec;
1296 gimple_seq switch_body;
1297 gimple x;
1298 tree tmp;
1299 gimple switch_stmt;
1300 gimple_seq finally;
1301 struct pointer_map_t *cont_map = NULL;
1302 /* The location of the TRY_FINALLY stmt. */
1303 location_t tf_loc = gimple_location (tf->try_finally_expr);
1304 /* The location of the finally block. */
1305 location_t finally_loc;
1306
1307 switch_body = gimple_seq_alloc ();
1308
1309 /* Mash the TRY block to the head of the chain. */
1310 finally = gimple_try_cleanup (tf->top_p);
1311 tf->top_p_seq = gimple_try_eval (tf->top_p);
1312
1313 /* The location of the finally is either the last stmt in the finally
1314 block or the location of the TRY_FINALLY itself. */
1315 finally_loc = gimple_seq_last_stmt (tf->top_p_seq) != NULL ?
1316 gimple_location (gimple_seq_last_stmt (tf->top_p_seq))
1317 : tf_loc;
1318
1319 /* Lower the finally block itself. */
1320 lower_eh_constructs_1 (state, finally);
1321
1322 /* Prepare for switch statement generation. */
1323 nlabels = VEC_length (tree, tf->dest_array);
1324 return_index = nlabels;
1325 eh_index = return_index + tf->may_return;
1326 fallthru_index = eh_index + tf->may_throw;
1327 ndests = fallthru_index + tf->may_fallthru;
1328
1329 finally_tmp = create_tmp_var (integer_type_node, "finally_tmp");
1330 finally_label = create_artificial_label (finally_loc);
1331
1332 /* We use VEC_quick_push on case_label_vec throughout this function,
1333 since we know the size in advance and allocate precisely as much
1334 space as needed. */
1335 case_label_vec = VEC_alloc (tree, heap, ndests);
1336 last_case = NULL;
1337 last_case_index = 0;
1338
1339 /* Begin inserting code for getting to the finally block. Things
1340 are done in this order to correspond to the sequence the code is
1341 laid out. */
1342
1343 if (tf->may_fallthru)
1344 {
1345 x = gimple_build_assign (finally_tmp,
1346 build_int_cst (NULL, fallthru_index));
1347 gimple_seq_add_stmt (&tf->top_p_seq, x);
1348
1349 last_case = build3 (CASE_LABEL_EXPR, void_type_node,
1350 build_int_cst (NULL, fallthru_index),
1351 NULL, create_artificial_label (tf_loc));
1352 VEC_quick_push (tree, case_label_vec, last_case);
1353 last_case_index++;
1354
1355 x = gimple_build_label (CASE_LABEL (last_case));
1356 gimple_seq_add_stmt (&switch_body, x);
1357
1358 tmp = lower_try_finally_fallthru_label (tf);
1359 x = gimple_build_goto (tmp);
1360 gimple_seq_add_stmt (&switch_body, x);
1361 }
1362
1363 if (tf->may_throw)
1364 {
1365 emit_post_landing_pad (&eh_seq, tf->region);
1366
1367 x = gimple_build_assign (finally_tmp,
1368 build_int_cst (NULL, eh_index));
1369 gimple_seq_add_stmt (&eh_seq, x);
1370
1371 x = gimple_build_goto (finally_label);
1372 gimple_seq_add_stmt (&eh_seq, x);
1373
1374 last_case = build3 (CASE_LABEL_EXPR, void_type_node,
1375 build_int_cst (NULL, eh_index),
1376 NULL, create_artificial_label (tf_loc));
1377 VEC_quick_push (tree, case_label_vec, last_case);
1378 last_case_index++;
1379
1380 x = gimple_build_label (CASE_LABEL (last_case));
1381 gimple_seq_add_stmt (&eh_seq, x);
1382 emit_resx (&eh_seq, tf->region);
1383 }
1384
1385 x = gimple_build_label (finally_label);
1386 gimple_seq_add_stmt (&tf->top_p_seq, x);
1387
1388 gimple_seq_add_seq (&tf->top_p_seq, finally);
1389
1390 /* Redirect each incoming goto edge. */
1391 q = tf->goto_queue;
1392 qe = q + tf->goto_queue_active;
1393 j = last_case_index + tf->may_return;
1394 /* Prepare the assignments to finally_tmp that are executed upon the
1395 entrance through a particular edge. */
1396 for (; q < qe; ++q)
1397 {
1398 gimple_seq mod;
1399 int switch_id;
1400 unsigned int case_index;
1401
1402 mod = gimple_seq_alloc ();
1403
1404 if (q->index < 0)
1405 {
1406 x = gimple_build_assign (finally_tmp,
1407 build_int_cst (NULL, return_index));
1408 gimple_seq_add_stmt (&mod, x);
1409 do_return_redirection (q, finally_label, mod, &return_val);
1410 switch_id = return_index;
1411 }
1412 else
1413 {
1414 x = gimple_build_assign (finally_tmp,
1415 build_int_cst (NULL, q->index));
1416 gimple_seq_add_stmt (&mod, x);
1417 do_goto_redirection (q, finally_label, mod, tf);
1418 switch_id = q->index;
1419 }
1420
1421 case_index = j + q->index;
1422 if (VEC_length (tree, case_label_vec) <= case_index
1423 || !VEC_index (tree, case_label_vec, case_index))
1424 {
1425 tree case_lab;
1426 void **slot;
1427 case_lab = build3 (CASE_LABEL_EXPR, void_type_node,
1428 build_int_cst (NULL, switch_id),
1429 NULL, NULL);
1430 /* We store the cont_stmt in the pointer map, so that we can recover
1431 it in the loop below. We don't create the new label while
1432 walking the goto_queue because pointers don't offer a stable
1433 order. */
1434 if (!cont_map)
1435 cont_map = pointer_map_create ();
1436 slot = pointer_map_insert (cont_map, case_lab);
1437 *slot = q->cont_stmt;
1438 VEC_quick_push (tree, case_label_vec, case_lab);
1439 }
1440 }
1441 for (j = last_case_index; j < last_case_index + nlabels; j++)
1442 {
1443 tree label;
1444 gimple cont_stmt;
1445 void **slot;
1446
1447 last_case = VEC_index (tree, case_label_vec, j);
1448
1449 gcc_assert (last_case);
1450 gcc_assert (cont_map);
1451
1452 slot = pointer_map_contains (cont_map, last_case);
1453 /* As the comment above suggests, CASE_LABEL (last_case) was just a
1454 placeholder; it does not store an actual label yet. */
1455 gcc_assert (slot);
1456 cont_stmt = *(gimple *) slot;
1457
1458 label = create_artificial_label (tf_loc);
1459 CASE_LABEL (last_case) = label;
1460
1461 x = gimple_build_label (label);
1462 gimple_seq_add_stmt (&switch_body, x);
1463 gimple_seq_add_stmt (&switch_body, cont_stmt);
1464 maybe_record_in_goto_queue (state, cont_stmt);
1465 }
1466 if (cont_map)
1467 pointer_map_destroy (cont_map);
1468
1469 replace_goto_queue (tf);
1470
1471 /* Make sure that the last case is the default label, as one is required.
1472 Then sort the labels, which is also required in GIMPLE. */
1473 CASE_LOW (last_case) = NULL;
1474 sort_case_labels (case_label_vec);
1475
1476 /* Build the switch statement, setting last_case to be the default
1477 label. */
1478 switch_stmt = gimple_build_switch_vec (finally_tmp, last_case,
1479 case_label_vec);
1480 gimple_set_location (switch_stmt, finally_loc);
1481
1482 /* SWITCH_STMT must be linked in after running replace_goto_queue,
1483 so that the same goto stmts are not processed twice. */
1484 gimple_seq_add_stmt (&tf->top_p_seq, switch_stmt);
1485 gimple_seq_add_seq (&tf->top_p_seq, switch_body);
1486 }
1487
1488 /* Decide whether or not we are going to duplicate the finally block.
1489 There are several considerations.
1490
1491 First, if this is Java, then the finally block contains code
1492 written by the user. It has line numbers associated with it,
1493 so duplicating the block means it's difficult to set a breakpoint.
1494 Since controlling code generation via -g is verboten, we simply
1495 never duplicate code without optimization.
1496
1497 Second, we'd like to prevent egregious code growth. One way to
1498 do this is to estimate the size of the finally block, multiply
1499 that by the number of copies we'd need to make, and compare against
1500 the estimate of the size of the switch machinery we'd have to add. */
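/* A worked example with made-up numbers: if count_insns_seq says the
   finally block costs 15 and ndests == 3, then f_estimate = (15 + 1) * 3
   = 48 and sw_estimate = 10 + 2 * 3 = 16.  When optimizing for size the
   switch form wins (48 >= 16); at -O2 the copies are still preferred
   because 48 < 100.  */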
1501
1502 static bool
1503 decide_copy_try_finally (int ndests, gimple_seq finally)
1504 {
1505 int f_estimate, sw_estimate;
1506
1507 if (!optimize)
1508 return false;
1509
1510 /* Finally estimate N times, plus N gotos. */
1511 f_estimate = count_insns_seq (finally, &eni_size_weights);
1512 f_estimate = (f_estimate + 1) * ndests;
1513
1514 /* Switch statement (cost 10), N variable assignments, N gotos. */
1515 sw_estimate = 10 + 2 * ndests;
1516
1517 /* Optimize for size clearly wants our best guess. */
1518 if (optimize_function_for_size_p (cfun))
1519 return f_estimate < sw_estimate;
1520
1521 /* ??? These numbers are completely made up so far. */
1522 if (optimize > 1)
1523 return f_estimate < 100 || f_estimate < sw_estimate * 2;
1524 else
1525 return f_estimate < 40 || f_estimate * 2 < sw_estimate * 3;
1526 }
1527
1528
1529 /* A subroutine of lower_eh_constructs_1. Lower a GIMPLE_TRY_FINALLY node
1530 to a sequence of labels and blocks, plus the exception region trees
1531 that record all the magic. This is complicated by the need to
1532 arrange for the FINALLY block to be executed on all exits. */
1533
1534 static gimple_seq
1535 lower_try_finally (struct leh_state *state, gimple tp)
1536 {
1537 struct leh_tf_state this_tf;
1538 struct leh_state this_state;
1539 int ndests;
1540
1541 /* Process the try block. */
1542
1543 memset (&this_tf, 0, sizeof (this_tf));
1544 this_tf.try_finally_expr = tp;
1545 this_tf.top_p = tp;
1546 this_tf.outer = state;
1547 if (using_eh_for_cleanups_p)
1548 this_tf.region = gen_eh_region_cleanup (state->cur_region);
1549 else
1550 this_tf.region = NULL;
1551
1552 this_state.cur_region = this_tf.region;
1553 this_state.ehp_region = state->ehp_region;
1554 this_state.tf = &this_tf;
1555
1556 lower_eh_constructs_1 (&this_state, gimple_try_eval(tp));
1557
1558 /* Determine if the try block is escaped through the bottom. */
1559 this_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp));
1560
1561 /* Determine if any exceptions are possible within the try block. */
1562 if (using_eh_for_cleanups_p)
1563 this_tf.may_throw = bitmap_bit_p (eh_region_may_contain_throw,
1564 this_tf.region->index);
1565 if (this_tf.may_throw)
1566 honor_protect_cleanup_actions (state, &this_state, &this_tf);
1567
1568 /* Determine how many edges (still) reach the finally block. Or rather,
1569 how many destinations are reached by the finally block. Use this to
1570 determine how we process the finally block itself. */
1571
1572 ndests = VEC_length (tree, this_tf.dest_array);
1573 ndests += this_tf.may_fallthru;
1574 ndests += this_tf.may_return;
1575 ndests += this_tf.may_throw;
1576
1577 /* If the FINALLY block is not reachable, dike it out. */
1578 if (ndests == 0)
1579 {
1580 gimple_seq_add_seq (&this_tf.top_p_seq, gimple_try_eval (tp));
1581 gimple_try_set_cleanup (tp, NULL);
1582 }
1583 /* If the finally block doesn't fall through, then any destination
1584 we might try to impose there isn't reached either. There may be
1585 some minor amount of cleanup and redirection still needed. */
1586 else if (!gimple_seq_may_fallthru (gimple_try_cleanup (tp)))
1587 lower_try_finally_nofallthru (state, &this_tf);
1588
1589 /* We can easily special-case redirection to a single destination. */
1590 else if (ndests == 1)
1591 lower_try_finally_onedest (state, &this_tf);
1592 else if (decide_copy_try_finally (ndests, gimple_try_cleanup (tp)))
1593 lower_try_finally_copy (state, &this_tf);
1594 else
1595 lower_try_finally_switch (state, &this_tf);
1596
1597 /* If someone requested we add a label at the end of the transformed
1598 block, do so. */
1599 if (this_tf.fallthru_label)
1600 {
1601 /* This must be reached only if ndests == 0. */
1602 gimple x = gimple_build_label (this_tf.fallthru_label);
1603 gimple_seq_add_stmt (&this_tf.top_p_seq, x);
1604 }
1605
1606 VEC_free (tree, heap, this_tf.dest_array);
1607 if (this_tf.goto_queue)
1608 free (this_tf.goto_queue);
1609 if (this_tf.goto_queue_map)
1610 pointer_map_destroy (this_tf.goto_queue_map);
1611
1612 return this_tf.top_p_seq;
1613 }
1614
1615 /* A subroutine of lower_eh_constructs_1. Lower a GIMPLE_TRY_CATCH with a
1616 list of GIMPLE_CATCH to a sequence of labels and blocks, plus the
1617 exception region trees that record all the magic. */
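/* Roughly, and only as an illustration of the shape of the output (label
   names are invented): try { body } catch (T) { handler } becomes

     body
   over:                                in the normal sequence, and,
     post_landing_pad:                  in eh_seq for the new try region:
       eh_dispatch <region>;
       resx;
     <label for catch T>: handler; goto over;  */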
1618
1619 static gimple_seq
1620 lower_catch (struct leh_state *state, gimple tp)
1621 {
1622 eh_region try_region;
1623 struct leh_state this_state;
1624 gimple_stmt_iterator gsi;
1625 tree out_label;
1626 gimple_seq new_seq;
1627 gimple x;
1628 location_t try_catch_loc = gimple_location (tp);
1629
1630 try_region = gen_eh_region_try (state->cur_region);
1631
1632 this_state = *state;
1633 this_state.cur_region = try_region;
1634
1635 lower_eh_constructs_1 (&this_state, gimple_try_eval (tp));
1636
1637 if (!bitmap_bit_p (eh_region_may_contain_throw, try_region->index))
1638 return gimple_try_eval (tp);
1639
1640 new_seq = NULL;
1641 emit_eh_dispatch (&new_seq, try_region);
1642 emit_resx (&new_seq, try_region);
1643
1644 this_state.cur_region = state->cur_region;
1645 this_state.ehp_region = try_region;
1646
1647 out_label = NULL;
1648 for (gsi = gsi_start (gimple_try_cleanup (tp));
1649 !gsi_end_p (gsi);
1650 gsi_next (&gsi))
1651 {
1652 eh_catch c;
1653 gimple gcatch;
1654 gimple_seq handler;
1655
1656 gcatch = gsi_stmt (gsi);
1657 c = gen_eh_region_catch (try_region, gimple_catch_types (gcatch));
1658
1659 handler = gimple_catch_handler (gcatch);
1660 lower_eh_constructs_1 (&this_state, handler);
1661
1662 c->label = create_artificial_label (UNKNOWN_LOCATION);
1663 x = gimple_build_label (c->label);
1664 gimple_seq_add_stmt (&new_seq, x);
1665
1666 gimple_seq_add_seq (&new_seq, handler);
1667
1668 if (gimple_seq_may_fallthru (new_seq))
1669 {
1670 if (!out_label)
1671 out_label = create_artificial_label (try_catch_loc);
1672
1673 x = gimple_build_goto (out_label);
1674 gimple_seq_add_stmt (&new_seq, x);
1675 }
1676 }
1677
1678 gimple_try_set_cleanup (tp, new_seq);
1679
1680 return frob_into_branch_around (tp, try_region, out_label);
1681 }
1682
1683 /* A subroutine of lower_eh_constructs_1. Lower a GIMPLE_TRY with a
1684 GIMPLE_EH_FILTER to a sequence of labels and blocks, plus the exception
1685 region trees that record all the magic. */
1686
1687 static gimple_seq
1688 lower_eh_filter (struct leh_state *state, gimple tp)
1689 {
1690 struct leh_state this_state;
1691 eh_region this_region;
1692 gimple inner, x;
1693 gimple_seq new_seq;
1694
1695 inner = gimple_seq_first_stmt (gimple_try_cleanup (tp));
1696
1697 this_region = gen_eh_region_allowed (state->cur_region,
1698 gimple_eh_filter_types (inner));
1699 this_state = *state;
1700 this_state.cur_region = this_region;
1701
1702 lower_eh_constructs_1 (&this_state, gimple_try_eval (tp));
1703
1704 if (!bitmap_bit_p (eh_region_may_contain_throw, this_region->index))
1705 return gimple_try_eval (tp);
1706
1707 new_seq = NULL;
1708 this_state.cur_region = state->cur_region;
1709 this_state.ehp_region = this_region;
1710
1711 emit_eh_dispatch (&new_seq, this_region);
1712 emit_resx (&new_seq, this_region);
1713
1714 this_region->u.allowed.label = create_artificial_label (UNKNOWN_LOCATION);
1715 x = gimple_build_label (this_region->u.allowed.label);
1716 gimple_seq_add_stmt (&new_seq, x);
1717
1718 lower_eh_constructs_1 (&this_state, gimple_eh_filter_failure (inner));
1719 gimple_seq_add_seq (&new_seq, gimple_eh_filter_failure (inner));
1720
1721 gimple_try_set_cleanup (tp, new_seq);
1722
1723 return frob_into_branch_around (tp, this_region, NULL);
1724 }
1725
1726 /* A subroutine of lower_eh_constructs_1. Lower a GIMPLE_TRY with
1727 an GIMPLE_EH_MUST_NOT_THROW to a sequence of labels and blocks,
1728 plus the exception region trees that record all the magic. */
1729
1730 static gimple_seq
1731 lower_eh_must_not_throw (struct leh_state *state, gimple tp)
1732 {
1733 struct leh_state this_state;
1734 eh_region this_region;
1735 gimple inner;
1736
1737 inner = gimple_seq_first_stmt (gimple_try_cleanup (tp));
1738
1739 this_region = gen_eh_region_must_not_throw (state->cur_region);
1740 this_region->u.must_not_throw.failure_decl
1741 = gimple_eh_must_not_throw_fndecl (inner);
1742 this_region->u.must_not_throw.failure_loc = gimple_location (tp);
1743
1744 /* In order to get mangling applied to this decl, we must mark it
1745 used now. Otherwise, pass_ipa_free_lang_data won't think it
1746 needs to happen. */
1747 TREE_USED (this_region->u.must_not_throw.failure_decl) = 1;
1748
1749 this_state = *state;
1750 this_state.cur_region = this_region;
1751
1752 lower_eh_constructs_1 (&this_state, gimple_try_eval (tp));
1753
1754 return gimple_try_eval (tp);
1755 }
1756
1757 /* Implement a cleanup expression. This is similar to try-finally,
1758 except that we only execute the cleanup block for exception edges. */
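/* For illustration (a sketch with invented syntax): a cleanup region

     try { body; } cleanup { undo (); }

   runs undo () only when an exception escapes body, so if exceptions are
   disabled the whole construct degenerates to just body, which is the
   first case handled below.  */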
1759
1760 static gimple_seq
1761 lower_cleanup (struct leh_state *state, gimple tp)
1762 {
1763 struct leh_state this_state;
1764 eh_region this_region;
1765 struct leh_tf_state fake_tf;
1766 gimple_seq result;
1767
1768 /* If not using eh, then exception-only cleanups are no-ops. */
1769 if (!flag_exceptions)
1770 {
1771 result = gimple_try_eval (tp);
1772 lower_eh_constructs_1 (state, result);
1773 return result;
1774 }
1775
1776 this_region = gen_eh_region_cleanup (state->cur_region);
1777 this_state = *state;
1778 this_state.cur_region = this_region;
1779
1780 lower_eh_constructs_1 (&this_state, gimple_try_eval (tp));
1781
1782 if (!bitmap_bit_p (eh_region_may_contain_throw, this_region->index))
1783 return gimple_try_eval (tp);
1784
1785 /* Build enough of a try-finally state so that we can reuse
1786 honor_protect_cleanup_actions. */
1787 memset (&fake_tf, 0, sizeof (fake_tf));
1788 fake_tf.top_p = fake_tf.try_finally_expr = tp;
1789 fake_tf.outer = state;
1790 fake_tf.region = this_region;
1791 fake_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp));
1792 fake_tf.may_throw = true;
1793
1794 honor_protect_cleanup_actions (state, NULL, &fake_tf);
1795
1796 if (fake_tf.may_throw)
1797 {
1798 /* In this case honor_protect_cleanup_actions had nothing to do,
1799 and we should process this normally. */
1800 lower_eh_constructs_1 (state, gimple_try_cleanup (tp));
1801 result = frob_into_branch_around (tp, this_region,
1802 fake_tf.fallthru_label);
1803 }
1804 else
1805 {
1806 /* In this case honor_protect_cleanup_actions did nearly all of
1807 the work. All we have left is to append the fallthru_label. */
1808
1809 result = gimple_try_eval (tp);
1810 if (fake_tf.fallthru_label)
1811 {
1812 gimple x = gimple_build_label (fake_tf.fallthru_label);
1813 gimple_seq_add_stmt (&result, x);
1814 }
1815 }
1816 return result;
1817 }
1818
1819 /* Main loop for lowering eh constructs. Also moves gsi to the next
1820 statement. */
1821
1822 static void
1823 lower_eh_constructs_2 (struct leh_state *state, gimple_stmt_iterator *gsi)
1824 {
1825 gimple_seq replace;
1826 gimple x;
1827 gimple stmt = gsi_stmt (*gsi);
1828
1829 switch (gimple_code (stmt))
1830 {
1831 case GIMPLE_CALL:
1832 {
1833 tree fndecl = gimple_call_fndecl (stmt);
1834 tree rhs, lhs;
1835
1836 if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
1837 switch (DECL_FUNCTION_CODE (fndecl))
1838 {
1839 case BUILT_IN_EH_POINTER:
1840 /* The front end may have generated a call to
1841 __builtin_eh_pointer (0) within a catch region. Replace
1842 this zero argument with the current catch region number. */
1843 if (state->ehp_region)
1844 {
1845 tree nr = build_int_cst (NULL, state->ehp_region->index);
1846 gimple_call_set_arg (stmt, 0, nr);
1847 }
1848 else
1849 {
1850 /* The user has done something silly. Remove it. */
1851 rhs = build_int_cst (ptr_type_node, 0);
1852 goto do_replace;
1853 }
1854 break;
1855
1856 case BUILT_IN_EH_FILTER:
1857 /* ??? This should never appear, but since it's a builtin it
1858 is accessible to abuse by users. Just remove it and
1859 replace the use with the arbitrary value zero. */
1860 rhs = build_int_cst (TREE_TYPE (TREE_TYPE (fndecl)), 0);
1861 do_replace:
1862 lhs = gimple_call_lhs (stmt);
1863 x = gimple_build_assign (lhs, rhs);
1864 gsi_insert_before (gsi, x, GSI_SAME_STMT);
1865 /* FALLTHRU */
1866
1867 case BUILT_IN_EH_COPY_VALUES:
1868 /* Likewise this should not appear. Remove it. */
1869 gsi_remove (gsi, true);
1870 return;
1871
1872 default:
1873 break;
1874 }
1875 }
1876 /* FALLTHRU */
1877
1878 case GIMPLE_ASSIGN:
1879 /* If the stmt can throw use a new temporary for the assignment
1880 to a LHS. This makes sure the old value of the LHS is
1881 available on the EH edge. */
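      /* Roughly (illustrative GIMPLE, assuming the division may trap):
           x = y / z;
         becomes
           tmp = y / z;
           x = tmp;
         so that x still carries its old value along the EH edge out of
         the trapping statement.  */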
1882 if (stmt_could_throw_p (stmt)
1883 && gimple_has_lhs (stmt)
1884 && !tree_could_throw_p (gimple_get_lhs (stmt))
1885 && is_gimple_reg_type (TREE_TYPE (gimple_get_lhs (stmt))))
1886 {
1887 tree lhs = gimple_get_lhs (stmt);
1888 tree tmp = create_tmp_var (TREE_TYPE (lhs), NULL);
1889 gimple s = gimple_build_assign (lhs, tmp);
1890 gimple_set_location (s, gimple_location (stmt));
1891 gimple_set_block (s, gimple_block (stmt));
1892 gimple_set_lhs (stmt, tmp);
1893 if (TREE_CODE (TREE_TYPE (tmp)) == COMPLEX_TYPE
1894 || TREE_CODE (TREE_TYPE (tmp)) == VECTOR_TYPE)
1895 DECL_GIMPLE_REG_P (tmp) = 1;
1896 gsi_insert_after (gsi, s, GSI_SAME_STMT);
1897 }
1898 /* Look for things that can throw exceptions, and record them. */
1899 if (state->cur_region && stmt_could_throw_p (stmt))
1900 {
1901 record_stmt_eh_region (state->cur_region, stmt);
1902 note_eh_region_may_contain_throw (state->cur_region);
1903 }
1904 break;
1905
1906 case GIMPLE_COND:
1907 case GIMPLE_GOTO:
1908 case GIMPLE_RETURN:
1909 maybe_record_in_goto_queue (state, stmt);
1910 break;
1911
1912 case GIMPLE_SWITCH:
1913 verify_norecord_switch_expr (state, stmt);
1914 break;
1915
1916 case GIMPLE_TRY:
1917 if (gimple_try_kind (stmt) == GIMPLE_TRY_FINALLY)
1918 replace = lower_try_finally (state, stmt);
1919 else
1920 {
1921 x = gimple_seq_first_stmt (gimple_try_cleanup (stmt));
1922 switch (gimple_code (x))
1923 {
1924 case GIMPLE_CATCH:
1925 replace = lower_catch (state, stmt);
1926 break;
1927 case GIMPLE_EH_FILTER:
1928 replace = lower_eh_filter (state, stmt);
1929 break;
1930 case GIMPLE_EH_MUST_NOT_THROW:
1931 replace = lower_eh_must_not_throw (state, stmt);
1932 break;
1933 default:
1934 replace = lower_cleanup (state, stmt);
1935 break;
1936 }
1937 }
1938
1939 /* Remove the old stmt and insert the transformed sequence
1940 instead. */
1941 gsi_insert_seq_before (gsi, replace, GSI_SAME_STMT);
1942 gsi_remove (gsi, true);
1943
1944 /* Return since we don't want gsi_next () */
1945 return;
1946
1947 default:
1948 /* A type, a decl, or some kind of statement that we're not
1949 interested in. Don't walk them. */
1950 break;
1951 }
1952
1953 gsi_next (gsi);
1954 }
1955
1956 /* A helper to unwrap a gimple_seq and feed stmts to lower_eh_constructs_2. */
1957
1958 static void
1959 lower_eh_constructs_1 (struct leh_state *state, gimple_seq seq)
1960 {
1961 gimple_stmt_iterator gsi;
1962 for (gsi = gsi_start (seq); !gsi_end_p (gsi);)
1963 lower_eh_constructs_2 (state, &gsi);
1964 }
1965
1966 static unsigned int
1967 lower_eh_constructs (void)
1968 {
1969 struct leh_state null_state;
1970 gimple_seq bodyp;
1971
1972 bodyp = gimple_body (current_function_decl);
1973 if (bodyp == NULL)
1974 return 0;
1975
1976 finally_tree = htab_create (31, struct_ptr_hash, struct_ptr_eq, free);
1977 eh_region_may_contain_throw = BITMAP_ALLOC (NULL);
1978 memset (&null_state, 0, sizeof (null_state));
1979
1980 collect_finally_tree_1 (bodyp, NULL);
1981 lower_eh_constructs_1 (&null_state, bodyp);
1982
1983 /* We assume there's a return statement, or something, at the end of
1984 the function, and thus plopping the EH sequence afterward won't
1985 change anything. */
1986 gcc_assert (!gimple_seq_may_fallthru (bodyp));
1987 gimple_seq_add_seq (&bodyp, eh_seq);
1988
1989 /* We assume that since BODYP already existed, adding EH_SEQ to it
1990 didn't change its value, and we don't have to re-set the function. */
1991 gcc_assert (bodyp == gimple_body (current_function_decl));
1992
1993 htab_delete (finally_tree);
1994 BITMAP_FREE (eh_region_may_contain_throw);
1995 eh_seq = NULL;
1996
1997 /* If this function needs a language specific EH personality routine
1998 and the frontend didn't already set one do so now. */
1999 if (function_needs_eh_personality (cfun) == eh_personality_lang
2000 && !DECL_FUNCTION_PERSONALITY (current_function_decl))
2001 DECL_FUNCTION_PERSONALITY (current_function_decl)
2002 = lang_hooks.eh_personality ();
2003
2004 return 0;
2005 }
2006
2007 struct gimple_opt_pass pass_lower_eh =
2008 {
2009 {
2010 GIMPLE_PASS,
2011 "eh", /* name */
2012 NULL, /* gate */
2013 lower_eh_constructs, /* execute */
2014 NULL, /* sub */
2015 NULL, /* next */
2016 0, /* static_pass_number */
2017 TV_TREE_EH, /* tv_id */
2018 PROP_gimple_lcf, /* properties_required */
2019 PROP_gimple_leh, /* properties_provided */
2020 0, /* properties_destroyed */
2021 0, /* todo_flags_start */
2022 TODO_dump_func /* todo_flags_finish */
2023 }
2024 };
2025 \f
2026 /* Create the multiple edges from an EH_DISPATCH statement to all of
2027 the possible handlers for its EH region. Return true if the dispatch
2028 can still fall through (there is no catch-all handler); false otherwise. */
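
/* As a sketch (hypothetical C++), for

     try { ... } catch (int) { ... } catch (...) { ... }

   the EH_DISPATCH statement of the try region gets one edge to the
   "catch (int)" block and one to the "catch (...)" block; because the
   catch-all can handle anything, nothing can fall through past the
   dispatch and the function returns false.  */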
2029
2030 bool
2031 make_eh_dispatch_edges (gimple stmt)
2032 {
2033 eh_region r;
2034 eh_catch c;
2035 basic_block src, dst;
2036
2037 r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
2038 src = gimple_bb (stmt);
2039
2040 switch (r->type)
2041 {
2042 case ERT_TRY:
2043 for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
2044 {
2045 dst = label_to_block (c->label);
2046 make_edge (src, dst, 0);
2047
2048 /* A catch-all handler doesn't have a fallthru. */
2049 if (c->type_list == NULL)
2050 return false;
2051 }
2052 break;
2053
2054 case ERT_ALLOWED_EXCEPTIONS:
2055 dst = label_to_block (r->u.allowed.label);
2056 make_edge (src, dst, 0);
2057 break;
2058
2059 default:
2060 gcc_unreachable ();
2061 }
2062
2063 return true;
2064 }
2065
2066 /* Create the single EH edge from STMT to its nearest landing pad,
2067 if there is such a landing pad within the current function. */
2068
2069 void
2070 make_eh_edges (gimple stmt)
2071 {
2072 basic_block src, dst;
2073 eh_landing_pad lp;
2074 int lp_nr;
2075
2076 lp_nr = lookup_stmt_eh_lp (stmt);
2077 if (lp_nr <= 0)
2078 return;
2079
2080 lp = get_eh_landing_pad_from_number (lp_nr);
2081 gcc_assert (lp != NULL);
2082
2083 src = gimple_bb (stmt);
2084 dst = label_to_block (lp->post_landing_pad);
2085 make_edge (src, dst, EDGE_EH);
2086 }
2087
2088 /* Do the work in redirecting EDGE_IN to NEW_BB within the EH region tree;
2089 do not actually perform the final edge redirection.
2090
2091 CHANGE_REGION is true when we're being called from cleanup_empty_eh and
2092 we intend to change the destination EH region as well; this means
2093 EH_LANDING_PAD_NR must already be set on the destination block label.
2094 If false, we're being called from generic cfg manipulation code and we
2095 should preserve our place within the region tree. */
2096
2097 static void
2098 redirect_eh_edge_1 (edge edge_in, basic_block new_bb, bool change_region)
2099 {
2100 eh_landing_pad old_lp, new_lp;
2101 basic_block old_bb;
2102 gimple throw_stmt;
2103 int old_lp_nr, new_lp_nr;
2104 tree old_label, new_label;
2105 edge_iterator ei;
2106 edge e;
2107
2108 old_bb = edge_in->dest;
2109 old_label = gimple_block_label (old_bb);
2110 old_lp_nr = EH_LANDING_PAD_NR (old_label);
2111 gcc_assert (old_lp_nr > 0);
2112 old_lp = get_eh_landing_pad_from_number (old_lp_nr);
2113
2114 throw_stmt = last_stmt (edge_in->src);
2115 gcc_assert (lookup_stmt_eh_lp (throw_stmt) == old_lp_nr);
2116
2117 new_label = gimple_block_label (new_bb);
2118
2119 /* Look for an existing region that might be using NEW_BB already. */
2120 new_lp_nr = EH_LANDING_PAD_NR (new_label);
2121 if (new_lp_nr)
2122 {
2123 new_lp = get_eh_landing_pad_from_number (new_lp_nr);
2124 gcc_assert (new_lp);
2125
2126 /* Unless CHANGE_REGION is true, the new and old landing pad
2127 had better be associated with the same EH region. */
2128 gcc_assert (change_region || new_lp->region == old_lp->region);
2129 }
2130 else
2131 {
2132 new_lp = NULL;
2133 gcc_assert (!change_region);
2134 }
2135
2136 /* Notice when we redirect the last EH edge away from OLD_BB. */
2137 FOR_EACH_EDGE (e, ei, old_bb->preds)
2138 if (e != edge_in && (e->flags & EDGE_EH))
2139 break;
2140
2141 if (new_lp)
2142 {
2143 /* NEW_LP already exists. If there are still edges into OLD_LP,
2144 there's nothing to do with the EH tree. If there are no more
2145 edges into OLD_LP, then we want to remove OLD_LP as it is unused.
2146 If CHANGE_REGION is true, then our caller is expecting to remove
2147 the landing pad. */
2148 if (e == NULL && !change_region)
2149 remove_eh_landing_pad (old_lp);
2150 }
2151 else
2152 {
2153 /* No correct landing pad exists. If there are no more edges
2154 into OLD_LP, then we can simply re-use the existing landing pad.
2155 Otherwise, we have to create a new landing pad. */
2156 if (e == NULL)
2157 {
2158 EH_LANDING_PAD_NR (old_lp->post_landing_pad) = 0;
2159 new_lp = old_lp;
2160 }
2161 else
2162 new_lp = gen_eh_landing_pad (old_lp->region);
2163 new_lp->post_landing_pad = new_label;
2164 EH_LANDING_PAD_NR (new_label) = new_lp->index;
2165 }
2166
2167 /* Maybe move the throwing statement to the new region. */
2168 if (old_lp != new_lp)
2169 {
2170 remove_stmt_from_eh_lp (throw_stmt);
2171 add_stmt_to_eh_lp (throw_stmt, new_lp->index);
2172 }
2173 }
2174
2175 /* Redirect EH edge EDGE_IN to NEW_BB. */
2176
2177 edge
2178 redirect_eh_edge (edge edge_in, basic_block new_bb)
2179 {
2180 redirect_eh_edge_1 (edge_in, new_bb, false);
2181 return ssa_redirect_edge (edge_in, new_bb);
2182 }
2183
2184 /* This is a subroutine of gimple_redirect_edge_and_branch. Update the
2185 labels for redirecting a non-fallthru EH_DISPATCH edge E to NEW_BB.
2186 The actual edge update will happen in the caller. */
2187
2188 void
2189 redirect_eh_dispatch_edge (gimple stmt, edge e, basic_block new_bb)
2190 {
2191 tree new_lab = gimple_block_label (new_bb);
2192 bool any_changed = false;
2193 basic_block old_bb;
2194 eh_region r;
2195 eh_catch c;
2196
2197 r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
2198 switch (r->type)
2199 {
2200 case ERT_TRY:
2201 for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
2202 {
2203 old_bb = label_to_block (c->label);
2204 if (old_bb == e->dest)
2205 {
2206 c->label = new_lab;
2207 any_changed = true;
2208 }
2209 }
2210 break;
2211
2212 case ERT_ALLOWED_EXCEPTIONS:
2213 old_bb = label_to_block (r->u.allowed.label);
2214 gcc_assert (old_bb == e->dest);
2215 r->u.allowed.label = new_lab;
2216 any_changed = true;
2217 break;
2218
2219 default:
2220 gcc_unreachable ();
2221 }
2222
2223 gcc_assert (any_changed);
2224 }
2225 \f
2226 /* Helper function for operation_could_trap_p and stmt_could_throw_p. */
2227
2228 bool
2229 operation_could_trap_helper_p (enum tree_code op,
2230 bool fp_operation,
2231 bool honor_trapv,
2232 bool honor_nans,
2233 bool honor_snans,
2234 tree divisor,
2235 bool *handled)
2236 {
2237 *handled = true;
2238 switch (op)
2239 {
2240 case TRUNC_DIV_EXPR:
2241 case CEIL_DIV_EXPR:
2242 case FLOOR_DIV_EXPR:
2243 case ROUND_DIV_EXPR:
2244 case EXACT_DIV_EXPR:
2245 case CEIL_MOD_EXPR:
2246 case FLOOR_MOD_EXPR:
2247 case ROUND_MOD_EXPR:
2248 case TRUNC_MOD_EXPR:
2249 case RDIV_EXPR:
2250 if (honor_snans || honor_trapv)
2251 return true;
2252 if (fp_operation)
2253 return flag_trapping_math;
2254 if (!TREE_CONSTANT (divisor) || integer_zerop (divisor))
2255 return true;
2256 return false;
2257
2258 case LT_EXPR:
2259 case LE_EXPR:
2260 case GT_EXPR:
2261 case GE_EXPR:
2262 case LTGT_EXPR:
2263 /* Some floating point comparisons may trap. */
2264 return honor_nans;
2265
2266 case EQ_EXPR:
2267 case NE_EXPR:
2268 case UNORDERED_EXPR:
2269 case ORDERED_EXPR:
2270 case UNLT_EXPR:
2271 case UNLE_EXPR:
2272 case UNGT_EXPR:
2273 case UNGE_EXPR:
2274 case UNEQ_EXPR:
2275 return honor_snans;
2276
2277 case CONVERT_EXPR:
2278 case FIX_TRUNC_EXPR:
2279 /* Conversion of floating point might trap. */
2280 return honor_nans;
2281
2282 case NEGATE_EXPR:
2283 case ABS_EXPR:
2284 case CONJ_EXPR:
2285 /* These operations don't trap with floating point. */
2286 if (honor_trapv)
2287 return true;
2288 return false;
2289
2290 case PLUS_EXPR:
2291 case MINUS_EXPR:
2292 case MULT_EXPR:
2293 /* Any floating arithmetic may trap. */
2294 if (fp_operation && flag_trapping_math)
2295 return true;
2296 if (honor_trapv)
2297 return true;
2298 return false;
2299
2300 default:
2301 /* Any floating arithmetic may trap. */
2302 if (fp_operation && flag_trapping_math)
2303 return true;
2304
2305 *handled = false;
2306 return false;
2307 }
2308 }
2309
2310 /* Return true if operation OP may trap. FP_OPERATION is true if OP is applied
2311 on floating-point values. HONOR_TRAPV is true if OP is applied on integer
2312 type operands that may trap. If OP is a division operator, DIVISOR contains
2313 the value of the divisor. */
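
/* A small usage sketch (the divisor trees here are illustrative):

     operation_could_trap_p (TRUNC_DIV_EXPR, false, false, some_ssa_name);
       -> true, since a non-constant divisor may be zero
     operation_could_trap_p (TRUNC_DIV_EXPR, false, false,
                             build_int_cst (integer_type_node, 4));
       -> false, the divisor is a nonzero constant
     operation_could_trap_p (PLUS_EXPR, true, false, NULL_TREE);
       -> true only when -ftrapping-math is in effect  */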
2314
2315 bool
2316 operation_could_trap_p (enum tree_code op, bool fp_operation, bool honor_trapv,
2317 tree divisor)
2318 {
2319 bool honor_nans = (fp_operation && flag_trapping_math
2320 && !flag_finite_math_only);
2321 bool honor_snans = fp_operation && flag_signaling_nans != 0;
2322 bool handled;
2323
2324 if (TREE_CODE_CLASS (op) != tcc_comparison
2325 && TREE_CODE_CLASS (op) != tcc_unary
2326 && TREE_CODE_CLASS (op) != tcc_binary)
2327 return false;
2328
2329 return operation_could_trap_helper_p (op, fp_operation, honor_trapv,
2330 honor_nans, honor_snans, divisor,
2331 &handled);
2332 }
2333
2334 /* Return true if EXPR can trap, as in dereferencing an invalid pointer
2335 location or floating point arithmetic. Cf. the rtl version, may_trap_p.
2336 This routine expects only GIMPLE lhs or rhs input. */
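
/* For instance (illustrative): an ARRAY_REF a[i] whose index cannot be
   shown to be in bounds returns true, while a constant in-bounds a[2]
   returns false; an INDIRECT_REF *p returns true unless TREE_THIS_NOTRAP
   has been set on it; and a CALL_EXPR to a weak function is conservatively
   assumed to trap.  */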
2337
2338 bool
2339 tree_could_trap_p (tree expr)
2340 {
2341 enum tree_code code;
2342 bool fp_operation = false;
2343 bool honor_trapv = false;
2344 tree t, base, div = NULL_TREE;
2345
2346 if (!expr)
2347 return false;
2348
2349 code = TREE_CODE (expr);
2350 t = TREE_TYPE (expr);
2351
2352 if (t)
2353 {
2354 if (COMPARISON_CLASS_P (expr))
2355 fp_operation = FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 0)));
2356 else
2357 fp_operation = FLOAT_TYPE_P (t);
2358 honor_trapv = INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t);
2359 }
2360
2361 if (TREE_CODE_CLASS (code) == tcc_binary)
2362 div = TREE_OPERAND (expr, 1);
2363 if (operation_could_trap_p (code, fp_operation, honor_trapv, div))
2364 return true;
2365
2366 restart:
2367 switch (code)
2368 {
2369 case TARGET_MEM_REF:
2370 /* For TARGET_MEM_REFs use the information based on the original
2371 reference. */
2372 expr = TMR_ORIGINAL (expr);
2373 code = TREE_CODE (expr);
2374 goto restart;
2375
2376 case COMPONENT_REF:
2377 case REALPART_EXPR:
2378 case IMAGPART_EXPR:
2379 case BIT_FIELD_REF:
2380 case VIEW_CONVERT_EXPR:
2381 case WITH_SIZE_EXPR:
2382 expr = TREE_OPERAND (expr, 0);
2383 code = TREE_CODE (expr);
2384 goto restart;
2385
2386 case ARRAY_RANGE_REF:
2387 base = TREE_OPERAND (expr, 0);
2388 if (tree_could_trap_p (base))
2389 return true;
2390 if (TREE_THIS_NOTRAP (expr))
2391 return false;
2392 return !range_in_array_bounds_p (expr);
2393
2394 case ARRAY_REF:
2395 base = TREE_OPERAND (expr, 0);
2396 if (tree_could_trap_p (base))
2397 return true;
2398 if (TREE_THIS_NOTRAP (expr))
2399 return false;
2400 return !in_array_bounds_p (expr);
2401
2402 case INDIRECT_REF:
2403 case ALIGN_INDIRECT_REF:
2404 case MISALIGNED_INDIRECT_REF:
2405 return !TREE_THIS_NOTRAP (expr);
2406
2407 case ASM_EXPR:
2408 return TREE_THIS_VOLATILE (expr);
2409
2410 case CALL_EXPR:
2411 t = get_callee_fndecl (expr);
2412 /* Assume that calls to weak functions may trap. */
2413 if (!t || !DECL_P (t) || DECL_WEAK (t))
2414 return true;
2415 return false;
2416
2417 default:
2418 return false;
2419 }
2420 }
2421
2422
2423 /* Helper for stmt_could_throw_p. Return true if STMT (assumed to be
2424 an assignment or a conditional) may throw. */
2425
2426 static bool
2427 stmt_could_throw_1_p (gimple stmt)
2428 {
2429 enum tree_code code = gimple_expr_code (stmt);
2430 bool honor_nans = false;
2431 bool honor_snans = false;
2432 bool fp_operation = false;
2433 bool honor_trapv = false;
2434 tree t;
2435 size_t i;
2436 bool handled, ret;
2437
2438 if (TREE_CODE_CLASS (code) == tcc_comparison
2439 || TREE_CODE_CLASS (code) == tcc_unary
2440 || TREE_CODE_CLASS (code) == tcc_binary)
2441 {
2442 t = gimple_expr_type (stmt);
2443 fp_operation = FLOAT_TYPE_P (t);
2444 if (fp_operation)
2445 {
2446 honor_nans = flag_trapping_math && !flag_finite_math_only;
2447 honor_snans = flag_signaling_nans != 0;
2448 }
2449 else if (INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t))
2450 honor_trapv = true;
2451 }
2452
2453 /* Check if the main expression may trap. */
2454 t = is_gimple_assign (stmt) ? gimple_assign_rhs2 (stmt) : NULL;
2455 ret = operation_could_trap_helper_p (code, fp_operation, honor_trapv,
2456 honor_nans, honor_snans, t,
2457 &handled);
2458 if (handled)
2459 return ret;
2460
2461 /* If the expression does not trap, see if any of the individual operands may
2462 trap. */
2463 for (i = 0; i < gimple_num_ops (stmt); i++)
2464 if (tree_could_trap_p (gimple_op (stmt, i)))
2465 return true;
2466
2467 return false;
2468 }
2469
2470
2471 /* Return true if statement STMT could throw an exception. */
2472
2473 bool
2474 stmt_could_throw_p (gimple stmt)
2475 {
2476 if (!flag_exceptions)
2477 return false;
2478
2479 /* The only statements that can throw an exception are assignments,
2480 conditionals, calls, resx, and asms. */
2481 switch (gimple_code (stmt))
2482 {
2483 case GIMPLE_RESX:
2484 return true;
2485
2486 case GIMPLE_CALL:
2487 return !gimple_call_nothrow_p (stmt);
2488
2489 case GIMPLE_ASSIGN:
2490 case GIMPLE_COND:
2491 if (!flag_non_call_exceptions)
2492 return false;
2493 return stmt_could_throw_1_p (stmt);
2494
2495 case GIMPLE_ASM:
2496 if (!flag_non_call_exceptions)
2497 return false;
2498 return gimple_asm_volatile_p (stmt);
2499
2500 default:
2501 return false;
2502 }
2503 }
2504
2505
2506 /* Return true if expression T could throw an exception. */
2507
2508 bool
2509 tree_could_throw_p (tree t)
2510 {
2511 if (!flag_exceptions)
2512 return false;
2513 if (TREE_CODE (t) == MODIFY_EXPR)
2514 {
2515 if (flag_non_call_exceptions
2516 && tree_could_trap_p (TREE_OPERAND (t, 0)))
2517 return true;
2518 t = TREE_OPERAND (t, 1);
2519 }
2520
2521 if (TREE_CODE (t) == WITH_SIZE_EXPR)
2522 t = TREE_OPERAND (t, 0);
2523 if (TREE_CODE (t) == CALL_EXPR)
2524 return (call_expr_flags (t) & ECF_NOTHROW) == 0;
2525 if (flag_non_call_exceptions)
2526 return tree_could_trap_p (t);
2527 return false;
2528 }
2529
2530 /* Return true if STMT can throw an exception that is not caught within
2531 the current function (CFUN). */
2532
2533 bool
2534 stmt_can_throw_external (gimple stmt)
2535 {
2536 int lp_nr;
2537
2538 if (!stmt_could_throw_p (stmt))
2539 return false;
2540
2541 lp_nr = lookup_stmt_eh_lp (stmt);
2542 return lp_nr == 0;
2543 }
2544
2545 /* Return true if STMT can throw an exception that is caught within
2546 the current function (CFUN). */
2547
2548 bool
2549 stmt_can_throw_internal (gimple stmt)
2550 {
2551 int lp_nr;
2552
2553 if (!stmt_could_throw_p (stmt))
2554 return false;
2555
2556 lp_nr = lookup_stmt_eh_lp (stmt);
2557 return lp_nr > 0;
2558 }
2559
2560 /* Given a statement STMT in IFUN, if STMT can no longer throw, then
2561 remove any entry it might have from the EH table. Return true if
2562 any change was made. */
2563
2564 bool
2565 maybe_clean_eh_stmt_fn (struct function *ifun, gimple stmt)
2566 {
2567 if (stmt_could_throw_p (stmt))
2568 return false;
2569 return remove_stmt_from_eh_lp_fn (ifun, stmt);
2570 }
2571
2572 /* Likewise, but always use the current function. */
2573
2574 bool
2575 maybe_clean_eh_stmt (gimple stmt)
2576 {
2577 return maybe_clean_eh_stmt_fn (cfun, stmt);
2578 }
2579
2580 /* Given a statement OLD_STMT and a new statement NEW_STMT that has replaced
2581 OLD_STMT in the function, remove OLD_STMT from the EH table and put NEW_STMT
2582 in the table if it should be in there. Return TRUE if a replacement was
2583 done that may require an EH edge purge. */
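
/* A typical use (a sketch; old_stmt and need_eh_cleanup are names made up
   for the example) after folding a statement in place:

     gimple old_stmt = gsi_stmt (gsi);
     fold_stmt (&gsi);
     if (maybe_clean_or_replace_eh_stmt (old_stmt, gsi_stmt (gsi)))
       need_eh_cleanup = true;

   where a true result tells the pass to purge the now-dead EH edges from
   the block, e.g. via gimple_purge_dead_eh_edges, before it finishes.  */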
2584
2585 bool
2586 maybe_clean_or_replace_eh_stmt (gimple old_stmt, gimple new_stmt)
2587 {
2588 int lp_nr = lookup_stmt_eh_lp (old_stmt);
2589
2590 if (lp_nr != 0)
2591 {
2592 bool new_stmt_could_throw = stmt_could_throw_p (new_stmt);
2593
2594 if (new_stmt == old_stmt && new_stmt_could_throw)
2595 return false;
2596
2597 remove_stmt_from_eh_lp (old_stmt);
2598 if (new_stmt_could_throw)
2599 {
2600 add_stmt_to_eh_lp (new_stmt, lp_nr);
2601 return false;
2602 }
2603 else
2604 return true;
2605 }
2606
2607 return false;
2608 }
2609
2610 /* Given a statement OLD_STMT in OLD_FUN and a duplicate statement NEW_STMT
2611 in NEW_FUN, copy the EH table data from OLD_STMT to NEW_STMT. The MAP
2612 operand is the return value of duplicate_eh_regions. */
2613
2614 bool
2615 maybe_duplicate_eh_stmt_fn (struct function *new_fun, gimple new_stmt,
2616 struct function *old_fun, gimple old_stmt,
2617 struct pointer_map_t *map, int default_lp_nr)
2618 {
2619 int old_lp_nr, new_lp_nr;
2620 void **slot;
2621
2622 if (!stmt_could_throw_p (new_stmt))
2623 return false;
2624
2625 old_lp_nr = lookup_stmt_eh_lp_fn (old_fun, old_stmt);
2626 if (old_lp_nr == 0)
2627 {
2628 if (default_lp_nr == 0)
2629 return false;
2630 new_lp_nr = default_lp_nr;
2631 }
2632 else if (old_lp_nr > 0)
2633 {
2634 eh_landing_pad old_lp, new_lp;
2635
2636 old_lp = VEC_index (eh_landing_pad, old_fun->eh->lp_array, old_lp_nr);
2637 slot = pointer_map_contains (map, old_lp);
2638 new_lp = (eh_landing_pad) *slot;
2639 new_lp_nr = new_lp->index;
2640 }
2641 else
2642 {
2643 eh_region old_r, new_r;
2644
2645 old_r = VEC_index (eh_region, old_fun->eh->region_array, -old_lp_nr);
2646 slot = pointer_map_contains (map, old_r);
2647 new_r = (eh_region) *slot;
2648 new_lp_nr = -new_r->index;
2649 }
2650
2651 add_stmt_to_eh_lp_fn (new_fun, new_stmt, new_lp_nr);
2652 return true;
2653 }
2654
2655 /* Similar, but both OLD_STMT and NEW_STMT are within the current function,
2656 and thus no remapping is required. */
2657
2658 bool
2659 maybe_duplicate_eh_stmt (gimple new_stmt, gimple old_stmt)
2660 {
2661 int lp_nr;
2662
2663 if (!stmt_could_throw_p (new_stmt))
2664 return false;
2665
2666 lp_nr = lookup_stmt_eh_lp (old_stmt);
2667 if (lp_nr == 0)
2668 return false;
2669
2670 add_stmt_to_eh_lp (new_stmt, lp_nr);
2671 return true;
2672 }
2673 \f
2674 /* Returns TRUE if oneh and twoh are exception handlers (gimple_try_cleanup of
2675 GIMPLE_TRY) that are similar enough to be considered the same. Currently
2676 this only handles handlers consisting of a single call, as that's the
2677 important case for C++: a destructor call for a particular object showing
2678 up in multiple handlers. */
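
/* E.g. (illustrative), the two single-statement handler sequences

     A::~A (&a);
   and
     A::~A (&a);

   compare equal, whereas a handler containing anything beyond that one
   call is conservatively treated as different.  */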
2679
2680 static bool
2681 same_handler_p (gimple_seq oneh, gimple_seq twoh)
2682 {
2683 gimple_stmt_iterator gsi;
2684 gimple ones, twos;
2685 unsigned int ai;
2686
2687 gsi = gsi_start (oneh);
2688 if (!gsi_one_before_end_p (gsi))
2689 return false;
2690 ones = gsi_stmt (gsi);
2691
2692 gsi = gsi_start (twoh);
2693 if (!gsi_one_before_end_p (gsi))
2694 return false;
2695 twos = gsi_stmt (gsi);
2696
2697 if (!is_gimple_call (ones)
2698 || !is_gimple_call (twos)
2699 || gimple_call_lhs (ones)
2700 || gimple_call_lhs (twos)
2701 || gimple_call_chain (ones)
2702 || gimple_call_chain (twos)
2703 || !operand_equal_p (gimple_call_fn (ones), gimple_call_fn (twos), 0)
2704 || gimple_call_num_args (ones) != gimple_call_num_args (twos))
2705 return false;
2706
2707 for (ai = 0; ai < gimple_call_num_args (ones); ++ai)
2708 if (!operand_equal_p (gimple_call_arg (ones, ai),
2709 gimple_call_arg (twos, ai), 0))
2710 return false;
2711
2712 return true;
2713 }
2714
2715 /* Optimize
2716 try { A() } finally { try { ~B() } catch { ~A() } }
2717 try { ... } finally { ~A() }
2718 into
2719 try { A() } catch { ~B() }
2720 try { ~B() ... } finally { ~A() }
2721
2722 This occurs frequently in C++, where A is a local variable and B is a
2723 temporary used in the initializer for A. */
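
/* A hypothetical C++ fragment of the shape described above:

     struct B { B (); ~B (); };
     struct A { A (const B &); ~A (); };
     void f () { A a = A (B ()); use (a); }

   here B is the temporary used to initialize A, so a front end will
   typically emit a try-finally protecting B's lifetime whose finally block
   is itself try { ~B } catch { ~A }, immediately followed by the
   try-finally for the rest of f whose finally block is ~A; that is the
   pattern this function rewrites.  */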
2724
2725 static void
2726 optimize_double_finally (gimple one, gimple two)
2727 {
2728 gimple oneh;
2729 gimple_stmt_iterator gsi;
2730
2731 gsi = gsi_start (gimple_try_cleanup (one));
2732 if (!gsi_one_before_end_p (gsi))
2733 return;
2734
2735 oneh = gsi_stmt (gsi);
2736 if (gimple_code (oneh) != GIMPLE_TRY
2737 || gimple_try_kind (oneh) != GIMPLE_TRY_CATCH)
2738 return;
2739
2740 if (same_handler_p (gimple_try_cleanup (oneh), gimple_try_cleanup (two)))
2741 {
2742 gimple_seq seq = gimple_try_eval (oneh);
2743
2744 gimple_try_set_cleanup (one, seq);
2745 gimple_try_set_kind (one, GIMPLE_TRY_CATCH);
2746 seq = copy_gimple_seq_and_replace_locals (seq);
2747 gimple_seq_add_seq (&seq, gimple_try_eval (two));
2748 gimple_try_set_eval (two, seq);
2749 }
2750 }
2751
2752 /* Perform EH refactoring optimizations that are simpler to do when code
2753 flow has been lowered but EH structures haven't. */
2754
2755 static void
2756 refactor_eh_r (gimple_seq seq)
2757 {
2758 gimple_stmt_iterator gsi;
2759 gimple one, two;
2760
2761 one = NULL;
2762 two = NULL;
2763 gsi = gsi_start (seq);
2764 while (1)
2765 {
2766 one = two;
2767 if (gsi_end_p (gsi))
2768 two = NULL;
2769 else
2770 two = gsi_stmt (gsi);
2771 if (one
2772 && two
2773 && gimple_code (one) == GIMPLE_TRY
2774 && gimple_code (two) == GIMPLE_TRY
2775 && gimple_try_kind (one) == GIMPLE_TRY_FINALLY
2776 && gimple_try_kind (two) == GIMPLE_TRY_FINALLY)
2777 optimize_double_finally (one, two);
2778 if (one)
2779 switch (gimple_code (one))
2780 {
2781 case GIMPLE_TRY:
2782 refactor_eh_r (gimple_try_eval (one));
2783 refactor_eh_r (gimple_try_cleanup (one));
2784 break;
2785 case GIMPLE_CATCH:
2786 refactor_eh_r (gimple_catch_handler (one));
2787 break;
2788 case GIMPLE_EH_FILTER:
2789 refactor_eh_r (gimple_eh_filter_failure (one));
2790 break;
2791 default:
2792 break;
2793 }
2794 if (two)
2795 gsi_next (&gsi);
2796 else
2797 break;
2798 }
2799 }
2800
2801 static unsigned
2802 refactor_eh (void)
2803 {
2804 refactor_eh_r (gimple_body (current_function_decl));
2805 return 0;
2806 }
2807
2808 static bool
2809 gate_refactor_eh (void)
2810 {
2811 return flag_exceptions != 0;
2812 }
2813
2814 struct gimple_opt_pass pass_refactor_eh =
2815 {
2816 {
2817 GIMPLE_PASS,
2818 "ehopt", /* name */
2819 gate_refactor_eh, /* gate */
2820 refactor_eh, /* execute */
2821 NULL, /* sub */
2822 NULL, /* next */
2823 0, /* static_pass_number */
2824 TV_TREE_EH, /* tv_id */
2825 PROP_gimple_lcf, /* properties_required */
2826 0, /* properties_provided */
2827 0, /* properties_destroyed */
2828 0, /* todo_flags_start */
2829 TODO_dump_func /* todo_flags_finish */
2830 }
2831 };
2832 \f
2833 /* At the end of gimple optimization, we can lower RESX. */
2834
2835 static bool
2836 lower_resx (basic_block bb, gimple stmt, struct pointer_map_t *mnt_map)
2837 {
2838 int lp_nr;
2839 eh_region src_r, dst_r;
2840 gimple_stmt_iterator gsi;
2841 gimple x;
2842 tree fn, src_nr;
2843 bool ret = false;
2844
2845 lp_nr = lookup_stmt_eh_lp (stmt);
2846 if (lp_nr != 0)
2847 dst_r = get_eh_region_from_lp_number (lp_nr);
2848 else
2849 dst_r = NULL;
2850
2851 src_r = get_eh_region_from_number (gimple_resx_region (stmt));
2852 src_nr = build_int_cst (NULL, src_r->index);
2853 gsi = gsi_last_bb (bb);
2854
2855 if (dst_r)
2856 {
2857 /* When we have a destination region, we resolve this by copying
2858 the excptr and filter values into place, and changing the edge
2859 to immediately after the landing pad. */
2860 edge e;
2861
2862 if (lp_nr < 0)
2863 {
2864 basic_block new_bb;
2865 void **slot;
2866 tree lab;
2867
2868 /* We are resuming into a MUST_NOT_THROW region. Expand a call to
2869 the failure decl into a new block, if needed. */
2870 gcc_assert (dst_r->type == ERT_MUST_NOT_THROW);
2871
2872 slot = pointer_map_contains (mnt_map, dst_r);
2873 if (slot == NULL)
2874 {
2875 gimple_stmt_iterator gsi2;
2876
2877 new_bb = create_empty_bb (bb);
2878 lab = gimple_block_label (new_bb);
2879 gsi2 = gsi_start_bb (new_bb);
2880
2881 fn = dst_r->u.must_not_throw.failure_decl;
2882 x = gimple_build_call (fn, 0);
2883 gimple_set_location (x, dst_r->u.must_not_throw.failure_loc);
2884 gsi_insert_after (&gsi2, x, GSI_CONTINUE_LINKING);
2885
2886 slot = pointer_map_insert (mnt_map, dst_r);
2887 *slot = lab;
2888 }
2889 else
2890 {
2891 lab = (tree) *slot;
2892 new_bb = label_to_block (lab);
2893 }
2894
2895 gcc_assert (EDGE_COUNT (bb->succs) == 0);
2896 e = make_edge (bb, new_bb, EDGE_FALLTHRU);
2897 e->count = bb->count;
2898 e->probability = REG_BR_PROB_BASE;
2899 }
2900 else
2901 {
2902 edge_iterator ei;
2903 tree dst_nr = build_int_cst (NULL, dst_r->index);
2904
2905 fn = implicit_built_in_decls[BUILT_IN_EH_COPY_VALUES];
2906 x = gimple_build_call (fn, 2, dst_nr, src_nr);
2907 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
2908
2909 /* Update the flags for the outgoing edge. */
2910 e = single_succ_edge (bb);
2911 gcc_assert (e->flags & EDGE_EH);
2912 e->flags = (e->flags & ~EDGE_EH) | EDGE_FALLTHRU;
2913
2914 /* If there are no more EH users of the landing pad, delete it. */
2915 FOR_EACH_EDGE (e, ei, e->dest->preds)
2916 if (e->flags & EDGE_EH)
2917 break;
2918 if (e == NULL)
2919 {
2920 eh_landing_pad lp = get_eh_landing_pad_from_number (lp_nr);
2921 remove_eh_landing_pad (lp);
2922 }
2923 }
2924
2925 ret = true;
2926 }
2927 else
2928 {
2929 tree var;
2930
2931 /* When we don't have a destination region, this exception escapes
2932 up the call chain. We resolve this by generating a call to the
2933 _Unwind_Resume library function. */
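      /* Roughly (illustrative GIMPLE, the non-EABI case built below):

           resx 1;
         becomes
           ptr.0 = __builtin_eh_pointer (1);
           _Unwind_Resume (ptr.0);  */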
2934
2935 /* ??? The ARM EABI redefines _Unwind_Resume as __cxa_end_cleanup
2936 with no arguments for C++ and Java. Check for that. */
2937 switch (targetm.arm_eabi_unwinder)
2938 {
2939 default:
2940 fn = implicit_built_in_decls[BUILT_IN_UNWIND_RESUME];
2941 if (TYPE_ARG_TYPES (TREE_TYPE (fn)) == void_list_node)
2942 {
2943 x = gimple_build_call (fn, 0);
2944 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
2945 break;
2946 }
2947 /* FALLTHRU */
2948
2949 case 0:
2950 fn = implicit_built_in_decls[BUILT_IN_EH_POINTER];
2951 x = gimple_build_call (fn, 1, src_nr);
2952 var = create_tmp_var (ptr_type_node, NULL);
2953 var = make_ssa_name (var, x);
2954 gimple_call_set_lhs (x, var);
2955 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
2956
2957 fn = implicit_built_in_decls[BUILT_IN_UNWIND_RESUME];
2958 x = gimple_build_call (fn, 1, var);
2959 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
2960 break;
2961 }
2962
2963 gcc_assert (EDGE_COUNT (bb->succs) == 0);
2964 }
2965
2966 gsi_remove (&gsi, true);
2967
2968 return ret;
2969 }
2970
2971 static unsigned
2972 execute_lower_resx (void)
2973 {
2974 basic_block bb;
2975 struct pointer_map_t *mnt_map;
2976 bool dominance_invalidated = false;
2977 bool any_rewritten = false;
2978
2979 mnt_map = pointer_map_create ();
2980
2981 FOR_EACH_BB (bb)
2982 {
2983 gimple last = last_stmt (bb);
2984 if (last && is_gimple_resx (last))
2985 {
2986 dominance_invalidated |= lower_resx (bb, last, mnt_map);
2987 any_rewritten = true;
2988 }
2989 }
2990
2991 pointer_map_destroy (mnt_map);
2992
2993 if (dominance_invalidated)
2994 {
2995 free_dominance_info (CDI_DOMINATORS);
2996 free_dominance_info (CDI_POST_DOMINATORS);
2997 }
2998
2999 return any_rewritten ? TODO_update_ssa_only_virtuals : 0;
3000 }
3001
3002 static bool
3003 gate_lower_ehcontrol (void)
3004 {
3005 return cfun->eh->region_tree != NULL;
3006 }
3007
3008 struct gimple_opt_pass pass_lower_resx =
3009 {
3010 {
3011 GIMPLE_PASS,
3012 "resx", /* name */
3013 gate_lower_ehcontrol, /* gate */
3014 execute_lower_resx, /* execute */
3015 NULL, /* sub */
3016 NULL, /* next */
3017 0, /* static_pass_number */
3018 TV_TREE_EH, /* tv_id */
3019 PROP_gimple_lcf, /* properties_required */
3020 0, /* properties_provided */
3021 0, /* properties_destroyed */
3022 0, /* todo_flags_start */
3023 TODO_dump_func | TODO_verify_flow /* todo_flags_finish */
3024 }
3025 };
3026
3027
3028 /* At the end of inlining, we can lower EH_DISPATCH. */
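
/* Schematically (illustrative GIMPLE), a dispatch for a try region with a
   handler for int and a catch-all becomes something like

     filter.0 = __builtin_eh_filter (1);
     switch (filter.0) <default: L_catch_all, case 1: L_catch_int>

   while a region whose only handler is a catch-all needs no switch and
   simply falls through to it, as arranged below.  */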
3029
3030 static void
3031 lower_eh_dispatch (basic_block src, gimple stmt)
3032 {
3033 gimple_stmt_iterator gsi;
3034 int region_nr;
3035 eh_region r;
3036 tree filter, fn;
3037 gimple x;
3038
3039 region_nr = gimple_eh_dispatch_region (stmt);
3040 r = get_eh_region_from_number (region_nr);
3041
3042 gsi = gsi_last_bb (src);
3043
3044 switch (r->type)
3045 {
3046 case ERT_TRY:
3047 {
3048 VEC (tree, heap) *labels = NULL;
3049 tree default_label = NULL;
3050 eh_catch c;
3051 edge_iterator ei;
3052 edge e;
3053
3054 /* Collect the labels for a switch. Zero the post_landing_pad
3055 field because we'll no longer have anything keeping these labels
3056 in existence and the optimizer will be free to merge these
3057 blocks at will. */
3058 for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
3059 {
3060 tree tp_node, flt_node, lab = c->label;
3061
3062 c->label = NULL;
3063 tp_node = c->type_list;
3064 flt_node = c->filter_list;
3065
3066 if (tp_node == NULL)
3067 {
3068 default_label = lab;
3069 break;
3070 }
3071 do
3072 {
3073 tree t = build3 (CASE_LABEL_EXPR, void_type_node,
3074 TREE_VALUE (flt_node), NULL, lab);
3075 VEC_safe_push (tree, heap, labels, t);
3076
3077 tp_node = TREE_CHAIN (tp_node);
3078 flt_node = TREE_CHAIN (flt_node);
3079 }
3080 while (tp_node);
3081 }
3082
3083 /* Clean up the edge flags. */
3084 FOR_EACH_EDGE (e, ei, src->succs)
3085 {
3086 if (e->flags & EDGE_FALLTHRU)
3087 {
3088 /* If there was no catch-all, use the fallthru edge. */
3089 if (default_label == NULL)
3090 default_label = gimple_block_label (e->dest);
3091 e->flags &= ~EDGE_FALLTHRU;
3092 }
3093 }
3094 gcc_assert (default_label != NULL);
3095
3096 /* Don't generate a switch if there's only a default case.
3097 This is common in the form of try { A; } catch (...) { B; }. */
3098 if (labels == NULL)
3099 {
3100 e = single_succ_edge (src);
3101 e->flags |= EDGE_FALLTHRU;
3102 }
3103 else
3104 {
3105 fn = implicit_built_in_decls[BUILT_IN_EH_FILTER];
3106 x = gimple_build_call (fn, 1, build_int_cst (NULL, region_nr));
3107 filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)), NULL);
3108 filter = make_ssa_name (filter, x);
3109 gimple_call_set_lhs (x, filter);
3110 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3111
3112 /* Turn the default label into a default case. */
3113 default_label = build3 (CASE_LABEL_EXPR, void_type_node,
3114 NULL, NULL, default_label);
3115 sort_case_labels (labels);
3116
3117 x = gimple_build_switch_vec (filter, default_label, labels);
3118 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3119
3120 VEC_free (tree, heap, labels);
3121 }
3122 }
3123 break;
3124
3125 case ERT_ALLOWED_EXCEPTIONS:
3126 {
3127 edge b_e = BRANCH_EDGE (src);
3128 edge f_e = FALLTHRU_EDGE (src);
3129
3130 fn = implicit_built_in_decls[BUILT_IN_EH_FILTER];
3131 x = gimple_build_call (fn, 1, build_int_cst (NULL, region_nr));
3132 filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)), NULL);
3133 filter = make_ssa_name (filter, x);
3134 gimple_call_set_lhs (x, filter);
3135 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3136
3137 r->u.allowed.label = NULL;
3138 x = gimple_build_cond (EQ_EXPR, filter,
3139 build_int_cst (TREE_TYPE (filter),
3140 r->u.allowed.filter),
3141 NULL_TREE, NULL_TREE);
3142 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3143
3144 b_e->flags = b_e->flags | EDGE_TRUE_VALUE;
3145 f_e->flags = (f_e->flags & ~EDGE_FALLTHRU) | EDGE_FALSE_VALUE;
3146 }
3147 break;
3148
3149 default:
3150 gcc_unreachable ();
3151 }
3152
3153 /* Replace the EH_DISPATCH with the SWITCH or COND generated above. */
3154 gsi_remove (&gsi, true);
3155 }
3156
3157 static unsigned
3158 execute_lower_eh_dispatch (void)
3159 {
3160 basic_block bb;
3161 bool any_rewritten = false;
3162
3163 assign_filter_values ();
3164
3165 FOR_EACH_BB (bb)
3166 {
3167 gimple last = last_stmt (bb);
3168 if (last && gimple_code (last) == GIMPLE_EH_DISPATCH)
3169 {
3170 lower_eh_dispatch (bb, last);
3171 any_rewritten = true;
3172 }
3173 }
3174
3175 return any_rewritten ? TODO_update_ssa_only_virtuals : 0;
3176 }
3177
3178 struct gimple_opt_pass pass_lower_eh_dispatch =
3179 {
3180 {
3181 GIMPLE_PASS,
3182 "ehdisp", /* name */
3183 gate_lower_ehcontrol, /* gate */
3184 execute_lower_eh_dispatch, /* execute */
3185 NULL, /* sub */
3186 NULL, /* next */
3187 0, /* static_pass_number */
3188 TV_TREE_EH, /* tv_id */
3189 PROP_gimple_lcf, /* properties_required */
3190 0, /* properties_provided */
3191 0, /* properties_destroyed */
3192 0, /* todo_flags_start */
3193 TODO_dump_func | TODO_verify_flow /* todo_flags_finish */
3194 }
3195 };
3196 \f
3197 /* Walk statements, see what regions are really referenced and remove
3198 those that are unused. */
3199
3200 static void
3201 remove_unreachable_handlers (void)
3202 {
3203 sbitmap r_reachable, lp_reachable;
3204 eh_region region;
3205 eh_landing_pad lp;
3206 basic_block bb;
3207 int lp_nr, r_nr;
3208
3209 r_reachable = sbitmap_alloc (VEC_length (eh_region, cfun->eh->region_array));
3210 lp_reachable
3211 = sbitmap_alloc (VEC_length (eh_landing_pad, cfun->eh->lp_array));
3212 sbitmap_zero (r_reachable);
3213 sbitmap_zero (lp_reachable);
3214
3215 FOR_EACH_BB (bb)
3216 {
3217 gimple_stmt_iterator gsi = gsi_start_bb (bb);
3218
3219 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3220 {
3221 gimple stmt = gsi_stmt (gsi);
3222 lp_nr = lookup_stmt_eh_lp (stmt);
3223
3224 /* Negative LP numbers are MUST_NOT_THROW regions which
3225 are not considered BB enders. */
3226 if (lp_nr < 0)
3227 SET_BIT (r_reachable, -lp_nr);
3228
3229 /* Positive LP numbers are real landing pads, and are BB enders. */
3230 else if (lp_nr > 0)
3231 {
3232 gcc_assert (gsi_one_before_end_p (gsi));
3233 region = get_eh_region_from_lp_number (lp_nr);
3234 SET_BIT (r_reachable, region->index);
3235 SET_BIT (lp_reachable, lp_nr);
3236 }
3237 }
3238 }
3239
3240 if (dump_file)
3241 {
3242 fprintf (dump_file, "Before removal of unreachable regions:\n");
3243 dump_eh_tree (dump_file, cfun);
3244 fprintf (dump_file, "Reachable regions: ");
3245 dump_sbitmap_file (dump_file, r_reachable);
3246 fprintf (dump_file, "Reachable landing pads: ");
3247 dump_sbitmap_file (dump_file, lp_reachable);
3248 }
3249
3250 for (r_nr = 1;
3251 VEC_iterate (eh_region, cfun->eh->region_array, r_nr, region); ++r_nr)
3252 if (region && !TEST_BIT (r_reachable, r_nr))
3253 {
3254 if (dump_file)
3255 fprintf (dump_file, "Removing unreachable region %d\n", r_nr);
3256 remove_eh_handler (region);
3257 }
3258
3259 for (lp_nr = 1;
3260 VEC_iterate (eh_landing_pad, cfun->eh->lp_array, lp_nr, lp); ++lp_nr)
3261 if (lp && !TEST_BIT (lp_reachable, lp_nr))
3262 {
3263 if (dump_file)
3264 fprintf (dump_file, "Removing unreachable landing pad %d\n", lp_nr);
3265 remove_eh_landing_pad (lp);
3266 }
3267
3268 if (dump_file)
3269 {
3270 fprintf (dump_file, "\n\nAfter removal of unreachable regions:\n");
3271 dump_eh_tree (dump_file, cfun);
3272 fprintf (dump_file, "\n\n");
3273 }
3274
3275 sbitmap_free (r_reachable);
3276 sbitmap_free (lp_reachable);
3277
3278 #ifdef ENABLE_CHECKING
3279 verify_eh_tree (cfun);
3280 #endif
3281 }
3282
3283 /* Remove regions that do not have landing pads. This assumes
3284 that remove_unreachable_handlers has already been run, and
3285 that we've just manipulated the landing pads since then. */
3286
3287 static void
3288 remove_unreachable_handlers_no_lp (void)
3289 {
3290 eh_region r;
3291 int i;
3292
3293 for (i = 1; VEC_iterate (eh_region, cfun->eh->region_array, i, r); ++i)
3294 if (r && r->landing_pads == NULL && r->type != ERT_MUST_NOT_THROW)
3295 {
3296 if (dump_file)
3297 fprintf (dump_file, "Removing unreachable region %d\n", i);
3298 remove_eh_handler (r);
3299 }
3300 }
3301
3302 /* Undo critical edge splitting on an EH landing pad. Earlier, we
3303 optimistically split all sorts of edges, including EH edges. The
3304 optimization passes in between may not have needed them; if not,
3305 we should undo the split.
3306
3307 Recognize this case by having one EH edge incoming to the BB and
3308 one normal edge outgoing; BB should be empty apart from the
3309 post_landing_pad label.
3310
3311 Note that this is slightly different from the empty handler case
3312 handled by cleanup_empty_eh, in that the actual handler may yet
3313 have actual code but the landing pad has been separated from the
3314 handler. As such, cleanup_empty_eh relies on this transformation
3315 having been done first. */
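
/* Schematically:

     <throwing stmt>  ==EH==>  [ post_landing_pad: (empty) ]  ----->  DEST
   becomes
     <throwing stmt>  ==EH==>  DEST

   i.e. the empty block left over from the earlier edge split is bypassed
   and DEST serves as the landing pad block directly.  */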
3316
3317 static bool
3318 unsplit_eh (eh_landing_pad lp)
3319 {
3320 basic_block bb = label_to_block (lp->post_landing_pad);
3321 gimple_stmt_iterator gsi;
3322 edge e_in, e_out;
3323
3324 /* Quickly check the edge counts on BB for singularity. */
3325 if (EDGE_COUNT (bb->preds) != 1 || EDGE_COUNT (bb->succs) != 1)
3326 return false;
3327 e_in = EDGE_PRED (bb, 0);
3328 e_out = EDGE_SUCC (bb, 0);
3329
3330 /* Input edge must be EH and output edge must be normal. */
3331 if ((e_in->flags & EDGE_EH) == 0 || (e_out->flags & EDGE_EH) != 0)
3332 return false;
3333
3334 /* The block must be empty except for the labels. */
3335 if (!gsi_end_p (gsi_after_labels (bb)))
3336 return false;
3337
3338 /* The destination block must not already have a landing pad
3339 for a different region. */
3340 for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
3341 {
3342 gimple stmt = gsi_stmt (gsi);
3343 tree lab;
3344 int lp_nr;
3345
3346 if (gimple_code (stmt) != GIMPLE_LABEL)
3347 break;
3348 lab = gimple_label_label (stmt);
3349 lp_nr = EH_LANDING_PAD_NR (lab);
3350 if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region)
3351 return false;
3352 }
3353
3354 /* ??? I can't imagine there would be PHI nodes, since by nature
3355 of critical edge splitting this block should never have been
3356 a dominance frontier. If cfg cleanups somehow confuse this,
3357 due to single edges in and out we ought to have degenerate PHIs
3358 and can easily propagate the PHI arguments. */
3359 gcc_assert (gimple_seq_empty_p (phi_nodes (bb)));
3360
3361 if (dump_file && (dump_flags & TDF_DETAILS))
3362 fprintf (dump_file, "Unsplit EH landing pad %d to block %i.\n",
3363 lp->index, e_out->dest->index);
3364
3365 /* Redirect the edge. Since redirect_eh_edge_1 expects to be moving
3366 a successor edge, humor it. But do the real CFG change with the
3367 predecessor of E_OUT in order to preserve the ordering of arguments
3368 to the PHI nodes in E_OUT->DEST. */
3369 redirect_eh_edge_1 (e_in, e_out->dest, false);
3370 redirect_edge_pred (e_out, e_in->src);
3371 e_out->flags = e_in->flags;
3372 e_out->probability = e_in->probability;
3373 e_out->count = e_in->count;
3374 remove_edge (e_in);
3375
3376 return true;
3377 }
3378
3379 /* Examine each landing pad block and see if it matches unsplit_eh. */
3380
3381 static bool
3382 unsplit_all_eh (void)
3383 {
3384 bool changed = false;
3385 eh_landing_pad lp;
3386 int i;
3387
3388 for (i = 1; VEC_iterate (eh_landing_pad, cfun->eh->lp_array, i, lp); ++i)
3389 if (lp)
3390 changed |= unsplit_eh (lp);
3391
3392 return changed;
3393 }
3394
3395 /* A subroutine of cleanup_empty_eh. Redirect all EH edges incoming
3396 to OLD_BB to NEW_BB; return true on success, false on failure.
3397
3398 OLD_BB_OUT is the edge into NEW_BB from OLD_BB, so if we miss any
3399 PHI variables from OLD_BB we can pick them up from OLD_BB_OUT.
3400 Virtual PHIs may be deleted and marked for renaming. */
3401
3402 static bool
3403 cleanup_empty_eh_merge_phis (basic_block new_bb, basic_block old_bb,
3404 edge old_bb_out)
3405 {
3406 gimple_stmt_iterator ngsi, ogsi;
3407 edge_iterator ei;
3408 edge e;
3409 bitmap rename_virts;
3410 bitmap ophi_handled;
3411
3412 FOR_EACH_EDGE (e, ei, old_bb->preds)
3413 redirect_edge_var_map_clear (e);
3414
3415 ophi_handled = BITMAP_ALLOC (NULL);
3416 rename_virts = BITMAP_ALLOC (NULL);
3417
3418 /* First, iterate through the PHIs on NEW_BB and set up the edge_var_map
3419 for the edges we're going to move. */
3420 for (ngsi = gsi_start_phis (new_bb); !gsi_end_p (ngsi); gsi_next (&ngsi))
3421 {
3422 gimple ophi, nphi = gsi_stmt (ngsi);
3423 tree nresult, nop;
3424
3425 nresult = gimple_phi_result (nphi);
3426 nop = gimple_phi_arg_def (nphi, old_bb_out->dest_idx);
3427
3428 /* Find the corresponding PHI in OLD_BB so we can forward-propagate
3429 the source ssa_name. */
3430 ophi = NULL;
3431 for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi))
3432 {
3433 ophi = gsi_stmt (ogsi);
3434 if (gimple_phi_result (ophi) == nop)
3435 break;
3436 ophi = NULL;
3437 }
3438
3439 /* If we did find the corresponding PHI, copy those inputs. */
3440 if (ophi)
3441 {
3442 bitmap_set_bit (ophi_handled, SSA_NAME_VERSION (nop));
3443 FOR_EACH_EDGE (e, ei, old_bb->preds)
3444 {
3445 location_t oloc;
3446 tree oop;
3447
3448 if ((e->flags & EDGE_EH) == 0)
3449 continue;
3450 oop = gimple_phi_arg_def (ophi, e->dest_idx);
3451 oloc = gimple_phi_arg_location (ophi, e->dest_idx);
3452 redirect_edge_var_map_add (e, nresult, oop, oloc);
3453 }
3454 }
3455 /* If we didn't find the PHI, but it's a VOP, remember to rename
3456 it later, assuming all other tests succeed. */
3457 else if (!is_gimple_reg (nresult))
3458 bitmap_set_bit (rename_virts, SSA_NAME_VERSION (nresult));
3459 /* If we didn't find the PHI, and it's a real variable, we know
3460 from the fact that OLD_BB is tree_empty_eh_handler_p that the
3461 variable is unchanged from input to the block and we can simply
3462 re-use the input to NEW_BB from the OLD_BB_OUT edge. */
3463 else
3464 {
3465 location_t nloc
3466 = gimple_phi_arg_location (nphi, old_bb_out->dest_idx);
3467 FOR_EACH_EDGE (e, ei, old_bb->preds)
3468 redirect_edge_var_map_add (e, nresult, nop, nloc);
3469 }
3470 }
3471
3472 /* Second, verify that all PHIs from OLD_BB have been handled. If not,
3473 we don't know what values from the other edges into NEW_BB to use. */
3474 for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi))
3475 {
3476 gimple ophi = gsi_stmt (ogsi);
3477 tree oresult = gimple_phi_result (ophi);
3478 if (!bitmap_bit_p (ophi_handled, SSA_NAME_VERSION (oresult)))
3479 goto fail;
3480 }
3481
3482 /* At this point we know that the merge will succeed. Remove the PHI
3483 nodes for the virtuals that we want to rename. */
3484 if (!bitmap_empty_p (rename_virts))
3485 {
3486 for (ngsi = gsi_start_phis (new_bb); !gsi_end_p (ngsi); )
3487 {
3488 gimple nphi = gsi_stmt (ngsi);
3489 tree nresult = gimple_phi_result (nphi);
3490 if (bitmap_bit_p (rename_virts, SSA_NAME_VERSION (nresult)))
3491 {
3492 mark_virtual_phi_result_for_renaming (nphi);
3493 remove_phi_node (&ngsi, true);
3494 }
3495 else
3496 gsi_next (&ngsi);
3497 }
3498 }
3499
3500 /* Finally, move the edges and update the PHIs. */
3501 for (ei = ei_start (old_bb->preds); (e = ei_safe_edge (ei)); )
3502 if (e->flags & EDGE_EH)
3503 {
3504 redirect_eh_edge_1 (e, new_bb, true);
3505 redirect_edge_succ (e, new_bb);
3506 flush_pending_stmts (e);
3507 }
3508 else
3509 ei_next (&ei);
3510
3511 BITMAP_FREE (ophi_handled);
3512 BITMAP_FREE (rename_virts);
3513 return true;
3514
3515 fail:
3516 FOR_EACH_EDGE (e, ei, old_bb->preds)
3517 redirect_edge_var_map_clear (e);
3518 BITMAP_FREE (ophi_handled);
3519 BITMAP_FREE (rename_virts);
3520 return false;
3521 }
3522
3523 /* A subroutine of cleanup_empty_eh. Move a landing pad LP from its
3524 old region to NEW_REGION at BB. */
3525
3526 static void
3527 cleanup_empty_eh_move_lp (basic_block bb, edge e_out,
3528 eh_landing_pad lp, eh_region new_region)
3529 {
3530 gimple_stmt_iterator gsi;
3531 eh_landing_pad *pp;
3532
3533 for (pp = &lp->region->landing_pads; *pp != lp; pp = &(*pp)->next_lp)
3534 continue;
3535 *pp = lp->next_lp;
3536
3537 lp->region = new_region;
3538 lp->next_lp = new_region->landing_pads;
3539 new_region->landing_pads = lp;
3540
3541 /* Delete the RESX that was matched within the empty handler block. */
3542 gsi = gsi_last_bb (bb);
3543 mark_virtual_ops_for_renaming (gsi_stmt (gsi));
3544 gsi_remove (&gsi, true);
3545
3546 /* Clean up E_OUT for the fallthru. */
3547 e_out->flags = (e_out->flags & ~EDGE_EH) | EDGE_FALLTHRU;
3548 e_out->probability = REG_BR_PROB_BASE;
3549 }
3550
3551 /* A subroutine of cleanup_empty_eh. Handle more complex cases of
3552 unsplitting than unsplit_eh was prepared to handle, e.g. when
3553 multiple incoming edges and phis are involved. */
3554
3555 static bool
3556 cleanup_empty_eh_unsplit (basic_block bb, edge e_out, eh_landing_pad olp)
3557 {
3558 gimple_stmt_iterator gsi;
3559 eh_landing_pad nlp;
3560 tree lab;
3561
3562 /* We really ought not have totally lost everything following
3563 a landing pad label. Given that BB is empty, there had better
3564 be a successor. */
3565 gcc_assert (e_out != NULL);
3566
3567 /* Look for an EH label in the successor block. */
3568 lab = NULL;
3569 for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
3570 {
3571 gimple stmt = gsi_stmt (gsi);
3572 if (gimple_code (stmt) != GIMPLE_LABEL)
3573 break;
3574 lab = gimple_label_label (stmt);
3575 if (EH_LANDING_PAD_NR (lab))
3576 goto found;
3577 }
3578 return false;
3579 found:
3580
3581 /* The other label had better be part of the same EH region. Given that
3582 we've not lowered RESX, there should be no way to have a totally empty
3583 landing pad that crosses to another EH region. */
3584 nlp = get_eh_landing_pad_from_number (EH_LANDING_PAD_NR (lab));
3585 gcc_assert (nlp->region == olp->region);
3586
3587 /* Attempt to move the PHIs into the successor block. */
3588 if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out))
3589 {
3590 if (dump_file && (dump_flags & TDF_DETAILS))
3591 fprintf (dump_file,
3592 "Unsplit EH landing pad %d to block %d via lp %d.\n",
3593 olp->index, e_out->dest->index, nlp->index);
3594
3595 remove_eh_landing_pad (olp);
3596 return true;
3597 }
3598
3599 return false;
3600 }
3601
3602 /* Examine the block associated with LP to determine if it's an empty
3603 handler for its EH region. If so, attempt to redirect EH edges to
3604 an outer region. Return true if the CFG was updated in any way. This
3605 is similar to jump forwarding, just across EH edges. */
3606
3607 static bool
3608 cleanup_empty_eh (eh_landing_pad lp)
3609 {
3610 basic_block bb = label_to_block (lp->post_landing_pad);
3611 gimple_stmt_iterator gsi;
3612 gimple resx;
3613 eh_region new_region;
3614 edge_iterator ei;
3615 edge e, e_out;
3616 bool has_non_eh_pred;
3617 int new_lp_nr;
3618
3619 /* There can be zero or one edges out of BB. This is the quickest test. */
3620 switch (EDGE_COUNT (bb->succs))
3621 {
3622 case 0:
3623 e_out = NULL;
3624 break;
3625 case 1:
3626 e_out = EDGE_SUCC (bb, 0);
3627 break;
3628 default:
3629 return false;
3630 }
3631 gsi = gsi_after_labels (bb);
3632
3633 /* Make sure to skip debug statements. */
3634 if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
3635 gsi_next_nondebug (&gsi);
3636
3637 /* If the block is totally empty, look for more unsplitting cases. */
3638 if (gsi_end_p (gsi))
3639 return cleanup_empty_eh_unsplit (bb, e_out, lp);
3640
3641 /* The block should consist only of a single RESX statement. */
3642 resx = gsi_stmt (gsi);
3643 if (!is_gimple_resx (resx))
3644 return false;
3645 gcc_assert (gsi_one_before_end_p (gsi));
3646
3647 /* Determine if there are non-EH edges, or resx edges into the handler. */
3648 has_non_eh_pred = false;
3649 FOR_EACH_EDGE (e, ei, bb->preds)
3650 if (!(e->flags & EDGE_EH))
3651 has_non_eh_pred = true;
3652
3653 /* Find the handler that encloses the empty handler by looking at
3654 where the RESX instruction was vectored. */
3655 new_lp_nr = lookup_stmt_eh_lp (resx);
3656 new_region = get_eh_region_from_lp_number (new_lp_nr);
3657
3658 /* If there's no destination region within the current function,
3659 redirection is trivial via removing the throwing statements from
3660 the EH region, removing the EH edges, and allowing the block
3661 to go unreachable. */
3662 if (new_region == NULL)
3663 {
3664 gcc_assert (e_out == NULL);
3665 for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
3666 if (e->flags & EDGE_EH)
3667 {
3668 gimple stmt = last_stmt (e->src);
3669 remove_stmt_from_eh_lp (stmt);
3670 remove_edge (e);
3671 }
3672 else
3673 ei_next (&ei);
3674 goto succeed;
3675 }
3676
3677 /* If the destination region is a MUST_NOT_THROW, allow the runtime
3678 to handle the abort and allow the blocks to go unreachable. */
3679 if (new_region->type == ERT_MUST_NOT_THROW)
3680 {
3681 for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
3682 if (e->flags & EDGE_EH)
3683 {
3684 gimple stmt = last_stmt (e->src);
3685 remove_stmt_from_eh_lp (stmt);
3686 add_stmt_to_eh_lp (stmt, new_lp_nr);
3687 remove_edge (e);
3688 }
3689 else
3690 ei_next (&ei);
3691 goto succeed;
3692 }
3693
3694 /* Try to redirect the EH edges and merge the PHIs into the destination
3695 landing pad block. If the merge succeeds, we'll already have redirected
3696 all the EH edges. The handler itself will go unreachable if there were
3697 no normal edges. */
3698 if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out))
3699 goto succeed;
3700
3701 /* Finally, if all input edges are EH edges, then we can (potentially)
3702 reduce the number of transfers from the runtime by moving the landing
3703 pad from the original region to the new region. This is a win when
3704 we remove the last CLEANUP region along a particular exception
3705 propagation path. Since nothing changes except for the region with
3706 which the landing pad is associated, the PHI nodes do not need to be
3707 adjusted at all. */
3708 if (!has_non_eh_pred)
3709 {
3710 cleanup_empty_eh_move_lp (bb, e_out, lp, new_region);
3711 if (dump_file && (dump_flags & TDF_DETAILS))
3712 fprintf (dump_file, "Empty EH handler %i moved to EH region %i.\n",
3713 lp->index, new_region->index);
3714
3715 /* ??? The CFG didn't change, but we may have rendered the
3716 old EH region unreachable. Trigger a cleanup there. */
3717 return true;
3718 }
3719
3720 return false;
3721
3722 succeed:
3723 if (dump_file && (dump_flags & TDF_DETAILS))
3724 fprintf (dump_file, "Empty EH handler %i removed.\n", lp->index);
3725 remove_eh_landing_pad (lp);
3726 return true;
3727 }
3728
3729 /* Do a post-order traversal of the EH region tree. Examine each
3730 post_landing_pad block and see if we can eliminate it as empty. */
3731
3732 static bool
3733 cleanup_all_empty_eh (void)
3734 {
3735 bool changed = false;
3736 eh_landing_pad lp;
3737 int i;
3738
3739 for (i = 1; VEC_iterate (eh_landing_pad, cfun->eh->lp_array, i, lp); ++i)
3740 if (lp)
3741 changed |= cleanup_empty_eh (lp);
3742
3743 return changed;
3744 }
3745
3746 /* Perform cleanups and lowering of exception handling
3747 1) cleanup regions with handlers doing nothing are optimized out
3748 2) MUST_NOT_THROW regions that became dead because of 1) are optimized out
3749 3) Info about regions that contain instructions, and regions
3750 reachable via local EH edges, is collected
3751 4) The EH tree is pruned of regions that are no longer necessary.
3752
3753 TODO: Push MUST_NOT_THROW regions to the root of the EH tree.
3754 Unify those that have the same failure decl and locus.
3755 */
3756
3757 static unsigned int
3758 execute_cleanup_eh (void)
3759 {
3760 /* Do this first: unsplit_all_eh and cleanup_all_empty_eh can die
3761 looking up unreachable landing pads. */
3762 remove_unreachable_handlers ();
3763
3764 /* Watch out for the region tree vanishing because all of it was unreachable. */
3765 if (cfun->eh->region_tree && optimize)
3766 {
3767 bool changed = false;
3768
3769 changed |= unsplit_all_eh ();
3770 changed |= cleanup_all_empty_eh ();
3771
3772 if (changed)
3773 {
3774 free_dominance_info (CDI_DOMINATORS);
3775 free_dominance_info (CDI_POST_DOMINATORS);
3776
3777 /* We delayed all basic block deletion, as we may have performed
3778 cleanups on EH edges while non-EH edges were still present. */
3779 delete_unreachable_blocks ();
3780
3781 /* We manipulated the landing pads. Remove any region that no
3782 longer has a landing pad. */
3783 remove_unreachable_handlers_no_lp ();
3784
3785 return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
3786 }
3787 }
3788
3789 return 0;
3790 }
3791
3792 static bool
3793 gate_cleanup_eh (void)
3794 {
3795 return cfun->eh != NULL && cfun->eh->region_tree != NULL;
3796 }
3797
3798 struct gimple_opt_pass pass_cleanup_eh = {
3799 {
3800 GIMPLE_PASS,
3801 "ehcleanup", /* name */
3802 gate_cleanup_eh, /* gate */
3803 execute_cleanup_eh, /* execute */
3804 NULL, /* sub */
3805 NULL, /* next */
3806 0, /* static_pass_number */
3807 TV_TREE_EH, /* tv_id */
3808 PROP_gimple_lcf, /* properties_required */
3809 0, /* properties_provided */
3810 0, /* properties_destroyed */
3811 0, /* todo_flags_start */
3812 TODO_dump_func /* todo_flags_finish */
3813 }
3814 };
3815 \f
3816 /* Verify that BB, which contains STMT as its last statement, has precisely
3817 the edges that make_eh_edges would create.  */
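/* Concretely, as the checks below assume: if STMT has no landing pad, BB
   must have no EDGE_EH successor; if it has one, BB must have exactly one
   EDGE_EH successor, and that edge must reach the landing pad's
   post_landing_pad block.  */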
3818
3819 bool
3820 verify_eh_edges (gimple stmt)
3821 {
3822 basic_block bb = gimple_bb (stmt);
3823 eh_landing_pad lp = NULL;
3824 int lp_nr;
3825 edge_iterator ei;
3826 edge e, eh_edge;
3827
3828 lp_nr = lookup_stmt_eh_lp (stmt);
3829 if (lp_nr > 0)
3830 lp = get_eh_landing_pad_from_number (lp_nr);
3831
3832 eh_edge = NULL;
3833 FOR_EACH_EDGE (e, ei, bb->succs)
3834 {
3835 if (e->flags & EDGE_EH)
3836 {
3837 if (eh_edge)
3838 {
3839 error ("BB %i has multiple EH edges", bb->index);
3840 return true;
3841 }
3842 else
3843 eh_edge = e;
3844 }
3845 }
3846
3847 if (lp == NULL)
3848 {
3849 if (eh_edge)
3850 {
3851 error ("BB %i can not throw but has an EH edge", bb->index);
3852 return true;
3853 }
3854 return false;
3855 }
3856
3857 if (!stmt_could_throw_p (stmt))
3858 {
3859 error ("BB %i last statement has incorrectly set lp", bb->index);
3860 return true;
3861 }
3862
3863 if (eh_edge == NULL)
3864 {
3865 error ("BB %i is missing an EH edge", bb->index);
3866 return true;
3867 }
3868
3869 if (eh_edge->dest != label_to_block (lp->post_landing_pad))
3870 {
3871 error ("Incorrect EH edge %i->%i", bb->index, eh_edge->dest->index);
3872 return true;
3873 }
3874
3875 return false;
3876 }
3877
3878 /* Similarly, but handle GIMPLE_EH_DISPATCH specifically. */
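/* Concretely, as verified below: for an ERT_TRY region the dispatch block
   should have one edge to each catch handler's label, stopping at a
   catch-all, plus a fallthru edge only when no catch-all was seen; for an
   ERT_ALLOWED_EXCEPTIONS region, one edge to the region's label plus a
   fallthru edge.  Any other successor is an error.  */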
3879
3880 bool
3881 verify_eh_dispatch_edge (gimple stmt)
3882 {
3883 eh_region r;
3884 eh_catch c;
3885 basic_block src, dst;
3886 bool want_fallthru = true;
3887 edge_iterator ei;
3888 edge e, fall_edge;
3889
3890 r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
3891 src = gimple_bb (stmt);
3892
3893 FOR_EACH_EDGE (e, ei, src->succs)
3894 gcc_assert (e->aux == NULL);
3895
3896 switch (r->type)
3897 {
3898 case ERT_TRY:
3899 for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
3900 {
3901 dst = label_to_block (c->label);
3902 e = find_edge (src, dst);
3903 if (e == NULL)
3904 {
3905 error ("BB %i is missing an edge", src->index);
3906 return true;
3907 }
3908 e->aux = (void *)e;
3909
3910 /* A catch-all handler doesn't have a fallthru. */
3911 if (c->type_list == NULL)
3912 {
3913 want_fallthru = false;
3914 break;
3915 }
3916 }
3917 break;
3918
3919 case ERT_ALLOWED_EXCEPTIONS:
3920 dst = label_to_block (r->u.allowed.label);
3921 e = find_edge (src, dst);
3922 if (e == NULL)
3923 {
3924 error ("BB %i is missing an edge", src->index);
3925 return true;
3926 }
3927 e->aux = (void *)e;
3928 break;
3929
3930 default:
3931 gcc_unreachable ();
3932 }
3933
3934 fall_edge = NULL;
3935 FOR_EACH_EDGE (e, ei, src->succs)
3936 {
3937 if (e->flags & EDGE_FALLTHRU)
3938 {
3939 if (fall_edge != NULL)
3940 {
3941 error ("BB %i too many fallthru edges", src->index);
3942 return true;
3943 }
3944 fall_edge = e;
3945 }
3946 else if (e->aux)
3947 e->aux = NULL;
3948 else
3949 {
3950 error ("BB %i has incorrect edge", src->index);
3951 return true;
3952 }
3953 }
3954 if ((fall_edge != NULL) ^ want_fallthru)
3955 {
3956 error ("BB %i has incorrect fallthru edge", src->index);
3957 return true;
3958 }
3959
3960 return false;
3961 }