gcc/tree-eh.c
/* Exception handling semantics and decomposition for trees.
   Copyright (C) 2003-2013 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "hash-table.h"
#include "tm.h"
#include "tree.h"
#include "flags.h"
#include "function.h"
#include "except.h"
#include "pointer-set.h"
#include "gimple.h"
#include "gimple-ssa.h"
#include "cgraph.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "tree-ssanames.h"
#include "tree-into-ssa.h"
#include "tree-ssa.h"
#include "tree-inline.h"
#include "tree-pass.h"
#include "langhooks.h"
#include "ggc.h"
#include "diagnostic-core.h"
#include "target.h"
#include "cfgloop.h"
#include "gimple-low.h"

/* In some instances a tree and a gimple need to be stored in the same
   table, i.e. in hash tables.  This is a structure to do this.  */
typedef union {tree *tp; tree t; gimple g;} treemple;

/* Misc functions used in this file.  */

/* Remember and look up EH landing pad data for arbitrary statements.
   Really this means any statement that could_throw_p.  We could
   stuff this information into the stmt_ann data structure, but:

   (1) We absolutely rely on this information being kept until
   we get to rtl.  Once we're done with lowering here, if we lose
   the information there's no way to recover it!

   (2) There are many more statements that *cannot* throw as
   compared to those that can.  We should be saving some amount
   of space by only allocating memory for those that can throw.  */

/* Add statement T in function IFUN to landing pad NUM.  */

static void
add_stmt_to_eh_lp_fn (struct function *ifun, gimple t, int num)
{
  struct throw_stmt_node *n;
  void **slot;

  gcc_assert (num != 0);

  n = ggc_alloc_throw_stmt_node ();
  n->stmt = t;
  n->lp_nr = num;

  if (!get_eh_throw_stmt_table (ifun))
    set_eh_throw_stmt_table (ifun, htab_create_ggc (31, struct_ptr_hash,
                                                    struct_ptr_eq,
                                                    ggc_free));

  slot = htab_find_slot (get_eh_throw_stmt_table (ifun), n, INSERT);
  gcc_assert (!*slot);
  *slot = n;
}

/* Add statement T in the current function (cfun) to EH landing pad NUM.  */

void
add_stmt_to_eh_lp (gimple t, int num)
{
  add_stmt_to_eh_lp_fn (cfun, t, num);
}

/* Add statement T to the single EH landing pad in REGION.  */

static void
record_stmt_eh_region (eh_region region, gimple t)
{
  if (region == NULL)
    return;
  if (region->type == ERT_MUST_NOT_THROW)
    add_stmt_to_eh_lp_fn (cfun, t, -region->index);
  else
    {
      eh_landing_pad lp = region->landing_pads;
      if (lp == NULL)
        lp = gen_eh_landing_pad (region);
      else
        gcc_assert (lp->next_lp == NULL);
      add_stmt_to_eh_lp_fn (cfun, t, lp->index);
    }
}


/* Remove statement T in function IFUN from its EH landing pad.  */

bool
remove_stmt_from_eh_lp_fn (struct function *ifun, gimple t)
{
  struct throw_stmt_node dummy;
  void **slot;

  if (!get_eh_throw_stmt_table (ifun))
    return false;

  dummy.stmt = t;
  slot = htab_find_slot (get_eh_throw_stmt_table (ifun), &dummy,
                         NO_INSERT);
  if (slot)
    {
      htab_clear_slot (get_eh_throw_stmt_table (ifun), slot);
      return true;
    }
  else
    return false;
}


/* Remove statement T in the current function (cfun) from its
   EH landing pad.  */

bool
remove_stmt_from_eh_lp (gimple t)
{
  return remove_stmt_from_eh_lp_fn (cfun, t);
}

/* Determine if statement T is inside an EH region in function IFUN.
   Positive numbers indicate a landing pad index; negative numbers
   indicate a MUST_NOT_THROW region index; zero indicates that the
   statement is not recorded in the region table.  */

int
lookup_stmt_eh_lp_fn (struct function *ifun, gimple t)
{
  struct throw_stmt_node *p, n;

  if (ifun->eh->throw_stmt_table == NULL)
    return 0;

  n.stmt = t;
  p = (struct throw_stmt_node *) htab_find (ifun->eh->throw_stmt_table, &n);
  return p ? p->lp_nr : 0;
}

/* Likewise, but always use the current function.  */

int
lookup_stmt_eh_lp (gimple t)
{
  /* We can get called from initialized data when -fnon-call-exceptions
     is on; prevent crash.  */
  if (!cfun)
    return 0;
  return lookup_stmt_eh_lp_fn (cfun, t);
}

/* First pass of EH node decomposition.  Build up a tree of GIMPLE_TRY_FINALLY
   nodes and LABEL_DECL nodes.  We will use this during the second phase to
   determine if a goto leaves the body of a TRY_FINALLY_EXPR node.  */

struct finally_tree_node
{
  /* When storing a GIMPLE_TRY, we have to record a gimple.  However
     when deciding whether a GOTO to a certain LABEL_DECL (which is a
     tree) leaves the TRY block, it's necessary to record a tree in
     this field.  Thus a treemple is used.  */
  treemple child;
  gimple parent;
};

/* Hashtable helpers.  */

struct finally_tree_hasher : typed_free_remove <finally_tree_node>
{
  typedef finally_tree_node value_type;
  typedef finally_tree_node compare_type;
  static inline hashval_t hash (const value_type *);
  static inline bool equal (const value_type *, const compare_type *);
};

inline hashval_t
finally_tree_hasher::hash (const value_type *v)
{
  return (intptr_t)v->child.t >> 4;
}

inline bool
finally_tree_hasher::equal (const value_type *v, const compare_type *c)
{
  return v->child.t == c->child.t;
}

/* Note that this table is *not* marked GTY.  It is short-lived.  */
static hash_table <finally_tree_hasher> finally_tree;

static void
record_in_finally_tree (treemple child, gimple parent)
{
  struct finally_tree_node *n;
  finally_tree_node **slot;

  n = XNEW (struct finally_tree_node);
  n->child = child;
  n->parent = parent;

  slot = finally_tree.find_slot (n, INSERT);
  gcc_assert (!*slot);
  *slot = n;
}

static void
collect_finally_tree (gimple stmt, gimple region);

/* Go through the gimple sequence.  Works with collect_finally_tree to
   record all GIMPLE_LABEL and GIMPLE_TRY statements.  */

static void
collect_finally_tree_1 (gimple_seq seq, gimple region)
{
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start (seq); !gsi_end_p (gsi); gsi_next (&gsi))
    collect_finally_tree (gsi_stmt (gsi), region);
}

static void
collect_finally_tree (gimple stmt, gimple region)
{
  treemple temp;

  switch (gimple_code (stmt))
    {
    case GIMPLE_LABEL:
      temp.t = gimple_label_label (stmt);
      record_in_finally_tree (temp, region);
      break;

    case GIMPLE_TRY:
      if (gimple_try_kind (stmt) == GIMPLE_TRY_FINALLY)
        {
          temp.g = stmt;
          record_in_finally_tree (temp, region);
          collect_finally_tree_1 (gimple_try_eval (stmt), stmt);
          collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
        }
      else if (gimple_try_kind (stmt) == GIMPLE_TRY_CATCH)
        {
          collect_finally_tree_1 (gimple_try_eval (stmt), region);
          collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
        }
      break;

    case GIMPLE_CATCH:
      collect_finally_tree_1 (gimple_catch_handler (stmt), region);
      break;

    case GIMPLE_EH_FILTER:
      collect_finally_tree_1 (gimple_eh_filter_failure (stmt), region);
      break;

    case GIMPLE_EH_ELSE:
      collect_finally_tree_1 (gimple_eh_else_n_body (stmt), region);
      collect_finally_tree_1 (gimple_eh_else_e_body (stmt), region);
      break;

    default:
      /* A type, a decl, or some kind of statement that we're not
         interested in.  Don't walk them.  */
      break;
    }
}


/* Use the finally tree to determine if a jump from START to TARGET
   would leave the try_finally node that START lives in.  */

static bool
outside_finally_tree (treemple start, gimple target)
{
  struct finally_tree_node n, *p;

  do
    {
      n.child = start;
      p = finally_tree.find (&n);
      if (!p)
        return true;
      start.g = p->parent;
    }
  while (start.g != target);

  return false;
}
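
/* Illustrative example (added for exposition; not part of the original
   sources).  Given

       try {                            (TF1)
         try {                          (TF2)
           goto L1;
           goto L2;
         } finally { A; }
        L1:;
       } finally { B; }
      L2:;

   L1 is recorded with parent TF1, and L2 with no parent.  When lowering
   TF2, the walk from L1 reaches TF1 and then runs off the root without
   meeting TF2, so "goto L1" escapes TF2; when lowering TF1, the very
   first lookup of L1 yields TF1, so it does not escape TF1.  The walk
   from L2 meets neither node, so "goto L2" escapes both.  */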

/* Second pass of EH node decomposition.  Actually transform the GIMPLE_TRY
   nodes into a set of gotos, magic labels, and eh regions.
   The eh region creation is straightforward, but frobbing all the gotos
   and such into shape isn't.  */

/* The sequence into which we record all EH stuff.  This will be
   placed at the end of the function when we're all done.  */
static gimple_seq eh_seq;

/* Record whether an EH region contains something that can throw,
   indexed by EH region number.  */
static bitmap eh_region_may_contain_throw_map;

/* The GOTO_QUEUE is an array of GIMPLE_GOTO and GIMPLE_RETURN
   statements that are seen to escape this GIMPLE_TRY_FINALLY node.
   The idea is to record a gimple statement for everything except for
   the conditionals, which get their labels recorded.  Since labels are
   of type 'tree', we need this node to store both gimple and tree
   objects.  REPL_STMT is the sequence used to replace the goto/return
   statement.  CONT_STMT is used to store the statement that allows
   the return/goto to jump to the original destination.  */

struct goto_queue_node
{
  treemple stmt;
  location_t location;
  gimple_seq repl_stmt;
  gimple cont_stmt;
  int index;
  /* This is used when index >= 0 to indicate that stmt is a label (as
     opposed to a goto stmt).  */
  int is_label;
};
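
/* Illustrative sketch (added for exposition; not part of the original
   sources).  For

       try { ... return x; ... goto L; ... } finally { A; }  L:;

   the queue gets one entry for the return (index -1, is_label clear)
   and one for the goto (the index of L in DEST_ARRAY, is_label set).
   The redirection routines below fill REPL_STMT with a branch to the
   lowered finally block (plus any mods, e.g. a finally_tmp assignment)
   and CONT_STMT with the statement that completes the journey: the
   original return, or a rebuilt "goto L".  */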

/* State of the world while lowering.  */

struct leh_state
{
  /* What's "current" while constructing the eh region tree.  These
     correspond to variables of the same name in cfun->eh, which we
     don't have easy access to.  */
  eh_region cur_region;

  /* What's "current" for the purposes of __builtin_eh_pointer.  For
     a CATCH, this is the associated TRY.  For an EH_FILTER, this is
     the associated ALLOWED_EXCEPTIONS, etc.  */
  eh_region ehp_region;

  /* Processing of TRY_FINALLY requires a bit more state.  This is
     split out into a separate structure so that we don't have to
     copy so much when processing other nodes.  */
  struct leh_tf_state *tf;
};

struct leh_tf_state
{
  /* Pointer to the GIMPLE_TRY_FINALLY node under discussion.  The
     try_finally_expr is the original GIMPLE_TRY_FINALLY.  We need to retain
     this so that outside_finally_tree can reliably reference the tree used
     in the collect_finally_tree data structures.  */
  gimple try_finally_expr;
  gimple top_p;

  /* While lowering a top_p, it is usually expanded into multiple statements;
     the following field stores them.  */
  gimple_seq top_p_seq;

  /* The state outside this try_finally node.  */
  struct leh_state *outer;

  /* The exception region created for it.  */
  eh_region region;

  /* The goto queue.  */
  struct goto_queue_node *goto_queue;
  size_t goto_queue_size;
  size_t goto_queue_active;

  /* Pointer map to help in searching goto_queue when it is large.  */
  struct pointer_map_t *goto_queue_map;

  /* The set of unique labels seen as entries in the goto queue.  */
  vec<tree> dest_array;

  /* A label to be added at the end of the completed transformed
     sequence.  It will be set if may_fallthru was true *at one time*,
     though subsequent transformations may have cleared that flag.  */
  tree fallthru_label;

  /* True if it is possible to fall out the bottom of the try block.
     Cleared if the fallthru is converted to a goto.  */
  bool may_fallthru;

  /* True if any entry in goto_queue is a GIMPLE_RETURN.  */
  bool may_return;

  /* True if the finally block can receive an exception edge.
     Cleared if the exception case is handled by code duplication.  */
  bool may_throw;
};

static gimple_seq lower_eh_must_not_throw (struct leh_state *, gimple);

/* Search for STMT in the goto queue.  Return the replacement,
   or null if the statement isn't in the queue.  */

#define LARGE_GOTO_QUEUE 20

static void lower_eh_constructs_1 (struct leh_state *state, gimple_seq *seq);

static gimple_seq
find_goto_replacement (struct leh_tf_state *tf, treemple stmt)
{
  unsigned int i;
  void **slot;

  if (tf->goto_queue_active < LARGE_GOTO_QUEUE)
    {
      for (i = 0; i < tf->goto_queue_active; i++)
        if (tf->goto_queue[i].stmt.g == stmt.g)
          return tf->goto_queue[i].repl_stmt;
      return NULL;
    }

  /* If we have a large number of entries in the goto_queue, create a
     pointer map and use that for searching.  */

  if (!tf->goto_queue_map)
    {
      tf->goto_queue_map = pointer_map_create ();
      for (i = 0; i < tf->goto_queue_active; i++)
        {
          slot = pointer_map_insert (tf->goto_queue_map,
                                     tf->goto_queue[i].stmt.g);
          gcc_assert (*slot == NULL);
          *slot = &tf->goto_queue[i];
        }
    }

  slot = pointer_map_contains (tf->goto_queue_map, stmt.g);
  if (slot != NULL)
    return (((struct goto_queue_node *) *slot)->repl_stmt);

  return NULL;
}

/* A subroutine of replace_goto_queue_1.  Handles the sub-clauses of a
   lowered GIMPLE_COND.  If, by chance, the replacement is a simple goto,
   then we can just splat it in, otherwise we add the new stmts immediately
   after the GIMPLE_COND and redirect.  */

static void
replace_goto_queue_cond_clause (tree *tp, struct leh_tf_state *tf,
                                gimple_stmt_iterator *gsi)
{
  tree label;
  gimple_seq new_seq;
  treemple temp;
  location_t loc = gimple_location (gsi_stmt (*gsi));

  temp.tp = tp;
  new_seq = find_goto_replacement (tf, temp);
  if (!new_seq)
    return;

  if (gimple_seq_singleton_p (new_seq)
      && gimple_code (gimple_seq_first_stmt (new_seq)) == GIMPLE_GOTO)
    {
      *tp = gimple_goto_dest (gimple_seq_first_stmt (new_seq));
      return;
    }

  label = create_artificial_label (loc);
  /* Set the new label for the GIMPLE_COND.  */
  *tp = label;

  gsi_insert_after (gsi, gimple_build_label (label), GSI_CONTINUE_LINKING);
  gsi_insert_seq_after (gsi, gimple_seq_copy (new_seq), GSI_CONTINUE_LINKING);
}

/* The real work of replace_goto_queue.  Returns with GSI updated to
   point to the next statement.  */

static void replace_goto_queue_stmt_list (gimple_seq *, struct leh_tf_state *);

static void
replace_goto_queue_1 (gimple stmt, struct leh_tf_state *tf,
                      gimple_stmt_iterator *gsi)
{
  gimple_seq seq;
  treemple temp;
  temp.g = NULL;

  switch (gimple_code (stmt))
    {
    case GIMPLE_GOTO:
    case GIMPLE_RETURN:
      temp.g = stmt;
      seq = find_goto_replacement (tf, temp);
      if (seq)
        {
          gsi_insert_seq_before (gsi, gimple_seq_copy (seq), GSI_SAME_STMT);
          gsi_remove (gsi, false);
          return;
        }
      break;

    case GIMPLE_COND:
      replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 2), tf, gsi);
      replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 3), tf, gsi);
      break;

    case GIMPLE_TRY:
      replace_goto_queue_stmt_list (gimple_try_eval_ptr (stmt), tf);
      replace_goto_queue_stmt_list (gimple_try_cleanup_ptr (stmt), tf);
      break;
    case GIMPLE_CATCH:
      replace_goto_queue_stmt_list (gimple_catch_handler_ptr (stmt), tf);
      break;
    case GIMPLE_EH_FILTER:
      replace_goto_queue_stmt_list (gimple_eh_filter_failure_ptr (stmt), tf);
      break;
    case GIMPLE_EH_ELSE:
      replace_goto_queue_stmt_list (gimple_eh_else_n_body_ptr (stmt), tf);
      replace_goto_queue_stmt_list (gimple_eh_else_e_body_ptr (stmt), tf);
      break;

    default:
      /* These won't have gotos in them.  */
      break;
    }

  gsi_next (gsi);
}

/* A subroutine of replace_goto_queue.  Handles GIMPLE_SEQ.  */

static void
replace_goto_queue_stmt_list (gimple_seq *seq, struct leh_tf_state *tf)
{
  gimple_stmt_iterator gsi = gsi_start (*seq);

  while (!gsi_end_p (gsi))
    replace_goto_queue_1 (gsi_stmt (gsi), tf, &gsi);
}

/* Replace all goto queue members.  */

static void
replace_goto_queue (struct leh_tf_state *tf)
{
  if (tf->goto_queue_active == 0)
    return;
  replace_goto_queue_stmt_list (&tf->top_p_seq, tf);
  replace_goto_queue_stmt_list (&eh_seq, tf);
}

/* Add a new record to the goto queue contained in TF.  NEW_STMT is the
   data to be added, IS_LABEL indicates whether NEW_STMT is a label or
   a gimple return.  */

static void
record_in_goto_queue (struct leh_tf_state *tf,
                      treemple new_stmt,
                      int index,
                      bool is_label,
                      location_t location)
{
  size_t active, size;
  struct goto_queue_node *q;

  gcc_assert (!tf->goto_queue_map);

  active = tf->goto_queue_active;
  size = tf->goto_queue_size;
  if (active >= size)
    {
      size = (size ? size * 2 : 32);
      tf->goto_queue_size = size;
      tf->goto_queue
        = XRESIZEVEC (struct goto_queue_node, tf->goto_queue, size);
    }

  q = &tf->goto_queue[active];
  tf->goto_queue_active = active + 1;

  memset (q, 0, sizeof (*q));
  q->stmt = new_stmt;
  q->index = index;
  q->location = location;
  q->is_label = is_label;
}

/* Record LABEL in the goto queue contained in TF.
   TF is not null.  */

static void
record_in_goto_queue_label (struct leh_tf_state *tf, treemple stmt, tree label,
                            location_t location)
{
  int index;
  treemple temp, new_stmt;

  if (!label)
    return;

  /* Computed and non-local gotos do not get processed.  Given
     their nature we can neither tell whether we've escaped the
     finally block nor redirect them if we knew.  */
  if (TREE_CODE (label) != LABEL_DECL)
    return;

  /* No need to record gotos that don't leave the try block.  */
  temp.t = label;
  if (!outside_finally_tree (temp, tf->try_finally_expr))
    return;

  if (! tf->dest_array.exists ())
    {
      tf->dest_array.create (10);
      tf->dest_array.quick_push (label);
      index = 0;
    }
  else
    {
      int n = tf->dest_array.length ();
      for (index = 0; index < n; ++index)
        if (tf->dest_array[index] == label)
          break;
      if (index == n)
        tf->dest_array.safe_push (label);
    }

  /* In the case of a GOTO we want to record the destination label,
     since with a GIMPLE_COND we have an easy access to the then/else
     labels.  */
  new_stmt = stmt;
  record_in_goto_queue (tf, new_stmt, index, true, location);
}

/* For any GIMPLE_GOTO or GIMPLE_RETURN, decide whether it leaves a try_finally
   node, and if so record that fact in the goto queue associated with that
   try_finally node.  */

static void
maybe_record_in_goto_queue (struct leh_state *state, gimple stmt)
{
  struct leh_tf_state *tf = state->tf;
  treemple new_stmt;

  if (!tf)
    return;

  switch (gimple_code (stmt))
    {
    case GIMPLE_COND:
      new_stmt.tp = gimple_op_ptr (stmt, 2);
      record_in_goto_queue_label (tf, new_stmt, gimple_cond_true_label (stmt),
                                  EXPR_LOCATION (*new_stmt.tp));
      new_stmt.tp = gimple_op_ptr (stmt, 3);
      record_in_goto_queue_label (tf, new_stmt, gimple_cond_false_label (stmt),
                                  EXPR_LOCATION (*new_stmt.tp));
      break;
    case GIMPLE_GOTO:
      new_stmt.g = stmt;
      record_in_goto_queue_label (tf, new_stmt, gimple_goto_dest (stmt),
                                  gimple_location (stmt));
      break;

    case GIMPLE_RETURN:
      tf->may_return = true;
      new_stmt.g = stmt;
      record_in_goto_queue (tf, new_stmt, -1, false, gimple_location (stmt));
      break;

    default:
      gcc_unreachable ();
    }
}


#ifdef ENABLE_CHECKING
/* We do not process GIMPLE_SWITCHes for now.  As long as the original source
   was in fact structured, and we've not yet done jump threading, then none
   of the labels will leave outer GIMPLE_TRY_FINALLY nodes.  Verify this.  */

static void
verify_norecord_switch_expr (struct leh_state *state, gimple switch_expr)
{
  struct leh_tf_state *tf = state->tf;
  size_t i, n;

  if (!tf)
    return;

  n = gimple_switch_num_labels (switch_expr);

  for (i = 0; i < n; ++i)
    {
      treemple temp;
      tree lab = CASE_LABEL (gimple_switch_label (switch_expr, i));
      temp.t = lab;
      gcc_assert (!outside_finally_tree (temp, tf->try_finally_expr));
    }
}
#else
#define verify_norecord_switch_expr(state, switch_expr)
#endif

/* Redirect a RETURN_EXPR pointed to by Q to FINLAB.  If MOD is
   non-null, insert it before the new branch.  */

static void
do_return_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod)
{
  gimple x;

  /* In the case of a return, the queue node must be a gimple statement.  */
  gcc_assert (!q->is_label);

  /* Note that the return value may have already been computed, e.g.,

        int x;
        int foo (void)
        {
          x = 0;
          try {
            return x;
          } finally {
            x++;
          }
        }

     should return 0, not 1.  We don't have to do anything to make
     this happen because the return value has been placed in the
     RESULT_DECL already.  */

  q->cont_stmt = q->stmt.g;

  if (mod)
    gimple_seq_add_seq (&q->repl_stmt, mod);

  x = gimple_build_goto (finlab);
  gimple_set_location (x, q->location);
  gimple_seq_add_stmt (&q->repl_stmt, x);
}

/* Similar, but easier, for GIMPLE_GOTO.  */

static void
do_goto_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod,
                     struct leh_tf_state *tf)
{
  gimple x;

  gcc_assert (q->is_label);

  q->cont_stmt = gimple_build_goto (tf->dest_array[q->index]);

  if (mod)
    gimple_seq_add_seq (&q->repl_stmt, mod);

  x = gimple_build_goto (finlab);
  gimple_set_location (x, q->location);
  gimple_seq_add_stmt (&q->repl_stmt, x);
}

/* Emit a standard landing pad sequence into SEQ for REGION.  */

static void
emit_post_landing_pad (gimple_seq *seq, eh_region region)
{
  eh_landing_pad lp = region->landing_pads;
  gimple x;

  if (lp == NULL)
    lp = gen_eh_landing_pad (region);

  lp->post_landing_pad = create_artificial_label (UNKNOWN_LOCATION);
  EH_LANDING_PAD_NR (lp->post_landing_pad) = lp->index;

  x = gimple_build_label (lp->post_landing_pad);
  gimple_seq_add_stmt (seq, x);
}

/* Emit a RESX statement into SEQ for REGION.  */

static void
emit_resx (gimple_seq *seq, eh_region region)
{
  gimple x = gimple_build_resx (region->index);
  gimple_seq_add_stmt (seq, x);
  if (region->outer)
    record_stmt_eh_region (region->outer, x);
}

/* Emit an EH_DISPATCH statement into SEQ for REGION.  */

static void
emit_eh_dispatch (gimple_seq *seq, eh_region region)
{
  gimple x = gimple_build_eh_dispatch (region->index);
  gimple_seq_add_stmt (seq, x);
}

/* Note that the current EH region may contain a throw, or a
   call to a function which itself may contain a throw.  */

static void
note_eh_region_may_contain_throw (eh_region region)
{
  while (bitmap_set_bit (eh_region_may_contain_throw_map, region->index))
    {
      if (region->type == ERT_MUST_NOT_THROW)
        break;
      region = region->outer;
      if (region == NULL)
        break;
    }
}

/* Check if REGION has been marked as containing a throw.  If REGION is
   NULL, this predicate is false.  */

static inline bool
eh_region_may_contain_throw (eh_region r)
{
  return r && bitmap_bit_p (eh_region_may_contain_throw_map, r->index);
}

/* We want to transform
      try { body; } catch { stuff; }
   to
      normal_sequence:
        body;
        over:
      eh_sequence:
        landing_pad:
        stuff;
        goto over;

   TP is a GIMPLE_TRY node.  REGION is the region whose post_landing_pad
   should be placed before the second operand, or NULL.  OVER is
   an existing label that should be put at the exit, or NULL.  */

static gimple_seq
frob_into_branch_around (gimple tp, eh_region region, tree over)
{
  gimple x;
  gimple_seq cleanup, result;
  location_t loc = gimple_location (tp);

  cleanup = gimple_try_cleanup (tp);
  result = gimple_try_eval (tp);

  if (region)
    emit_post_landing_pad (&eh_seq, region);

  if (gimple_seq_may_fallthru (cleanup))
    {
      if (!over)
        over = create_artificial_label (loc);
      x = gimple_build_goto (over);
      gimple_set_location (x, loc);
      gimple_seq_add_stmt (&cleanup, x);
    }
  gimple_seq_add_seq (&eh_seq, cleanup);

  if (over)
    {
      x = gimple_build_label (over);
      gimple_seq_add_stmt (&result, x);
    }
  return result;
}

/* A subroutine of lower_try_finally.  Duplicate the tree rooted at T.
   Make sure to record all new labels found.  */

static gimple_seq
lower_try_finally_dup_block (gimple_seq seq, struct leh_state *outer_state,
                             location_t loc)
{
  gimple region = NULL;
  gimple_seq new_seq;
  gimple_stmt_iterator gsi;

  new_seq = copy_gimple_seq_and_replace_locals (seq);

  for (gsi = gsi_start (new_seq); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      if (LOCATION_LOCUS (gimple_location (stmt)) == UNKNOWN_LOCATION)
        {
          tree block = gimple_block (stmt);
          gimple_set_location (stmt, loc);
          gimple_set_block (stmt, block);
        }
    }

  if (outer_state->tf)
    region = outer_state->tf->try_finally_expr;
  collect_finally_tree_1 (new_seq, region);

  return new_seq;
}

/* A subroutine of lower_try_finally.  Create a fallthru label for
   the given try_finally state.  The only tricky bit here is that
   we have to make sure to record the label in our outer context.  */

static tree
lower_try_finally_fallthru_label (struct leh_tf_state *tf)
{
  tree label = tf->fallthru_label;
  treemple temp;

  if (!label)
    {
      label = create_artificial_label (gimple_location (tf->try_finally_expr));
      tf->fallthru_label = label;
      if (tf->outer->tf)
        {
          temp.t = label;
          record_in_finally_tree (temp, tf->outer->tf->try_finally_expr);
        }
    }
  return label;
}
/* A subroutine of lower_try_finally.  If FINALLY consists of a
   GIMPLE_EH_ELSE node, return it.  */

static inline gimple
get_eh_else (gimple_seq finally)
{
  gimple x = gimple_seq_first_stmt (finally);
  if (gimple_code (x) == GIMPLE_EH_ELSE)
    {
      gcc_assert (gimple_seq_singleton_p (finally));
      return x;
    }
  return NULL;
}

/* A subroutine of lower_try_finally.  If the eh_protect_cleanup_actions
   langhook returns non-null, then the language requires that the exception
   path out of a try_finally be treated specially.  To wit: the code within
   the finally block may not itself throw an exception.  We have two choices
   here.  First we can duplicate the finally block and wrap it in a
   must_not_throw region.  Second, we can generate code like

        try {
          finally_block;
        } catch {
          if (fintmp == eh_edge)
            protect_cleanup_actions;
        }

   where "fintmp" is the temporary used in the switch statement generation
   alternative considered below.  For the nonce, we always choose the first
   option.

   THIS_STATE may be null if this is a try-cleanup, not a try-finally.  */

static void
honor_protect_cleanup_actions (struct leh_state *outer_state,
                               struct leh_state *this_state,
                               struct leh_tf_state *tf)
{
  tree protect_cleanup_actions;
  gimple_stmt_iterator gsi;
  bool finally_may_fallthru;
  gimple_seq finally;
  gimple x, eh_else;

  /* First check for nothing to do.  */
  if (lang_hooks.eh_protect_cleanup_actions == NULL)
    return;
  protect_cleanup_actions = lang_hooks.eh_protect_cleanup_actions ();
  if (protect_cleanup_actions == NULL)
    return;

  finally = gimple_try_cleanup (tf->top_p);
  eh_else = get_eh_else (finally);

  /* Duplicate the FINALLY block.  Only need to do this for try-finally,
     and not for cleanups.  If we've got an EH_ELSE, extract it now.  */
  if (eh_else)
    {
      finally = gimple_eh_else_e_body (eh_else);
      gimple_try_set_cleanup (tf->top_p, gimple_eh_else_n_body (eh_else));
    }
  else if (this_state)
    finally = lower_try_finally_dup_block (finally, outer_state,
        gimple_location (tf->try_finally_expr));
  finally_may_fallthru = gimple_seq_may_fallthru (finally);

  /* If this cleanup consists of a TRY_CATCH_EXPR with TRY_CATCH_IS_CLEANUP
     set, the handler of the TRY_CATCH_EXPR is another cleanup which ought
     to be in an enclosing scope, but needs to be implemented at this level
     to avoid a nesting violation (see wrap_temporary_cleanups in
     cp/decl.c).  Since it's logically at an outer level, we should call
     terminate before we get to it, so strip it away before adding the
     MUST_NOT_THROW filter.  */
  gsi = gsi_start (finally);
  x = gsi_stmt (gsi);
  if (gimple_code (x) == GIMPLE_TRY
      && gimple_try_kind (x) == GIMPLE_TRY_CATCH
      && gimple_try_catch_is_cleanup (x))
    {
      gsi_insert_seq_before (&gsi, gimple_try_eval (x), GSI_SAME_STMT);
      gsi_remove (&gsi, false);
    }

  /* Wrap the block with protect_cleanup_actions as the action.  */
  x = gimple_build_eh_must_not_throw (protect_cleanup_actions);
  x = gimple_build_try (finally, gimple_seq_alloc_with_stmt (x),
                        GIMPLE_TRY_CATCH);
  finally = lower_eh_must_not_throw (outer_state, x);

  /* Drop all of this into the exception sequence.  */
  emit_post_landing_pad (&eh_seq, tf->region);
  gimple_seq_add_seq (&eh_seq, finally);
  if (finally_may_fallthru)
    emit_resx (&eh_seq, tf->region);

  /* Having now been handled, EH isn't to be considered with
     the rest of the outgoing edges.  */
  tf->may_throw = false;
}

/* A subroutine of lower_try_finally.  We have determined that there is
   no fallthru edge out of the finally block.  This means that there is
   no outgoing edge corresponding to any incoming edge.  Restructure the
   try_finally node for this special case.  */

static void
lower_try_finally_nofallthru (struct leh_state *state,
                              struct leh_tf_state *tf)
{
  tree lab;
  gimple x, eh_else;
  gimple_seq finally;
  struct goto_queue_node *q, *qe;

  lab = create_artificial_label (gimple_location (tf->try_finally_expr));

  /* We expect that tf->top_p is a GIMPLE_TRY.  */
  finally = gimple_try_cleanup (tf->top_p);
  tf->top_p_seq = gimple_try_eval (tf->top_p);

  x = gimple_build_label (lab);
  gimple_seq_add_stmt (&tf->top_p_seq, x);

  q = tf->goto_queue;
  qe = q + tf->goto_queue_active;
  for (; q < qe; ++q)
    if (q->index < 0)
      do_return_redirection (q, lab, NULL);
    else
      do_goto_redirection (q, lab, NULL, tf);

  replace_goto_queue (tf);

  /* Emit the finally block into the stream.  Lower EH_ELSE at this time.  */
  eh_else = get_eh_else (finally);
  if (eh_else)
    {
      finally = gimple_eh_else_n_body (eh_else);
      lower_eh_constructs_1 (state, &finally);
      gimple_seq_add_seq (&tf->top_p_seq, finally);

      if (tf->may_throw)
        {
          finally = gimple_eh_else_e_body (eh_else);
          lower_eh_constructs_1 (state, &finally);

          emit_post_landing_pad (&eh_seq, tf->region);
          gimple_seq_add_seq (&eh_seq, finally);
        }
    }
  else
    {
      lower_eh_constructs_1 (state, &finally);
      gimple_seq_add_seq (&tf->top_p_seq, finally);

      if (tf->may_throw)
        {
          emit_post_landing_pad (&eh_seq, tf->region);

          x = gimple_build_goto (lab);
          gimple_set_location (x, gimple_location (tf->try_finally_expr));
          gimple_seq_add_stmt (&eh_seq, x);
        }
    }
}
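
/* Illustrative sketch (added for exposition; not part of the original
   sources).  Because the finally body never falls through, one shared
   copy suffices for every incoming edge:

       try { ... goto L; ... return; ... } finally { F; }

   becomes roughly

       ... goto lab; ... goto lab; ...
      lab:
       F;

   where LAB is the artificial label created above.  If the region may
   throw, the EH path gets a landing pad followed by another "goto lab"
   (or, under EH_ELSE, its own copy of the exception body).  */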

/* A subroutine of lower_try_finally.  We have determined that there is
   exactly one destination of the finally block.  Restructure the
   try_finally node for this special case.  */

static void
lower_try_finally_onedest (struct leh_state *state, struct leh_tf_state *tf)
{
  struct goto_queue_node *q, *qe;
  gimple x;
  gimple_seq finally;
  gimple_stmt_iterator gsi;
  tree finally_label;
  location_t loc = gimple_location (tf->try_finally_expr);

  finally = gimple_try_cleanup (tf->top_p);
  tf->top_p_seq = gimple_try_eval (tf->top_p);

  /* Since there's only one destination, and the destination edge can only
     either be EH or non-EH, that implies that all of our incoming edges
     are of the same type.  Therefore we can lower EH_ELSE immediately.  */
  x = get_eh_else (finally);
  if (x)
    {
      if (tf->may_throw)
        finally = gimple_eh_else_e_body (x);
      else
        finally = gimple_eh_else_n_body (x);
    }

  lower_eh_constructs_1 (state, &finally);

  for (gsi = gsi_start (finally); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      if (LOCATION_LOCUS (gimple_location (stmt)) == UNKNOWN_LOCATION)
        {
          tree block = gimple_block (stmt);
          gimple_set_location (stmt, gimple_location (tf->try_finally_expr));
          gimple_set_block (stmt, block);
        }
    }

  if (tf->may_throw)
    {
      /* Only reachable via the exception edge.  Add the given label to
         the head of the FINALLY block.  Append a RESX at the end.  */
      emit_post_landing_pad (&eh_seq, tf->region);
      gimple_seq_add_seq (&eh_seq, finally);
      emit_resx (&eh_seq, tf->region);
      return;
    }

  if (tf->may_fallthru)
    {
      /* Only reachable via the fallthru edge.  Do nothing but let
         the two blocks run together; we'll fall out the bottom.  */
      gimple_seq_add_seq (&tf->top_p_seq, finally);
      return;
    }

  finally_label = create_artificial_label (loc);
  x = gimple_build_label (finally_label);
  gimple_seq_add_stmt (&tf->top_p_seq, x);

  gimple_seq_add_seq (&tf->top_p_seq, finally);

  q = tf->goto_queue;
  qe = q + tf->goto_queue_active;

  if (tf->may_return)
    {
      /* Reachable by return expressions only.  Redirect them.  */
      for (; q < qe; ++q)
        do_return_redirection (q, finally_label, NULL);
      replace_goto_queue (tf);
    }
  else
    {
      /* Reachable by goto expressions only.  Redirect them.  */
      for (; q < qe; ++q)
        do_goto_redirection (q, finally_label, NULL, tf);
      replace_goto_queue (tf);

      if (tf->dest_array[0] == tf->fallthru_label)
        {
          /* Reachable by goto to fallthru label only.  Redirect it
             to the new label (already created, sadly), and do not
             emit the final branch out, or the fallthru label.  */
          tf->fallthru_label = NULL;
          return;
        }
    }

  /* Place the original return/goto to the original destination
     immediately after the finally block.  */
  x = tf->goto_queue[0].cont_stmt;
  gimple_seq_add_stmt (&tf->top_p_seq, x);
  maybe_record_in_goto_queue (state, x);
}
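
/* Illustrative sketch (added for exposition; not part of the original
   sources).  With a single destination the finally body is emitted
   exactly once; for the goto-only case,

       try { ... goto L; ... goto L; ... } finally { F; }

   becomes roughly

       ... goto finally_label; ... goto finally_label; ...
      finally_label:
       F;
       goto L;

   where the trailing "goto L" is the queued CONT_STMT.  The EH-only
   and fallthru-only cases need no label or final branch at all.  */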

/* A subroutine of lower_try_finally.  There are multiple edges incoming
   and outgoing from the finally block.  Implement this by duplicating the
   finally block for every destination.  */

static void
lower_try_finally_copy (struct leh_state *state, struct leh_tf_state *tf)
{
  gimple_seq finally;
  gimple_seq new_stmt;
  gimple_seq seq;
  gimple x, eh_else;
  tree tmp;
  location_t tf_loc = gimple_location (tf->try_finally_expr);

  finally = gimple_try_cleanup (tf->top_p);

  /* Notice EH_ELSE, and simplify some of the remaining code
     by considering FINALLY to be the normal return path only.  */
  eh_else = get_eh_else (finally);
  if (eh_else)
    finally = gimple_eh_else_n_body (eh_else);

  tf->top_p_seq = gimple_try_eval (tf->top_p);
  new_stmt = NULL;

  if (tf->may_fallthru)
    {
      seq = lower_try_finally_dup_block (finally, state, tf_loc);
      lower_eh_constructs_1 (state, &seq);
      gimple_seq_add_seq (&new_stmt, seq);

      tmp = lower_try_finally_fallthru_label (tf);
      x = gimple_build_goto (tmp);
      gimple_set_location (x, tf_loc);
      gimple_seq_add_stmt (&new_stmt, x);
    }

  if (tf->may_throw)
    {
      /* We don't need to copy the EH path of EH_ELSE,
         since it is only emitted once.  */
      if (eh_else)
        seq = gimple_eh_else_e_body (eh_else);
      else
        seq = lower_try_finally_dup_block (finally, state, tf_loc);
      lower_eh_constructs_1 (state, &seq);

      emit_post_landing_pad (&eh_seq, tf->region);
      gimple_seq_add_seq (&eh_seq, seq);
      emit_resx (&eh_seq, tf->region);
    }

  if (tf->goto_queue)
    {
      struct goto_queue_node *q, *qe;
      int return_index, index;
      struct labels_s
      {
        struct goto_queue_node *q;
        tree label;
      } *labels;

      return_index = tf->dest_array.length ();
      labels = XCNEWVEC (struct labels_s, return_index + 1);

      q = tf->goto_queue;
      qe = q + tf->goto_queue_active;
      for (; q < qe; q++)
        {
          index = q->index < 0 ? return_index : q->index;

          if (!labels[index].q)
            labels[index].q = q;
        }

      for (index = 0; index < return_index + 1; index++)
        {
          tree lab;

          q = labels[index].q;
          if (! q)
            continue;

          lab = labels[index].label
            = create_artificial_label (tf_loc);

          if (index == return_index)
            do_return_redirection (q, lab, NULL);
          else
            do_goto_redirection (q, lab, NULL, tf);

          x = gimple_build_label (lab);
          gimple_seq_add_stmt (&new_stmt, x);

          seq = lower_try_finally_dup_block (finally, state, q->location);
          lower_eh_constructs_1 (state, &seq);
          gimple_seq_add_seq (&new_stmt, seq);

          gimple_seq_add_stmt (&new_stmt, q->cont_stmt);
          maybe_record_in_goto_queue (state, q->cont_stmt);
        }

      for (q = tf->goto_queue; q < qe; q++)
        {
          tree lab;

          index = q->index < 0 ? return_index : q->index;

          if (labels[index].q == q)
            continue;

          lab = labels[index].label;

          if (index == return_index)
            do_return_redirection (q, lab, NULL);
          else
            do_goto_redirection (q, lab, NULL, tf);
        }

      replace_goto_queue (tf);
      free (labels);
    }

  /* Need to link new stmts after running replace_goto_queue due
     to not wanting to process the same goto stmts twice.  */
  gimple_seq_add_seq (&tf->top_p_seq, new_stmt);
}
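
/* Illustrative sketch (added for exposition; not part of the original
   sources).  Every destination receives a private copy of the finally
   body:

       try { ... goto L; ... return; ... } finally { F; }

   becomes roughly

       ...
       F; goto fallthru_label;     (copy for the fallthru edge, if any)
      lab1: F; goto L;             (one copy per goto destination)
      lab2: F; return;             (one copy for the returns)

   plus, on the EH path, a landing pad, another copy of F and a RESX.  */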

/* A subroutine of lower_try_finally.  There are multiple edges incoming
   and outgoing from the finally block.  Implement this by instrumenting
   each incoming edge and creating a switch statement at the end of the
   finally block that branches to the appropriate destination.  */

static void
lower_try_finally_switch (struct leh_state *state, struct leh_tf_state *tf)
{
  struct goto_queue_node *q, *qe;
  tree finally_tmp, finally_label;
  int return_index, eh_index, fallthru_index;
  int nlabels, ndests, j, last_case_index;
  tree last_case;
  vec<tree> case_label_vec;
  gimple_seq switch_body = NULL;
  gimple x, eh_else;
  tree tmp;
  gimple switch_stmt;
  gimple_seq finally;
  struct pointer_map_t *cont_map = NULL;
  /* The location of the TRY_FINALLY stmt.  */
  location_t tf_loc = gimple_location (tf->try_finally_expr);
  /* The location of the finally block.  */
  location_t finally_loc;

  finally = gimple_try_cleanup (tf->top_p);
  eh_else = get_eh_else (finally);

  /* Mash the TRY block to the head of the chain.  */
  tf->top_p_seq = gimple_try_eval (tf->top_p);

  /* The location of the finally is either the last stmt in the finally
     block or the location of the TRY_FINALLY itself.  */
  x = gimple_seq_last_stmt (finally);
  finally_loc = x ? gimple_location (x) : tf_loc;

  /* Lower the finally block itself.  */
  lower_eh_constructs_1 (state, &finally);

  /* Prepare for switch statement generation.  */
  nlabels = tf->dest_array.length ();
  return_index = nlabels;
  eh_index = return_index + tf->may_return;
  fallthru_index = eh_index + (tf->may_throw && !eh_else);
  ndests = fallthru_index + tf->may_fallthru;

  finally_tmp = create_tmp_var (integer_type_node, "finally_tmp");
  finally_label = create_artificial_label (finally_loc);

  /* We use vec::quick_push on case_label_vec throughout this function,
     since we know the size in advance and allocate precisely as much
     space as needed.  */
  case_label_vec.create (ndests);
  last_case = NULL;
  last_case_index = 0;

  /* Begin inserting code for getting to the finally block.  Things
     are done in this order to correspond to the sequence the code is
     laid out.  */

  if (tf->may_fallthru)
    {
      x = gimple_build_assign (finally_tmp,
                               build_int_cst (integer_type_node,
                                              fallthru_index));
      gimple_seq_add_stmt (&tf->top_p_seq, x);

      tmp = build_int_cst (integer_type_node, fallthru_index);
      last_case = build_case_label (tmp, NULL,
                                    create_artificial_label (tf_loc));
      case_label_vec.quick_push (last_case);
      last_case_index++;

      x = gimple_build_label (CASE_LABEL (last_case));
      gimple_seq_add_stmt (&switch_body, x);

      tmp = lower_try_finally_fallthru_label (tf);
      x = gimple_build_goto (tmp);
      gimple_set_location (x, tf_loc);
      gimple_seq_add_stmt (&switch_body, x);
    }

  /* For EH_ELSE, emit the exception path (plus resx) now, then
     subsequently we only need consider the normal path.  */
  if (eh_else)
    {
      if (tf->may_throw)
        {
          finally = gimple_eh_else_e_body (eh_else);
          lower_eh_constructs_1 (state, &finally);

          emit_post_landing_pad (&eh_seq, tf->region);
          gimple_seq_add_seq (&eh_seq, finally);
          emit_resx (&eh_seq, tf->region);
        }

      finally = gimple_eh_else_n_body (eh_else);
    }
  else if (tf->may_throw)
    {
      emit_post_landing_pad (&eh_seq, tf->region);

      x = gimple_build_assign (finally_tmp,
                               build_int_cst (integer_type_node, eh_index));
      gimple_seq_add_stmt (&eh_seq, x);

      x = gimple_build_goto (finally_label);
      gimple_set_location (x, tf_loc);
      gimple_seq_add_stmt (&eh_seq, x);

      tmp = build_int_cst (integer_type_node, eh_index);
      last_case = build_case_label (tmp, NULL,
                                    create_artificial_label (tf_loc));
      case_label_vec.quick_push (last_case);
      last_case_index++;

      x = gimple_build_label (CASE_LABEL (last_case));
      gimple_seq_add_stmt (&eh_seq, x);
      emit_resx (&eh_seq, tf->region);
    }

  x = gimple_build_label (finally_label);
  gimple_seq_add_stmt (&tf->top_p_seq, x);

  gimple_seq_add_seq (&tf->top_p_seq, finally);

  /* Redirect each incoming goto edge.  */
  q = tf->goto_queue;
  qe = q + tf->goto_queue_active;
  j = last_case_index + tf->may_return;
  /* Prepare the assignments to finally_tmp that are executed upon the
     entrance through a particular edge.  */
  for (; q < qe; ++q)
    {
      gimple_seq mod = NULL;
      int switch_id;
      unsigned int case_index;

      if (q->index < 0)
        {
          x = gimple_build_assign (finally_tmp,
                                   build_int_cst (integer_type_node,
                                                  return_index));
          gimple_seq_add_stmt (&mod, x);
          do_return_redirection (q, finally_label, mod);
          switch_id = return_index;
        }
      else
        {
          x = gimple_build_assign (finally_tmp,
                                   build_int_cst (integer_type_node, q->index));
          gimple_seq_add_stmt (&mod, x);
          do_goto_redirection (q, finally_label, mod, tf);
          switch_id = q->index;
        }

      case_index = j + q->index;
      if (case_label_vec.length () <= case_index || !case_label_vec[case_index])
        {
          tree case_lab;
          void **slot;
          tmp = build_int_cst (integer_type_node, switch_id);
          case_lab = build_case_label (tmp, NULL,
                                       create_artificial_label (tf_loc));
          /* We store the cont_stmt in the pointer map, so that we can recover
             it in the loop below.  */
          if (!cont_map)
            cont_map = pointer_map_create ();
          slot = pointer_map_insert (cont_map, case_lab);
          *slot = q->cont_stmt;
          case_label_vec.quick_push (case_lab);
        }
    }
  for (j = last_case_index; j < last_case_index + nlabels; j++)
    {
      gimple cont_stmt;
      void **slot;

      last_case = case_label_vec[j];

      gcc_assert (last_case);
      gcc_assert (cont_map);

      slot = pointer_map_contains (cont_map, last_case);
      gcc_assert (slot);
      cont_stmt = *(gimple *) slot;

      x = gimple_build_label (CASE_LABEL (last_case));
      gimple_seq_add_stmt (&switch_body, x);
      gimple_seq_add_stmt (&switch_body, cont_stmt);
      maybe_record_in_goto_queue (state, cont_stmt);
    }
  if (cont_map)
    pointer_map_destroy (cont_map);

  replace_goto_queue (tf);

  /* Make sure that the last case is the default label, as one is required.
     Then sort the labels, which is also required in GIMPLE.  */
  CASE_LOW (last_case) = NULL;
  sort_case_labels (case_label_vec);

  /* Build the switch statement, setting last_case to be the default
     label.  */
  switch_stmt = gimple_build_switch (finally_tmp, last_case,
                                     case_label_vec);
  gimple_set_location (switch_stmt, finally_loc);

  /* Need to link SWITCH_STMT after running replace_goto_queue
     due to not wanting to process the same goto stmts twice.  */
  gimple_seq_add_stmt (&tf->top_p_seq, switch_stmt);
  gimple_seq_add_seq (&tf->top_p_seq, switch_body);
}
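
/* Illustrative sketch (added for exposition; not part of the original
   sources).  Each incoming edge stores a discriminator into FINALLY_TMP
   and the single shared finally body dispatches on it.  With one goto
   destination L, a fallthru edge and no EH edge, the result is roughly

       ... finally_tmp = 0; goto finally_label; ...   (the escaping goto)
       finally_tmp = 1;                               (the fallthru edge)
      finally_label:
       F;
       switch (finally_tmp)
         {
         case 1: goto fallthru_label;
         default: goto L;
         }

   The concrete index values depend on which of the goto, return, EH
   and fallthru edges exist; see the *_index computations above.  */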

/* Decide whether or not we are going to duplicate the finally block.
   There are several considerations.

   First, if this is Java, then the finally block contains code
   written by the user.  It has line numbers associated with it,
   so duplicating the block means it's difficult to set a breakpoint.
   Since controlling code generation via -g is verboten, we simply
   never duplicate code without optimization.

   Second, we'd like to prevent egregious code growth.  One way to
   do this is to estimate the size of the finally block, multiply
   that by the number of copies we'd need to make, and compare against
   the estimate of the size of the switch machinery we'd have to add.  */

static bool
decide_copy_try_finally (int ndests, bool may_throw, gimple_seq finally)
{
  int f_estimate, sw_estimate;
  gimple eh_else;

  /* If there's an EH_ELSE involved, the exception path is separate
     and really doesn't come into play for this computation.  */
  eh_else = get_eh_else (finally);
  if (eh_else)
    {
      ndests -= may_throw;
      finally = gimple_eh_else_n_body (eh_else);
    }

  if (!optimize)
    {
      gimple_stmt_iterator gsi;

      if (ndests == 1)
        return true;

      for (gsi = gsi_start (finally); !gsi_end_p (gsi); gsi_next (&gsi))
        {
          gimple stmt = gsi_stmt (gsi);
          if (!is_gimple_debug (stmt) && !gimple_clobber_p (stmt))
            return false;
        }
      return true;
    }

  /* Finally estimate N times, plus N gotos.  */
  f_estimate = count_insns_seq (finally, &eni_size_weights);
  f_estimate = (f_estimate + 1) * ndests;

  /* Switch statement (cost 10), N variable assignments, N gotos.  */
  sw_estimate = 10 + 2 * ndests;

  /* Optimize for size clearly wants our best guess.  */
  if (optimize_function_for_size_p (cfun))
    return f_estimate < sw_estimate;

  /* ??? These numbers are completely made up so far.  */
  if (optimize > 1)
    return f_estimate < 100 || f_estimate < sw_estimate * 2;
  else
    return f_estimate < 40 || f_estimate * 2 < sw_estimate * 3;
}
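
/* Worked example (added for exposition; not part of the original
   sources).  For a finally body that counts as 6 instructions with
   ndests == 3: f_estimate = (6 + 1) * 3 = 21 and sw_estimate
   = 10 + 2 * 3 = 16.  Optimizing for size, 21 < 16 fails and the
   switch form is chosen; at -O2 the copies win via 21 < 100.  */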

/* REG is the enclosing region for a possible cleanup region, or the region
   itself.  Returns TRUE if such a region would be unreachable.

   Cleanup regions within a must-not-throw region aren't actually reachable
   even if there are throwing stmts within them, because the personality
   routine will call terminate before unwinding.  */

static bool
cleanup_is_dead_in (eh_region reg)
{
  while (reg && reg->type == ERT_CLEANUP)
    reg = reg->outer;
  return (reg && reg->type == ERT_MUST_NOT_THROW);
}
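
/* Illustrative example (added for exposition; not part of the original
   sources).  In

       must_not_throw { ... try { ... } cleanup { C; } ... }

   the cleanup region is dead: unwinding into the enclosing
   MUST_NOT_THROW region makes the personality routine call terminate,
   so C's landing pad can never be reached.  */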

/* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY_FINALLY node
   to a sequence of labels and blocks, plus the exception region trees
   that record all the magic.  This is complicated by the need to
   arrange for the FINALLY block to be executed on all exits.  */

static gimple_seq
lower_try_finally (struct leh_state *state, gimple tp)
{
  struct leh_tf_state this_tf;
  struct leh_state this_state;
  int ndests;
  gimple_seq old_eh_seq;

  /* Process the try block.  */

  memset (&this_tf, 0, sizeof (this_tf));
  this_tf.try_finally_expr = tp;
  this_tf.top_p = tp;
  this_tf.outer = state;
  if (using_eh_for_cleanups_p () && !cleanup_is_dead_in (state->cur_region))
    {
      this_tf.region = gen_eh_region_cleanup (state->cur_region);
      this_state.cur_region = this_tf.region;
    }
  else
    {
      this_tf.region = NULL;
      this_state.cur_region = state->cur_region;
    }

  this_state.ehp_region = state->ehp_region;
  this_state.tf = &this_tf;

  old_eh_seq = eh_seq;
  eh_seq = NULL;

  lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));

  /* Determine if the try block is escaped through the bottom.  */
  this_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp));

  /* Determine if any exceptions are possible within the try block.  */
  if (this_tf.region)
    this_tf.may_throw = eh_region_may_contain_throw (this_tf.region);
  if (this_tf.may_throw)
    honor_protect_cleanup_actions (state, &this_state, &this_tf);

  /* Determine how many edges (still) reach the finally block.  Or rather,
     how many destinations are reached by the finally block.  Use this to
     determine how we process the finally block itself.  */

  ndests = this_tf.dest_array.length ();
  ndests += this_tf.may_fallthru;
  ndests += this_tf.may_return;
  ndests += this_tf.may_throw;

  /* If the FINALLY block is not reachable, dike it out.  */
  if (ndests == 0)
    {
      gimple_seq_add_seq (&this_tf.top_p_seq, gimple_try_eval (tp));
      gimple_try_set_cleanup (tp, NULL);
    }
  /* If the finally block doesn't fall through, then any destination
     we might try to impose there isn't reached either.  There may be
     some minor amount of cleanup and redirection still needed.  */
  else if (!gimple_seq_may_fallthru (gimple_try_cleanup (tp)))
    lower_try_finally_nofallthru (state, &this_tf);

  /* We can easily special-case redirection to a single destination.  */
  else if (ndests == 1)
    lower_try_finally_onedest (state, &this_tf);
  else if (decide_copy_try_finally (ndests, this_tf.may_throw,
                                    gimple_try_cleanup (tp)))
    lower_try_finally_copy (state, &this_tf);
  else
    lower_try_finally_switch (state, &this_tf);

  /* If someone requested we add a label at the end of the transformed
     block, do so.  */
  if (this_tf.fallthru_label)
    {
      /* This must be reached only if ndests == 0.  */
      gimple x = gimple_build_label (this_tf.fallthru_label);
      gimple_seq_add_stmt (&this_tf.top_p_seq, x);
    }

  this_tf.dest_array.release ();
  free (this_tf.goto_queue);
  if (this_tf.goto_queue_map)
    pointer_map_destroy (this_tf.goto_queue_map);

  /* If there was an old (aka outer) eh_seq, append the current eh_seq.
     If there was no old eh_seq, then the append is trivially already done.  */
  if (old_eh_seq)
    {
      if (eh_seq == NULL)
        eh_seq = old_eh_seq;
      else
        {
          gimple_seq new_eh_seq = eh_seq;
          eh_seq = old_eh_seq;
          gimple_seq_add_seq (&eh_seq, new_eh_seq);
        }
    }

  return this_tf.top_p_seq;
}

/* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY_CATCH with a
   list of GIMPLE_CATCH to a sequence of labels and blocks, plus the
   exception region trees that record all the magic.  */

static gimple_seq
lower_catch (struct leh_state *state, gimple tp)
{
  eh_region try_region = NULL;
  struct leh_state this_state = *state;
  gimple_stmt_iterator gsi;
  tree out_label;
  gimple_seq new_seq, cleanup;
  gimple x;
  location_t try_catch_loc = gimple_location (tp);

  if (flag_exceptions)
    {
      try_region = gen_eh_region_try (state->cur_region);
      this_state.cur_region = try_region;
    }

  lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));

  if (!eh_region_may_contain_throw (try_region))
    return gimple_try_eval (tp);

  new_seq = NULL;
  emit_eh_dispatch (&new_seq, try_region);
  emit_resx (&new_seq, try_region);

  this_state.cur_region = state->cur_region;
  this_state.ehp_region = try_region;

  out_label = NULL;
  cleanup = gimple_try_cleanup (tp);
  for (gsi = gsi_start (cleanup);
       !gsi_end_p (gsi);
       gsi_next (&gsi))
    {
      eh_catch c;
      gimple gcatch;
      gimple_seq handler;

      gcatch = gsi_stmt (gsi);
      c = gen_eh_region_catch (try_region, gimple_catch_types (gcatch));

      handler = gimple_catch_handler (gcatch);
      lower_eh_constructs_1 (&this_state, &handler);

      c->label = create_artificial_label (UNKNOWN_LOCATION);
      x = gimple_build_label (c->label);
      gimple_seq_add_stmt (&new_seq, x);

      gimple_seq_add_seq (&new_seq, handler);

      if (gimple_seq_may_fallthru (new_seq))
        {
          if (!out_label)
            out_label = create_artificial_label (try_catch_loc);

          x = gimple_build_goto (out_label);
          gimple_seq_add_stmt (&new_seq, x);
        }
      if (!c->type_list)
        break;
    }

  gimple_try_set_cleanup (tp, new_seq);

  return frob_into_branch_around (tp, try_region, out_label);
}
1817
1818 /* A subroutine of lower_eh_constructs_1. Lower a GIMPLE_TRY with a
1819 GIMPLE_EH_FILTER to a sequence of labels and blocks, plus the exception
1820 region trees that record all the magic. */
1821
1822 static gimple_seq
1823 lower_eh_filter (struct leh_state *state, gimple tp)
1824 {
1825 struct leh_state this_state = *state;
1826 eh_region this_region = NULL;
1827 gimple inner, x;
1828 gimple_seq new_seq;
1829
1830 inner = gimple_seq_first_stmt (gimple_try_cleanup (tp));
1831
1832 if (flag_exceptions)
1833 {
1834 this_region = gen_eh_region_allowed (state->cur_region,
1835 gimple_eh_filter_types (inner));
1836 this_state.cur_region = this_region;
1837 }
1838
1839 lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1840
1841 if (!eh_region_may_contain_throw (this_region))
1842 return gimple_try_eval (tp);
1843
1844 new_seq = NULL;
1845 this_state.cur_region = state->cur_region;
1846 this_state.ehp_region = this_region;
1847
1848 emit_eh_dispatch (&new_seq, this_region);
1849 emit_resx (&new_seq, this_region);
1850
1851 this_region->u.allowed.label = create_artificial_label (UNKNOWN_LOCATION);
1852 x = gimple_build_label (this_region->u.allowed.label);
1853 gimple_seq_add_stmt (&new_seq, x);
1854
1855 lower_eh_constructs_1 (&this_state, gimple_eh_filter_failure_ptr (inner));
1856 gimple_seq_add_seq (&new_seq, gimple_eh_filter_failure (inner));
1857
1858 gimple_try_set_cleanup (tp, new_seq);
1859
1860 return frob_into_branch_around (tp, this_region, NULL);
1861 }
1862
1863 /* A subroutine of lower_eh_constructs_1. Lower a GIMPLE_TRY with
1864 a GIMPLE_EH_MUST_NOT_THROW to a sequence of labels and blocks,
1865 plus the exception region trees that record all the magic. */
1866
1867 static gimple_seq
1868 lower_eh_must_not_throw (struct leh_state *state, gimple tp)
1869 {
1870 struct leh_state this_state = *state;
1871
1872 if (flag_exceptions)
1873 {
1874 gimple inner = gimple_seq_first_stmt (gimple_try_cleanup (tp));
1875 eh_region this_region;
1876
1877 this_region = gen_eh_region_must_not_throw (state->cur_region);
1878 this_region->u.must_not_throw.failure_decl
1879 = gimple_eh_must_not_throw_fndecl (inner);
1880 this_region->u.must_not_throw.failure_loc
1881 = LOCATION_LOCUS (gimple_location (tp));
1882
1883 /* In order to get mangling applied to this decl, we must mark it
1884 used now. Otherwise, pass_ipa_free_lang_data won't think it
1885 needs to happen. */
1886 TREE_USED (this_region->u.must_not_throw.failure_decl) = 1;
1887
1888 this_state.cur_region = this_region;
1889 }
1890
1891 lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1892
1893 return gimple_try_eval (tp);
1894 }
1895
1896 /* Implement a cleanup expression. This is similar to try-finally,
1897 except that we only execute the cleanup block for exception edges. */
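
/* A typical source of such cleanups is a C++ destructor. Informally,
   for

     { A a; foo (); }

   the destructor call for A must run when foo () throws; on the
   normal path it is emitted inline after foo (), so only the EH edge
   needs to branch through the cleanup sequence built here (usually
   via honor_protect_cleanup_actions) before resuming unwinding.  */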
1898
1899 static gimple_seq
1900 lower_cleanup (struct leh_state *state, gimple tp)
1901 {
1902 struct leh_state this_state = *state;
1903 eh_region this_region = NULL;
1904 struct leh_tf_state fake_tf;
1905 gimple_seq result;
1906 bool cleanup_dead = cleanup_is_dead_in (state->cur_region);
1907
1908 if (flag_exceptions && !cleanup_dead)
1909 {
1910 this_region = gen_eh_region_cleanup (state->cur_region);
1911 this_state.cur_region = this_region;
1912 }
1913
1914 lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1915
1916 if (cleanup_dead || !eh_region_may_contain_throw (this_region))
1917 return gimple_try_eval (tp);
1918
1919 /* Build enough of a try-finally state so that we can reuse
1920 honor_protect_cleanup_actions. */
1921 memset (&fake_tf, 0, sizeof (fake_tf));
1922 fake_tf.top_p = fake_tf.try_finally_expr = tp;
1923 fake_tf.outer = state;
1924 fake_tf.region = this_region;
1925 fake_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp));
1926 fake_tf.may_throw = true;
1927
1928 honor_protect_cleanup_actions (state, NULL, &fake_tf);
1929
1930 if (fake_tf.may_throw)
1931 {
1932 /* In this case honor_protect_cleanup_actions had nothing to do,
1933 and we should process this normally. */
1934 lower_eh_constructs_1 (state, gimple_try_cleanup_ptr (tp));
1935 result = frob_into_branch_around (tp, this_region,
1936 fake_tf.fallthru_label);
1937 }
1938 else
1939 {
1940 /* In this case honor_protect_cleanup_actions did nearly all of
1941 the work. All we have left is to append the fallthru_label. */
1942
1943 result = gimple_try_eval (tp);
1944 if (fake_tf.fallthru_label)
1945 {
1946 gimple x = gimple_build_label (fake_tf.fallthru_label);
1947 gimple_seq_add_stmt (&result, x);
1948 }
1949 }
1950 return result;
1951 }
1952
1953 /* Main loop for lowering eh constructs. Also moves gsi to the next
1954 statement. */
1955
1956 static void
1957 lower_eh_constructs_2 (struct leh_state *state, gimple_stmt_iterator *gsi)
1958 {
1959 gimple_seq replace;
1960 gimple x;
1961 gimple stmt = gsi_stmt (*gsi);
1962
1963 switch (gimple_code (stmt))
1964 {
1965 case GIMPLE_CALL:
1966 {
1967 tree fndecl = gimple_call_fndecl (stmt);
1968 tree rhs, lhs;
1969
1970 if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
1971 switch (DECL_FUNCTION_CODE (fndecl))
1972 {
1973 case BUILT_IN_EH_POINTER:
1974 /* The front end may have generated a call to
1975 __builtin_eh_pointer (0) within a catch region. Replace
1976 this zero argument with the current catch region number. */
1977 if (state->ehp_region)
1978 {
1979 tree nr = build_int_cst (integer_type_node,
1980 state->ehp_region->index);
1981 gimple_call_set_arg (stmt, 0, nr);
1982 }
1983 else
1984 {
1985 /* The user has done something silly. Remove it. */
1986 rhs = null_pointer_node;
1987 goto do_replace;
1988 }
1989 break;
1990
1991 case BUILT_IN_EH_FILTER:
1992 /* ??? This should never appear, but since it's a builtin it
1993 is accessible to abuse by users. Just remove it and
1994 replace the use with the arbitrary value zero. */
1995 rhs = build_int_cst (TREE_TYPE (TREE_TYPE (fndecl)), 0);
1996 do_replace:
1997 lhs = gimple_call_lhs (stmt);
1998 x = gimple_build_assign (lhs, rhs);
1999 gsi_insert_before (gsi, x, GSI_SAME_STMT);
2000 /* FALLTHRU */
2001
2002 case BUILT_IN_EH_COPY_VALUES:
2003 /* Likewise this should not appear. Remove it. */
2004 gsi_remove (gsi, true);
2005 return;
2006
2007 default:
2008 break;
2009 }
2010 }
2011 /* FALLTHRU */
2012
2013 case GIMPLE_ASSIGN:
2014 /* If the stmt can throw, use a new temporary for the assignment
2015 to a LHS. This makes sure the old value of the LHS is
2016 available on the EH edge. Only do so for statements that
2017 potentially fall through (e.g. no noreturn calls), otherwise
2018 this new assignment might create fake fallthru regions. */
2019 if (stmt_could_throw_p (stmt)
2020 && gimple_has_lhs (stmt)
2021 && gimple_stmt_may_fallthru (stmt)
2022 && !tree_could_throw_p (gimple_get_lhs (stmt))
2023 && is_gimple_reg_type (TREE_TYPE (gimple_get_lhs (stmt))))
2024 {
2025 tree lhs = gimple_get_lhs (stmt);
2026 tree tmp = create_tmp_var (TREE_TYPE (lhs), NULL);
2027 gimple s = gimple_build_assign (lhs, tmp);
2028 gimple_set_location (s, gimple_location (stmt));
2029 gimple_set_block (s, gimple_block (stmt));
2030 gimple_set_lhs (stmt, tmp);
2031 if (TREE_CODE (TREE_TYPE (tmp)) == COMPLEX_TYPE
2032 || TREE_CODE (TREE_TYPE (tmp)) == VECTOR_TYPE)
2033 DECL_GIMPLE_REG_P (tmp) = 1;
2034 gsi_insert_after (gsi, s, GSI_SAME_STMT);
2035 }
2036 /* Look for things that can throw exceptions, and record them. */
2037 if (state->cur_region && stmt_could_throw_p (stmt))
2038 {
2039 record_stmt_eh_region (state->cur_region, stmt);
2040 note_eh_region_may_contain_throw (state->cur_region);
2041 }
2042 break;
2043
2044 case GIMPLE_COND:
2045 case GIMPLE_GOTO:
2046 case GIMPLE_RETURN:
2047 maybe_record_in_goto_queue (state, stmt);
2048 break;
2049
2050 case GIMPLE_SWITCH:
2051 verify_norecord_switch_expr (state, stmt);
2052 break;
2053
2054 case GIMPLE_TRY:
2055 if (gimple_try_kind (stmt) == GIMPLE_TRY_FINALLY)
2056 replace = lower_try_finally (state, stmt);
2057 else
2058 {
2059 x = gimple_seq_first_stmt (gimple_try_cleanup (stmt));
2060 if (!x)
2061 {
2062 replace = gimple_try_eval (stmt);
2063 lower_eh_constructs_1 (state, &replace);
2064 }
2065 else
2066 switch (gimple_code (x))
2067 {
2068 case GIMPLE_CATCH:
2069 replace = lower_catch (state, stmt);
2070 break;
2071 case GIMPLE_EH_FILTER:
2072 replace = lower_eh_filter (state, stmt);
2073 break;
2074 case GIMPLE_EH_MUST_NOT_THROW:
2075 replace = lower_eh_must_not_throw (state, stmt);
2076 break;
2077 case GIMPLE_EH_ELSE:
2078 /* This code is only valid with GIMPLE_TRY_FINALLY. */
2079 gcc_unreachable ();
2080 default:
2081 replace = lower_cleanup (state, stmt);
2082 break;
2083 }
2084 }
2085
2086 /* Remove the old stmt and insert the transformed sequence
2087 instead. */
2088 gsi_insert_seq_before (gsi, replace, GSI_SAME_STMT);
2089 gsi_remove (gsi, true);
2090
2091 /* Return since we don't want gsi_next (). */
2092 return;
2093
2094 case GIMPLE_EH_ELSE:
2095 /* We should be eliminating this in lower_try_finally et al. */
2096 gcc_unreachable ();
2097
2098 default:
2099 /* A type, a decl, or some kind of statement that we're not
2100 interested in. Don't walk them. */
2101 break;
2102 }
2103
2104 gsi_next (gsi);
2105 }
2106
2107 /* A helper to unwrap a gimple_seq and feed stmts to lower_eh_constructs_2. */
2108
2109 static void
2110 lower_eh_constructs_1 (struct leh_state *state, gimple_seq *pseq)
2111 {
2112 gimple_stmt_iterator gsi;
2113 for (gsi = gsi_start (*pseq); !gsi_end_p (gsi);)
2114 lower_eh_constructs_2 (state, &gsi);
2115 }
2116
2117 static unsigned int
2118 lower_eh_constructs (void)
2119 {
2120 struct leh_state null_state;
2121 gimple_seq bodyp;
2122
2123 bodyp = gimple_body (current_function_decl);
2124 if (bodyp == NULL)
2125 return 0;
2126
2127 finally_tree.create (31);
2128 eh_region_may_contain_throw_map = BITMAP_ALLOC (NULL);
2129 memset (&null_state, 0, sizeof (null_state));
2130
2131 collect_finally_tree_1 (bodyp, NULL);
2132 lower_eh_constructs_1 (&null_state, &bodyp);
2133 gimple_set_body (current_function_decl, bodyp);
2134
2135 /* We assume there's a return statement, or something, at the end of
2136 the function, and thus plopping the EH sequence afterward won't
2137 change anything. */
2138 gcc_assert (!gimple_seq_may_fallthru (bodyp));
2139 gimple_seq_add_seq (&bodyp, eh_seq);
2140
2141 /* We assume that since BODYP already existed, adding EH_SEQ to it
2142 didn't change its value, and we don't have to re-set the function. */
2143 gcc_assert (bodyp == gimple_body (current_function_decl));
2144
2145 finally_tree.dispose ();
2146 BITMAP_FREE (eh_region_may_contain_throw_map);
2147 eh_seq = NULL;
2148
2149 /* If this function needs a language specific EH personality routine
2150 and the frontend didn't already set one, do so now. */
2151 if (function_needs_eh_personality (cfun) == eh_personality_lang
2152 && !DECL_FUNCTION_PERSONALITY (current_function_decl))
2153 DECL_FUNCTION_PERSONALITY (current_function_decl)
2154 = lang_hooks.eh_personality ();
2155
2156 return 0;
2157 }
2158
2159 namespace {
2160
2161 const pass_data pass_data_lower_eh =
2162 {
2163 GIMPLE_PASS, /* type */
2164 "eh", /* name */
2165 OPTGROUP_NONE, /* optinfo_flags */
2166 false, /* has_gate */
2167 true, /* has_execute */
2168 TV_TREE_EH, /* tv_id */
2169 PROP_gimple_lcf, /* properties_required */
2170 PROP_gimple_leh, /* properties_provided */
2171 0, /* properties_destroyed */
2172 0, /* todo_flags_start */
2173 0, /* todo_flags_finish */
2174 };
2175
2176 class pass_lower_eh : public gimple_opt_pass
2177 {
2178 public:
2179 pass_lower_eh (gcc::context *ctxt)
2180 : gimple_opt_pass (pass_data_lower_eh, ctxt)
2181 {}
2182
2183 /* opt_pass methods: */
2184 unsigned int execute () { return lower_eh_constructs (); }
2185
2186 }; // class pass_lower_eh
2187
2188 } // anon namespace
2189
2190 gimple_opt_pass *
2191 make_pass_lower_eh (gcc::context *ctxt)
2192 {
2193 return new pass_lower_eh (ctxt);
2194 }
2195 \f
2196 /* Create the multiple edges from an EH_DISPATCH statement to all of
2197 the possible handlers for its EH region. Return true if there's
2198 no fallthru edge; false if there is. */
2199
2200 bool
2201 make_eh_dispatch_edges (gimple stmt)
2202 {
2203 eh_region r;
2204 eh_catch c;
2205 basic_block src, dst;
2206
2207 r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
2208 src = gimple_bb (stmt);
2209
2210 switch (r->type)
2211 {
2212 case ERT_TRY:
2213 for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
2214 {
2215 dst = label_to_block (c->label);
2216 make_edge (src, dst, 0);
2217
2218 /* A catch-all handler doesn't have a fallthru. */
2219 if (c->type_list == NULL)
2220 return false;
2221 }
2222 break;
2223
2224 case ERT_ALLOWED_EXCEPTIONS:
2225 dst = label_to_block (r->u.allowed.label);
2226 make_edge (src, dst, 0);
2227 break;
2228
2229 default:
2230 gcc_unreachable ();
2231 }
2232
2233 return true;
2234 }
2235
2236 /* Create the single EH edge from STMT to its nearest landing pad,
2237 if there is such a landing pad within the current function. */
2238
2239 void
2240 make_eh_edges (gimple stmt)
2241 {
2242 basic_block src, dst;
2243 eh_landing_pad lp;
2244 int lp_nr;
2245
2246 lp_nr = lookup_stmt_eh_lp (stmt);
2247 if (lp_nr <= 0)
2248 return;
2249
2250 lp = get_eh_landing_pad_from_number (lp_nr);
2251 gcc_assert (lp != NULL);
2252
2253 src = gimple_bb (stmt);
2254 dst = label_to_block (lp->post_landing_pad);
2255 make_edge (src, dst, EDGE_EH);
2256 }
2257
2258 /* Do the work in redirecting EDGE_IN to NEW_BB within the EH region tree;
2259 do not actually perform the final edge redirection.
2260
2261 CHANGE_REGION is true when we're being called from cleanup_empty_eh and
2262 we intend to change the destination EH region as well; this means
2263 EH_LANDING_PAD_NR must already be set on the destination block label.
2264 If false, we're being called from generic cfg manipulation code and we
2265 should preserve our place within the region tree. */
2266
2267 static void
2268 redirect_eh_edge_1 (edge edge_in, basic_block new_bb, bool change_region)
2269 {
2270 eh_landing_pad old_lp, new_lp;
2271 basic_block old_bb;
2272 gimple throw_stmt;
2273 int old_lp_nr, new_lp_nr;
2274 tree old_label, new_label;
2275 edge_iterator ei;
2276 edge e;
2277
2278 old_bb = edge_in->dest;
2279 old_label = gimple_block_label (old_bb);
2280 old_lp_nr = EH_LANDING_PAD_NR (old_label);
2281 gcc_assert (old_lp_nr > 0);
2282 old_lp = get_eh_landing_pad_from_number (old_lp_nr);
2283
2284 throw_stmt = last_stmt (edge_in->src);
2285 gcc_assert (lookup_stmt_eh_lp (throw_stmt) == old_lp_nr);
2286
2287 new_label = gimple_block_label (new_bb);
2288
2289 /* Look for an existing region that might be using NEW_BB already. */
2290 new_lp_nr = EH_LANDING_PAD_NR (new_label);
2291 if (new_lp_nr)
2292 {
2293 new_lp = get_eh_landing_pad_from_number (new_lp_nr);
2294 gcc_assert (new_lp);
2295
2296 /* Unless CHANGE_REGION is true, the new and old landing pad
2297 had better be associated with the same EH region. */
2298 gcc_assert (change_region || new_lp->region == old_lp->region);
2299 }
2300 else
2301 {
2302 new_lp = NULL;
2303 gcc_assert (!change_region);
2304 }
2305
2306 /* Notice when we redirect the last EH edge away from OLD_BB. */
2307 FOR_EACH_EDGE (e, ei, old_bb->preds)
2308 if (e != edge_in && (e->flags & EDGE_EH))
2309 break;
2310
2311 if (new_lp)
2312 {
2313 /* NEW_LP already exists. If there are still edges into OLD_LP,
2314 there's nothing to do with the EH tree. If there are no more
2315 edges into OLD_LP, then we want to remove OLD_LP as it is unused.
2316 If CHANGE_REGION is true, then our caller is expecting to remove
2317 the landing pad. */
2318 if (e == NULL && !change_region)
2319 remove_eh_landing_pad (old_lp);
2320 }
2321 else
2322 {
2323 /* No correct landing pad exists. If there are no more edges
2324 into OLD_LP, then we can simply re-use the existing landing pad.
2325 Otherwise, we have to create a new landing pad. */
2326 if (e == NULL)
2327 {
2328 EH_LANDING_PAD_NR (old_lp->post_landing_pad) = 0;
2329 new_lp = old_lp;
2330 }
2331 else
2332 new_lp = gen_eh_landing_pad (old_lp->region);
2333 new_lp->post_landing_pad = new_label;
2334 EH_LANDING_PAD_NR (new_label) = new_lp->index;
2335 }
2336
2337 /* Maybe move the throwing statement to the new region. */
2338 if (old_lp != new_lp)
2339 {
2340 remove_stmt_from_eh_lp (throw_stmt);
2341 add_stmt_to_eh_lp (throw_stmt, new_lp->index);
2342 }
2343 }
2344
2345 /* Redirect EH edge EDGE_IN to NEW_BB. */
2346
2347 edge
2348 redirect_eh_edge (edge edge_in, basic_block new_bb)
2349 {
2350 redirect_eh_edge_1 (edge_in, new_bb, false);
2351 return ssa_redirect_edge (edge_in, new_bb);
2352 }
2353
2354 /* This is a subroutine of gimple_redirect_edge_and_branch. Update the
2355 labels for redirecting a non-fallthru EH_DISPATCH edge E to NEW_BB.
2356 The actual edge update will happen in the caller. */
2357
2358 void
2359 redirect_eh_dispatch_edge (gimple stmt, edge e, basic_block new_bb)
2360 {
2361 tree new_lab = gimple_block_label (new_bb);
2362 bool any_changed = false;
2363 basic_block old_bb;
2364 eh_region r;
2365 eh_catch c;
2366
2367 r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
2368 switch (r->type)
2369 {
2370 case ERT_TRY:
2371 for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
2372 {
2373 old_bb = label_to_block (c->label);
2374 if (old_bb == e->dest)
2375 {
2376 c->label = new_lab;
2377 any_changed = true;
2378 }
2379 }
2380 break;
2381
2382 case ERT_ALLOWED_EXCEPTIONS:
2383 old_bb = label_to_block (r->u.allowed.label);
2384 gcc_assert (old_bb == e->dest);
2385 r->u.allowed.label = new_lab;
2386 any_changed = true;
2387 break;
2388
2389 default:
2390 gcc_unreachable ();
2391 }
2392
2393 gcc_assert (any_changed);
2394 }
2395 \f
2396 /* Helper function for operation_could_trap_p and stmt_could_throw_p. */
2397
2398 bool
2399 operation_could_trap_helper_p (enum tree_code op,
2400 bool fp_operation,
2401 bool honor_trapv,
2402 bool honor_nans,
2403 bool honor_snans,
2404 tree divisor,
2405 bool *handled)
2406 {
2407 *handled = true;
2408 switch (op)
2409 {
2410 case TRUNC_DIV_EXPR:
2411 case CEIL_DIV_EXPR:
2412 case FLOOR_DIV_EXPR:
2413 case ROUND_DIV_EXPR:
2414 case EXACT_DIV_EXPR:
2415 case CEIL_MOD_EXPR:
2416 case FLOOR_MOD_EXPR:
2417 case ROUND_MOD_EXPR:
2418 case TRUNC_MOD_EXPR:
2419 case RDIV_EXPR:
2420 if (honor_snans || honor_trapv)
2421 return true;
2422 if (fp_operation)
2423 return flag_trapping_math;
2424 if (!TREE_CONSTANT (divisor) || integer_zerop (divisor))
2425 return true;
2426 return false;
2427
2428 case LT_EXPR:
2429 case LE_EXPR:
2430 case GT_EXPR:
2431 case GE_EXPR:
2432 case LTGT_EXPR:
2433 /* Some floating point comparisons may trap. */
2434 return honor_nans;
2435
2436 case EQ_EXPR:
2437 case NE_EXPR:
2438 case UNORDERED_EXPR:
2439 case ORDERED_EXPR:
2440 case UNLT_EXPR:
2441 case UNLE_EXPR:
2442 case UNGT_EXPR:
2443 case UNGE_EXPR:
2444 case UNEQ_EXPR:
2445 return honor_snans;
2446
2447 case CONVERT_EXPR:
2448 case FIX_TRUNC_EXPR:
2449 /* Conversion of floating point might trap. */
2450 return honor_nans;
2451
2452 case NEGATE_EXPR:
2453 case ABS_EXPR:
2454 case CONJ_EXPR:
2455 /* These operations don't trap with floating point. */
2456 if (honor_trapv)
2457 return true;
2458 return false;
2459
2460 case PLUS_EXPR:
2461 case MINUS_EXPR:
2462 case MULT_EXPR:
2463 /* Any floating arithmetic may trap. */
2464 if (fp_operation && flag_trapping_math)
2465 return true;
2466 if (honor_trapv)
2467 return true;
2468 return false;
2469
2470 case COMPLEX_EXPR:
2471 case CONSTRUCTOR:
2472 /* Constructing an object cannot trap. */
2473 return false;
2474
2475 default:
2476 /* Any floating arithmetic may trap. */
2477 if (fp_operation && flag_trapping_math)
2478 return true;
2479
2480 *handled = false;
2481 return false;
2482 }
2483 }
2484
2485 /* Return true if operation OP may trap. FP_OPERATION is true if OP is applied
2486 on floating-point values. HONOR_TRAPV is true if OP is applied on integer
2487 type operands that may trap. If OP is a division operator, DIVISOR contains
2488 the value of the divisor. */
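
/* Illustrative calls (a sketch; the answers shown assume the default
   -ftrapping-math and no -ftrapv, and "ssa_div" stands for some
   non-constant divisor):

     operation_could_trap_p (TRUNC_DIV_EXPR, false, false, ssa_div)
       -> true, since the divisor may be zero at runtime;
     operation_could_trap_p (PLUS_EXPR, true, false, NULL_TREE)
       -> true, as FP addition may trap under -ftrapping-math;
     operation_could_trap_p (PLUS_EXPR, false, false, NULL_TREE)
       -> false for plain integer addition.  */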
2489
2490 bool
2491 operation_could_trap_p (enum tree_code op, bool fp_operation, bool honor_trapv,
2492 tree divisor)
2493 {
2494 bool honor_nans = (fp_operation && flag_trapping_math
2495 && !flag_finite_math_only);
2496 bool honor_snans = fp_operation && flag_signaling_nans != 0;
2497 bool handled;
2498
2499 if (TREE_CODE_CLASS (op) != tcc_comparison
2500 && TREE_CODE_CLASS (op) != tcc_unary
2501 && TREE_CODE_CLASS (op) != tcc_binary)
2502 return false;
2503
2504 return operation_could_trap_helper_p (op, fp_operation, honor_trapv,
2505 honor_nans, honor_snans, divisor,
2506 &handled);
2507 }
2508
2509 /* Return true if EXPR can trap, as in dereferencing an invalid pointer
2510 location or floating point arithmetic. Cf. the RTL version, may_trap_p.
2511 This routine expects only GIMPLE lhs or rhs input. */
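
/* For instance (informally): *p traps unless TREE_THIS_NOTRAP is set
   on the dereference; a[i] traps when the index cannot be proven in
   bounds; and a COMPONENT_REF merely forwards the question to its
   base, which is why the switch below peels such wrappers and loops
   via the restart label.  */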
2512
2513 bool
2514 tree_could_trap_p (tree expr)
2515 {
2516 enum tree_code code;
2517 bool fp_operation = false;
2518 bool honor_trapv = false;
2519 tree t, base, div = NULL_TREE;
2520
2521 if (!expr)
2522 return false;
2523
2524 code = TREE_CODE (expr);
2525 t = TREE_TYPE (expr);
2526
2527 if (t)
2528 {
2529 if (COMPARISON_CLASS_P (expr))
2530 fp_operation = FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 0)));
2531 else
2532 fp_operation = FLOAT_TYPE_P (t);
2533 honor_trapv = INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t);
2534 }
2535
2536 if (TREE_CODE_CLASS (code) == tcc_binary)
2537 div = TREE_OPERAND (expr, 1);
2538 if (operation_could_trap_p (code, fp_operation, honor_trapv, div))
2539 return true;
2540
2541 restart:
2542 switch (code)
2543 {
2544 case TARGET_MEM_REF:
2545 if (TREE_CODE (TMR_BASE (expr)) == ADDR_EXPR
2546 && !TMR_INDEX (expr) && !TMR_INDEX2 (expr))
2547 return false;
2548 return !TREE_THIS_NOTRAP (expr);
2549
2550 case COMPONENT_REF:
2551 case REALPART_EXPR:
2552 case IMAGPART_EXPR:
2553 case BIT_FIELD_REF:
2554 case VIEW_CONVERT_EXPR:
2555 case WITH_SIZE_EXPR:
2556 expr = TREE_OPERAND (expr, 0);
2557 code = TREE_CODE (expr);
2558 goto restart;
2559
2560 case ARRAY_RANGE_REF:
2561 base = TREE_OPERAND (expr, 0);
2562 if (tree_could_trap_p (base))
2563 return true;
2564 if (TREE_THIS_NOTRAP (expr))
2565 return false;
2566 return !range_in_array_bounds_p (expr);
2567
2568 case ARRAY_REF:
2569 base = TREE_OPERAND (expr, 0);
2570 if (tree_could_trap_p (base))
2571 return true;
2572 if (TREE_THIS_NOTRAP (expr))
2573 return false;
2574 return !in_array_bounds_p (expr);
2575
2576 case MEM_REF:
2577 if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR)
2578 return false;
2579 /* Fallthru. */
2580 case INDIRECT_REF:
2581 return !TREE_THIS_NOTRAP (expr);
2582
2583 case ASM_EXPR:
2584 return TREE_THIS_VOLATILE (expr);
2585
2586 case CALL_EXPR:
2587 t = get_callee_fndecl (expr);
2588 /* Assume that calls to weak functions may trap. */
2589 if (!t || !DECL_P (t))
2590 return true;
2591 if (DECL_WEAK (t))
2592 return tree_could_trap_p (t);
2593 return false;
2594
2595 case FUNCTION_DECL:
2596 /* Assume that accesses to weak functions may trap, unless we know
2597 they are certainly defined in the current TU or in some other
2598 LTO partition. */
2599 if (DECL_WEAK (expr) && !DECL_COMDAT (expr))
2600 {
2601 struct cgraph_node *node;
2602 if (!DECL_EXTERNAL (expr))
2603 return false;
2604 node = cgraph_function_node (cgraph_get_node (expr), NULL);
2605 if (node && node->in_other_partition)
2606 return false;
2607 return true;
2608 }
2609 return false;
2610
2611 case VAR_DECL:
2612 /* Assume that accesses to weak vars may trap, unless we know
2613 they are certainly defined in the current TU or in some other
2614 LTO partition. */
2615 if (DECL_WEAK (expr) && !DECL_COMDAT (expr))
2616 {
2617 struct varpool_node *node;
2618 if (!DECL_EXTERNAL (expr))
2619 return false;
2620 node = varpool_variable_node (varpool_get_node (expr), NULL);
2621 if (node && node->in_other_partition)
2622 return false;
2623 return true;
2624 }
2625 return false;
2626
2627 default:
2628 return false;
2629 }
2630 }
2631
2632
2633 /* Helper for stmt_could_throw_p. Return true if STMT (assumed to be
2634 an assignment or a conditional) may throw. */
2635
2636 static bool
2637 stmt_could_throw_1_p (gimple stmt)
2638 {
2639 enum tree_code code = gimple_expr_code (stmt);
2640 bool honor_nans = false;
2641 bool honor_snans = false;
2642 bool fp_operation = false;
2643 bool honor_trapv = false;
2644 tree t;
2645 size_t i;
2646 bool handled, ret;
2647
2648 if (TREE_CODE_CLASS (code) == tcc_comparison
2649 || TREE_CODE_CLASS (code) == tcc_unary
2650 || TREE_CODE_CLASS (code) == tcc_binary)
2651 {
2652 if (is_gimple_assign (stmt)
2653 && TREE_CODE_CLASS (code) == tcc_comparison)
2654 t = TREE_TYPE (gimple_assign_rhs1 (stmt));
2655 else if (gimple_code (stmt) == GIMPLE_COND)
2656 t = TREE_TYPE (gimple_cond_lhs (stmt));
2657 else
2658 t = gimple_expr_type (stmt);
2659 fp_operation = FLOAT_TYPE_P (t);
2660 if (fp_operation)
2661 {
2662 honor_nans = flag_trapping_math && !flag_finite_math_only;
2663 honor_snans = flag_signaling_nans != 0;
2664 }
2665 else if (INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t))
2666 honor_trapv = true;
2667 }
2668
2669 /* Check if the main expression may trap. */
2670 t = is_gimple_assign (stmt) ? gimple_assign_rhs2 (stmt) : NULL;
2671 ret = operation_could_trap_helper_p (code, fp_operation, honor_trapv,
2672 honor_nans, honor_snans, t,
2673 &handled);
2674 if (handled)
2675 return ret;
2676
2677 /* If the expression does not trap, see if any of the individual operands may
2678 trap. */
2679 for (i = 0; i < gimple_num_ops (stmt); i++)
2680 if (tree_could_trap_p (gimple_op (stmt, i)))
2681 return true;
2682
2683 return false;
2684 }
2685
2686
2687 /* Return true if statement STMT could throw an exception. */
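
/* Note the asymmetry below: a call may throw whenever it is not
   marked nothrow, whereas assignments, conditionals and volatile
   asms can only throw under -fnon-call-exceptions (for example, a
   load through a pointer that faults).  */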
2688
2689 bool
2690 stmt_could_throw_p (gimple stmt)
2691 {
2692 if (!flag_exceptions)
2693 return false;
2694
2695 /* The only statements that can throw an exception are assignments,
2696 conditionals, calls, resx, and asms. */
2697 switch (gimple_code (stmt))
2698 {
2699 case GIMPLE_RESX:
2700 return true;
2701
2702 case GIMPLE_CALL:
2703 return !gimple_call_nothrow_p (stmt);
2704
2705 case GIMPLE_ASSIGN:
2706 case GIMPLE_COND:
2707 if (!cfun->can_throw_non_call_exceptions)
2708 return false;
2709 return stmt_could_throw_1_p (stmt);
2710
2711 case GIMPLE_ASM:
2712 if (!cfun->can_throw_non_call_exceptions)
2713 return false;
2714 return gimple_asm_volatile_p (stmt);
2715
2716 default:
2717 return false;
2718 }
2719 }
2720
2721
2722 /* Return true if expression T could throw an exception. */
2723
2724 bool
2725 tree_could_throw_p (tree t)
2726 {
2727 if (!flag_exceptions)
2728 return false;
2729 if (TREE_CODE (t) == MODIFY_EXPR)
2730 {
2731 if (cfun->can_throw_non_call_exceptions
2732 && tree_could_trap_p (TREE_OPERAND (t, 0)))
2733 return true;
2734 t = TREE_OPERAND (t, 1);
2735 }
2736
2737 if (TREE_CODE (t) == WITH_SIZE_EXPR)
2738 t = TREE_OPERAND (t, 0);
2739 if (TREE_CODE (t) == CALL_EXPR)
2740 return (call_expr_flags (t) & ECF_NOTHROW) == 0;
2741 if (cfun->can_throw_non_call_exceptions)
2742 return tree_could_trap_p (t);
2743 return false;
2744 }
2745
2746 /* Return true if STMT can throw an exception that is not caught within
2747 the current function (CFUN). */
2748
2749 bool
2750 stmt_can_throw_external (gimple stmt)
2751 {
2752 int lp_nr;
2753
2754 if (!stmt_could_throw_p (stmt))
2755 return false;
2756
2757 lp_nr = lookup_stmt_eh_lp (stmt);
2758 return lp_nr == 0;
2759 }
2760
2761 /* Return true if STMT can throw an exception that is caught within
2762 the current function (CFUN). */
2763
2764 bool
2765 stmt_can_throw_internal (gimple stmt)
2766 {
2767 int lp_nr;
2768
2769 if (!stmt_could_throw_p (stmt))
2770 return false;
2771
2772 lp_nr = lookup_stmt_eh_lp (stmt);
2773 return lp_nr > 0;
2774 }
2775
2776 /* Given a statement STMT in IFUN, if STMT can no longer throw, then
2777 remove any entry it might have from the EH table. Return true if
2778 any change was made. */
2779
2780 bool
2781 maybe_clean_eh_stmt_fn (struct function *ifun, gimple stmt)
2782 {
2783 if (stmt_could_throw_p (stmt))
2784 return false;
2785 return remove_stmt_from_eh_lp_fn (ifun, stmt);
2786 }
2787
2788 /* Likewise, but always use the current function. */
2789
2790 bool
2791 maybe_clean_eh_stmt (gimple stmt)
2792 {
2793 return maybe_clean_eh_stmt_fn (cfun, stmt);
2794 }
2795
2796 /* Given a statement OLD_STMT and a new statement NEW_STMT that has replaced
2797 OLD_STMT in the function, remove OLD_STMT from the EH table and put NEW_STMT
2798 in the table if it should be in there. Return TRUE if a replacement was
2799 done that may require an EH edge purge. */
2800
2801 bool
2802 maybe_clean_or_replace_eh_stmt (gimple old_stmt, gimple new_stmt)
2803 {
2804 int lp_nr = lookup_stmt_eh_lp (old_stmt);
2805
2806 if (lp_nr != 0)
2807 {
2808 bool new_stmt_could_throw = stmt_could_throw_p (new_stmt);
2809
2810 if (new_stmt == old_stmt && new_stmt_could_throw)
2811 return false;
2812
2813 remove_stmt_from_eh_lp (old_stmt);
2814 if (new_stmt_could_throw)
2815 {
2816 add_stmt_to_eh_lp (new_stmt, lp_nr);
2817 return false;
2818 }
2819 else
2820 return true;
2821 }
2822
2823 return false;
2824 }
2825
2826 /* Given a statement OLD_STMT in OLD_FUN and a duplicate statement NEW_STMT
2827 in NEW_FUN, copy the EH table data from OLD_STMT to NEW_STMT. The MAP
2828 operand is the return value of duplicate_eh_regions. */
2829
2830 bool
2831 maybe_duplicate_eh_stmt_fn (struct function *new_fun, gimple new_stmt,
2832 struct function *old_fun, gimple old_stmt,
2833 struct pointer_map_t *map, int default_lp_nr)
2834 {
2835 int old_lp_nr, new_lp_nr;
2836 void **slot;
2837
2838 if (!stmt_could_throw_p (new_stmt))
2839 return false;
2840
2841 old_lp_nr = lookup_stmt_eh_lp_fn (old_fun, old_stmt);
2842 if (old_lp_nr == 0)
2843 {
2844 if (default_lp_nr == 0)
2845 return false;
2846 new_lp_nr = default_lp_nr;
2847 }
2848 else if (old_lp_nr > 0)
2849 {
2850 eh_landing_pad old_lp, new_lp;
2851
2852 old_lp = (*old_fun->eh->lp_array)[old_lp_nr];
2853 slot = pointer_map_contains (map, old_lp);
2854 new_lp = (eh_landing_pad) *slot;
2855 new_lp_nr = new_lp->index;
2856 }
2857 else
2858 {
2859 eh_region old_r, new_r;
2860
2861 old_r = (*old_fun->eh->region_array)[-old_lp_nr];
2862 slot = pointer_map_contains (map, old_r);
2863 new_r = (eh_region) *slot;
2864 new_lp_nr = -new_r->index;
2865 }
2866
2867 add_stmt_to_eh_lp_fn (new_fun, new_stmt, new_lp_nr);
2868 return true;
2869 }
2870
2871 /* Similar, but both OLD_STMT and NEW_STMT are within the current function,
2872 and thus no remapping is required. */
2873
2874 bool
2875 maybe_duplicate_eh_stmt (gimple new_stmt, gimple old_stmt)
2876 {
2877 int lp_nr;
2878
2879 if (!stmt_could_throw_p (new_stmt))
2880 return false;
2881
2882 lp_nr = lookup_stmt_eh_lp (old_stmt);
2883 if (lp_nr == 0)
2884 return false;
2885
2886 add_stmt_to_eh_lp (new_stmt, lp_nr);
2887 return true;
2888 }
2889 \f
2890 /* Returns TRUE if ONEH and TWOH are exception handlers (gimple_try_cleanup of
2891 GIMPLE_TRY) that are similar enough to be considered the same. Currently
2892 this only handles handlers consisting of a single call, as that's the
2893 important case for C++: a destructor call for a particular object showing
2894 up in multiple handlers. */
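
/* E.g. (schematically) two handler sequences that each consist of the
   single call

     A::~A (&a);

   compare equal here: both are calls to the same fndecl, with no LHS,
   no static chain, and operand_equal_p arguments, so one copy of the
   handler can be shared.  */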
2895
2896 static bool
2897 same_handler_p (gimple_seq oneh, gimple_seq twoh)
2898 {
2899 gimple_stmt_iterator gsi;
2900 gimple ones, twos;
2901 unsigned int ai;
2902
2903 gsi = gsi_start (oneh);
2904 if (!gsi_one_before_end_p (gsi))
2905 return false;
2906 ones = gsi_stmt (gsi);
2907
2908 gsi = gsi_start (twoh);
2909 if (!gsi_one_before_end_p (gsi))
2910 return false;
2911 twos = gsi_stmt (gsi);
2912
2913 if (!is_gimple_call (ones)
2914 || !is_gimple_call (twos)
2915 || gimple_call_lhs (ones)
2916 || gimple_call_lhs (twos)
2917 || gimple_call_chain (ones)
2918 || gimple_call_chain (twos)
2919 || !gimple_call_same_target_p (ones, twos)
2920 || gimple_call_num_args (ones) != gimple_call_num_args (twos))
2921 return false;
2922
2923 for (ai = 0; ai < gimple_call_num_args (ones); ++ai)
2924 if (!operand_equal_p (gimple_call_arg (ones, ai),
2925 gimple_call_arg (twos, ai), 0))
2926 return false;
2927
2928 return true;
2929 }
2930
2931 /* Optimize
2932 try { A() } finally { try { ~B() } catch { ~A() } }
2933 try { ... } finally { ~A() }
2934 into
2935 try { A() } catch { ~B() }
2936 try { ~B() ... } finally { ~A() }
2937
2938 This occurs frequently in C++, where A is a local variable and B is a
2939 temporary used in the initializer for A. */
2940
2941 static void
2942 optimize_double_finally (gimple one, gimple two)
2943 {
2944 gimple oneh;
2945 gimple_stmt_iterator gsi;
2946 gimple_seq cleanup;
2947
2948 cleanup = gimple_try_cleanup (one);
2949 gsi = gsi_start (cleanup);
2950 if (!gsi_one_before_end_p (gsi))
2951 return;
2952
2953 oneh = gsi_stmt (gsi);
2954 if (gimple_code (oneh) != GIMPLE_TRY
2955 || gimple_try_kind (oneh) != GIMPLE_TRY_CATCH)
2956 return;
2957
2958 if (same_handler_p (gimple_try_cleanup (oneh), gimple_try_cleanup (two)))
2959 {
2960 gimple_seq seq = gimple_try_eval (oneh);
2961
2962 gimple_try_set_cleanup (one, seq);
2963 gimple_try_set_kind (one, GIMPLE_TRY_CATCH);
2964 seq = copy_gimple_seq_and_replace_locals (seq);
2965 gimple_seq_add_seq (&seq, gimple_try_eval (two));
2966 gimple_try_set_eval (two, seq);
2967 }
2968 }
2969
2970 /* Perform EH refactoring optimizations that are simpler to do when code
2971 flow has been lowered but EH structures haven't. */
2972
2973 static void
2974 refactor_eh_r (gimple_seq seq)
2975 {
2976 gimple_stmt_iterator gsi;
2977 gimple one, two;
2978
2979 one = NULL;
2980 two = NULL;
2981 gsi = gsi_start (seq);
2982 while (1)
2983 {
2984 one = two;
2985 if (gsi_end_p (gsi))
2986 two = NULL;
2987 else
2988 two = gsi_stmt (gsi);
2989 if (one
2990 && two
2991 && gimple_code (one) == GIMPLE_TRY
2992 && gimple_code (two) == GIMPLE_TRY
2993 && gimple_try_kind (one) == GIMPLE_TRY_FINALLY
2994 && gimple_try_kind (two) == GIMPLE_TRY_FINALLY)
2995 optimize_double_finally (one, two);
2996 if (one)
2997 switch (gimple_code (one))
2998 {
2999 case GIMPLE_TRY:
3000 refactor_eh_r (gimple_try_eval (one));
3001 refactor_eh_r (gimple_try_cleanup (one));
3002 break;
3003 case GIMPLE_CATCH:
3004 refactor_eh_r (gimple_catch_handler (one));
3005 break;
3006 case GIMPLE_EH_FILTER:
3007 refactor_eh_r (gimple_eh_filter_failure (one));
3008 break;
3009 case GIMPLE_EH_ELSE:
3010 refactor_eh_r (gimple_eh_else_n_body (one));
3011 refactor_eh_r (gimple_eh_else_e_body (one));
3012 break;
3013 default:
3014 break;
3015 }
3016 if (two)
3017 gsi_next (&gsi);
3018 else
3019 break;
3020 }
3021 }
3022
3023 static unsigned
3024 refactor_eh (void)
3025 {
3026 refactor_eh_r (gimple_body (current_function_decl));
3027 return 0;
3028 }
3029
3030 static bool
3031 gate_refactor_eh (void)
3032 {
3033 return flag_exceptions != 0;
3034 }
3035
3036 namespace {
3037
3038 const pass_data pass_data_refactor_eh =
3039 {
3040 GIMPLE_PASS, /* type */
3041 "ehopt", /* name */
3042 OPTGROUP_NONE, /* optinfo_flags */
3043 true, /* has_gate */
3044 true, /* has_execute */
3045 TV_TREE_EH, /* tv_id */
3046 PROP_gimple_lcf, /* properties_required */
3047 0, /* properties_provided */
3048 0, /* properties_destroyed */
3049 0, /* todo_flags_start */
3050 0, /* todo_flags_finish */
3051 };
3052
3053 class pass_refactor_eh : public gimple_opt_pass
3054 {
3055 public:
3056 pass_refactor_eh (gcc::context *ctxt)
3057 : gimple_opt_pass (pass_data_refactor_eh, ctxt)
3058 {}
3059
3060 /* opt_pass methods: */
3061 bool gate () { return gate_refactor_eh (); }
3062 unsigned int execute () { return refactor_eh (); }
3063
3064 }; // class pass_refactor_eh
3065
3066 } // anon namespace
3067
3068 gimple_opt_pass *
3069 make_pass_refactor_eh (gcc::context *ctxt)
3070 {
3071 return new pass_refactor_eh (ctxt);
3072 }
3073 \f
3074 /* At the end of gimple optimization, we can lower RESX. */
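
/* Roughly (a sketch, not the exact emitted GIMPLE), "resx N" becomes
   one of:

     __builtin_trap ();                  <- region N no longer exists
     <failure_decl> ();                  <- resuming into MUST_NOT_THROW
     __builtin_eh_copy_values (D, N);    <- local destination region D,
                                            then fall through past its
                                            landing pad
     _Unwind_Resume (__builtin_eh_pointer (N));
                                         <- the exception escapes this
                                            function

   as implemented case by case below.  */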
3075
3076 static bool
3077 lower_resx (basic_block bb, gimple stmt, struct pointer_map_t *mnt_map)
3078 {
3079 int lp_nr;
3080 eh_region src_r, dst_r;
3081 gimple_stmt_iterator gsi;
3082 gimple x;
3083 tree fn, src_nr;
3084 bool ret = false;
3085
3086 lp_nr = lookup_stmt_eh_lp (stmt);
3087 if (lp_nr != 0)
3088 dst_r = get_eh_region_from_lp_number (lp_nr);
3089 else
3090 dst_r = NULL;
3091
3092 src_r = get_eh_region_from_number (gimple_resx_region (stmt));
3093 gsi = gsi_last_bb (bb);
3094
3095 if (src_r == NULL)
3096 {
3097 /* We can wind up with no source region when pass_cleanup_eh shows
3098 that there are no entries into an eh region and deletes it, but
3099 then the block that contains the resx isn't removed. This can
3100 happen without optimization when the switch statement created by
3101 lower_try_finally_switch isn't simplified to remove the eh case.
3102
3103 Resolve this by expanding the resx node to a trap. */
3104
3105 fn = builtin_decl_implicit (BUILT_IN_TRAP);
3106 x = gimple_build_call (fn, 0);
3107 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3108
3109 while (EDGE_COUNT (bb->succs) > 0)
3110 remove_edge (EDGE_SUCC (bb, 0));
3111 }
3112 else if (dst_r)
3113 {
3114 /* When we have a destination region, we resolve this by copying
3115 the exception pointer and filter values into place, and changing
3116 the edge to immediately after the landing pad. */
3117 edge e;
3118
3119 if (lp_nr < 0)
3120 {
3121 basic_block new_bb;
3122 void **slot;
3123 tree lab;
3124
3125 /* We are resuming into a MUST_NOT_THROW region. Expand a call to
3126 the failure decl into a new block, if needed. */
3127 gcc_assert (dst_r->type == ERT_MUST_NOT_THROW);
3128
3129 slot = pointer_map_contains (mnt_map, dst_r);
3130 if (slot == NULL)
3131 {
3132 gimple_stmt_iterator gsi2;
3133
3134 new_bb = create_empty_bb (bb);
3135 if (current_loops)
3136 add_bb_to_loop (new_bb, bb->loop_father);
3137 lab = gimple_block_label (new_bb);
3138 gsi2 = gsi_start_bb (new_bb);
3139
3140 fn = dst_r->u.must_not_throw.failure_decl;
3141 x = gimple_build_call (fn, 0);
3142 gimple_set_location (x, dst_r->u.must_not_throw.failure_loc);
3143 gsi_insert_after (&gsi2, x, GSI_CONTINUE_LINKING);
3144
3145 slot = pointer_map_insert (mnt_map, dst_r);
3146 *slot = lab;
3147 }
3148 else
3149 {
3150 lab = (tree) *slot;
3151 new_bb = label_to_block (lab);
3152 }
3153
3154 gcc_assert (EDGE_COUNT (bb->succs) == 0);
3155 e = make_edge (bb, new_bb, EDGE_FALLTHRU);
3156 e->count = bb->count;
3157 e->probability = REG_BR_PROB_BASE;
3158 }
3159 else
3160 {
3161 edge_iterator ei;
3162 tree dst_nr = build_int_cst (integer_type_node, dst_r->index);
3163
3164 fn = builtin_decl_implicit (BUILT_IN_EH_COPY_VALUES);
3165 src_nr = build_int_cst (integer_type_node, src_r->index);
3166 x = gimple_build_call (fn, 2, dst_nr, src_nr);
3167 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3168
3169 /* Update the flags for the outgoing edge. */
3170 e = single_succ_edge (bb);
3171 gcc_assert (e->flags & EDGE_EH);
3172 e->flags = (e->flags & ~EDGE_EH) | EDGE_FALLTHRU;
3173
3174 /* If there are no more EH users of the landing pad, delete it. */
3175 FOR_EACH_EDGE (e, ei, e->dest->preds)
3176 if (e->flags & EDGE_EH)
3177 break;
3178 if (e == NULL)
3179 {
3180 eh_landing_pad lp = get_eh_landing_pad_from_number (lp_nr);
3181 remove_eh_landing_pad (lp);
3182 }
3183 }
3184
3185 ret = true;
3186 }
3187 else
3188 {
3189 tree var;
3190
3191 /* When we don't have a destination region, this exception escapes
3192 up the call chain. We resolve this by generating a call to the
3193 _Unwind_Resume library function. */
3194
3195 /* The ARM EABI redefines _Unwind_Resume as __cxa_end_cleanup
3196 with no arguments for C++ and Java. Check for that. */
3197 if (src_r->use_cxa_end_cleanup)
3198 {
3199 fn = builtin_decl_implicit (BUILT_IN_CXA_END_CLEANUP);
3200 x = gimple_build_call (fn, 0);
3201 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3202 }
3203 else
3204 {
3205 fn = builtin_decl_implicit (BUILT_IN_EH_POINTER);
3206 src_nr = build_int_cst (integer_type_node, src_r->index);
3207 x = gimple_build_call (fn, 1, src_nr);
3208 var = create_tmp_var (ptr_type_node, NULL);
3209 var = make_ssa_name (var, x);
3210 gimple_call_set_lhs (x, var);
3211 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3212
3213 fn = builtin_decl_implicit (BUILT_IN_UNWIND_RESUME);
3214 x = gimple_build_call (fn, 1, var);
3215 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3216 }
3217
3218 gcc_assert (EDGE_COUNT (bb->succs) == 0);
3219 }
3220
3221 gsi_remove (&gsi, true);
3222
3223 return ret;
3224 }
3225
3226 static unsigned
3227 execute_lower_resx (void)
3228 {
3229 basic_block bb;
3230 struct pointer_map_t *mnt_map;
3231 bool dominance_invalidated = false;
3232 bool any_rewritten = false;
3233
3234 mnt_map = pointer_map_create ();
3235
3236 FOR_EACH_BB (bb)
3237 {
3238 gimple last = last_stmt (bb);
3239 if (last && is_gimple_resx (last))
3240 {
3241 dominance_invalidated |= lower_resx (bb, last, mnt_map);
3242 any_rewritten = true;
3243 }
3244 }
3245
3246 pointer_map_destroy (mnt_map);
3247
3248 if (dominance_invalidated)
3249 {
3250 free_dominance_info (CDI_DOMINATORS);
3251 free_dominance_info (CDI_POST_DOMINATORS);
3252 }
3253
3254 return any_rewritten ? TODO_update_ssa_only_virtuals : 0;
3255 }
3256
3257 static bool
3258 gate_lower_resx (void)
3259 {
3260 return flag_exceptions != 0;
3261 }
3262
3263 namespace {
3264
3265 const pass_data pass_data_lower_resx =
3266 {
3267 GIMPLE_PASS, /* type */
3268 "resx", /* name */
3269 OPTGROUP_NONE, /* optinfo_flags */
3270 true, /* has_gate */
3271 true, /* has_execute */
3272 TV_TREE_EH, /* tv_id */
3273 PROP_gimple_lcf, /* properties_required */
3274 0, /* properties_provided */
3275 0, /* properties_destroyed */
3276 0, /* todo_flags_start */
3277 TODO_verify_flow, /* todo_flags_finish */
3278 };
3279
3280 class pass_lower_resx : public gimple_opt_pass
3281 {
3282 public:
3283 pass_lower_resx (gcc::context *ctxt)
3284 : gimple_opt_pass (pass_data_lower_resx, ctxt)
3285 {}
3286
3287 /* opt_pass methods: */
3288 bool gate () { return gate_lower_resx (); }
3289 unsigned int execute () { return execute_lower_resx (); }
3290
3291 }; // class pass_lower_resx
3292
3293 } // anon namespace
3294
3295 gimple_opt_pass *
3296 make_pass_lower_resx (gcc::context *ctxt)
3297 {
3298 return new pass_lower_resx (ctxt);
3299 }
3300
3301 /* Try to optimize var = {v} {CLOBBER} stmts that are followed only
3302 by an external throw. */
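
/* Schematically, in a block like

     <L>: a = {v} {CLOBBER};
          b = {v} {CLOBBER};
          resx 1;                <- throws out of the function

   the clobbers are dead on the externally-escaping path and are
   simply removed; debug stmts, labels and a single
   __builtin_stack_restore may sit in between without blocking
   this.  */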
3303
3304 static void
3305 optimize_clobbers (basic_block bb)
3306 {
3307 gimple_stmt_iterator gsi = gsi_last_bb (bb);
3308 bool any_clobbers = false;
3309 bool seen_stack_restore = false;
3310 edge_iterator ei;
3311 edge e;
3312
3313 /* Only optimize anything if the bb contains at least one clobber,
3314 ends with resx (checked by caller), optionally contains some
3315 debug stmts or labels and at most one __builtin_stack_restore
3316 call, and has an incoming EH edge. */
3317 for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3318 {
3319 gimple stmt = gsi_stmt (gsi);
3320 if (is_gimple_debug (stmt))
3321 continue;
3322 if (gimple_clobber_p (stmt))
3323 {
3324 any_clobbers = true;
3325 continue;
3326 }
3327 if (!seen_stack_restore
3328 && gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
3329 {
3330 seen_stack_restore = true;
3331 continue;
3332 }
3333 if (gimple_code (stmt) == GIMPLE_LABEL)
3334 break;
3335 return;
3336 }
3337 if (!any_clobbers)
3338 return;
3339 FOR_EACH_EDGE (e, ei, bb->preds)
3340 if (e->flags & EDGE_EH)
3341 break;
3342 if (e == NULL)
3343 return;
3344 gsi = gsi_last_bb (bb);
3345 for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3346 {
3347 gimple stmt = gsi_stmt (gsi);
3348 if (!gimple_clobber_p (stmt))
3349 continue;
3350 unlink_stmt_vdef (stmt);
3351 gsi_remove (&gsi, true);
3352 release_defs (stmt);
3353 }
3354 }
3355
3356 /* Try to sink var = {v} {CLOBBER} stmts that are followed only by an
3357 internal throw into the successor BB. */
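
/* Schematically (an informal sketch), for

     bb A: a = {v} {CLOBBER};
           resx 1;              <- single EH edge to bb B
     bb B: <post-landing-pad label>

   the clobber is moved to just after the labels of bb B, with the
   virtual operands rewired through bb B's virtual PHI when one
   exists.  */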
3358
3359 static int
3360 sink_clobbers (basic_block bb)
3361 {
3362 edge e;
3363 edge_iterator ei;
3364 gimple_stmt_iterator gsi, dgsi;
3365 basic_block succbb;
3366 bool any_clobbers = false;
3367 unsigned todo = 0;
3368
3369 /* Only optimize if BB has a single EH successor and
3370 all predecessor edges are EH too. */
3371 if (!single_succ_p (bb)
3372 || (single_succ_edge (bb)->flags & EDGE_EH) == 0)
3373 return 0;
3374
3375 FOR_EACH_EDGE (e, ei, bb->preds)
3376 {
3377 if ((e->flags & EDGE_EH) == 0)
3378 return 0;
3379 }
3380
3381 /* And BB contains only CLOBBER stmts before the final
3382 RESX. */
3383 gsi = gsi_last_bb (bb);
3384 for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3385 {
3386 gimple stmt = gsi_stmt (gsi);
3387 if (is_gimple_debug (stmt))
3388 continue;
3389 if (gimple_code (stmt) == GIMPLE_LABEL)
3390 break;
3391 if (!gimple_clobber_p (stmt))
3392 return 0;
3393 any_clobbers = true;
3394 }
3395 if (!any_clobbers)
3396 return 0;
3397
3398 edge succe = single_succ_edge (bb);
3399 succbb = succe->dest;
3400
3401 /* See if there is a virtual PHI node to take an updated virtual
3402 operand from. */
3403 gimple vphi = NULL;
3404 tree vuse = NULL_TREE;
3405 for (gsi = gsi_start_phis (succbb); !gsi_end_p (gsi); gsi_next (&gsi))
3406 {
3407 tree res = gimple_phi_result (gsi_stmt (gsi));
3408 if (virtual_operand_p (res))
3409 {
3410 vphi = gsi_stmt (gsi);
3411 vuse = res;
3412 break;
3413 }
3414 }
3415
3416 dgsi = gsi_after_labels (succbb);
3417 gsi = gsi_last_bb (bb);
3418 for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3419 {
3420 gimple stmt = gsi_stmt (gsi);
3421 tree lhs;
3422 if (is_gimple_debug (stmt))
3423 continue;
3424 if (gimple_code (stmt) == GIMPLE_LABEL)
3425 break;
3426 lhs = gimple_assign_lhs (stmt);
3427 /* Unfortunately we don't have dominance info updated at this
3428 point, so checking if
3429 dominated_by_p (CDI_DOMINATORS, succbb,
3430 gimple_bb (SSA_NAME_DEF_STMT (TREE_OPERAND (lhs, 0))))
3431 would be too costly. Thus, avoid sinking any clobbers that
3432 refer to non-(D) SSA_NAMEs. */
3433 if (TREE_CODE (lhs) == MEM_REF
3434 && TREE_CODE (TREE_OPERAND (lhs, 0)) == SSA_NAME
3435 && !SSA_NAME_IS_DEFAULT_DEF (TREE_OPERAND (lhs, 0)))
3436 {
3437 unlink_stmt_vdef (stmt);
3438 gsi_remove (&gsi, true);
3439 release_defs (stmt);
3440 continue;
3441 }
3442
3443 /* As we do not change stmt order when sinking across a
3444 forwarder edge, we can keep virtual operands in place. */
3445 gsi_remove (&gsi, false);
3446 gsi_insert_before (&dgsi, stmt, GSI_NEW_STMT);
3447
3448 /* But adjust virtual operands if we sunk across a PHI node. */
3449 if (vuse)
3450 {
3451 gimple use_stmt;
3452 imm_use_iterator iter;
3453 use_operand_p use_p;
3454 FOR_EACH_IMM_USE_STMT (use_stmt, iter, vuse)
3455 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
3456 SET_USE (use_p, gimple_vdef (stmt));
3457 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (vuse))
3458 {
3459 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_vdef (stmt)) = 1;
3460 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (vuse) = 0;
3461 }
3462 /* Adjust the incoming virtual operand. */
3463 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (vphi, succe), gimple_vuse (stmt));
3464 SET_USE (gimple_vuse_op (stmt), vuse);
3465 }
3466 /* If the successor has multiple predecessors but no virtual PHI
3467 node, arrange for virtual operands to be renamed. */
3468 else if (gimple_vuse_op (stmt) != NULL_USE_OPERAND_P
3469 && !single_pred_p (succbb))
3470 {
3471 /* In this case there will be no use of the VDEF of this stmt.
3472 ??? Unless this is a secondary opportunity and we have not
3473 removed unreachable blocks yet, so we cannot assert this.
3474 Which also means we will end up renaming too many times. */
3475 SET_USE (gimple_vuse_op (stmt), gimple_vop (cfun));
3476 mark_virtual_operands_for_renaming (cfun);
3477 todo |= TODO_update_ssa_only_virtuals;
3478 }
3479 }
3480
3481 return todo;
3482 }
3483
3484 /* At the end of inlining, we can lower EH_DISPATCH. Return true when
3485 we have found some duplicate labels and removed some edges. */
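
/* Schematically (filter values come from assign_filter_values; the
   numbers here are invented), for an ERT_TRY region

     eh_dispatch 1;    with catches  { E1 -> L1, E2 -> L2 }

   we emit roughly

     filter = __builtin_eh_filter (1);
     switch (filter)
       { case 1: goto L1; case 2: goto L2; default: goto fallthru; }

   while an ERT_ALLOWED_EXCEPTIONS region yields a two-way COND
   comparing the filter against the region's allowed value.  */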
3486
3487 static bool
3488 lower_eh_dispatch (basic_block src, gimple stmt)
3489 {
3490 gimple_stmt_iterator gsi;
3491 int region_nr;
3492 eh_region r;
3493 tree filter, fn;
3494 gimple x;
3495 bool redirected = false;
3496
3497 region_nr = gimple_eh_dispatch_region (stmt);
3498 r = get_eh_region_from_number (region_nr);
3499
3500 gsi = gsi_last_bb (src);
3501
3502 switch (r->type)
3503 {
3504 case ERT_TRY:
3505 {
3506 vec<tree> labels = vNULL;
3507 tree default_label = NULL;
3508 eh_catch c;
3509 edge_iterator ei;
3510 edge e;
3511 struct pointer_set_t *seen_values = pointer_set_create ();
3512
3513 /* Collect the labels for a switch. Zero the post_landing_pad
3514 field because we'll no longer have anything keeping these labels
3515 in existence and the optimizer will be free to merge these
3516 blocks at will. */
3517 for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
3518 {
3519 tree tp_node, flt_node, lab = c->label;
3520 bool have_label = false;
3521
3522 c->label = NULL;
3523 tp_node = c->type_list;
3524 flt_node = c->filter_list;
3525
3526 if (tp_node == NULL)
3527 {
3528 default_label = lab;
3529 break;
3530 }
3531 do
3532 {
3533 /* Filter out duplicate labels that arise when this handler
3534 is shadowed by an earlier one. When no labels are
3535 attached to the handler anymore, we remove
3536 the corresponding edge and then we delete unreachable
3537 blocks at the end of this pass. */
3538 if (! pointer_set_contains (seen_values, TREE_VALUE (flt_node)))
3539 {
3540 tree t = build_case_label (TREE_VALUE (flt_node),
3541 NULL, lab);
3542 labels.safe_push (t);
3543 pointer_set_insert (seen_values, TREE_VALUE (flt_node));
3544 have_label = true;
3545 }
3546
3547 tp_node = TREE_CHAIN (tp_node);
3548 flt_node = TREE_CHAIN (flt_node);
3549 }
3550 while (tp_node);
3551 if (! have_label)
3552 {
3553 remove_edge (find_edge (src, label_to_block (lab)));
3554 redirected = true;
3555 }
3556 }
3557
3558 /* Clean up the edge flags. */
3559 FOR_EACH_EDGE (e, ei, src->succs)
3560 {
3561 if (e->flags & EDGE_FALLTHRU)
3562 {
3563 /* If there was no catch-all, use the fallthru edge. */
3564 if (default_label == NULL)
3565 default_label = gimple_block_label (e->dest);
3566 e->flags &= ~EDGE_FALLTHRU;
3567 }
3568 }
3569 gcc_assert (default_label != NULL);
3570
3571 /* Don't generate a switch if there's only a default case.
3572 This is common in the form of try { A; } catch (...) { B; }. */
3573 if (!labels.exists ())
3574 {
3575 e = single_succ_edge (src);
3576 e->flags |= EDGE_FALLTHRU;
3577 }
3578 else
3579 {
3580 fn = builtin_decl_implicit (BUILT_IN_EH_FILTER);
3581 x = gimple_build_call (fn, 1, build_int_cst (integer_type_node,
3582 region_nr));
3583 filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)), NULL);
3584 filter = make_ssa_name (filter, x);
3585 gimple_call_set_lhs (x, filter);
3586 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3587
3588 /* Turn the default label into a default case. */
3589 default_label = build_case_label (NULL, NULL, default_label);
3590 sort_case_labels (labels);
3591
3592 x = gimple_build_switch (filter, default_label, labels);
3593 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3594
3595 labels.release ();
3596 }
3597 pointer_set_destroy (seen_values);
3598 }
3599 break;
3600
3601 case ERT_ALLOWED_EXCEPTIONS:
3602 {
3603 edge b_e = BRANCH_EDGE (src);
3604 edge f_e = FALLTHRU_EDGE (src);
3605
3606 fn = builtin_decl_implicit (BUILT_IN_EH_FILTER);
3607 x = gimple_build_call (fn, 1, build_int_cst (integer_type_node,
3608 region_nr));
3609 filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)), NULL);
3610 filter = make_ssa_name (filter, x);
3611 gimple_call_set_lhs (x, filter);
3612 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3613
3614 r->u.allowed.label = NULL;
3615 x = gimple_build_cond (EQ_EXPR, filter,
3616 build_int_cst (TREE_TYPE (filter),
3617 r->u.allowed.filter),
3618 NULL_TREE, NULL_TREE);
3619 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3620
3621 b_e->flags = b_e->flags | EDGE_TRUE_VALUE;
3622 f_e->flags = (f_e->flags & ~EDGE_FALLTHRU) | EDGE_FALSE_VALUE;
3623 }
3624 break;
3625
3626 default:
3627 gcc_unreachable ();
3628 }
3629
3630 /* Replace the EH_DISPATCH with the SWITCH or COND generated above. */
3631 gsi_remove (&gsi, true);
3632 return redirected;
3633 }
3634
3635 static unsigned
3636 execute_lower_eh_dispatch (void)
3637 {
3638 basic_block bb;
3639 int flags = 0;
3640 bool redirected = false;
3641
3642 assign_filter_values ();
3643
3644 FOR_EACH_BB (bb)
3645 {
3646 gimple last = last_stmt (bb);
3647 if (last == NULL)
3648 continue;
3649 if (gimple_code (last) == GIMPLE_EH_DISPATCH)
3650 {
3651 redirected |= lower_eh_dispatch (bb, last);
3652 flags |= TODO_update_ssa_only_virtuals;
3653 }
3654 else if (gimple_code (last) == GIMPLE_RESX)
3655 {
3656 if (stmt_can_throw_external (last))
3657 optimize_clobbers (bb);
3658 else
3659 flags |= sink_clobbers (bb);
3660 }
3661 }
3662
3663 if (redirected)
3664 delete_unreachable_blocks ();
3665 return flags;
3666 }
3667
3668 static bool
3669 gate_lower_eh_dispatch (void)
3670 {
3671 return cfun->eh->region_tree != NULL;
3672 }
3673
3674 namespace {
3675
3676 const pass_data pass_data_lower_eh_dispatch =
3677 {
3678 GIMPLE_PASS, /* type */
3679 "ehdisp", /* name */
3680 OPTGROUP_NONE, /* optinfo_flags */
3681 true, /* has_gate */
3682 true, /* has_execute */
3683 TV_TREE_EH, /* tv_id */
3684 PROP_gimple_lcf, /* properties_required */
3685 0, /* properties_provided */
3686 0, /* properties_destroyed */
3687 0, /* todo_flags_start */
3688 TODO_verify_flow, /* todo_flags_finish */
3689 };
3690
3691 class pass_lower_eh_dispatch : public gimple_opt_pass
3692 {
3693 public:
3694 pass_lower_eh_dispatch (gcc::context *ctxt)
3695 : gimple_opt_pass (pass_data_lower_eh_dispatch, ctxt)
3696 {}
3697
3698 /* opt_pass methods: */
3699 bool gate () { return gate_lower_eh_dispatch (); }
3700 unsigned int execute () { return execute_lower_eh_dispatch (); }
3701
3702 }; // class pass_lower_eh_dispatch
3703
3704 } // anon namespace
3705
3706 gimple_opt_pass *
3707 make_pass_lower_eh_dispatch (gcc::context *ctxt)
3708 {
3709 return new pass_lower_eh_dispatch (ctxt);
3710 }
3711 \f
3712 /* Walk statements, see what regions and, optionally, landing pads
3713 are really referenced.
3714
3715 Returns in R_REACHABLEP an sbitmap with bits set for reachable regions,
3716 and in LP_REACHABLEP an sbitmap with bits set for reachable landing pads.
3717
3718 Passing NULL for LP_REACHABLEP is valid; in this case only reachable
3719 regions are marked.
3720
3721 The caller is responsible for freeing the returned sbitmaps. */
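
/* A minimal usage sketch (mirroring remove_unreachable_handlers
   below):

     sbitmap r_reachable, lp_reachable;
     mark_reachable_handlers (&r_reachable, &lp_reachable);
     ... query with bitmap_bit_p (r_reachable, region->index) ...
     sbitmap_free (r_reachable);
     sbitmap_free (lp_reachable);  */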
3722
3723 static void
3724 mark_reachable_handlers (sbitmap *r_reachablep, sbitmap *lp_reachablep)
3725 {
3726 sbitmap r_reachable, lp_reachable;
3727 basic_block bb;
3728 bool mark_landing_pads = (lp_reachablep != NULL);
3729 gcc_checking_assert (r_reachablep != NULL);
3730
3731 r_reachable = sbitmap_alloc (cfun->eh->region_array->length ());
3732 bitmap_clear (r_reachable);
3733 *r_reachablep = r_reachable;
3734
3735 if (mark_landing_pads)
3736 {
3737 lp_reachable = sbitmap_alloc (cfun->eh->lp_array->length ());
3738 bitmap_clear (lp_reachable);
3739 *lp_reachablep = lp_reachable;
3740 }
3741 else
3742 lp_reachable = NULL;
3743
3744 FOR_EACH_BB (bb)
3745 {
3746 gimple_stmt_iterator gsi;
3747
3748 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3749 {
3750 gimple stmt = gsi_stmt (gsi);
3751
3752 if (mark_landing_pads)
3753 {
3754 int lp_nr = lookup_stmt_eh_lp (stmt);
3755
3756 /* Negative LP numbers are MUST_NOT_THROW regions which
3757 are not considered BB enders. */
3758 if (lp_nr < 0)
3759 bitmap_set_bit (r_reachable, -lp_nr);
3760
3761 /* Positive LP numbers are real landing pads, and BB enders. */
3762 else if (lp_nr > 0)
3763 {
3764 gcc_assert (gsi_one_before_end_p (gsi));
3765 eh_region region = get_eh_region_from_lp_number (lp_nr);
3766 bitmap_set_bit (r_reachable, region->index);
3767 bitmap_set_bit (lp_reachable, lp_nr);
3768 }
3769 }
3770
3771 /* Avoid removing regions referenced from RESX/EH_DISPATCH. */
3772 switch (gimple_code (stmt))
3773 {
3774 case GIMPLE_RESX:
3775 bitmap_set_bit (r_reachable, gimple_resx_region (stmt));
3776 break;
3777 case GIMPLE_EH_DISPATCH:
3778 bitmap_set_bit (r_reachable, gimple_eh_dispatch_region (stmt));
3779 break;
3780 default:
3781 break;
3782 }
3783 }
3784 }
3785 }
3786
3787 /* Remove unreachable handlers and unreachable landing pads. */
3788
3789 static void
3790 remove_unreachable_handlers (void)
3791 {
3792 sbitmap r_reachable, lp_reachable;
3793 eh_region region;
3794 eh_landing_pad lp;
3795 unsigned i;
3796
3797 mark_reachable_handlers (&r_reachable, &lp_reachable);
3798
3799 if (dump_file)
3800 {
3801 fprintf (dump_file, "Before removal of unreachable regions:\n");
3802 dump_eh_tree (dump_file, cfun);
3803 fprintf (dump_file, "Reachable regions: ");
3804 dump_bitmap_file (dump_file, r_reachable);
3805 fprintf (dump_file, "Reachable landing pads: ");
3806 dump_bitmap_file (dump_file, lp_reachable);
3807 }
3808
3809 if (dump_file)
3810 {
3811 FOR_EACH_VEC_SAFE_ELT (cfun->eh->region_array, i, region)
3812 if (region && !bitmap_bit_p (r_reachable, region->index))
3813 fprintf (dump_file,
3814 "Removing unreachable region %d\n",
3815 region->index);
3816 }
3817
3818 remove_unreachable_eh_regions (r_reachable);
3819
3820 FOR_EACH_VEC_SAFE_ELT (cfun->eh->lp_array, i, lp)
3821 if (lp && !bitmap_bit_p (lp_reachable, lp->index))
3822 {
3823 if (dump_file)
3824 fprintf (dump_file,
3825 "Removing unreachable landing pad %d\n",
3826 lp->index);
3827 remove_eh_landing_pad (lp);
3828 }
3829
3830 if (dump_file)
3831 {
3832 fprintf (dump_file, "\n\nAfter removal of unreachable regions:\n");
3833 dump_eh_tree (dump_file, cfun);
3834 fprintf (dump_file, "\n\n");
3835 }
3836
3837 sbitmap_free (r_reachable);
3838 sbitmap_free (lp_reachable);
3839
3840 #ifdef ENABLE_CHECKING
3841 verify_eh_tree (cfun);
3842 #endif
3843 }
3844
3845 /* Remove unreachable handlers if any landing pads have been removed after
3846 the last ehcleanup pass (due to gimple_purge_dead_eh_edges). */
3847
3848 void
3849 maybe_remove_unreachable_handlers (void)
3850 {
3851 eh_landing_pad lp;
3852 unsigned i;
3853
3854 if (cfun->eh == NULL)
3855 return;
3856
3857 FOR_EACH_VEC_SAFE_ELT (cfun->eh->lp_array, i, lp)
3858 if (lp && lp->post_landing_pad)
3859 {
3860 if (label_to_block (lp->post_landing_pad) == NULL)
3861 {
3862 remove_unreachable_handlers ();
3863 return;
3864 }
3865 }
3866 }
3867
3868 /* Remove regions that do not have landing pads. This assumes
3869 that remove_unreachable_handlers has already been run, and
3870 that we've just manipulated the landing pads since then.
3871
3872 Preserve regions with landing pads and regions that prevent
3873 exceptions from propagating further, even if these regions
3874 are not reachable. */
3875
3876 static void
3877 remove_unreachable_handlers_no_lp (void)
3878 {
3879 eh_region region;
3880 sbitmap r_reachable;
3881 unsigned i;
3882
3883 mark_reachable_handlers (&r_reachable, /*lp_reachablep=*/NULL);
3884
3885 FOR_EACH_VEC_SAFE_ELT (cfun->eh->region_array, i, region)
3886 {
3887 if (! region)
3888 continue;
3889
3890 if (region->landing_pads != NULL
3891 || region->type == ERT_MUST_NOT_THROW)
3892 bitmap_set_bit (r_reachable, region->index);
3893
3894 if (dump_file
3895 && !bitmap_bit_p (r_reachable, region->index))
3896 fprintf (dump_file,
3897 "Removing unreachable region %d\n",
3898 region->index);
3899 }
3900
3901 remove_unreachable_eh_regions (r_reachable);
3902
3903 sbitmap_free (r_reachable);
3904 }
3905
3906 /* Undo critical edge splitting on an EH landing pad. Earlier, we
3907    optimistically split all sorts of edges, including EH edges.  The
3908 optimization passes in between may not have needed them; if not,
3909 we should undo the split.
3910
3911 Recognize this case by having one EH edge incoming to the BB and
3912 one normal edge outgoing; BB should be empty apart from the
3913 post_landing_pad label.
3914
3915 Note that this is slightly different from the empty handler case
3916    handled by cleanup_empty_eh, in that the handler itself may still
3917    contain real code, but the landing pad has been separated from the
3918 handler. As such, cleanup_empty_eh relies on this transformation
3919 having been done first. */
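/* Pictorially (an illustrative sketch of the shape being undone):

	<throwing stmt>                 <throwing stmt>
	     | EH                            | EH
	BB: post_landing_pad     ==>    <handler code>
	     |  (empty)
	<handler code>  */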
3920
3921 static bool
3922 unsplit_eh (eh_landing_pad lp)
3923 {
3924 basic_block bb = label_to_block (lp->post_landing_pad);
3925 gimple_stmt_iterator gsi;
3926 edge e_in, e_out;
3927
3928 /* Quickly check the edge counts on BB for singularity. */
3929 if (!single_pred_p (bb) || !single_succ_p (bb))
3930 return false;
3931 e_in = single_pred_edge (bb);
3932 e_out = single_succ_edge (bb);
3933
3934 /* Input edge must be EH and output edge must be normal. */
3935 if ((e_in->flags & EDGE_EH) == 0 || (e_out->flags & EDGE_EH) != 0)
3936 return false;
3937
3938 /* The block must be empty except for the labels and debug insns. */
3939 gsi = gsi_after_labels (bb);
3940 if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
3941 gsi_next_nondebug (&gsi);
3942 if (!gsi_end_p (gsi))
3943 return false;
3944
3945 /* The destination block must not already have a landing pad
3946 for a different region. */
3947 for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
3948 {
3949 gimple stmt = gsi_stmt (gsi);
3950 tree lab;
3951 int lp_nr;
3952
3953 if (gimple_code (stmt) != GIMPLE_LABEL)
3954 break;
3955 lab = gimple_label_label (stmt);
3956 lp_nr = EH_LANDING_PAD_NR (lab);
3957 if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region)
3958 return false;
3959 }
3960
3961 /* The new destination block must not already be a destination of
3962    the source block, lest we merge fallthru and EH edges and get
3963    all sorts of confusion.  */
3964 if (find_edge (e_in->src, e_out->dest))
3965 return false;
3966
3967 /* ??? We can get degenerate phis due to cfg cleanups. I would have
3968 thought this should have been cleaned up by a phicprop pass, but
3969 that doesn't appear to handle virtuals. Propagate by hand. */
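  /* Since BB has a single predecessor, each PHI here is degenerate;
     e.g. (illustrative GIMPLE)
       .MEM_5 = PHI <.MEM_2(7)>
     so every use of the result can simply be replaced by the lone
     argument.  */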
3970 if (!gimple_seq_empty_p (phi_nodes (bb)))
3971 {
3972 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); )
3973 {
3974 gimple use_stmt, phi = gsi_stmt (gsi);
3975 tree lhs = gimple_phi_result (phi);
3976 tree rhs = gimple_phi_arg_def (phi, 0);
3977 use_operand_p use_p;
3978 imm_use_iterator iter;
3979
3980 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
3981 {
3982 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
3983 SET_USE (use_p, rhs);
3984 }
3985
3986 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
3987 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1;
3988
3989 remove_phi_node (&gsi, true);
3990 }
3991 }
3992
3993 if (dump_file && (dump_flags & TDF_DETAILS))
3994 fprintf (dump_file, "Unsplit EH landing pad %d to block %i.\n",
3995 lp->index, e_out->dest->index);
3996
3997 /* Redirect the edge. Since redirect_eh_edge_1 expects to be moving
3998 a successor edge, humor it. But do the real CFG change with the
3999 predecessor of E_OUT in order to preserve the ordering of arguments
4000 to the PHI nodes in E_OUT->DEST. */
4001 redirect_eh_edge_1 (e_in, e_out->dest, false);
4002 redirect_edge_pred (e_out, e_in->src);
4003 e_out->flags = e_in->flags;
4004 e_out->probability = e_in->probability;
4005 e_out->count = e_in->count;
4006 remove_edge (e_in);
4007
4008 return true;
4009 }
4010
4011 /* Examine each landing pad block and see if it matches unsplit_eh. */
4012
4013 static bool
4014 unsplit_all_eh (void)
4015 {
4016 bool changed = false;
4017 eh_landing_pad lp;
4018 int i;
4019
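  /* Landing pad number zero is never allocated, so start the scan at 1.  */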
4020 for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
4021 if (lp)
4022 changed |= unsplit_eh (lp);
4023
4024 return changed;
4025 }
4026
4027 /* A subroutine of cleanup_empty_eh. Redirect all EH edges incoming
4028 to OLD_BB to NEW_BB; return true on success, false on failure.
4029
4030 OLD_BB_OUT is the edge into NEW_BB from OLD_BB, so if we miss any
4031 PHI variables from OLD_BB we can pick them up from OLD_BB_OUT.
4032 Virtual PHIs may be deleted and marked for renaming. */
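/* For instance (an illustrative sketch): if NEW_BB contains
     x_4 = PHI <x_2(OLD_BB_OUT), x_3(other)>
   and OLD_BB contains
     x_2 = PHI <x_0(e1), x_1(e2)>
   then redirecting the EH edges e1 and e2 from OLD_BB to NEW_BB records
   x_0 and x_1 as the arguments of x_4 along those edges.  */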
4033
4034 static bool
4035 cleanup_empty_eh_merge_phis (basic_block new_bb, basic_block old_bb,
4036 edge old_bb_out, bool change_region)
4037 {
4038 gimple_stmt_iterator ngsi, ogsi;
4039 edge_iterator ei;
4040 edge e;
4041 bitmap ophi_handled;
4042
4043 /* The destination block must not be a regular successor for any
4044 of the preds of the landing pad. Thus, avoid turning
4045 <..>
4046 | \ EH
4047 | <..>
4048 | /
4049 <..>
4050 into
4051 <..>
4052 | | EH
4053 <..>
4054 which CFG verification would choke on. See PR45172 and PR51089. */
4055 FOR_EACH_EDGE (e, ei, old_bb->preds)
4056 if (find_edge (e->src, new_bb))
4057 return false;
4058
4059 FOR_EACH_EDGE (e, ei, old_bb->preds)
4060 redirect_edge_var_map_clear (e);
4061
4062 ophi_handled = BITMAP_ALLOC (NULL);
4063
4064 /* First, iterate through the PHIs on NEW_BB and set up the edge_var_map
4065 for the edges we're going to move. */
4066 for (ngsi = gsi_start_phis (new_bb); !gsi_end_p (ngsi); gsi_next (&ngsi))
4067 {
4068 gimple ophi, nphi = gsi_stmt (ngsi);
4069 tree nresult, nop;
4070
4071 nresult = gimple_phi_result (nphi);
4072 nop = gimple_phi_arg_def (nphi, old_bb_out->dest_idx);
4073
4074 /* Find the corresponding PHI in OLD_BB so we can forward-propagate
4075 the source ssa_name. */
4076 ophi = NULL;
4077 for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi))
4078 {
4079 ophi = gsi_stmt (ogsi);
4080 if (gimple_phi_result (ophi) == nop)
4081 break;
4082 ophi = NULL;
4083 }
4084
4085 /* If we did find the corresponding PHI, copy those inputs. */
4086 if (ophi)
4087 {
4088	  /* If NOP is used anywhere other than debug binds and the PHIs
	     in NEW_BB, give up.  */
4089 if (!has_single_use (nop))
4090 {
4091 imm_use_iterator imm_iter;
4092 use_operand_p use_p;
4093
4094 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, nop)
4095 {
4096 if (!gimple_debug_bind_p (USE_STMT (use_p))
4097 && (gimple_code (USE_STMT (use_p)) != GIMPLE_PHI
4098 || gimple_bb (USE_STMT (use_p)) != new_bb))
4099 goto fail;
4100 }
4101 }
4102 bitmap_set_bit (ophi_handled, SSA_NAME_VERSION (nop));
4103 FOR_EACH_EDGE (e, ei, old_bb->preds)
4104 {
4105 location_t oloc;
4106 tree oop;
4107
4108 if ((e->flags & EDGE_EH) == 0)
4109 continue;
4110 oop = gimple_phi_arg_def (ophi, e->dest_idx);
4111 oloc = gimple_phi_arg_location (ophi, e->dest_idx);
4112 redirect_edge_var_map_add (e, nresult, oop, oloc);
4113 }
4114 }
4115       /* If we didn't find the PHI, then whether it's a real variable or
4116	  a VOP, we know from the fact that OLD_BB is tree_empty_eh_handler_p
4117	  that the variable is unchanged from input to the block, and we can
4118	  simply re-use the input to NEW_BB from the OLD_BB_OUT edge.  */
4119 else
4120 {
4121 location_t nloc
4122 = gimple_phi_arg_location (nphi, old_bb_out->dest_idx);
4123 FOR_EACH_EDGE (e, ei, old_bb->preds)
4124 redirect_edge_var_map_add (e, nresult, nop, nloc);
4125 }
4126 }
4127
4128 /* Second, verify that all PHIs from OLD_BB have been handled. If not,
4129 we don't know what values from the other edges into NEW_BB to use. */
4130 for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi))
4131 {
4132 gimple ophi = gsi_stmt (ogsi);
4133 tree oresult = gimple_phi_result (ophi);
4134 if (!bitmap_bit_p (ophi_handled, SSA_NAME_VERSION (oresult)))
4135 goto fail;
4136 }
4137
4138 /* Finally, move the edges and update the PHIs. */
4139 for (ei = ei_start (old_bb->preds); (e = ei_safe_edge (ei)); )
4140 if (e->flags & EDGE_EH)
4141 {
4142	  /* ??? CFG manipulation routines do not try to update loop
4143	     form on edge redirection.  Do so manually here for now.  */
4144	  /* Redirecting a loop entry or latch edge will either create
4145	     a multiple-entry loop or rotate the loop.  If the loops merge,
4146	     we may end up with a loop that has multiple latches.
4147	     None of this is easily fixed, so cancel the affected loop
4148	     and mark the other loop as possibly having multiple latches.  */
4149 if (current_loops
4150 && e->dest == e->dest->loop_father->header)
4151 {
4152 e->dest->loop_father->header = NULL;
4153 e->dest->loop_father->latch = NULL;
4154 new_bb->loop_father->latch = NULL;
4155 loops_state_set (LOOPS_NEED_FIXUP|LOOPS_MAY_HAVE_MULTIPLE_LATCHES);
4156 }
4157 redirect_eh_edge_1 (e, new_bb, change_region);
4158 redirect_edge_succ (e, new_bb);
4159 flush_pending_stmts (e);
4160 }
4161 else
4162 ei_next (&ei);
4163
4164 BITMAP_FREE (ophi_handled);
4165 return true;
4166
4167 fail:
4168 FOR_EACH_EDGE (e, ei, old_bb->preds)
4169 redirect_edge_var_map_clear (e);
4170 BITMAP_FREE (ophi_handled);
4171 return false;
4172 }
4173
4174 /* A subroutine of cleanup_empty_eh. Move a landing pad LP from its
4175 old region to NEW_REGION at BB. */
4176
4177 static void
4178 cleanup_empty_eh_move_lp (basic_block bb, edge e_out,
4179 eh_landing_pad lp, eh_region new_region)
4180 {
4181 gimple_stmt_iterator gsi;
4182 eh_landing_pad *pp;
4183
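  /* Unlink LP from its current region's singly-linked list of landing
     pads, then push it onto the front of NEW_REGION's list.  */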
4184 for (pp = &lp->region->landing_pads; *pp != lp; pp = &(*pp)->next_lp)
4185 continue;
4186 *pp = lp->next_lp;
4187
4188 lp->region = new_region;
4189 lp->next_lp = new_region->landing_pads;
4190 new_region->landing_pads = lp;
4191
4192 /* Delete the RESX that was matched within the empty handler block. */
4193 gsi = gsi_last_bb (bb);
4194 unlink_stmt_vdef (gsi_stmt (gsi));
4195 gsi_remove (&gsi, true);
4196
4197 /* Clean up E_OUT for the fallthru. */
4198 e_out->flags = (e_out->flags & ~EDGE_EH) | EDGE_FALLTHRU;
4199 e_out->probability = REG_BR_PROB_BASE;
4200 }
4201
4202 /* A subroutine of cleanup_empty_eh. Handle more complex cases of
4203 unsplitting than unsplit_eh was prepared to handle, e.g. when
4204 multiple incoming edges and phis are involved. */
4205
4206 static bool
4207 cleanup_empty_eh_unsplit (basic_block bb, edge e_out, eh_landing_pad lp)
4208 {
4209 gimple_stmt_iterator gsi;
4210 tree lab;
4211
4212   /* We really ought not to have totally lost everything following
4213 a landing pad label. Given that BB is empty, there had better
4214 be a successor. */
4215 gcc_assert (e_out != NULL);
4216
4217 /* The destination block must not already have a landing pad
4218 for a different region. */
4219 lab = NULL;
4220 for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
4221 {
4222 gimple stmt = gsi_stmt (gsi);
4223 int lp_nr;
4224
4225 if (gimple_code (stmt) != GIMPLE_LABEL)
4226 break;
4227 lab = gimple_label_label (stmt);
4228 lp_nr = EH_LANDING_PAD_NR (lab);
4229 if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region)
4230 return false;
4231 }
4232
4233 /* Attempt to move the PHIs into the successor block. */
4234 if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, false))
4235 {
4236 if (dump_file && (dump_flags & TDF_DETAILS))
4237 fprintf (dump_file,
4238 "Unsplit EH landing pad %d to block %i "
4239 "(via cleanup_empty_eh).\n",
4240 lp->index, e_out->dest->index);
4241 return true;
4242 }
4243
4244 return false;
4245 }
4246
4247 /* Return true if edge E_FIRST is part of an empty infinite loop
4248 or leads to such a loop through a series of single successor
4249 empty bbs. */
4250
4251 static bool
4252 infinite_empty_loop_p (edge e_first)
4253 {
4254 bool inf_loop = false;
4255 edge e;
4256
4257 if (e_first->dest == e_first->src)
4258 return true;
4259
4260 e_first->src->aux = (void *) 1;
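  /* Follow the chain of empty single-successor blocks, marking each
     visited block via its aux field; reaching an already-marked block
     means the chain closes into a cycle.  The marks are cleared again
     below.  */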
4261 for (e = e_first; single_succ_p (e->dest); e = single_succ_edge (e->dest))
4262 {
4263 gimple_stmt_iterator gsi;
4264 if (e->dest->aux)
4265 {
4266 inf_loop = true;
4267 break;
4268 }
4269 e->dest->aux = (void *) 1;
4270 gsi = gsi_after_labels (e->dest);
4271 if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
4272 gsi_next_nondebug (&gsi);
4273 if (!gsi_end_p (gsi))
4274 break;
4275 }
4276 e_first->src->aux = NULL;
4277 for (e = e_first; e->dest->aux; e = single_succ_edge (e->dest))
4278 e->dest->aux = NULL;
4279
4280 return inf_loop;
4281 }
4282
4283 /* Examine the block associated with LP to determine if it's an empty
4284 handler for its EH region. If so, attempt to redirect EH edges to
4285    an outer region.  Return true if the CFG was updated in any way.  This
4286 is similar to jump forwarding, just across EH edges. */
4287
4288 static bool
4289 cleanup_empty_eh (eh_landing_pad lp)
4290 {
4291 basic_block bb = label_to_block (lp->post_landing_pad);
4292 gimple_stmt_iterator gsi;
4293 gimple resx;
4294 eh_region new_region;
4295 edge_iterator ei;
4296 edge e, e_out;
4297 bool has_non_eh_pred;
4298 bool ret = false;
4299 int new_lp_nr;
4300
4301   /* There can be zero or one edge out of BB.  This is the quickest test.  */
4302 switch (EDGE_COUNT (bb->succs))
4303 {
4304 case 0:
4305 e_out = NULL;
4306 break;
4307 case 1:
4308 e_out = single_succ_edge (bb);
4309 break;
4310 default:
4311 return false;
4312 }
4313
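  /* If the block ends in a RESX, first try to clean up any clobbers that
     precede it: clobbers before an externally-throwing RESX can be
     removed, and otherwise they may be sinkable into the successor
     (see optimize_clobbers and sink_clobbers earlier in this file).  */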
4314 resx = last_stmt (bb);
4315 if (resx && is_gimple_resx (resx))
4316 {
4317 if (stmt_can_throw_external (resx))
4318 optimize_clobbers (bb);
4319 else if (sink_clobbers (bb))
4320 ret = true;
4321 }
4322
4323 gsi = gsi_after_labels (bb);
4324
4325 /* Make sure to skip debug statements. */
4326 if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
4327 gsi_next_nondebug (&gsi);
4328
4329 /* If the block is totally empty, look for more unsplitting cases. */
4330 if (gsi_end_p (gsi))
4331 {
4332       /* For the degenerate case of an infinite loop, bail out.  */
4333 if (infinite_empty_loop_p (e_out))
4334 return ret;
4335
4336 return ret | cleanup_empty_eh_unsplit (bb, e_out, lp);
4337 }
4338
4339 /* The block should consist only of a single RESX statement, modulo a
4340 preceding call to __builtin_stack_restore if there is no outgoing
4341 edge, since the call can be eliminated in this case. */
4342 resx = gsi_stmt (gsi);
4343 if (!e_out && gimple_call_builtin_p (resx, BUILT_IN_STACK_RESTORE))
4344 {
4345 gsi_next (&gsi);
4346 resx = gsi_stmt (gsi);
4347 }
4348 if (!is_gimple_resx (resx))
4349 return ret;
4350 gcc_assert (gsi_one_before_end_p (gsi));
4351
4352 /* Determine if there are non-EH edges, or resx edges into the handler. */
4353 has_non_eh_pred = false;
4354 FOR_EACH_EDGE (e, ei, bb->preds)
4355 if (!(e->flags & EDGE_EH))
4356 has_non_eh_pred = true;
4357
4358   /* Find the handler enclosing the empty handler by looking at
4359      where the RESX instruction was vectored.  */
4360 new_lp_nr = lookup_stmt_eh_lp (resx);
4361 new_region = get_eh_region_from_lp_number (new_lp_nr);
4362
4363 /* If there's no destination region within the current function,
4364 redirection is trivial via removing the throwing statements from
4365 the EH region, removing the EH edges, and allowing the block
4366 to go unreachable. */
4367 if (new_region == NULL)
4368 {
4369 gcc_assert (e_out == NULL);
4370 for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
4371 if (e->flags & EDGE_EH)
4372 {
4373 gimple stmt = last_stmt (e->src);
4374 remove_stmt_from_eh_lp (stmt);
4375 remove_edge (e);
4376 }
4377 else
4378 ei_next (&ei);
4379 goto succeed;
4380 }
4381
4382 /* If the destination region is a MUST_NOT_THROW, allow the runtime
4383 to handle the abort and allow the blocks to go unreachable. */
4384 if (new_region->type == ERT_MUST_NOT_THROW)
4385 {
4386 for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
4387 if (e->flags & EDGE_EH)
4388 {
4389 gimple stmt = last_stmt (e->src);
4390 remove_stmt_from_eh_lp (stmt);
4391 add_stmt_to_eh_lp (stmt, new_lp_nr);
4392 remove_edge (e);
4393 }
4394 else
4395 ei_next (&ei);
4396 goto succeed;
4397 }
4398
4399 /* Try to redirect the EH edges and merge the PHIs into the destination
4400 landing pad block. If the merge succeeds, we'll already have redirected
4401 all the EH edges. The handler itself will go unreachable if there were
4402 no normal edges. */
4403 if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, true))
4404 goto succeed;
4405
4406 /* Finally, if all input edges are EH edges, then we can (potentially)
4407 reduce the number of transfers from the runtime by moving the landing
4408 pad from the original region to the new region. This is a win when
4409 we remove the last CLEANUP region along a particular exception
4410 propagation path. Since nothing changes except for the region with
4411 which the landing pad is associated, the PHI nodes do not need to be
4412 adjusted at all. */
4413 if (!has_non_eh_pred)
4414 {
4415 cleanup_empty_eh_move_lp (bb, e_out, lp, new_region);
4416 if (dump_file && (dump_flags & TDF_DETAILS))
4417 fprintf (dump_file, "Empty EH handler %i moved to EH region %i.\n",
4418 lp->index, new_region->index);
4419
4420 /* ??? The CFG didn't change, but we may have rendered the
4421 old EH region unreachable. Trigger a cleanup there. */
4422 return true;
4423 }
4424
4425 return ret;
4426
4427 succeed:
4428 if (dump_file && (dump_flags & TDF_DETAILS))
4429 fprintf (dump_file, "Empty EH handler %i removed.\n", lp->index);
4430 remove_eh_landing_pad (lp);
4431 return true;
4432 }
4433
4434 /* Walk over all landing pads in the function.  Examine each
4435    post_landing_pad block and see if we can eliminate it as empty.  */
4436
4437 static bool
4438 cleanup_all_empty_eh (void)
4439 {
4440 bool changed = false;
4441 eh_landing_pad lp;
4442 int i;
4443
4444 for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
4445 if (lp)
4446 changed |= cleanup_empty_eh (lp);
4447
4448 return changed;
4449 }
4450
4451 /* Perform cleanups and lowering of exception handling:
4452    1) Cleanup regions with handlers that do nothing are optimized out.
4453    2) MUST_NOT_THROW regions that became dead because of 1) are optimized out.
4454    3) Info about regions that contain instructions, and regions
4455       reachable via local EH edges, is collected.
4456    4) The EH tree is pruned of regions that are no longer necessary.
4457
4458 TODO: Push MUST_NOT_THROW regions to the root of the EH tree.
4459 Unify those that have the same failure decl and locus.
4460 */
4461
4462 static unsigned int
4463 execute_cleanup_eh_1 (void)
4464 {
4465 /* Do this first: unsplit_all_eh and cleanup_all_empty_eh can die
4466 looking up unreachable landing pads. */
4467 remove_unreachable_handlers ();
4468
4469   /* Watch out for the region tree vanishing because all of it was unreachable.  */
4470 if (cfun->eh->region_tree && optimize)
4471 {
4472 bool changed = false;
4473
4474 changed |= unsplit_all_eh ();
4475 changed |= cleanup_all_empty_eh ();
4476
4477 if (changed)
4478 {
4479 free_dominance_info (CDI_DOMINATORS);
4480 free_dominance_info (CDI_POST_DOMINATORS);
4481
4482 /* We delayed all basic block deletion, as we may have performed
4483 cleanups on EH edges while non-EH edges were still present. */
4484 delete_unreachable_blocks ();
4485
4486 /* We manipulated the landing pads. Remove any region that no
4487 longer has a landing pad. */
4488 remove_unreachable_handlers_no_lp ();
4489
4490 return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
4491 }
4492 }
4493
4494 return 0;
4495 }
4496
4497 static unsigned int
4498 execute_cleanup_eh (void)
4499 {
4500 int ret = execute_cleanup_eh_1 ();
4501
4502   /* If the function no longer needs an EH personality routine,
4503      clear it.  This exposes cross-language inlining opportunities
4504 and avoids references to a never defined personality routine. */
4505 if (DECL_FUNCTION_PERSONALITY (current_function_decl)
4506 && function_needs_eh_personality (cfun) != eh_personality_lang)
4507 DECL_FUNCTION_PERSONALITY (current_function_decl) = NULL_TREE;
4508
4509 return ret;
4510 }
4511
4512 static bool
4513 gate_cleanup_eh (void)
4514 {
4515 return cfun->eh != NULL && cfun->eh->region_tree != NULL;
4516 }
4517
4518 namespace {
4519
4520 const pass_data pass_data_cleanup_eh =
4521 {
4522 GIMPLE_PASS, /* type */
4523 "ehcleanup", /* name */
4524 OPTGROUP_NONE, /* optinfo_flags */
4525 true, /* has_gate */
4526 true, /* has_execute */
4527 TV_TREE_EH, /* tv_id */
4528 PROP_gimple_lcf, /* properties_required */
4529 0, /* properties_provided */
4530 0, /* properties_destroyed */
4531 0, /* todo_flags_start */
4532 TODO_verify_ssa, /* todo_flags_finish */
4533 };
4534
4535 class pass_cleanup_eh : public gimple_opt_pass
4536 {
4537 public:
4538 pass_cleanup_eh (gcc::context *ctxt)
4539 : gimple_opt_pass (pass_data_cleanup_eh, ctxt)
4540 {}
4541
4542 /* opt_pass methods: */
4543 opt_pass * clone () { return new pass_cleanup_eh (m_ctxt); }
4544 bool gate () { return gate_cleanup_eh (); }
4545 unsigned int execute () { return execute_cleanup_eh (); }
4546
4547 }; // class pass_cleanup_eh
4548
4549 } // anon namespace
4550
4551 gimple_opt_pass *
4552 make_pass_cleanup_eh (gcc::context *ctxt)
4553 {
4554 return new pass_cleanup_eh (ctxt);
4555 }
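/* Illustratively, the pass manager creates the pass through this factory;
   a passes.def entry such as

     NEXT_PASS (pass_cleanup_eh);

   expands to a call of make_pass_cleanup_eh.  */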
4556 \f
4557 /* Verify that the BB containing STMT as its last statement has
4558    precisely the edges that make_eh_edges would create.  */
4559
4560 DEBUG_FUNCTION bool
4561 verify_eh_edges (gimple stmt)
4562 {
4563 basic_block bb = gimple_bb (stmt);
4564 eh_landing_pad lp = NULL;
4565 int lp_nr;
4566 edge_iterator ei;
4567 edge e, eh_edge;
4568
4569 lp_nr = lookup_stmt_eh_lp (stmt);
4570 if (lp_nr > 0)
4571 lp = get_eh_landing_pad_from_number (lp_nr);
4572
4573 eh_edge = NULL;
4574 FOR_EACH_EDGE (e, ei, bb->succs)
4575 {
4576 if (e->flags & EDGE_EH)
4577 {
4578 if (eh_edge)
4579 {
4580 error ("BB %i has multiple EH edges", bb->index);
4581 return true;
4582 }
4583 else
4584 eh_edge = e;
4585 }
4586 }
4587
4588 if (lp == NULL)
4589 {
4590 if (eh_edge)
4591 {
4592 error ("BB %i can not throw but has an EH edge", bb->index);
4593 return true;
4594 }
4595 return false;
4596 }
4597
4598 if (!stmt_could_throw_p (stmt))
4599 {
4600 error ("BB %i last statement has incorrectly set lp", bb->index);
4601 return true;
4602 }
4603
4604 if (eh_edge == NULL)
4605 {
4606 error ("BB %i is missing an EH edge", bb->index);
4607 return true;
4608 }
4609
4610 if (eh_edge->dest != label_to_block (lp->post_landing_pad))
4611 {
4612 error ("Incorrect EH edge %i->%i", bb->index, eh_edge->dest->index);
4613 return true;
4614 }
4615
4616 return false;
4617 }
4618
4619 /* Similarly, but handle GIMPLE_EH_DISPATCH specifically. */
4620
4621 DEBUG_FUNCTION bool
4622 verify_eh_dispatch_edge (gimple stmt)
4623 {
4624 eh_region r;
4625 eh_catch c;
4626 basic_block src, dst;
4627 bool want_fallthru = true;
4628 edge_iterator ei;
4629 edge e, fall_edge;
4630
4631 r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
4632 src = gimple_bb (stmt);
4633
4634 FOR_EACH_EDGE (e, ei, src->succs)
4635 gcc_assert (e->aux == NULL);
4636
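  /* Mark each edge that make_eh_edges would have created by pointing its
     aux field at itself; the loop at the end then verifies that every
     successor edge is either marked or the single expected fallthru.  */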
4637 switch (r->type)
4638 {
4639 case ERT_TRY:
4640 for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
4641 {
4642 dst = label_to_block (c->label);
4643 e = find_edge (src, dst);
4644 if (e == NULL)
4645 {
4646 error ("BB %i is missing an edge", src->index);
4647 return true;
4648 }
4649 e->aux = (void *)e;
4650
4651 /* A catch-all handler doesn't have a fallthru. */
4652 if (c->type_list == NULL)
4653 {
4654 want_fallthru = false;
4655 break;
4656 }
4657 }
4658 break;
4659
4660 case ERT_ALLOWED_EXCEPTIONS:
4661 dst = label_to_block (r->u.allowed.label);
4662 e = find_edge (src, dst);
4663 if (e == NULL)
4664 {
4665 error ("BB %i is missing an edge", src->index);
4666 return true;
4667 }
4668 e->aux = (void *)e;
4669 break;
4670
4671 default:
4672 gcc_unreachable ();
4673 }
4674
4675 fall_edge = NULL;
4676 FOR_EACH_EDGE (e, ei, src->succs)
4677 {
4678 if (e->flags & EDGE_FALLTHRU)
4679 {
4680 if (fall_edge != NULL)
4681 {
4682 error ("BB %i too many fallthru edges", src->index);
4683 return true;
4684 }
4685 fall_edge = e;
4686 }
4687 else if (e->aux)
4688 e->aux = NULL;
4689 else
4690 {
4691 error ("BB %i has incorrect edge", src->index);
4692 return true;
4693 }
4694 }
4695 if ((fall_edge != NULL) ^ want_fallthru)
4696 {
4697 error ("BB %i has incorrect fallthru edge", src->index);
4698 return true;
4699 }
4700
4701 return false;
4702 }