re PR ada/62019 (gnat.dg/weak2.adb fails everywhere)
gcc/tree-eh.c
/* Exception handling semantics and decomposition for trees.
   Copyright (C) 2003-2014 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "hash-table.h"
#include "tm.h"
#include "tree.h"
#include "expr.h"
#include "calls.h"
#include "flags.h"
#include "function.h"
#include "except.h"
#include "hash-set.h"
#include "basic-block.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "tree-eh.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "cgraph.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-into-ssa.h"
#include "tree-ssa.h"
#include "tree-inline.h"
#include "tree-pass.h"
#include "langhooks.h"
#include "diagnostic-core.h"
#include "target.h"
#include "cfgloop.h"
#include "gimple-low.h"

/* In some instances a tree and a gimple need to be stored in the same
   table, i.e. in hash tables.  This is a structure to do this.  */
typedef union {tree *tp; tree t; gimple g;} treemple;

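/* An illustrative usage sketch, not part of the real interfaces: a
   treemple lets one hash table be keyed by either a tree or a gimple
   statement.

     treemple key;
     key.t = label_decl;   (when the child is a LABEL_DECL)
     key.g = try_stmt;     (when the child is a GIMPLE_TRY)

   Only one member is meaningful at a time; the surrounding code knows
   which from context.  */
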
/* Misc functions used in this file.  */

/* Remember and lookup EH landing pad data for arbitrary statements.
   Really this means any statement that could_throw_p.  We could
   stuff this information into the stmt_ann data structure, but:

   (1) We absolutely rely on this information being kept until
   we get to rtl.  Once we're done with lowering here, if we lose
   the information there's no way to recover it!

   (2) There are many more statements that *cannot* throw as
   compared to those that can.  We should be saving some amount
   of space by only allocating memory for those that can throw.  */

/* Add statement T in function IFUN to landing pad NUM.  */

static void
add_stmt_to_eh_lp_fn (struct function *ifun, gimple t, int num)
{
  gcc_assert (num != 0);

  if (!get_eh_throw_stmt_table (ifun))
    set_eh_throw_stmt_table (ifun, hash_map<gimple, int>::create_ggc (31));

  gcc_assert (!get_eh_throw_stmt_table (ifun)->put (t, num));
}

/* Add statement T in the current function (cfun) to EH landing pad NUM.  */

void
add_stmt_to_eh_lp (gimple t, int num)
{
  add_stmt_to_eh_lp_fn (cfun, t, num);
}

/* Add statement T to the single EH landing pad in REGION.  */

static void
record_stmt_eh_region (eh_region region, gimple t)
{
  if (region == NULL)
    return;
  if (region->type == ERT_MUST_NOT_THROW)
    add_stmt_to_eh_lp_fn (cfun, t, -region->index);
  else
    {
      eh_landing_pad lp = region->landing_pads;
      if (lp == NULL)
        lp = gen_eh_landing_pad (region);
      else
        gcc_assert (lp->next_lp == NULL);
      add_stmt_to_eh_lp_fn (cfun, t, lp->index);
    }
}


/* Remove statement T in function IFUN from its EH landing pad.  */

bool
remove_stmt_from_eh_lp_fn (struct function *ifun, gimple t)
{
  if (!get_eh_throw_stmt_table (ifun))
    return false;

  if (!get_eh_throw_stmt_table (ifun)->get (t))
    return false;

  get_eh_throw_stmt_table (ifun)->remove (t);
  return true;
}


/* Remove statement T in the current function (cfun) from its
   EH landing pad.  */

bool
remove_stmt_from_eh_lp (gimple t)
{
  return remove_stmt_from_eh_lp_fn (cfun, t);
}

/* Determine if statement T is inside an EH region in function IFUN.
   Positive numbers indicate a landing pad index; negative numbers
   indicate a MUST_NOT_THROW region index; zero indicates that the
   statement is not recorded in the region table.  */

int
lookup_stmt_eh_lp_fn (struct function *ifun, gimple t)
{
  if (ifun->eh->throw_stmt_table == NULL)
    return 0;

  int *lp_nr = ifun->eh->throw_stmt_table->get (t);
  return lp_nr ? *lp_nr : 0;
}

/* Likewise, but always use the current function.  */

int
lookup_stmt_eh_lp (gimple t)
{
  /* We can get called from initialized data when -fnon-call-exceptions
     is on; prevent crash.  */
  if (!cfun)
    return 0;
  return lookup_stmt_eh_lp_fn (cfun, t);
}

/* First pass of EH node decomposition.  Build up a tree of GIMPLE_TRY_FINALLY
   nodes and LABEL_DECL nodes.  We will use this during the second phase to
   determine if a goto leaves the body of a TRY_FINALLY_EXPR node.  */

struct finally_tree_node
{
  /* When storing a GIMPLE_TRY, we have to record a gimple.  However
     when deciding whether a GOTO to a certain LABEL_DECL (which is a
     tree) leaves the TRY block, it's necessary to record a tree in
     this field.  Thus a treemple is used.  */
  treemple child;
  gimple parent;
};

/* Hashtable helpers.  */

struct finally_tree_hasher : typed_free_remove <finally_tree_node>
{
  typedef finally_tree_node value_type;
  typedef finally_tree_node compare_type;
  static inline hashval_t hash (const value_type *);
  static inline bool equal (const value_type *, const compare_type *);
};

inline hashval_t
finally_tree_hasher::hash (const value_type *v)
{
  return (intptr_t)v->child.t >> 4;
}

inline bool
finally_tree_hasher::equal (const value_type *v, const compare_type *c)
{
  return v->child.t == c->child.t;
}

/* Note that this table is *not* marked GTY.  It is short-lived.  */
static hash_table<finally_tree_hasher> *finally_tree;

static void
record_in_finally_tree (treemple child, gimple parent)
{
  struct finally_tree_node *n;
  finally_tree_node **slot;

  n = XNEW (struct finally_tree_node);
  n->child = child;
  n->parent = parent;

  slot = finally_tree->find_slot (n, INSERT);
  gcc_assert (!*slot);
  *slot = n;
}

static void
collect_finally_tree (gimple stmt, gimple region);

/* Go through the gimple sequence.  Works with collect_finally_tree to
   record all GIMPLE_LABEL and GIMPLE_TRY statements.  */

static void
collect_finally_tree_1 (gimple_seq seq, gimple region)
{
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start (seq); !gsi_end_p (gsi); gsi_next (&gsi))
    collect_finally_tree (gsi_stmt (gsi), region);
}

static void
collect_finally_tree (gimple stmt, gimple region)
{
  treemple temp;

  switch (gimple_code (stmt))
    {
    case GIMPLE_LABEL:
      temp.t = gimple_label_label (stmt);
      record_in_finally_tree (temp, region);
      break;

    case GIMPLE_TRY:
      if (gimple_try_kind (stmt) == GIMPLE_TRY_FINALLY)
        {
          temp.g = stmt;
          record_in_finally_tree (temp, region);
          collect_finally_tree_1 (gimple_try_eval (stmt), stmt);
          collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
        }
      else if (gimple_try_kind (stmt) == GIMPLE_TRY_CATCH)
        {
          collect_finally_tree_1 (gimple_try_eval (stmt), region);
          collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
        }
      break;

    case GIMPLE_CATCH:
      collect_finally_tree_1 (gimple_catch_handler (stmt), region);
      break;

    case GIMPLE_EH_FILTER:
      collect_finally_tree_1 (gimple_eh_filter_failure (stmt), region);
      break;

    case GIMPLE_EH_ELSE:
      collect_finally_tree_1 (gimple_eh_else_n_body (stmt), region);
      collect_finally_tree_1 (gimple_eh_else_e_body (stmt), region);
      break;

    default:
      /* A type, a decl, or some kind of statement that we're not
         interested in.  Don't walk them.  */
      break;
    }
}


/* Use the finally tree to determine if a jump from START to TARGET
   would leave the try_finally node that START lives in.  */

static bool
outside_finally_tree (treemple start, gimple target)
{
  struct finally_tree_node n, *p;

  do
    {
      n.child = start;
      p = finally_tree->find (&n);
      if (!p)
        return true;
      start.g = p->parent;
    }
  while (start.g != target);

  return false;
}
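
/* An illustrative sketch, not actual compiler output.  Given nested
   constructs

     try {                          (T1)
       try {                        (T2)
         goto L;
       } finally { f2 (); }
     } finally { f1 (); }
     L:;

   the first pass records each LABEL_DECL and each GIMPLE_TRY_FINALLY
   with its enclosing try_finally as parent.  Walking parent links
   upward from L never reaches T2, so outside_finally_tree (L, T2)
   returns true: the goto escapes T2 (and T1 likewise) and must be
   routed through both cleanup blocks during the second pass.  */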

/* Second pass of EH node decomposition.  Actually transform the GIMPLE_TRY
   nodes into a set of gotos, magic labels, and eh regions.
   The eh region creation is straightforward, but frobbing all the gotos
   and such into shape isn't.  */

/* The sequence into which we record all EH stuff.  This will be
   placed at the end of the function when we're all done.  */
static gimple_seq eh_seq;

/* Record whether an EH region contains something that can throw,
   indexed by EH region number.  */
static bitmap eh_region_may_contain_throw_map;

/* The GOTO_QUEUE is an array of GIMPLE_GOTO and GIMPLE_RETURN
   statements that are seen to escape this GIMPLE_TRY_FINALLY node.
   The idea is to record a gimple statement for everything except for
   the conditionals, which get their labels recorded.  Since labels are
   of type 'tree', we need this node to store both gimple and tree
   objects.  REPL_STMT is the sequence used to replace the goto/return
   statement.  CONT_STMT is used to store the statement that allows
   the return/goto to jump to the original destination.  */

struct goto_queue_node
{
  treemple stmt;
  location_t location;
  gimple_seq repl_stmt;
  gimple cont_stmt;
  int index;
  /* This is used when index >= 0 to indicate that stmt is a label (as
     opposed to a goto stmt).  */
  int is_label;
};

/* State of the world while lowering.  */

struct leh_state
{
  /* What's "current" while constructing the eh region tree.  These
     correspond to variables of the same name in cfun->eh, which we
     don't have easy access to.  */
  eh_region cur_region;

  /* What's "current" for the purposes of __builtin_eh_pointer.  For
     a CATCH, this is the associated TRY.  For an EH_FILTER, this is
     the associated ALLOWED_EXCEPTIONS, etc.  */
  eh_region ehp_region;

  /* Processing of TRY_FINALLY requires a bit more state.  This is
     split out into a separate structure so that we don't have to
     copy so much when processing other nodes.  */
  struct leh_tf_state *tf;
};

struct leh_tf_state
{
  /* Pointer to the GIMPLE_TRY_FINALLY node under discussion.  The
     try_finally_expr is the original GIMPLE_TRY_FINALLY.  We need to retain
     this so that outside_finally_tree can reliably reference the tree used
     in the collect_finally_tree data structures.  */
  gimple try_finally_expr;
  gimple top_p;

  /* While lowering, top_p is usually expanded into multiple statements,
     so we need the following field to store them.  */
  gimple_seq top_p_seq;

  /* The state outside this try_finally node.  */
  struct leh_state *outer;

  /* The exception region created for it.  */
  eh_region region;

  /* The goto queue.  */
  struct goto_queue_node *goto_queue;
  size_t goto_queue_size;
  size_t goto_queue_active;

  /* Pointer map to help in searching goto_queue when it is large.  */
  hash_map<gimple, goto_queue_node *> *goto_queue_map;

  /* The set of unique labels seen as entries in the goto queue.  */
  vec<tree> dest_array;

  /* A label to be added at the end of the completed transformed
     sequence.  It will be set if may_fallthru was true *at one time*,
     though subsequent transformations may have cleared that flag.  */
  tree fallthru_label;

  /* True if it is possible to fall out the bottom of the try block.
     Cleared if the fallthru is converted to a goto.  */
  bool may_fallthru;

  /* True if any entry in goto_queue is a GIMPLE_RETURN.  */
  bool may_return;

  /* True if the finally block can receive an exception edge.
     Cleared if the exception case is handled by code duplication.  */
  bool may_throw;
};

static gimple_seq lower_eh_must_not_throw (struct leh_state *, gimple);

/* Search for STMT in the goto queue.  Return the replacement,
   or null if the statement isn't in the queue.  */

#define LARGE_GOTO_QUEUE 20

static void lower_eh_constructs_1 (struct leh_state *state, gimple_seq *seq);

static gimple_seq
find_goto_replacement (struct leh_tf_state *tf, treemple stmt)
{
  unsigned int i;

  if (tf->goto_queue_active < LARGE_GOTO_QUEUE)
    {
      for (i = 0; i < tf->goto_queue_active; i++)
        if (tf->goto_queue[i].stmt.g == stmt.g)
          return tf->goto_queue[i].repl_stmt;
      return NULL;
    }

  /* If we have a large number of entries in the goto_queue, create a
     pointer map and use that for searching.  */

  if (!tf->goto_queue_map)
    {
      tf->goto_queue_map = new hash_map<gimple, goto_queue_node *>;
      for (i = 0; i < tf->goto_queue_active; i++)
        {
          bool existed = tf->goto_queue_map->put (tf->goto_queue[i].stmt.g,
                                                  &tf->goto_queue[i]);
          gcc_assert (!existed);
        }
    }

  goto_queue_node **slot = tf->goto_queue_map->get (stmt.g);
  if (slot != NULL)
    return ((*slot)->repl_stmt);

  return NULL;
}
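
/* An editorial note on the two lookup regimes above.  Queues with fewer
   than LARGE_GOTO_QUEUE (20) entries are scanned linearly, which is
   cheap and allocates nothing; once a queue crosses that threshold the
   first lookup pays to build the hash_map and every later lookup is
   constant time.  E.g. a queue of 1000 gotos costs one 1000-insert map
   build plus O(1) queries, instead of a 1000-entry scan per query.
   The threshold value itself is a tuning constant, not a hard rule.  */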

/* A subroutine of replace_goto_queue_1.  Handles the sub-clauses of a
   lowered GIMPLE_COND.  If, by chance, the replacement is a simple goto,
   then we can just splat it in, otherwise we add the new stmts immediately
   after the GIMPLE_COND and redirect.  */

static void
replace_goto_queue_cond_clause (tree *tp, struct leh_tf_state *tf,
                                gimple_stmt_iterator *gsi)
{
  tree label;
  gimple_seq new_seq;
  treemple temp;
  location_t loc = gimple_location (gsi_stmt (*gsi));

  temp.tp = tp;
  new_seq = find_goto_replacement (tf, temp);
  if (!new_seq)
    return;

  if (gimple_seq_singleton_p (new_seq)
      && gimple_code (gimple_seq_first_stmt (new_seq)) == GIMPLE_GOTO)
    {
      *tp = gimple_goto_dest (gimple_seq_first_stmt (new_seq));
      return;
    }

  label = create_artificial_label (loc);
  /* Set the new label for the GIMPLE_COND.  */
  *tp = label;

  gsi_insert_after (gsi, gimple_build_label (label), GSI_CONTINUE_LINKING);
  gsi_insert_seq_after (gsi, gimple_seq_copy (new_seq), GSI_CONTINUE_LINKING);
}

/* The real work of replace_goto_queue.  Returns with TSI updated to
   point to the next statement.  */

static void replace_goto_queue_stmt_list (gimple_seq *, struct leh_tf_state *);

static void
replace_goto_queue_1 (gimple stmt, struct leh_tf_state *tf,
                      gimple_stmt_iterator *gsi)
{
  gimple_seq seq;
  treemple temp;
  temp.g = NULL;

  switch (gimple_code (stmt))
    {
    case GIMPLE_GOTO:
    case GIMPLE_RETURN:
      temp.g = stmt;
      seq = find_goto_replacement (tf, temp);
      if (seq)
        {
          gsi_insert_seq_before (gsi, gimple_seq_copy (seq), GSI_SAME_STMT);
          gsi_remove (gsi, false);
          return;
        }
      break;

    case GIMPLE_COND:
      replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 2), tf, gsi);
      replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 3), tf, gsi);
      break;

    case GIMPLE_TRY:
      replace_goto_queue_stmt_list (gimple_try_eval_ptr (stmt), tf);
      replace_goto_queue_stmt_list (gimple_try_cleanup_ptr (stmt), tf);
      break;
    case GIMPLE_CATCH:
      replace_goto_queue_stmt_list (gimple_catch_handler_ptr (stmt), tf);
      break;
    case GIMPLE_EH_FILTER:
      replace_goto_queue_stmt_list (gimple_eh_filter_failure_ptr (stmt), tf);
      break;
    case GIMPLE_EH_ELSE:
      replace_goto_queue_stmt_list (gimple_eh_else_n_body_ptr (stmt), tf);
      replace_goto_queue_stmt_list (gimple_eh_else_e_body_ptr (stmt), tf);
      break;

    default:
      /* These won't have gotos in them.  */
      break;
    }

  gsi_next (gsi);
}

/* A subroutine of replace_goto_queue.  Handles GIMPLE_SEQ.  */

static void
replace_goto_queue_stmt_list (gimple_seq *seq, struct leh_tf_state *tf)
{
  gimple_stmt_iterator gsi = gsi_start (*seq);

  while (!gsi_end_p (gsi))
    replace_goto_queue_1 (gsi_stmt (gsi), tf, &gsi);
}

/* Replace all goto queue members.  */

static void
replace_goto_queue (struct leh_tf_state *tf)
{
  if (tf->goto_queue_active == 0)
    return;
  replace_goto_queue_stmt_list (&tf->top_p_seq, tf);
  replace_goto_queue_stmt_list (&eh_seq, tf);
}

/* Add a new record to the goto queue contained in TF.  NEW_STMT is the
   data to be added, IS_LABEL indicates whether NEW_STMT is a label or
   a gimple return.  */

static void
record_in_goto_queue (struct leh_tf_state *tf,
                      treemple new_stmt,
                      int index,
                      bool is_label,
                      location_t location)
{
  size_t active, size;
  struct goto_queue_node *q;

  gcc_assert (!tf->goto_queue_map);

  active = tf->goto_queue_active;
  size = tf->goto_queue_size;
  if (active >= size)
    {
      size = (size ? size * 2 : 32);
      tf->goto_queue_size = size;
      tf->goto_queue
        = XRESIZEVEC (struct goto_queue_node, tf->goto_queue, size);
    }

  q = &tf->goto_queue[active];
  tf->goto_queue_active = active + 1;

  memset (q, 0, sizeof (*q));
  q->stmt = new_stmt;
  q->index = index;
  q->location = location;
  q->is_label = is_label;
}

/* Record the LABEL label in the goto queue contained in TF.
   TF is not null.  */

static void
record_in_goto_queue_label (struct leh_tf_state *tf, treemple stmt, tree label,
                            location_t location)
{
  int index;
  treemple temp, new_stmt;

  if (!label)
    return;

  /* Computed and non-local gotos do not get processed.  Given
     their nature we can neither tell whether we've escaped the
     finally block nor redirect them if we knew.  */
  if (TREE_CODE (label) != LABEL_DECL)
    return;

  /* No need to record gotos that don't leave the try block.  */
  temp.t = label;
  if (!outside_finally_tree (temp, tf->try_finally_expr))
    return;

  if (! tf->dest_array.exists ())
    {
      tf->dest_array.create (10);
      tf->dest_array.quick_push (label);
      index = 0;
    }
  else
    {
      int n = tf->dest_array.length ();
      for (index = 0; index < n; ++index)
        if (tf->dest_array[index] == label)
          break;
      if (index == n)
        tf->dest_array.safe_push (label);
    }

  /* In the case of a GOTO we want to record the destination label,
     since with a GIMPLE_COND we have easy access to the then/else
     labels.  */
  new_stmt = stmt;
  record_in_goto_queue (tf, new_stmt, index, true, location);
}

/* For any GIMPLE_GOTO or GIMPLE_RETURN, decide whether it leaves a try_finally
   node, and if so record that fact in the goto queue associated with that
   try_finally node.  */

static void
maybe_record_in_goto_queue (struct leh_state *state, gimple stmt)
{
  struct leh_tf_state *tf = state->tf;
  treemple new_stmt;

  if (!tf)
    return;

  switch (gimple_code (stmt))
    {
    case GIMPLE_COND:
      new_stmt.tp = gimple_op_ptr (stmt, 2);
      record_in_goto_queue_label (tf, new_stmt, gimple_cond_true_label (stmt),
                                  EXPR_LOCATION (*new_stmt.tp));
      new_stmt.tp = gimple_op_ptr (stmt, 3);
      record_in_goto_queue_label (tf, new_stmt, gimple_cond_false_label (stmt),
                                  EXPR_LOCATION (*new_stmt.tp));
      break;
    case GIMPLE_GOTO:
      new_stmt.g = stmt;
      record_in_goto_queue_label (tf, new_stmt, gimple_goto_dest (stmt),
                                  gimple_location (stmt));
      break;

    case GIMPLE_RETURN:
      tf->may_return = true;
      new_stmt.g = stmt;
      record_in_goto_queue (tf, new_stmt, -1, false, gimple_location (stmt));
      break;

    default:
      gcc_unreachable ();
    }
}


#ifdef ENABLE_CHECKING
/* We do not process GIMPLE_SWITCHes for now.  As long as the original source
   was in fact structured, and we've not yet done jump threading, then none
   of the labels will leave outer GIMPLE_TRY_FINALLY nodes.  Verify this.  */

static void
verify_norecord_switch_expr (struct leh_state *state, gimple switch_expr)
{
  struct leh_tf_state *tf = state->tf;
  size_t i, n;

  if (!tf)
    return;

  n = gimple_switch_num_labels (switch_expr);

  for (i = 0; i < n; ++i)
    {
      treemple temp;
      tree lab = CASE_LABEL (gimple_switch_label (switch_expr, i));
      temp.t = lab;
      gcc_assert (!outside_finally_tree (temp, tf->try_finally_expr));
    }
}
#else
#define verify_norecord_switch_expr(state, switch_expr)
#endif

/* Redirect a RETURN_EXPR pointed to by Q to FINLAB.  If MOD is
   non-null, insert it before the new branch.  */

static void
do_return_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod)
{
  gimple x;

  /* In the case of a return, the queue node must be a gimple statement.  */
  gcc_assert (!q->is_label);

  /* Note that the return value may have already been computed, e.g.,

       int x;
       int foo (void)
       {
         x = 0;
         try {
           return x;
         } finally {
           x++;
         }
       }

     should return 0, not 1.  We don't have to do anything to make
     this happen because the return value has been placed in the
     RESULT_DECL already.  */

  q->cont_stmt = q->stmt.g;

  if (mod)
    gimple_seq_add_seq (&q->repl_stmt, mod);

  x = gimple_build_goto (finlab);
  gimple_set_location (x, q->location);
  gimple_seq_add_stmt (&q->repl_stmt, x);
}

/* Similar, but easier, for GIMPLE_GOTO.  */

static void
do_goto_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod,
                     struct leh_tf_state *tf)
{
  gimple x;

  gcc_assert (q->is_label);

  q->cont_stmt = gimple_build_goto (tf->dest_array[q->index]);

  if (mod)
    gimple_seq_add_seq (&q->repl_stmt, mod);

  x = gimple_build_goto (finlab);
  gimple_set_location (x, q->location);
  gimple_seq_add_stmt (&q->repl_stmt, x);
}

/* Emit a standard landing pad sequence into SEQ for REGION.  */

static void
emit_post_landing_pad (gimple_seq *seq, eh_region region)
{
  eh_landing_pad lp = region->landing_pads;
  gimple x;

  if (lp == NULL)
    lp = gen_eh_landing_pad (region);

  lp->post_landing_pad = create_artificial_label (UNKNOWN_LOCATION);
  EH_LANDING_PAD_NR (lp->post_landing_pad) = lp->index;

  x = gimple_build_label (lp->post_landing_pad);
  gimple_seq_add_stmt (seq, x);
}

/* Emit a RESX statement into SEQ for REGION.  */

static void
emit_resx (gimple_seq *seq, eh_region region)
{
  gimple x = gimple_build_resx (region->index);
  gimple_seq_add_stmt (seq, x);
  if (region->outer)
    record_stmt_eh_region (region->outer, x);
}

/* Emit an EH_DISPATCH statement into SEQ for REGION.  */

static void
emit_eh_dispatch (gimple_seq *seq, eh_region region)
{
  gimple x = gimple_build_eh_dispatch (region->index);
  gimple_seq_add_stmt (seq, x);
}

/* Note that the current EH region may contain a throw, or a
   call to a function which itself may contain a throw.  */

static void
note_eh_region_may_contain_throw (eh_region region)
{
  while (bitmap_set_bit (eh_region_may_contain_throw_map, region->index))
    {
      if (region->type == ERT_MUST_NOT_THROW)
        break;
      region = region->outer;
      if (region == NULL)
        break;
    }
}

/* Check if REGION has been marked as containing a throw.  If REGION is
   NULL, this predicate is false.  */

static inline bool
eh_region_may_contain_throw (eh_region r)
{
  return r && bitmap_bit_p (eh_region_may_contain_throw_map, r->index);
}

/* We want to transform
     try { body; } catch { stuff; }
   to
     normal_sequence:
       body;
       over:
     eh_sequence:
       landing_pad:
       stuff;
       goto over;

   TP is a GIMPLE_TRY node.  REGION is the region whose post_landing_pad
   should be placed before the second operand, or NULL.  OVER is
   an existing label that should be put at the exit, or NULL.  */

static gimple_seq
frob_into_branch_around (gimple tp, eh_region region, tree over)
{
  gimple x;
  gimple_seq cleanup, result;
  location_t loc = gimple_location (tp);

  cleanup = gimple_try_cleanup (tp);
  result = gimple_try_eval (tp);

  if (region)
    emit_post_landing_pad (&eh_seq, region);

  if (gimple_seq_may_fallthru (cleanup))
    {
      if (!over)
        over = create_artificial_label (loc);
      x = gimple_build_goto (over);
      gimple_set_location (x, loc);
      gimple_seq_add_stmt (&cleanup, x);
    }
  gimple_seq_add_seq (&eh_seq, cleanup);

  if (over)
    {
      x = gimple_build_label (over);
      gimple_seq_add_stmt (&result, x);
    }
  return result;
}

/* A subroutine of lower_try_finally.  Duplicate the tree rooted at T.
   Make sure to record all new labels found.  */

static gimple_seq
lower_try_finally_dup_block (gimple_seq seq, struct leh_state *outer_state,
                             location_t loc)
{
  gimple region = NULL;
  gimple_seq new_seq;
  gimple_stmt_iterator gsi;

  new_seq = copy_gimple_seq_and_replace_locals (seq);

  for (gsi = gsi_start (new_seq); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      if (LOCATION_LOCUS (gimple_location (stmt)) == UNKNOWN_LOCATION)
        {
          tree block = gimple_block (stmt);
          gimple_set_location (stmt, loc);
          gimple_set_block (stmt, block);
        }
    }

  if (outer_state->tf)
    region = outer_state->tf->try_finally_expr;
  collect_finally_tree_1 (new_seq, region);

  return new_seq;
}

/* A subroutine of lower_try_finally.  Create a fallthru label for
   the given try_finally state.  The only tricky bit here is that
   we have to make sure to record the label in our outer context.  */

static tree
lower_try_finally_fallthru_label (struct leh_tf_state *tf)
{
  tree label = tf->fallthru_label;
  treemple temp;

  if (!label)
    {
      label = create_artificial_label (gimple_location (tf->try_finally_expr));
      tf->fallthru_label = label;
      if (tf->outer->tf)
        {
          temp.t = label;
          record_in_finally_tree (temp, tf->outer->tf->try_finally_expr);
        }
    }
  return label;
}

/* A subroutine of lower_try_finally.  If FINALLY consists of a
   GIMPLE_EH_ELSE node, return it.  */

static inline gimple
get_eh_else (gimple_seq finally)
{
  gimple x = gimple_seq_first_stmt (finally);
  if (gimple_code (x) == GIMPLE_EH_ELSE)
    {
      gcc_assert (gimple_seq_singleton_p (finally));
      return x;
    }
  return NULL;
}

/* A subroutine of lower_try_finally.  If the eh_protect_cleanup_actions
   langhook returns non-null, then the language requires that the exception
   path out of a try_finally be treated specially.  To wit: the code within
   the finally block may not itself throw an exception.  We have two choices
   here.  First we can duplicate the finally block and wrap it in a
   must_not_throw region.  Second, we can generate code like

     try {
       finally_block;
     } catch {
       if (fintmp == eh_edge)
         protect_cleanup_actions;
     }

   where "fintmp" is the temporary used in the switch statement generation
   alternative considered below.  For the nonce, we always choose the first
   option.

   THIS_STATE may be null if this is a try-cleanup, not a try-finally.  */

static void
honor_protect_cleanup_actions (struct leh_state *outer_state,
                               struct leh_state *this_state,
                               struct leh_tf_state *tf)
{
  tree protect_cleanup_actions;
  gimple_stmt_iterator gsi;
  bool finally_may_fallthru;
  gimple_seq finally;
  gimple x, eh_else;

  /* First check for nothing to do.  */
  if (lang_hooks.eh_protect_cleanup_actions == NULL)
    return;
  protect_cleanup_actions = lang_hooks.eh_protect_cleanup_actions ();
  if (protect_cleanup_actions == NULL)
    return;

  finally = gimple_try_cleanup (tf->top_p);
  eh_else = get_eh_else (finally);

  /* Duplicate the FINALLY block.  Only need to do this for try-finally,
     and not for cleanups.  If we've got an EH_ELSE, extract it now.  */
  if (eh_else)
    {
      finally = gimple_eh_else_e_body (eh_else);
      gimple_try_set_cleanup (tf->top_p, gimple_eh_else_n_body (eh_else));
    }
  else if (this_state)
    finally = lower_try_finally_dup_block (finally, outer_state,
        gimple_location (tf->try_finally_expr));
  finally_may_fallthru = gimple_seq_may_fallthru (finally);

  /* If this cleanup consists of a TRY_CATCH_EXPR with TRY_CATCH_IS_CLEANUP
     set, the handler of the TRY_CATCH_EXPR is another cleanup which ought
     to be in an enclosing scope, but needs to be implemented at this level
     to avoid a nesting violation (see wrap_temporary_cleanups in
     cp/decl.c).  Since it's logically at an outer level, we should call
     terminate before we get to it, so strip it away before adding the
     MUST_NOT_THROW filter.  */
  gsi = gsi_start (finally);
  x = gsi_stmt (gsi);
  if (gimple_code (x) == GIMPLE_TRY
      && gimple_try_kind (x) == GIMPLE_TRY_CATCH
      && gimple_try_catch_is_cleanup (x))
    {
      gsi_insert_seq_before (&gsi, gimple_try_eval (x), GSI_SAME_STMT);
      gsi_remove (&gsi, false);
    }

  /* Wrap the block with protect_cleanup_actions as the action.  */
  x = gimple_build_eh_must_not_throw (protect_cleanup_actions);
  x = gimple_build_try (finally, gimple_seq_alloc_with_stmt (x),
                        GIMPLE_TRY_CATCH);
  finally = lower_eh_must_not_throw (outer_state, x);

  /* Drop all of this into the exception sequence.  */
  emit_post_landing_pad (&eh_seq, tf->region);
  gimple_seq_add_seq (&eh_seq, finally);
  if (finally_may_fallthru)
    emit_resx (&eh_seq, tf->region);

  /* Having now been handled, EH isn't to be considered with
     the rest of the outgoing edges.  */
  tf->may_throw = false;
}

/* A subroutine of lower_try_finally.  We have determined that there is
   no fallthru edge out of the finally block.  This means that there is
   no outgoing edge corresponding to any incoming edge.  Restructure the
   try_finally node for this special case.  */

static void
lower_try_finally_nofallthru (struct leh_state *state,
                              struct leh_tf_state *tf)
{
  tree lab;
  gimple x, eh_else;
  gimple_seq finally;
  struct goto_queue_node *q, *qe;

  lab = create_artificial_label (gimple_location (tf->try_finally_expr));

  /* We expect that tf->top_p is a GIMPLE_TRY.  */
  finally = gimple_try_cleanup (tf->top_p);
  tf->top_p_seq = gimple_try_eval (tf->top_p);

  x = gimple_build_label (lab);
  gimple_seq_add_stmt (&tf->top_p_seq, x);

  q = tf->goto_queue;
  qe = q + tf->goto_queue_active;
  for (; q < qe; ++q)
    if (q->index < 0)
      do_return_redirection (q, lab, NULL);
    else
      do_goto_redirection (q, lab, NULL, tf);

  replace_goto_queue (tf);

  /* Emit the finally block into the stream.  Lower EH_ELSE at this time.  */
  eh_else = get_eh_else (finally);
  if (eh_else)
    {
      finally = gimple_eh_else_n_body (eh_else);
      lower_eh_constructs_1 (state, &finally);
      gimple_seq_add_seq (&tf->top_p_seq, finally);

      if (tf->may_throw)
        {
          finally = gimple_eh_else_e_body (eh_else);
          lower_eh_constructs_1 (state, &finally);

          emit_post_landing_pad (&eh_seq, tf->region);
          gimple_seq_add_seq (&eh_seq, finally);
        }
    }
  else
    {
      lower_eh_constructs_1 (state, &finally);
      gimple_seq_add_seq (&tf->top_p_seq, finally);

      if (tf->may_throw)
        {
          emit_post_landing_pad (&eh_seq, tf->region);

          x = gimple_build_goto (lab);
          gimple_set_location (x, gimple_location (tf->try_finally_expr));
          gimple_seq_add_stmt (&eh_seq, x);
        }
    }
}
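
/* An illustrative sketch of the nofallthru shape, not actual compiler
   output.  Since the finally block never falls through, every incoming
   edge can share one copy of it:

     try { body; goto L; } finally { abort (); }

   becomes roughly

     body;
     goto lab;           (the redirected edge from the goto queue)
     lab:
     abort ();           (the single copy of the finally block)

   with the exception path, if any, also branching to lab from the
   landing pad.  */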

/* A subroutine of lower_try_finally.  We have determined that there is
   exactly one destination of the finally block.  Restructure the
   try_finally node for this special case.  */

static void
lower_try_finally_onedest (struct leh_state *state, struct leh_tf_state *tf)
{
  struct goto_queue_node *q, *qe;
  gimple x;
  gimple_seq finally;
  gimple_stmt_iterator gsi;
  tree finally_label;
  location_t loc = gimple_location (tf->try_finally_expr);

  finally = gimple_try_cleanup (tf->top_p);
  tf->top_p_seq = gimple_try_eval (tf->top_p);

  /* Since there's only one destination, and the destination edge can only
     either be EH or non-EH, that implies that all of our incoming edges
     are of the same type.  Therefore we can lower EH_ELSE immediately.  */
  x = get_eh_else (finally);
  if (x)
    {
      if (tf->may_throw)
        finally = gimple_eh_else_e_body (x);
      else
        finally = gimple_eh_else_n_body (x);
    }

  lower_eh_constructs_1 (state, &finally);

  for (gsi = gsi_start (finally); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      if (LOCATION_LOCUS (gimple_location (stmt)) == UNKNOWN_LOCATION)
        {
          tree block = gimple_block (stmt);
          gimple_set_location (stmt, gimple_location (tf->try_finally_expr));
          gimple_set_block (stmt, block);
        }
    }

  if (tf->may_throw)
    {
      /* Only reachable via the exception edge.  Add the given label to
         the head of the FINALLY block.  Append a RESX at the end.  */
      emit_post_landing_pad (&eh_seq, tf->region);
      gimple_seq_add_seq (&eh_seq, finally);
      emit_resx (&eh_seq, tf->region);
      return;
    }

  if (tf->may_fallthru)
    {
      /* Only reachable via the fallthru edge.  Do nothing but let
         the two blocks run together; we'll fall out the bottom.  */
      gimple_seq_add_seq (&tf->top_p_seq, finally);
      return;
    }

  finally_label = create_artificial_label (loc);
  x = gimple_build_label (finally_label);
  gimple_seq_add_stmt (&tf->top_p_seq, x);

  gimple_seq_add_seq (&tf->top_p_seq, finally);

  q = tf->goto_queue;
  qe = q + tf->goto_queue_active;

  if (tf->may_return)
    {
      /* Reachable by return expressions only.  Redirect them.  */
      for (; q < qe; ++q)
        do_return_redirection (q, finally_label, NULL);
      replace_goto_queue (tf);
    }
  else
    {
      /* Reachable by goto expressions only.  Redirect them.  */
      for (; q < qe; ++q)
        do_goto_redirection (q, finally_label, NULL, tf);
      replace_goto_queue (tf);

      if (tf->dest_array[0] == tf->fallthru_label)
        {
          /* Reachable by goto to fallthru label only.  Redirect it
             to the new label (already created, sadly), and do not
             emit the final branch out, or the fallthru label.  */
          tf->fallthru_label = NULL;
          return;
        }
    }

  /* Place the original return/goto to the original destination
     immediately after the finally block.  */
  x = tf->goto_queue[0].cont_stmt;
  gimple_seq_add_stmt (&tf->top_p_seq, x);
  maybe_record_in_goto_queue (state, x);
}
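
/* An illustrative sketch, not actual compiler output.  With a single
   destination the finally block is emitted exactly once and the one
   escaping edge is routed through it, e.g.

     try { x = 0; return x; } finally { x++; }

   becomes roughly

     x = 0;
     goto finally_label;     (the redirected return)
     finally_label:
     x++;
     return;                 (the original return, resumed afterward)

   The return value was already stored in the RESULT_DECL, as the
   comment in do_return_redirection explains, so it still returns 0.  */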

/* A subroutine of lower_try_finally.  There are multiple edges incoming
   and outgoing from the finally block.  Implement this by duplicating the
   finally block for every destination.  */

static void
lower_try_finally_copy (struct leh_state *state, struct leh_tf_state *tf)
{
  gimple_seq finally;
  gimple_seq new_stmt;
  gimple_seq seq;
  gimple x, eh_else;
  tree tmp;
  location_t tf_loc = gimple_location (tf->try_finally_expr);

  finally = gimple_try_cleanup (tf->top_p);

  /* Notice EH_ELSE, and simplify some of the remaining code
     by considering FINALLY to be the normal return path only.  */
  eh_else = get_eh_else (finally);
  if (eh_else)
    finally = gimple_eh_else_n_body (eh_else);

  tf->top_p_seq = gimple_try_eval (tf->top_p);
  new_stmt = NULL;

  if (tf->may_fallthru)
    {
      seq = lower_try_finally_dup_block (finally, state, tf_loc);
      lower_eh_constructs_1 (state, &seq);
      gimple_seq_add_seq (&new_stmt, seq);

      tmp = lower_try_finally_fallthru_label (tf);
      x = gimple_build_goto (tmp);
      gimple_set_location (x, tf_loc);
      gimple_seq_add_stmt (&new_stmt, x);
    }

  if (tf->may_throw)
    {
      /* We don't need to copy the EH path of EH_ELSE,
         since it is only emitted once.  */
      if (eh_else)
        seq = gimple_eh_else_e_body (eh_else);
      else
        seq = lower_try_finally_dup_block (finally, state, tf_loc);
      lower_eh_constructs_1 (state, &seq);

      emit_post_landing_pad (&eh_seq, tf->region);
      gimple_seq_add_seq (&eh_seq, seq);
      emit_resx (&eh_seq, tf->region);
    }

  if (tf->goto_queue)
    {
      struct goto_queue_node *q, *qe;
      int return_index, index;
      struct labels_s
      {
        struct goto_queue_node *q;
        tree label;
      } *labels;

      return_index = tf->dest_array.length ();
      labels = XCNEWVEC (struct labels_s, return_index + 1);

      q = tf->goto_queue;
      qe = q + tf->goto_queue_active;
      for (; q < qe; q++)
        {
          index = q->index < 0 ? return_index : q->index;

          if (!labels[index].q)
            labels[index].q = q;
        }

      for (index = 0; index < return_index + 1; index++)
        {
          tree lab;

          q = labels[index].q;
          if (! q)
            continue;

          lab = labels[index].label
            = create_artificial_label (tf_loc);

          if (index == return_index)
            do_return_redirection (q, lab, NULL);
          else
            do_goto_redirection (q, lab, NULL, tf);

          x = gimple_build_label (lab);
          gimple_seq_add_stmt (&new_stmt, x);

          seq = lower_try_finally_dup_block (finally, state, q->location);
          lower_eh_constructs_1 (state, &seq);
          gimple_seq_add_seq (&new_stmt, seq);

          gimple_seq_add_stmt (&new_stmt, q->cont_stmt);
          maybe_record_in_goto_queue (state, q->cont_stmt);
        }

      for (q = tf->goto_queue; q < qe; q++)
        {
          tree lab;

          index = q->index < 0 ? return_index : q->index;

          if (labels[index].q == q)
            continue;

          lab = labels[index].label;

          if (index == return_index)
            do_return_redirection (q, lab, NULL);
          else
            do_goto_redirection (q, lab, NULL, tf);
        }

      replace_goto_queue (tf);
      free (labels);
    }

  /* Need to link new stmts after running replace_goto_queue due
     to not wanting to process the same goto stmts twice.  */
  gimple_seq_add_seq (&tf->top_p_seq, new_stmt);
}
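
/* An illustrative sketch, not actual compiler output.  With two
   escaping destinations, duplication produces one copy of the finally
   block per destination:

     try { if (p) goto A; else goto B; } finally { f (); }

   becomes roughly

     if (p) goto labA; else goto labB;
     labA: f (); goto A;
     labB: f (); goto B;

   plus one more copy on the exception path when tf->may_throw.  The
   cost of this strategy is what decide_copy_try_finally weighs against
   the switch-based lowering below.  */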

/* A subroutine of lower_try_finally.  There are multiple edges incoming
   and outgoing from the finally block.  Implement this by instrumenting
   each incoming edge and creating a switch statement at the end of the
   finally block that branches to the appropriate destination.  */

static void
lower_try_finally_switch (struct leh_state *state, struct leh_tf_state *tf)
{
  struct goto_queue_node *q, *qe;
  tree finally_tmp, finally_label;
  int return_index, eh_index, fallthru_index;
  int nlabels, ndests, j, last_case_index;
  tree last_case;
  vec<tree> case_label_vec;
  gimple_seq switch_body = NULL;
  gimple x, eh_else;
  tree tmp;
  gimple switch_stmt;
  gimple_seq finally;
  hash_map<tree, gimple> *cont_map = NULL;
  /* The location of the TRY_FINALLY stmt.  */
  location_t tf_loc = gimple_location (tf->try_finally_expr);
  /* The location of the finally block.  */
  location_t finally_loc;

  finally = gimple_try_cleanup (tf->top_p);
  eh_else = get_eh_else (finally);

  /* Mash the TRY block to the head of the chain.  */
  tf->top_p_seq = gimple_try_eval (tf->top_p);

  /* The location of the finally is either the last stmt in the finally
     block or the location of the TRY_FINALLY itself.  */
  x = gimple_seq_last_stmt (finally);
  finally_loc = x ? gimple_location (x) : tf_loc;

  /* Prepare for switch statement generation.  */
  nlabels = tf->dest_array.length ();
  return_index = nlabels;
  eh_index = return_index + tf->may_return;
  fallthru_index = eh_index + (tf->may_throw && !eh_else);
  ndests = fallthru_index + tf->may_fallthru;

  finally_tmp = create_tmp_var (integer_type_node, "finally_tmp");
  finally_label = create_artificial_label (finally_loc);

  /* We use vec::quick_push on case_label_vec throughout this function,
     since we know the size in advance and allocate precisely as much
     space as needed.  */
  case_label_vec.create (ndests);
  last_case = NULL;
  last_case_index = 0;

  /* Begin inserting code for getting to the finally block.  Things
     are done in this order to correspond to the sequence the code is
     laid out.  */

  if (tf->may_fallthru)
    {
      x = gimple_build_assign (finally_tmp,
                               build_int_cst (integer_type_node,
                                              fallthru_index));
      gimple_seq_add_stmt (&tf->top_p_seq, x);

      tmp = build_int_cst (integer_type_node, fallthru_index);
      last_case = build_case_label (tmp, NULL,
                                    create_artificial_label (tf_loc));
      case_label_vec.quick_push (last_case);
      last_case_index++;

      x = gimple_build_label (CASE_LABEL (last_case));
      gimple_seq_add_stmt (&switch_body, x);

      tmp = lower_try_finally_fallthru_label (tf);
      x = gimple_build_goto (tmp);
      gimple_set_location (x, tf_loc);
      gimple_seq_add_stmt (&switch_body, x);
    }

  /* For EH_ELSE, emit the exception path (plus resx) now, then
     subsequently we only need consider the normal path.  */
  if (eh_else)
    {
      if (tf->may_throw)
        {
          finally = gimple_eh_else_e_body (eh_else);
          lower_eh_constructs_1 (state, &finally);

          emit_post_landing_pad (&eh_seq, tf->region);
          gimple_seq_add_seq (&eh_seq, finally);
          emit_resx (&eh_seq, tf->region);
        }

      finally = gimple_eh_else_n_body (eh_else);
    }
  else if (tf->may_throw)
    {
      emit_post_landing_pad (&eh_seq, tf->region);

      x = gimple_build_assign (finally_tmp,
                               build_int_cst (integer_type_node, eh_index));
      gimple_seq_add_stmt (&eh_seq, x);

      x = gimple_build_goto (finally_label);
      gimple_set_location (x, tf_loc);
      gimple_seq_add_stmt (&eh_seq, x);

      tmp = build_int_cst (integer_type_node, eh_index);
      last_case = build_case_label (tmp, NULL,
                                    create_artificial_label (tf_loc));
      case_label_vec.quick_push (last_case);
      last_case_index++;

      x = gimple_build_label (CASE_LABEL (last_case));
      gimple_seq_add_stmt (&eh_seq, x);
      emit_resx (&eh_seq, tf->region);
    }

  x = gimple_build_label (finally_label);
  gimple_seq_add_stmt (&tf->top_p_seq, x);

  lower_eh_constructs_1 (state, &finally);
  gimple_seq_add_seq (&tf->top_p_seq, finally);

  /* Redirect each incoming goto edge.  */
  q = tf->goto_queue;
  qe = q + tf->goto_queue_active;
  j = last_case_index + tf->may_return;
  /* Prepare the assignments to finally_tmp that are executed upon the
     entrance through a particular edge.  */
  for (; q < qe; ++q)
    {
      gimple_seq mod = NULL;
      int switch_id;
      unsigned int case_index;

      if (q->index < 0)
        {
          x = gimple_build_assign (finally_tmp,
                                   build_int_cst (integer_type_node,
                                                  return_index));
          gimple_seq_add_stmt (&mod, x);
          do_return_redirection (q, finally_label, mod);
          switch_id = return_index;
        }
      else
        {
          x = gimple_build_assign (finally_tmp,
                                   build_int_cst (integer_type_node, q->index));
          gimple_seq_add_stmt (&mod, x);
          do_goto_redirection (q, finally_label, mod, tf);
          switch_id = q->index;
        }

      case_index = j + q->index;
      if (case_label_vec.length () <= case_index || !case_label_vec[case_index])
        {
          tree case_lab;
          tmp = build_int_cst (integer_type_node, switch_id);
          case_lab = build_case_label (tmp, NULL,
                                       create_artificial_label (tf_loc));
          /* We store the cont_stmt in the pointer map, so that we can recover
             it in the loop below.  */
          if (!cont_map)
            cont_map = new hash_map<tree, gimple>;
          cont_map->put (case_lab, q->cont_stmt);
          case_label_vec.quick_push (case_lab);
        }
    }
  for (j = last_case_index; j < last_case_index + nlabels; j++)
    {
      gimple cont_stmt;

      last_case = case_label_vec[j];

      gcc_assert (last_case);
      gcc_assert (cont_map);

      cont_stmt = *cont_map->get (last_case);

      x = gimple_build_label (CASE_LABEL (last_case));
      gimple_seq_add_stmt (&switch_body, x);
      gimple_seq_add_stmt (&switch_body, cont_stmt);
      maybe_record_in_goto_queue (state, cont_stmt);
    }
  if (cont_map)
    delete cont_map;

  replace_goto_queue (tf);

  /* Make sure that the last case is the default label, as one is required.
     Then sort the labels, which is also required in GIMPLE.  */
  CASE_LOW (last_case) = NULL;
  tree tem = case_label_vec.pop ();
  gcc_assert (tem == last_case);
  sort_case_labels (case_label_vec);

  /* Build the switch statement, setting last_case to be the default
     label.  */
  switch_stmt = gimple_build_switch (finally_tmp, last_case,
                                     case_label_vec);
  gimple_set_location (switch_stmt, finally_loc);

  /* Need to link SWITCH_STMT after running replace_goto_queue
     due to not wanting to process the same goto stmts twice.  */
  gimple_seq_add_stmt (&tf->top_p_seq, switch_stmt);
  gimple_seq_add_seq (&tf->top_p_seq, switch_body);
}
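
/* An illustrative sketch of the switch-based shape for the same example
   used above, not actual compiler output:

     try { if (p) goto A; else goto B; } finally { f (); }

   becomes roughly

     if (p)
       { finally_tmp = 0; goto finally_label; }
     else
       { finally_tmp = 1; goto finally_label; }
     finally_label:
     f ();
     switch (finally_tmp)
       {
       case 0: goto A;
       default: goto B;
       }

   i.e. one shared copy of the finally block, at the price of the
   temporary, the per-edge assignments, and the dispatch switch.  */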

/* Decide whether or not we are going to duplicate the finally block.
   There are several considerations.

   First, if this is Java, then the finally block contains code
   written by the user.  It has line numbers associated with it,
   so duplicating the block means it's difficult to set a breakpoint.
   Since controlling code generation via -g is verboten, we simply
   never duplicate code without optimization.

   Second, we'd like to prevent egregious code growth.  One way to
   do this is to estimate the size of the finally block, multiply
   that by the number of copies we'd need to make, and compare against
   the estimate of the size of the switch machinery we'd have to add.  */

static bool
decide_copy_try_finally (int ndests, bool may_throw, gimple_seq finally)
{
  int f_estimate, sw_estimate;
  gimple eh_else;

  /* If there's an EH_ELSE involved, the exception path is separate
     and really doesn't come into play for this computation.  */
  eh_else = get_eh_else (finally);
  if (eh_else)
    {
      ndests -= may_throw;
      finally = gimple_eh_else_n_body (eh_else);
    }

  if (!optimize)
    {
      gimple_stmt_iterator gsi;

      if (ndests == 1)
        return true;

      for (gsi = gsi_start (finally); !gsi_end_p (gsi); gsi_next (&gsi))
        {
          gimple stmt = gsi_stmt (gsi);
          if (!is_gimple_debug (stmt) && !gimple_clobber_p (stmt))
            return false;
        }
      return true;
    }

  /* Finally estimate N times, plus N gotos.  */
  f_estimate = count_insns_seq (finally, &eni_size_weights);
  f_estimate = (f_estimate + 1) * ndests;

  /* Switch statement (cost 10), N variable assignments, N gotos.  */
  sw_estimate = 10 + 2 * ndests;

  /* Optimize for size clearly wants our best guess.  */
  if (optimize_function_for_size_p (cfun))
    return f_estimate < sw_estimate;

  /* ??? These numbers are completely made up so far.  */
  if (optimize > 1)
    return f_estimate < 100 || f_estimate < sw_estimate * 2;
  else
    return f_estimate < 40 || f_estimate * 2 < sw_estimate * 3;
}
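
/* A worked example of the heuristic above (the figures are made up for
   illustration).  Suppose the finally body counts as 8 insns and
   ndests == 3:

     f_estimate  = (8 + 1) * 3 = 27
     sw_estimate = 10 + 2 * 3  = 16

   When optimizing for size, 27 < 16 is false, so the switch lowering
   is chosen.  At -O2 the copy lowering is chosen instead, because
   27 < 100, reflecting the bias toward straight-line duplicated code
   when size is not paramount.  */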

/* REG is the enclosing region for a possible cleanup region, or the region
   itself.  Returns TRUE if such a region would be unreachable.

   Cleanup regions within a must-not-throw region aren't actually reachable
   even if there are throwing stmts within them, because the personality
   routine will call terminate before unwinding.  */

static bool
cleanup_is_dead_in (eh_region reg)
{
  while (reg && reg->type == ERT_CLEANUP)
    reg = reg->outer;
  return (reg && reg->type == ERT_MUST_NOT_THROW);
}

/* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY_FINALLY node
   to a sequence of labels and blocks, plus the exception region trees
   that record all the magic.  This is complicated by the need to
   arrange for the FINALLY block to be executed on all exits.  */

static gimple_seq
lower_try_finally (struct leh_state *state, gimple tp)
{
  struct leh_tf_state this_tf;
  struct leh_state this_state;
  int ndests;
  gimple_seq old_eh_seq;

  /* Process the try block.  */

  memset (&this_tf, 0, sizeof (this_tf));
  this_tf.try_finally_expr = tp;
  this_tf.top_p = tp;
  this_tf.outer = state;
  if (using_eh_for_cleanups_p () && !cleanup_is_dead_in (state->cur_region))
    {
      this_tf.region = gen_eh_region_cleanup (state->cur_region);
      this_state.cur_region = this_tf.region;
    }
  else
    {
      this_tf.region = NULL;
      this_state.cur_region = state->cur_region;
    }

  this_state.ehp_region = state->ehp_region;
  this_state.tf = &this_tf;

  old_eh_seq = eh_seq;
  eh_seq = NULL;

  lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));

  /* Determine if the try block is escaped through the bottom.  */
  this_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp));

  /* Determine if any exceptions are possible within the try block.  */
  if (this_tf.region)
    this_tf.may_throw = eh_region_may_contain_throw (this_tf.region);
  if (this_tf.may_throw)
    honor_protect_cleanup_actions (state, &this_state, &this_tf);

  /* Determine how many edges (still) reach the finally block.  Or rather,
     how many destinations are reached by the finally block.  Use this to
     determine how we process the finally block itself.  */

  ndests = this_tf.dest_array.length ();
  ndests += this_tf.may_fallthru;
  ndests += this_tf.may_return;
  ndests += this_tf.may_throw;

  /* If the FINALLY block is not reachable, dike it out.  */
  if (ndests == 0)
    {
      gimple_seq_add_seq (&this_tf.top_p_seq, gimple_try_eval (tp));
      gimple_try_set_cleanup (tp, NULL);
    }
  /* If the finally block doesn't fall through, then any destination
     we might try to impose there isn't reached either.  There may be
     some minor amount of cleanup and redirection still needed.  */
  else if (!gimple_seq_may_fallthru (gimple_try_cleanup (tp)))
    lower_try_finally_nofallthru (state, &this_tf);

  /* We can easily special-case redirection to a single destination.  */
  else if (ndests == 1)
    lower_try_finally_onedest (state, &this_tf);
  else if (decide_copy_try_finally (ndests, this_tf.may_throw,
                                    gimple_try_cleanup (tp)))
    lower_try_finally_copy (state, &this_tf);
  else
    lower_try_finally_switch (state, &this_tf);

  /* If someone requested we add a label at the end of the transformed
     block, do so.  */
  if (this_tf.fallthru_label)
    {
      /* This must be reached only if ndests == 0.  */
      gimple x = gimple_build_label (this_tf.fallthru_label);
      gimple_seq_add_stmt (&this_tf.top_p_seq, x);
    }

  this_tf.dest_array.release ();
  free (this_tf.goto_queue);
  if (this_tf.goto_queue_map)
    delete this_tf.goto_queue_map;

  /* If there was an old (aka outer) eh_seq, append the current eh_seq.
     If there was no old eh_seq, then the append is trivially already done.  */
  if (old_eh_seq)
    {
      if (eh_seq == NULL)
        eh_seq = old_eh_seq;
      else
        {
          gimple_seq new_eh_seq = eh_seq;
          eh_seq = old_eh_seq;
          gimple_seq_add_seq (&eh_seq, new_eh_seq);
        }
    }

  return this_tf.top_p_seq;
}
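
/* A summary of the dispatch above, for orientation:

     ndests == 0                   -> finally block deleted outright
     finally doesn't fall through  -> lower_try_finally_nofallthru
     ndests == 1                   -> lower_try_finally_onedest
     copying judged cheap enough   -> lower_try_finally_copy
     otherwise                     -> lower_try_finally_switch

   where ndests counts the unique goto labels plus the fallthru,
   return, and exception exits that are still live.  */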

/* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY_CATCH with a
   list of GIMPLE_CATCH to a sequence of labels and blocks, plus the
   exception region trees that record all the magic.  */

static gimple_seq
lower_catch (struct leh_state *state, gimple tp)
{
  eh_region try_region = NULL;
  struct leh_state this_state = *state;
  gimple_stmt_iterator gsi;
  tree out_label;
  gimple_seq new_seq, cleanup;
  gimple x;
  location_t try_catch_loc = gimple_location (tp);

  if (flag_exceptions)
    {
      try_region = gen_eh_region_try (state->cur_region);
      this_state.cur_region = try_region;
    }

  lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));

  if (!eh_region_may_contain_throw (try_region))
    return gimple_try_eval (tp);

  new_seq = NULL;
  emit_eh_dispatch (&new_seq, try_region);
  emit_resx (&new_seq, try_region);

  this_state.cur_region = state->cur_region;
  this_state.ehp_region = try_region;

  out_label = NULL;
  cleanup = gimple_try_cleanup (tp);
  for (gsi = gsi_start (cleanup);
       !gsi_end_p (gsi);
       gsi_next (&gsi))
    {
      eh_catch c;
      gimple gcatch;
      gimple_seq handler;

      gcatch = gsi_stmt (gsi);
      c = gen_eh_region_catch (try_region, gimple_catch_types (gcatch));

      handler = gimple_catch_handler (gcatch);
      lower_eh_constructs_1 (&this_state, &handler);

      c->label = create_artificial_label (UNKNOWN_LOCATION);
      x = gimple_build_label (c->label);
      gimple_seq_add_stmt (&new_seq, x);

      gimple_seq_add_seq (&new_seq, handler);

      if (gimple_seq_may_fallthru (new_seq))
        {
          if (!out_label)
            out_label = create_artificial_label (try_catch_loc);

          x = gimple_build_goto (out_label);
          gimple_seq_add_stmt (&new_seq, x);
        }
      if (!c->type_list)
        break;
    }

  gimple_try_set_cleanup (tp, new_seq);

  return frob_into_branch_around (tp, try_region, out_label);
}
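
/* An illustrative sketch, not actual compiler output.  For

     try { body; } catch (T1) { h1; } catch (T2) { h2; }

   the cleanup sequence built above looks roughly like

     eh_dispatch <region>;
     resx <region>;          (no handler matched)
     <L1>: h1; goto over;
     <L2>: h2; goto over;

   and frob_into_branch_around splices it onto eh_seq after the body,
   with "over" closing out the normal path.  */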

/* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY with a
   GIMPLE_EH_FILTER to a sequence of labels and blocks, plus the exception
   region trees that record all the magic.  */

static gimple_seq
lower_eh_filter (struct leh_state *state, gimple tp)
{
  struct leh_state this_state = *state;
  eh_region this_region = NULL;
  gimple inner, x;
  gimple_seq new_seq;

  inner = gimple_seq_first_stmt (gimple_try_cleanup (tp));

  if (flag_exceptions)
    {
      this_region = gen_eh_region_allowed (state->cur_region,
                                           gimple_eh_filter_types (inner));
      this_state.cur_region = this_region;
    }

  lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));

  if (!eh_region_may_contain_throw (this_region))
    return gimple_try_eval (tp);

  new_seq = NULL;
  this_state.cur_region = state->cur_region;
  this_state.ehp_region = this_region;

  emit_eh_dispatch (&new_seq, this_region);
  emit_resx (&new_seq, this_region);

  this_region->u.allowed.label = create_artificial_label (UNKNOWN_LOCATION);
  x = gimple_build_label (this_region->u.allowed.label);
  gimple_seq_add_stmt (&new_seq, x);

  lower_eh_constructs_1 (&this_state, gimple_eh_filter_failure_ptr (inner));
  gimple_seq_add_seq (&new_seq, gimple_eh_filter_failure (inner));

  gimple_try_set_cleanup (tp, new_seq);

  return frob_into_branch_around (tp, this_region, NULL);
}

/* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY with
   a GIMPLE_EH_MUST_NOT_THROW to a sequence of labels and blocks,
   plus the exception region trees that record all the magic.  */

static gimple_seq
lower_eh_must_not_throw (struct leh_state *state, gimple tp)
{
  struct leh_state this_state = *state;

  if (flag_exceptions)
    {
      gimple inner = gimple_seq_first_stmt (gimple_try_cleanup (tp));
      eh_region this_region;

      this_region = gen_eh_region_must_not_throw (state->cur_region);
      this_region->u.must_not_throw.failure_decl
        = gimple_eh_must_not_throw_fndecl (inner);
      this_region->u.must_not_throw.failure_loc
        = LOCATION_LOCUS (gimple_location (tp));

      /* In order to get mangling applied to this decl, we must mark it
         used now.  Otherwise, pass_ipa_free_lang_data won't think it
         needs to happen.  */
      TREE_USED (this_region->u.must_not_throw.failure_decl) = 1;

      this_state.cur_region = this_region;
    }

  lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));

  return gimple_try_eval (tp);
}
1875
1876 /* Implement a cleanup expression. This is similar to try-finally,
1877 except that we only execute the cleanup block for exception edges. */
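
/* Schematically (an illustrative sketch, not exact IL):

     try { body; } cleanup { actions; }

   becomes roughly

     body; goto over;
     <lpad>: actions; resx;
     over:

   so the actions run only when BODY throws. */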
1878
1879 static gimple_seq
1880 lower_cleanup (struct leh_state *state, gimple tp)
1881 {
1882 struct leh_state this_state = *state;
1883 eh_region this_region = NULL;
1884 struct leh_tf_state fake_tf;
1885 gimple_seq result;
1886 bool cleanup_dead = cleanup_is_dead_in (state->cur_region);
1887
1888 if (flag_exceptions && !cleanup_dead)
1889 {
1890 this_region = gen_eh_region_cleanup (state->cur_region);
1891 this_state.cur_region = this_region;
1892 }
1893
1894 lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1895
1896 if (cleanup_dead || !eh_region_may_contain_throw (this_region))
1897 return gimple_try_eval (tp);
1898
1899 /* Build enough of a try-finally state so that we can reuse
1900 honor_protect_cleanup_actions. */
1901 memset (&fake_tf, 0, sizeof (fake_tf));
1902 fake_tf.top_p = fake_tf.try_finally_expr = tp;
1903 fake_tf.outer = state;
1904 fake_tf.region = this_region;
1905 fake_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp));
1906 fake_tf.may_throw = true;
1907
1908 honor_protect_cleanup_actions (state, NULL, &fake_tf);
1909
1910 if (fake_tf.may_throw)
1911 {
1912 /* In this case honor_protect_cleanup_actions had nothing to do,
1913 and we should process this normally. */
1914 lower_eh_constructs_1 (state, gimple_try_cleanup_ptr (tp));
1915 result = frob_into_branch_around (tp, this_region,
1916 fake_tf.fallthru_label);
1917 }
1918 else
1919 {
1920 /* In this case honor_protect_cleanup_actions did nearly all of
1921 the work. All we have left is to append the fallthru_label. */
1922
1923 result = gimple_try_eval (tp);
1924 if (fake_tf.fallthru_label)
1925 {
1926 gimple x = gimple_build_label (fake_tf.fallthru_label);
1927 gimple_seq_add_stmt (&result, x);
1928 }
1929 }
1930 return result;
1931 }
1932
1933 /* Main loop for lowering EH constructs. Also moves GSI to the next
1934 statement. */
1935
1936 static void
1937 lower_eh_constructs_2 (struct leh_state *state, gimple_stmt_iterator *gsi)
1938 {
1939 gimple_seq replace;
1940 gimple x;
1941 gimple stmt = gsi_stmt (*gsi);
1942
1943 switch (gimple_code (stmt))
1944 {
1945 case GIMPLE_CALL:
1946 {
1947 tree fndecl = gimple_call_fndecl (stmt);
1948 tree rhs, lhs;
1949
1950 if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
1951 switch (DECL_FUNCTION_CODE (fndecl))
1952 {
1953 case BUILT_IN_EH_POINTER:
1954 /* The front end may have generated a call to
1955 __builtin_eh_pointer (0) within a catch region. Replace
1956 this zero argument with the current catch region number. */
1957 if (state->ehp_region)
1958 {
1959 tree nr = build_int_cst (integer_type_node,
1960 state->ehp_region->index);
1961 gimple_call_set_arg (stmt, 0, nr);
1962 }
1963 else
1964 {
1965 /* The user has done something silly. Remove it. */
1966 rhs = null_pointer_node;
1967 goto do_replace;
1968 }
1969 break;
1970
1971 case BUILT_IN_EH_FILTER:
1972 /* ??? This should never appear, but since it's a builtin it
1973 is accessible to users and thus open to abuse. Just remove it
1974 and replace the use with the arbitrary value zero. */
1975 rhs = build_int_cst (TREE_TYPE (TREE_TYPE (fndecl)), 0);
1976 do_replace:
1977 lhs = gimple_call_lhs (stmt);
1978 x = gimple_build_assign (lhs, rhs);
1979 gsi_insert_before (gsi, x, GSI_SAME_STMT);
1980 /* FALLTHRU */
1981
1982 case BUILT_IN_EH_COPY_VALUES:
1983 /* Likewise this should not appear. Remove it. */
1984 gsi_remove (gsi, true);
1985 return;
1986
1987 default:
1988 break;
1989 }
1990 }
1991 /* FALLTHRU */
1992
1993 case GIMPLE_ASSIGN:
1994 /* If the stmt can throw use a new temporary for the assignment
1995 to a LHS. This makes sure the old value of the LHS is
1996 available on the EH edge. Only do so for statements that
1997 potentially fall through (no noreturn calls e.g.), otherwise
1998 this new assignment might create fake fallthru regions. */
1999 if (stmt_could_throw_p (stmt)
2000 && gimple_has_lhs (stmt)
2001 && gimple_stmt_may_fallthru (stmt)
2002 && !tree_could_throw_p (gimple_get_lhs (stmt))
2003 && is_gimple_reg_type (TREE_TYPE (gimple_get_lhs (stmt))))
2004 {
2005 tree lhs = gimple_get_lhs (stmt);
2006 tree tmp = create_tmp_var (TREE_TYPE (lhs), NULL);
2007 gimple s = gimple_build_assign (lhs, tmp);
2008 gimple_set_location (s, gimple_location (stmt));
2009 gimple_set_block (s, gimple_block (stmt));
2010 gimple_set_lhs (stmt, tmp);
2011 if (TREE_CODE (TREE_TYPE (tmp)) == COMPLEX_TYPE
2012 || TREE_CODE (TREE_TYPE (tmp)) == VECTOR_TYPE)
2013 DECL_GIMPLE_REG_P (tmp) = 1;
2014 gsi_insert_after (gsi, s, GSI_SAME_STMT);
2015 }
2016 /* Look for things that can throw exceptions, and record them. */
2017 if (state->cur_region && stmt_could_throw_p (stmt))
2018 {
2019 record_stmt_eh_region (state->cur_region, stmt);
2020 note_eh_region_may_contain_throw (state->cur_region);
2021 }
2022 break;
2023
2024 case GIMPLE_COND:
2025 case GIMPLE_GOTO:
2026 case GIMPLE_RETURN:
2027 maybe_record_in_goto_queue (state, stmt);
2028 break;
2029
2030 case GIMPLE_SWITCH:
2031 verify_norecord_switch_expr (state, stmt);
2032 break;
2033
2034 case GIMPLE_TRY:
2035 if (gimple_try_kind (stmt) == GIMPLE_TRY_FINALLY)
2036 replace = lower_try_finally (state, stmt);
2037 else
2038 {
2039 x = gimple_seq_first_stmt (gimple_try_cleanup (stmt));
2040 if (!x)
2041 {
2042 replace = gimple_try_eval (stmt);
2043 lower_eh_constructs_1 (state, &replace);
2044 }
2045 else
2046 switch (gimple_code (x))
2047 {
2048 case GIMPLE_CATCH:
2049 replace = lower_catch (state, stmt);
2050 break;
2051 case GIMPLE_EH_FILTER:
2052 replace = lower_eh_filter (state, stmt);
2053 break;
2054 case GIMPLE_EH_MUST_NOT_THROW:
2055 replace = lower_eh_must_not_throw (state, stmt);
2056 break;
2057 case GIMPLE_EH_ELSE:
2058 /* This code is only valid with GIMPLE_TRY_FINALLY. */
2059 gcc_unreachable ();
2060 default:
2061 replace = lower_cleanup (state, stmt);
2062 break;
2063 }
2064 }
2065
2066 /* Remove the old stmt and insert the transformed sequence
2067 instead. */
2068 gsi_insert_seq_before (gsi, replace, GSI_SAME_STMT);
2069 gsi_remove (gsi, true);
2070
2071 /* Return since we don't want gsi_next (). */
2072 return;
2073
2074 case GIMPLE_EH_ELSE:
2075 /* We should be eliminating this in lower_try_finally et al. */
2076 gcc_unreachable ();
2077
2078 default:
2079 /* A type, a decl, or some kind of statement that we're not
2080 interested in. Don't walk them. */
2081 break;
2082 }
2083
2084 gsi_next (gsi);
2085 }
2086
2087 /* A helper to unwrap a gimple_seq and feed stmts to lower_eh_constructs_2. */
2088
2089 static void
2090 lower_eh_constructs_1 (struct leh_state *state, gimple_seq *pseq)
2091 {
2092 gimple_stmt_iterator gsi;
2093 for (gsi = gsi_start (*pseq); !gsi_end_p (gsi);)
2094 lower_eh_constructs_2 (state, &gsi);
2095 }
2096
2097 namespace {
2098
2099 const pass_data pass_data_lower_eh =
2100 {
2101 GIMPLE_PASS, /* type */
2102 "eh", /* name */
2103 OPTGROUP_NONE, /* optinfo_flags */
2104 TV_TREE_EH, /* tv_id */
2105 PROP_gimple_lcf, /* properties_required */
2106 PROP_gimple_leh, /* properties_provided */
2107 0, /* properties_destroyed */
2108 0, /* todo_flags_start */
2109 0, /* todo_flags_finish */
2110 };
2111
2112 class pass_lower_eh : public gimple_opt_pass
2113 {
2114 public:
2115 pass_lower_eh (gcc::context *ctxt)
2116 : gimple_opt_pass (pass_data_lower_eh, ctxt)
2117 {}
2118
2119 /* opt_pass methods: */
2120 virtual unsigned int execute (function *);
2121
2122 }; // class pass_lower_eh
2123
2124 unsigned int
2125 pass_lower_eh::execute (function *fun)
2126 {
2127 struct leh_state null_state;
2128 gimple_seq bodyp;
2129
2130 bodyp = gimple_body (current_function_decl);
2131 if (bodyp == NULL)
2132 return 0;
2133
2134 finally_tree = new hash_table<finally_tree_hasher> (31);
2135 eh_region_may_contain_throw_map = BITMAP_ALLOC (NULL);
2136 memset (&null_state, 0, sizeof (null_state));
2137
2138 collect_finally_tree_1 (bodyp, NULL);
2139 lower_eh_constructs_1 (&null_state, &bodyp);
2140 gimple_set_body (current_function_decl, bodyp);
2141
2142 /* We assume there's a return statement, or something, at the end of
2143 the function, and thus plopping the EH sequence afterward won't
2144 change anything. */
2145 gcc_assert (!gimple_seq_may_fallthru (bodyp));
2146 gimple_seq_add_seq (&bodyp, eh_seq);
2147
2148 /* We assume that since BODYP already existed, adding EH_SEQ to it
2149 didn't change its value, and we don't have to re-set the function. */
2150 gcc_assert (bodyp == gimple_body (current_function_decl));
2151
2152 delete finally_tree;
2153 finally_tree = NULL;
2154 BITMAP_FREE (eh_region_may_contain_throw_map);
2155 eh_seq = NULL;
2156
2157 /* If this function needs a language specific EH personality routine
2158 and the frontend didn't already set one, do so now. */
2159 if (function_needs_eh_personality (fun) == eh_personality_lang
2160 && !DECL_FUNCTION_PERSONALITY (current_function_decl))
2161 DECL_FUNCTION_PERSONALITY (current_function_decl)
2162 = lang_hooks.eh_personality ();
2163
2164 return 0;
2165 }
2166
2167 } // anon namespace
2168
2169 gimple_opt_pass *
2170 make_pass_lower_eh (gcc::context *ctxt)
2171 {
2172 return new pass_lower_eh (ctxt);
2173 }
2174 \f
2175 /* Create the multiple edges from an EH_DISPATCH statement to all of
2176 the possible handlers for its EH region. Return true if there is
2177 a fallthru edge; false if there is not. */
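
/* For example (an informal sketch): for

     try { ... } catch (A) { ... } catch (...) { ... }

   with handler labels L_A and L_B, the ERT_TRY dispatch gets an edge
   to L_A and an edge to L_B; since the catch-all handler leaves
   nothing to fall through to, we return false in that case. */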
2178
2179 bool
2180 make_eh_dispatch_edges (gimple stmt)
2181 {
2182 eh_region r;
2183 eh_catch c;
2184 basic_block src, dst;
2185
2186 r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
2187 src = gimple_bb (stmt);
2188
2189 switch (r->type)
2190 {
2191 case ERT_TRY:
2192 for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
2193 {
2194 dst = label_to_block (c->label);
2195 make_edge (src, dst, 0);
2196
2197 /* A catch-all handler doesn't have a fallthru. */
2198 if (c->type_list == NULL)
2199 return false;
2200 }
2201 break;
2202
2203 case ERT_ALLOWED_EXCEPTIONS:
2204 dst = label_to_block (r->u.allowed.label);
2205 make_edge (src, dst, 0);
2206 break;
2207
2208 default:
2209 gcc_unreachable ();
2210 }
2211
2212 return true;
2213 }
2214
2215 /* Create the single EH edge from STMT to its nearest landing pad,
2216 if there is such a landing pad within the current function. */
2217
2218 void
2219 make_eh_edges (gimple stmt)
2220 {
2221 basic_block src, dst;
2222 eh_landing_pad lp;
2223 int lp_nr;
2224
2225 lp_nr = lookup_stmt_eh_lp (stmt);
2226 if (lp_nr <= 0)
2227 return;
2228
2229 lp = get_eh_landing_pad_from_number (lp_nr);
2230 gcc_assert (lp != NULL);
2231
2232 src = gimple_bb (stmt);
2233 dst = label_to_block (lp->post_landing_pad);
2234 make_edge (src, dst, EDGE_EH);
2235 }
2236
2237 /* Do the work in redirecting EDGE_IN to NEW_BB within the EH region tree;
2238 do not actually perform the final edge redirection.
2239
2240 CHANGE_REGION is true when we're being called from cleanup_empty_eh and
2241 we intend to change the destination EH region as well; this means
2242 EH_LANDING_PAD_NR must already be set on the destination block label.
2243 If false, we're being called from generic cfg manipulation code and we
2244 should preserve our place within the region tree. */
2245
2246 static void
2247 redirect_eh_edge_1 (edge edge_in, basic_block new_bb, bool change_region)
2248 {
2249 eh_landing_pad old_lp, new_lp;
2250 basic_block old_bb;
2251 gimple throw_stmt;
2252 int old_lp_nr, new_lp_nr;
2253 tree old_label, new_label;
2254 edge_iterator ei;
2255 edge e;
2256
2257 old_bb = edge_in->dest;
2258 old_label = gimple_block_label (old_bb);
2259 old_lp_nr = EH_LANDING_PAD_NR (old_label);
2260 gcc_assert (old_lp_nr > 0);
2261 old_lp = get_eh_landing_pad_from_number (old_lp_nr);
2262
2263 throw_stmt = last_stmt (edge_in->src);
2264 gcc_assert (lookup_stmt_eh_lp (throw_stmt) == old_lp_nr);
2265
2266 new_label = gimple_block_label (new_bb);
2267
2268 /* Look for an existing region that might be using NEW_BB already. */
2269 new_lp_nr = EH_LANDING_PAD_NR (new_label);
2270 if (new_lp_nr)
2271 {
2272 new_lp = get_eh_landing_pad_from_number (new_lp_nr);
2273 gcc_assert (new_lp);
2274
2275 /* Unless CHANGE_REGION is true, the new and old landing pad
2276 had better be associated with the same EH region. */
2277 gcc_assert (change_region || new_lp->region == old_lp->region);
2278 }
2279 else
2280 {
2281 new_lp = NULL;
2282 gcc_assert (!change_region);
2283 }
2284
2285 /* Notice when we redirect the last EH edge away from OLD_BB. */
2286 FOR_EACH_EDGE (e, ei, old_bb->preds)
2287 if (e != edge_in && (e->flags & EDGE_EH))
2288 break;
2289
2290 if (new_lp)
2291 {
2292 /* NEW_LP already exists. If there are still edges into OLD_LP,
2293 there's nothing to do with the EH tree. If there are no more
2294 edges into OLD_LP, then we want to remove OLD_LP as it is unused.
2295 If CHANGE_REGION is true, then our caller is expecting to remove
2296 the landing pad. */
2297 if (e == NULL && !change_region)
2298 remove_eh_landing_pad (old_lp);
2299 }
2300 else
2301 {
2302 /* No correct landing pad exists. If there are no more edges
2303 into OLD_LP, then we can simply re-use the existing landing pad.
2304 Otherwise, we have to create a new landing pad. */
2305 if (e == NULL)
2306 {
2307 EH_LANDING_PAD_NR (old_lp->post_landing_pad) = 0;
2308 new_lp = old_lp;
2309 }
2310 else
2311 new_lp = gen_eh_landing_pad (old_lp->region);
2312 new_lp->post_landing_pad = new_label;
2313 EH_LANDING_PAD_NR (new_label) = new_lp->index;
2314 }
2315
2316 /* Maybe move the throwing statement to the new region. */
2317 if (old_lp != new_lp)
2318 {
2319 remove_stmt_from_eh_lp (throw_stmt);
2320 add_stmt_to_eh_lp (throw_stmt, new_lp->index);
2321 }
2322 }
2323
2324 /* Redirect EH edge EDGE_IN to NEW_BB. */
2325
2326 edge
2327 redirect_eh_edge (edge edge_in, basic_block new_bb)
2328 {
2329 redirect_eh_edge_1 (edge_in, new_bb, false);
2330 return ssa_redirect_edge (edge_in, new_bb);
2331 }
2332
2333 /* This is a subroutine of gimple_redirect_edge_and_branch. Update the
2334 labels for redirecting a non-fallthru EH_DISPATCH edge E to NEW_BB.
2335 The actual edge update will happen in the caller. */
2336
2337 void
2338 redirect_eh_dispatch_edge (gimple stmt, edge e, basic_block new_bb)
2339 {
2340 tree new_lab = gimple_block_label (new_bb);
2341 bool any_changed = false;
2342 basic_block old_bb;
2343 eh_region r;
2344 eh_catch c;
2345
2346 r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
2347 switch (r->type)
2348 {
2349 case ERT_TRY:
2350 for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
2351 {
2352 old_bb = label_to_block (c->label);
2353 if (old_bb == e->dest)
2354 {
2355 c->label = new_lab;
2356 any_changed = true;
2357 }
2358 }
2359 break;
2360
2361 case ERT_ALLOWED_EXCEPTIONS:
2362 old_bb = label_to_block (r->u.allowed.label);
2363 gcc_assert (old_bb == e->dest);
2364 r->u.allowed.label = new_lab;
2365 any_changed = true;
2366 break;
2367
2368 default:
2369 gcc_unreachable ();
2370 }
2371
2372 gcc_assert (any_changed);
2373 }
2374 \f
2375 /* Helper function for operation_could_trap_p and stmt_could_throw_p. */
2376
2377 bool
2378 operation_could_trap_helper_p (enum tree_code op,
2379 bool fp_operation,
2380 bool honor_trapv,
2381 bool honor_nans,
2382 bool honor_snans,
2383 tree divisor,
2384 bool *handled)
2385 {
2386 *handled = true;
2387 switch (op)
2388 {
2389 case TRUNC_DIV_EXPR:
2390 case CEIL_DIV_EXPR:
2391 case FLOOR_DIV_EXPR:
2392 case ROUND_DIV_EXPR:
2393 case EXACT_DIV_EXPR:
2394 case CEIL_MOD_EXPR:
2395 case FLOOR_MOD_EXPR:
2396 case ROUND_MOD_EXPR:
2397 case TRUNC_MOD_EXPR:
2398 case RDIV_EXPR:
2399 if (honor_snans || honor_trapv)
2400 return true;
2401 if (fp_operation)
2402 return flag_trapping_math;
2403 if (!TREE_CONSTANT (divisor) || integer_zerop (divisor))
2404 return true;
2405 return false;
2406
2407 case LT_EXPR:
2408 case LE_EXPR:
2409 case GT_EXPR:
2410 case GE_EXPR:
2411 case LTGT_EXPR:
2412 /* Some floating point comparisons may trap. */
2413 return honor_nans;
2414
2415 case EQ_EXPR:
2416 case NE_EXPR:
2417 case UNORDERED_EXPR:
2418 case ORDERED_EXPR:
2419 case UNLT_EXPR:
2420 case UNLE_EXPR:
2421 case UNGT_EXPR:
2422 case UNGE_EXPR:
2423 case UNEQ_EXPR:
2424 return honor_snans;
2425
2426 case CONVERT_EXPR:
2427 case FIX_TRUNC_EXPR:
2428 /* Conversion of floating point might trap. */
2429 return honor_nans;
2430
2431 case NEGATE_EXPR:
2432 case ABS_EXPR:
2433 case CONJ_EXPR:
2434 /* These operations don't trap with floating point. */
2435 if (honor_trapv)
2436 return true;
2437 return false;
2438
2439 case PLUS_EXPR:
2440 case MINUS_EXPR:
2441 case MULT_EXPR:
2442 /* Any floating arithmetic may trap. */
2443 if (fp_operation && flag_trapping_math)
2444 return true;
2445 if (honor_trapv)
2446 return true;
2447 return false;
2448
2449 case COMPLEX_EXPR:
2450 case CONSTRUCTOR:
2451 /* Constructing an object cannot trap. */
2452 return false;
2453
2454 default:
2455 /* Any floating arithmetic may trap. */
2456 if (fp_operation && flag_trapping_math)
2457 return true;
2458
2459 *handled = false;
2460 return false;
2461 }
2462 }
2463
2464 /* Return true if operation OP may trap. FP_OPERATION is true if OP is applied
2465 to floating-point values. HONOR_TRAPV is true if OP is applied to integer
2466 type operands that may trap. If OP is a division operator, DIVISOR contains
2467 the value of the divisor. */
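
/* A usage sketch against the helper above (D is a hypothetical tree):

     operation_could_trap_p (TRUNC_DIV_EXPR, false, false, d)

   is true unless D is a nonzero constant, while

     operation_could_trap_p (PLUS_EXPR, true, false, NULL_TREE)

   is true exactly when flag_trapping_math is set. */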
2468
2469 bool
2470 operation_could_trap_p (enum tree_code op, bool fp_operation, bool honor_trapv,
2471 tree divisor)
2472 {
2473 bool honor_nans = (fp_operation && flag_trapping_math
2474 && !flag_finite_math_only);
2475 bool honor_snans = fp_operation && flag_signaling_nans != 0;
2476 bool handled;
2477
2478 if (TREE_CODE_CLASS (op) != tcc_comparison
2479 && TREE_CODE_CLASS (op) != tcc_unary
2480 && TREE_CODE_CLASS (op) != tcc_binary)
2481 return false;
2482
2483 return operation_could_trap_helper_p (op, fp_operation, honor_trapv,
2484 honor_nans, honor_snans, divisor,
2485 &handled);
2486 }
2487
2488
2489 /* Returns true if it is possible to prove that the index of
2490 an array access REF (an ARRAY_REF expression) falls into the
2491 array bounds. */
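
/* For instance (informal): given int a[10], the access a[5] has a
   constant index within [0, 9] and is provably in bounds, whereas for
   a[i] with a non-constant index we cannot prove anything and return
   false. */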
2492
2493 static bool
2494 in_array_bounds_p (tree ref)
2495 {
2496 tree idx = TREE_OPERAND (ref, 1);
2497 tree min, max;
2498
2499 if (TREE_CODE (idx) != INTEGER_CST)
2500 return false;
2501
2502 min = array_ref_low_bound (ref);
2503 max = array_ref_up_bound (ref);
2504 if (!min
2505 || !max
2506 || TREE_CODE (min) != INTEGER_CST
2507 || TREE_CODE (max) != INTEGER_CST)
2508 return false;
2509
2510 if (tree_int_cst_lt (idx, min)
2511 || tree_int_cst_lt (max, idx))
2512 return false;
2513
2514 return true;
2515 }
2516
2517 /* Returns true if it is possible to prove that the range of
2518 an array access REF (an ARRAY_RANGE_REF expression) falls
2519 into the array bounds. */
2520
2521 static bool
2522 range_in_array_bounds_p (tree ref)
2523 {
2524 tree domain_type = TYPE_DOMAIN (TREE_TYPE (ref));
2525 tree range_min, range_max, min, max;
2526
2527 range_min = TYPE_MIN_VALUE (domain_type);
2528 range_max = TYPE_MAX_VALUE (domain_type);
2529 if (!range_min
2530 || !range_max
2531 || TREE_CODE (range_min) != INTEGER_CST
2532 || TREE_CODE (range_max) != INTEGER_CST)
2533 return false;
2534
2535 min = array_ref_low_bound (ref);
2536 max = array_ref_up_bound (ref);
2537 if (!min
2538 || !max
2539 || TREE_CODE (min) != INTEGER_CST
2540 || TREE_CODE (max) != INTEGER_CST)
2541 return false;
2542
2543 if (tree_int_cst_lt (range_min, min)
2544 || tree_int_cst_lt (max, range_max))
2545 return false;
2546
2547 return true;
2548 }
2549
2550 /* Return true if EXPR can trap, as in dereferencing an invalid pointer
2551 location or floating point arithmetic. Cf. the RTL version, may_trap_p.
2552 This routine expects only GIMPLE lhs or rhs input. */
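
/* A few illustrative answers (a sketch, not an exhaustive list):
   *p traps unless the dereference is marked TREE_THIS_NOTRAP,
   a[i] traps when the index cannot be proved in bounds, and an
   integer x / y traps when Y is not a nonzero constant. */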
2553
2554 bool
2555 tree_could_trap_p (tree expr)
2556 {
2557 enum tree_code code;
2558 bool fp_operation = false;
2559 bool honor_trapv = false;
2560 tree t, base, div = NULL_TREE;
2561
2562 if (!expr)
2563 return false;
2564
2565 code = TREE_CODE (expr);
2566 t = TREE_TYPE (expr);
2567
2568 if (t)
2569 {
2570 if (COMPARISON_CLASS_P (expr))
2571 fp_operation = FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 0)));
2572 else
2573 fp_operation = FLOAT_TYPE_P (t);
2574 honor_trapv = INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t);
2575 }
2576
2577 if (TREE_CODE_CLASS (code) == tcc_binary)
2578 div = TREE_OPERAND (expr, 1);
2579 if (operation_could_trap_p (code, fp_operation, honor_trapv, div))
2580 return true;
2581
2582 restart:
2583 switch (code)
2584 {
2585 case COMPONENT_REF:
2586 case REALPART_EXPR:
2587 case IMAGPART_EXPR:
2588 case BIT_FIELD_REF:
2589 case VIEW_CONVERT_EXPR:
2590 case WITH_SIZE_EXPR:
2591 expr = TREE_OPERAND (expr, 0);
2592 code = TREE_CODE (expr);
2593 goto restart;
2594
2595 case ARRAY_RANGE_REF:
2596 base = TREE_OPERAND (expr, 0);
2597 if (tree_could_trap_p (base))
2598 return true;
2599 if (TREE_THIS_NOTRAP (expr))
2600 return false;
2601 return !range_in_array_bounds_p (expr);
2602
2603 case ARRAY_REF:
2604 base = TREE_OPERAND (expr, 0);
2605 if (tree_could_trap_p (base))
2606 return true;
2607 if (TREE_THIS_NOTRAP (expr))
2608 return false;
2609 return !in_array_bounds_p (expr);
2610
2611 case TARGET_MEM_REF:
2612 case MEM_REF:
2613 if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR
2614 && tree_could_trap_p (TREE_OPERAND (TREE_OPERAND (expr, 0), 0)))
2615 return true;
2616 if (TREE_THIS_NOTRAP (expr))
2617 return false;
2618 /* We cannot prove that the access is in-bounds when we have
2619 variable-index TARGET_MEM_REFs. */
2620 if (code == TARGET_MEM_REF
2621 && (TMR_INDEX (expr) || TMR_INDEX2 (expr)))
2622 return true;
2623 if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR)
2624 {
2625 tree base = TREE_OPERAND (TREE_OPERAND (expr, 0), 0);
2626 offset_int off = mem_ref_offset (expr);
2627 if (wi::neg_p (off, SIGNED))
2628 return true;
2629 if (TREE_CODE (base) == STRING_CST)
2630 return wi::leu_p (TREE_STRING_LENGTH (base), off);
2631 else if (DECL_SIZE_UNIT (base) == NULL_TREE
2632 || TREE_CODE (DECL_SIZE_UNIT (base)) != INTEGER_CST
2633 || wi::leu_p (wi::to_offset (DECL_SIZE_UNIT (base)), off))
2634 return true;
2635 /* Now we are sure the first byte of the access is inside
2636 the object. */
2637 return false;
2638 }
2639 return true;
2640
2641 case INDIRECT_REF:
2642 return !TREE_THIS_NOTRAP (expr);
2643
2644 case ASM_EXPR:
2645 return TREE_THIS_VOLATILE (expr);
2646
2647 case CALL_EXPR:
2648 t = get_callee_fndecl (expr);
2649 /* Assume that calls to weak functions may trap. */
2650 if (!t || !DECL_P (t))
2651 return true;
2652 if (DECL_WEAK (t))
2653 return tree_could_trap_p (t);
2654 return false;
2655
2656 case FUNCTION_DECL:
2657 /* Assume that accesses to weak functions may trap, unless we know
2658 they are certainly defined in the current TU or in some other
2659 LTO partition. */
2660 if (DECL_WEAK (expr) && !DECL_COMDAT (expr) && DECL_EXTERNAL (expr))
2661 {
2662 cgraph_node *node = cgraph_node::get (expr);
2663 if (node)
2664 node = node->function_symbol ();
2665 return !(node && node->in_other_partition);
2666 }
2667 return false;
2668
2669 case VAR_DECL:
2670 /* Assume that accesses to weak vars may trap, unless we know
2671 they are certainly defined in the current TU or in some other
2672 LTO partition. */
2673 if (DECL_WEAK (expr) && !DECL_COMDAT (expr) && DECL_EXTERNAL (expr))
2674 {
2675 varpool_node *node = varpool_node::get (expr);
2676 if (node)
2677 node = node->ultimate_alias_target ();
2678 return !(node && node->in_other_partition);
2679 }
2680 return false;
2681
2682 default:
2683 return false;
2684 }
2685 }
2686
2687
2688 /* Helper for stmt_could_throw_p. Return true if STMT (assumed to be
2689 an assignment or a conditional) may throw. */
2690
2691 static bool
2692 stmt_could_throw_1_p (gimple stmt)
2693 {
2694 enum tree_code code = gimple_expr_code (stmt);
2695 bool honor_nans = false;
2696 bool honor_snans = false;
2697 bool fp_operation = false;
2698 bool honor_trapv = false;
2699 tree t;
2700 size_t i;
2701 bool handled, ret;
2702
2703 if (TREE_CODE_CLASS (code) == tcc_comparison
2704 || TREE_CODE_CLASS (code) == tcc_unary
2705 || TREE_CODE_CLASS (code) == tcc_binary)
2706 {
2707 if (is_gimple_assign (stmt)
2708 && TREE_CODE_CLASS (code) == tcc_comparison)
2709 t = TREE_TYPE (gimple_assign_rhs1 (stmt));
2710 else if (gimple_code (stmt) == GIMPLE_COND)
2711 t = TREE_TYPE (gimple_cond_lhs (stmt));
2712 else
2713 t = gimple_expr_type (stmt);
2714 fp_operation = FLOAT_TYPE_P (t);
2715 if (fp_operation)
2716 {
2717 honor_nans = flag_trapping_math && !flag_finite_math_only;
2718 honor_snans = flag_signaling_nans != 0;
2719 }
2720 else if (INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t))
2721 honor_trapv = true;
2722 }
2723
2724 /* Check if the main expression may trap. */
2725 t = is_gimple_assign (stmt) ? gimple_assign_rhs2 (stmt) : NULL;
2726 ret = operation_could_trap_helper_p (code, fp_operation, honor_trapv,
2727 honor_nans, honor_snans, t,
2728 &handled);
2729 if (handled)
2730 return ret;
2731
2732 /* If the expression does not trap, see if any of the individual operands may
2733 trap. */
2734 for (i = 0; i < gimple_num_ops (stmt); i++)
2735 if (tree_could_trap_p (gimple_op (stmt, i)))
2736 return true;
2737
2738 return false;
2739 }
2740
2741
2742 /* Return true if statement STMT could throw an exception. */
2743
2744 bool
2745 stmt_could_throw_p (gimple stmt)
2746 {
2747 if (!flag_exceptions)
2748 return false;
2749
2750 /* The only statements that can throw an exception are assignments,
2751 conditionals, calls, resx, and asms. */
2752 switch (gimple_code (stmt))
2753 {
2754 case GIMPLE_RESX:
2755 return true;
2756
2757 case GIMPLE_CALL:
2758 return !gimple_call_nothrow_p (stmt);
2759
2760 case GIMPLE_ASSIGN:
2761 case GIMPLE_COND:
2762 if (!cfun->can_throw_non_call_exceptions)
2763 return false;
2764 return stmt_could_throw_1_p (stmt);
2765
2766 case GIMPLE_ASM:
2767 if (!cfun->can_throw_non_call_exceptions)
2768 return false;
2769 return gimple_asm_volatile_p (stmt);
2770
2771 default:
2772 return false;
2773 }
2774 }
2775
2776
2777 /* Return true if expression T could throw an exception. */
2778
2779 bool
2780 tree_could_throw_p (tree t)
2781 {
2782 if (!flag_exceptions)
2783 return false;
2784 if (TREE_CODE (t) == MODIFY_EXPR)
2785 {
2786 if (cfun->can_throw_non_call_exceptions
2787 && tree_could_trap_p (TREE_OPERAND (t, 0)))
2788 return true;
2789 t = TREE_OPERAND (t, 1);
2790 }
2791
2792 if (TREE_CODE (t) == WITH_SIZE_EXPR)
2793 t = TREE_OPERAND (t, 0);
2794 if (TREE_CODE (t) == CALL_EXPR)
2795 return (call_expr_flags (t) & ECF_NOTHROW) == 0;
2796 if (cfun->can_throw_non_call_exceptions)
2797 return tree_could_trap_p (t);
2798 return false;
2799 }
2800
2801 /* Return true if STMT can throw an exception that is not caught within
2802 the current function (CFUN). */
2803
2804 bool
2805 stmt_can_throw_external (gimple stmt)
2806 {
2807 int lp_nr;
2808
2809 if (!stmt_could_throw_p (stmt))
2810 return false;
2811
2812 lp_nr = lookup_stmt_eh_lp (stmt);
2813 return lp_nr == 0;
2814 }
2815
2816 /* Return true if STMT can throw an exception that is caught within
2817 the current function (CFUN). */
2818
2819 bool
2820 stmt_can_throw_internal (gimple stmt)
2821 {
2822 int lp_nr;
2823
2824 if (!stmt_could_throw_p (stmt))
2825 return false;
2826
2827 lp_nr = lookup_stmt_eh_lp (stmt);
2828 return lp_nr > 0;
2829 }
2830
2831 /* Given a statement STMT in IFUN, if STMT can no longer throw, then
2832 remove any entry it might have from the EH table. Return true if
2833 any change was made. */
2834
2835 bool
2836 maybe_clean_eh_stmt_fn (struct function *ifun, gimple stmt)
2837 {
2838 if (stmt_could_throw_p (stmt))
2839 return false;
2840 return remove_stmt_from_eh_lp_fn (ifun, stmt);
2841 }
2842
2843 /* Likewise, but always use the current function. */
2844
2845 bool
2846 maybe_clean_eh_stmt (gimple stmt)
2847 {
2848 return maybe_clean_eh_stmt_fn (cfun, stmt);
2849 }
2850
2851 /* Given a statement OLD_STMT and a new statement NEW_STMT that has replaced
2852 OLD_STMT in the function, remove OLD_STMT from the EH table and put NEW_STMT
2853 in the table if it should be in there. Return TRUE if a replacement was
2854 done that may require an EH edge purge. */
2855
2856 bool
2857 maybe_clean_or_replace_eh_stmt (gimple old_stmt, gimple new_stmt)
2858 {
2859 int lp_nr = lookup_stmt_eh_lp (old_stmt);
2860
2861 if (lp_nr != 0)
2862 {
2863 bool new_stmt_could_throw = stmt_could_throw_p (new_stmt);
2864
2865 if (new_stmt == old_stmt && new_stmt_could_throw)
2866 return false;
2867
2868 remove_stmt_from_eh_lp (old_stmt);
2869 if (new_stmt_could_throw)
2870 {
2871 add_stmt_to_eh_lp (new_stmt, lp_nr);
2872 return false;
2873 }
2874 else
2875 return true;
2876 }
2877
2878 return false;
2879 }
2880
2881 /* Given a statement OLD_STMT in OLD_FUN and a duplicate statement NEW_STMT
2882 in NEW_FUN, copy the EH table data from OLD_STMT to NEW_STMT. The MAP
2883 operand is the return value of duplicate_eh_regions. */
2884
2885 bool
2886 maybe_duplicate_eh_stmt_fn (struct function *new_fun, gimple new_stmt,
2887 struct function *old_fun, gimple old_stmt,
2888 hash_map<void *, void *> *map,
2889 int default_lp_nr)
2890 {
2891 int old_lp_nr, new_lp_nr;
2892
2893 if (!stmt_could_throw_p (new_stmt))
2894 return false;
2895
2896 old_lp_nr = lookup_stmt_eh_lp_fn (old_fun, old_stmt);
2897 if (old_lp_nr == 0)
2898 {
2899 if (default_lp_nr == 0)
2900 return false;
2901 new_lp_nr = default_lp_nr;
2902 }
2903 else if (old_lp_nr > 0)
2904 {
2905 eh_landing_pad old_lp, new_lp;
2906
2907 old_lp = (*old_fun->eh->lp_array)[old_lp_nr];
2908 new_lp = static_cast<eh_landing_pad> (*map->get (old_lp));
2909 new_lp_nr = new_lp->index;
2910 }
2911 else
2912 {
2913 eh_region old_r, new_r;
2914
2915 old_r = (*old_fun->eh->region_array)[-old_lp_nr];
2916 new_r = static_cast<eh_region> (*map->get (old_r));
2917 new_lp_nr = -new_r->index;
2918 }
2919
2920 add_stmt_to_eh_lp_fn (new_fun, new_stmt, new_lp_nr);
2921 return true;
2922 }
2923
2924 /* Similar, but both OLD_STMT and NEW_STMT are within the current function,
2925 and thus no remapping is required. */
2926
2927 bool
2928 maybe_duplicate_eh_stmt (gimple new_stmt, gimple old_stmt)
2929 {
2930 int lp_nr;
2931
2932 if (!stmt_could_throw_p (new_stmt))
2933 return false;
2934
2935 lp_nr = lookup_stmt_eh_lp (old_stmt);
2936 if (lp_nr == 0)
2937 return false;
2938
2939 add_stmt_to_eh_lp (new_stmt, lp_nr);
2940 return true;
2941 }
2942 \f
2943 /* Returns TRUE if ONEH and TWOH are exception handlers (gimple_try_cleanup of
2944 a GIMPLE_TRY) that are similar enough to be considered the same. Currently
2945 this only handles handlers consisting of a single call, as that's the
2946 important case for C++: a destructor call for a particular object showing
2947 up in multiple handlers. */
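
/* E.g. (a schematic C++ case): two handlers that each consist of the
   single call

     A::~A (&a);

   with equal arguments, no LHS and no static chain compare as the
   same, which is what enables optimize_double_finally below. */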
2948
2949 static bool
2950 same_handler_p (gimple_seq oneh, gimple_seq twoh)
2951 {
2952 gimple_stmt_iterator gsi;
2953 gimple ones, twos;
2954 unsigned int ai;
2955
2956 gsi = gsi_start (oneh);
2957 if (!gsi_one_before_end_p (gsi))
2958 return false;
2959 ones = gsi_stmt (gsi);
2960
2961 gsi = gsi_start (twoh);
2962 if (!gsi_one_before_end_p (gsi))
2963 return false;
2964 twos = gsi_stmt (gsi);
2965
2966 if (!is_gimple_call (ones)
2967 || !is_gimple_call (twos)
2968 || gimple_call_lhs (ones)
2969 || gimple_call_lhs (twos)
2970 || gimple_call_chain (ones)
2971 || gimple_call_chain (twos)
2972 || !gimple_call_same_target_p (ones, twos)
2973 || gimple_call_num_args (ones) != gimple_call_num_args (twos))
2974 return false;
2975
2976 for (ai = 0; ai < gimple_call_num_args (ones); ++ai)
2977 if (!operand_equal_p (gimple_call_arg (ones, ai),
2978 gimple_call_arg (twos, ai), 0))
2979 return false;
2980
2981 return true;
2982 }
2983
2984 /* Optimize
2985 try { A() } finally { try { ~B() } catch { ~A() } }
2986 try { ... } finally { ~A() }
2987 into
2988 try { A() } catch { ~B() }
2989 try { ~B() ... } finally { ~A() }
2990
2991 This occurs frequently in C++, where A is a local variable and B is a
2992 temporary used in the initializer for A. */
2993
2994 static void
2995 optimize_double_finally (gimple one, gimple two)
2996 {
2997 gimple oneh;
2998 gimple_stmt_iterator gsi;
2999 gimple_seq cleanup;
3000
3001 cleanup = gimple_try_cleanup (one);
3002 gsi = gsi_start (cleanup);
3003 if (!gsi_one_before_end_p (gsi))
3004 return;
3005
3006 oneh = gsi_stmt (gsi);
3007 if (gimple_code (oneh) != GIMPLE_TRY
3008 || gimple_try_kind (oneh) != GIMPLE_TRY_CATCH)
3009 return;
3010
3011 if (same_handler_p (gimple_try_cleanup (oneh), gimple_try_cleanup (two)))
3012 {
3013 gimple_seq seq = gimple_try_eval (oneh);
3014
3015 gimple_try_set_cleanup (one, seq);
3016 gimple_try_set_kind (one, GIMPLE_TRY_CATCH);
3017 seq = copy_gimple_seq_and_replace_locals (seq);
3018 gimple_seq_add_seq (&seq, gimple_try_eval (two));
3019 gimple_try_set_eval (two, seq);
3020 }
3021 }
3022
3023 /* Perform EH refactoring optimizations that are simpler to do when code
3024 flow has been lowered but EH structures haven't. */
3025
3026 static void
3027 refactor_eh_r (gimple_seq seq)
3028 {
3029 gimple_stmt_iterator gsi;
3030 gimple one, two;
3031
3032 one = NULL;
3033 two = NULL;
3034 gsi = gsi_start (seq);
3035 while (1)
3036 {
3037 one = two;
3038 if (gsi_end_p (gsi))
3039 two = NULL;
3040 else
3041 two = gsi_stmt (gsi);
3042 if (one
3043 && two
3044 && gimple_code (one) == GIMPLE_TRY
3045 && gimple_code (two) == GIMPLE_TRY
3046 && gimple_try_kind (one) == GIMPLE_TRY_FINALLY
3047 && gimple_try_kind (two) == GIMPLE_TRY_FINALLY)
3048 optimize_double_finally (one, two);
3049 if (one)
3050 switch (gimple_code (one))
3051 {
3052 case GIMPLE_TRY:
3053 refactor_eh_r (gimple_try_eval (one));
3054 refactor_eh_r (gimple_try_cleanup (one));
3055 break;
3056 case GIMPLE_CATCH:
3057 refactor_eh_r (gimple_catch_handler (one));
3058 break;
3059 case GIMPLE_EH_FILTER:
3060 refactor_eh_r (gimple_eh_filter_failure (one));
3061 break;
3062 case GIMPLE_EH_ELSE:
3063 refactor_eh_r (gimple_eh_else_n_body (one));
3064 refactor_eh_r (gimple_eh_else_e_body (one));
3065 break;
3066 default:
3067 break;
3068 }
3069 if (two)
3070 gsi_next (&gsi);
3071 else
3072 break;
3073 }
3074 }
3075
3076 namespace {
3077
3078 const pass_data pass_data_refactor_eh =
3079 {
3080 GIMPLE_PASS, /* type */
3081 "ehopt", /* name */
3082 OPTGROUP_NONE, /* optinfo_flags */
3083 TV_TREE_EH, /* tv_id */
3084 PROP_gimple_lcf, /* properties_required */
3085 0, /* properties_provided */
3086 0, /* properties_destroyed */
3087 0, /* todo_flags_start */
3088 0, /* todo_flags_finish */
3089 };
3090
3091 class pass_refactor_eh : public gimple_opt_pass
3092 {
3093 public:
3094 pass_refactor_eh (gcc::context *ctxt)
3095 : gimple_opt_pass (pass_data_refactor_eh, ctxt)
3096 {}
3097
3098 /* opt_pass methods: */
3099 virtual bool gate (function *) { return flag_exceptions != 0; }
3100 virtual unsigned int execute (function *)
3101 {
3102 refactor_eh_r (gimple_body (current_function_decl));
3103 return 0;
3104 }
3105
3106 }; // class pass_refactor_eh
3107
3108 } // anon namespace
3109
3110 gimple_opt_pass *
3111 make_pass_refactor_eh (gcc::context *ctxt)
3112 {
3113 return new pass_refactor_eh (ctxt);
3114 }
3115 \f
3116 /* At the end of gimple optimization, we can lower RESX. */
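
/* For the case of an escaping exception below, the lowering is
   roughly (PTR_TMP being an illustrative temporary name):

     resx 1

   becomes

     ptr_tmp = __builtin_eh_pointer (1);
     _Unwind_Resume (ptr_tmp);  */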
3117
3118 static bool
3119 lower_resx (basic_block bb, gimple stmt, hash_map<eh_region, tree> *mnt_map)
3120 {
3121 int lp_nr;
3122 eh_region src_r, dst_r;
3123 gimple_stmt_iterator gsi;
3124 gimple x;
3125 tree fn, src_nr;
3126 bool ret = false;
3127
3128 lp_nr = lookup_stmt_eh_lp (stmt);
3129 if (lp_nr != 0)
3130 dst_r = get_eh_region_from_lp_number (lp_nr);
3131 else
3132 dst_r = NULL;
3133
3134 src_r = get_eh_region_from_number (gimple_resx_region (stmt));
3135 gsi = gsi_last_bb (bb);
3136
3137 if (src_r == NULL)
3138 {
3139 /* We can wind up with no source region when pass_cleanup_eh shows
3140 that there are no entries into an eh region and deletes it, but
3141 then the block that contains the resx isn't removed. This can
3142 happen without optimization when the switch statement created by
3143 lower_try_finally_switch isn't simplified to remove the eh case.
3144
3145 Resolve this by expanding the resx node to a trap. */
3146
3147 fn = builtin_decl_implicit (BUILT_IN_TRAP);
3148 x = gimple_build_call (fn, 0);
3149 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3150
3151 while (EDGE_COUNT (bb->succs) > 0)
3152 remove_edge (EDGE_SUCC (bb, 0));
3153 }
3154 else if (dst_r)
3155 {
3156 /* When we have a destination region, we resolve this by copying
3157 the excptr and filter values into place, and changing the edge
3158 to immediately after the landing pad. */
3159 edge e;
3160
3161 if (lp_nr < 0)
3162 {
3163 basic_block new_bb;
3164 tree lab;
3165
3166 /* We are resuming into a MUST_NOT_THROW region. Expand a call to
3167 the failure decl into a new block, if needed. */
3168 gcc_assert (dst_r->type == ERT_MUST_NOT_THROW);
3169
3170 tree *slot = mnt_map->get (dst_r);
3171 if (slot == NULL)
3172 {
3173 gimple_stmt_iterator gsi2;
3174
3175 new_bb = create_empty_bb (bb);
3176 add_bb_to_loop (new_bb, bb->loop_father);
3177 lab = gimple_block_label (new_bb);
3178 gsi2 = gsi_start_bb (new_bb);
3179
3180 fn = dst_r->u.must_not_throw.failure_decl;
3181 x = gimple_build_call (fn, 0);
3182 gimple_set_location (x, dst_r->u.must_not_throw.failure_loc);
3183 gsi_insert_after (&gsi2, x, GSI_CONTINUE_LINKING);
3184
3185 mnt_map->put (dst_r, lab);
3186 }
3187 else
3188 {
3189 lab = *slot;
3190 new_bb = label_to_block (lab);
3191 }
3192
3193 gcc_assert (EDGE_COUNT (bb->succs) == 0);
3194 e = make_edge (bb, new_bb, EDGE_FALLTHRU);
3195 e->count = bb->count;
3196 e->probability = REG_BR_PROB_BASE;
3197 }
3198 else
3199 {
3200 edge_iterator ei;
3201 tree dst_nr = build_int_cst (integer_type_node, dst_r->index);
3202
3203 fn = builtin_decl_implicit (BUILT_IN_EH_COPY_VALUES);
3204 src_nr = build_int_cst (integer_type_node, src_r->index);
3205 x = gimple_build_call (fn, 2, dst_nr, src_nr);
3206 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3207
3208 /* Update the flags for the outgoing edge. */
3209 e = single_succ_edge (bb);
3210 gcc_assert (e->flags & EDGE_EH);
3211 e->flags = (e->flags & ~EDGE_EH) | EDGE_FALLTHRU;
3212
3213 /* If there are no more EH users of the landing pad, delete it. */
3214 FOR_EACH_EDGE (e, ei, e->dest->preds)
3215 if (e->flags & EDGE_EH)
3216 break;
3217 if (e == NULL)
3218 {
3219 eh_landing_pad lp = get_eh_landing_pad_from_number (lp_nr);
3220 remove_eh_landing_pad (lp);
3221 }
3222 }
3223
3224 ret = true;
3225 }
3226 else
3227 {
3228 tree var;
3229
3230 /* When we don't have a destination region, this exception escapes
3231 up the call chain. We resolve this by generating a call to the
3232 _Unwind_Resume library function. */
3233
3234 /* The ARM EABI redefines _Unwind_Resume as __cxa_end_cleanup
3235 with no arguments for C++ and Java. Check for that. */
3236 if (src_r->use_cxa_end_cleanup)
3237 {
3238 fn = builtin_decl_implicit (BUILT_IN_CXA_END_CLEANUP);
3239 x = gimple_build_call (fn, 0);
3240 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3241 }
3242 else
3243 {
3244 fn = builtin_decl_implicit (BUILT_IN_EH_POINTER);
3245 src_nr = build_int_cst (integer_type_node, src_r->index);
3246 x = gimple_build_call (fn, 1, src_nr);
3247 var = create_tmp_var (ptr_type_node, NULL);
3248 var = make_ssa_name (var, x);
3249 gimple_call_set_lhs (x, var);
3250 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3251
3252 fn = builtin_decl_implicit (BUILT_IN_UNWIND_RESUME);
3253 x = gimple_build_call (fn, 1, var);
3254 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3255 }
3256
3257 gcc_assert (EDGE_COUNT (bb->succs) == 0);
3258 }
3259
3260 gsi_remove (&gsi, true);
3261
3262 return ret;
3263 }
3264
3265 namespace {
3266
3267 const pass_data pass_data_lower_resx =
3268 {
3269 GIMPLE_PASS, /* type */
3270 "resx", /* name */
3271 OPTGROUP_NONE, /* optinfo_flags */
3272 TV_TREE_EH, /* tv_id */
3273 PROP_gimple_lcf, /* properties_required */
3274 0, /* properties_provided */
3275 0, /* properties_destroyed */
3276 0, /* todo_flags_start */
3277 0, /* todo_flags_finish */
3278 };
3279
3280 class pass_lower_resx : public gimple_opt_pass
3281 {
3282 public:
3283 pass_lower_resx (gcc::context *ctxt)
3284 : gimple_opt_pass (pass_data_lower_resx, ctxt)
3285 {}
3286
3287 /* opt_pass methods: */
3288 virtual bool gate (function *) { return flag_exceptions != 0; }
3289 virtual unsigned int execute (function *);
3290
3291 }; // class pass_lower_resx
3292
3293 unsigned
3294 pass_lower_resx::execute (function *fun)
3295 {
3296 basic_block bb;
3297 bool dominance_invalidated = false;
3298 bool any_rewritten = false;
3299
3300 hash_map<eh_region, tree> mnt_map;
3301
3302 FOR_EACH_BB_FN (bb, fun)
3303 {
3304 gimple last = last_stmt (bb);
3305 if (last && is_gimple_resx (last))
3306 {
3307 dominance_invalidated |= lower_resx (bb, last, &mnt_map);
3308 any_rewritten = true;
3309 }
3310 }
3311
3312 if (dominance_invalidated)
3313 {
3314 free_dominance_info (CDI_DOMINATORS);
3315 free_dominance_info (CDI_POST_DOMINATORS);
3316 }
3317
3318 return any_rewritten ? TODO_update_ssa_only_virtuals : 0;
3319 }
3320
3321 } // anon namespace
3322
3323 gimple_opt_pass *
3324 make_pass_lower_resx (gcc::context *ctxt)
3325 {
3326 return new pass_lower_resx (ctxt);
3327 }
3328
3329 /* Try to optimize var = {v} {CLOBBER} stmts followed just by
3330 external throw. */
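
/* E.g. (schematic GIMPLE): in a block such as

     <lpad>:
     x ={v} {CLOBBER};
     resx 1

   where the resx throws externally, the clobber keeps nothing alive
   within this function, so such stores are simply deleted. */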
3331
3332 static void
3333 optimize_clobbers (basic_block bb)
3334 {
3335 gimple_stmt_iterator gsi = gsi_last_bb (bb);
3336 bool any_clobbers = false;
3337 bool seen_stack_restore = false;
3338 edge_iterator ei;
3339 edge e;
3340
3341 /* Only optimize anything if the bb contains at least one clobber,
3342 ends with resx (checked by caller), optionally contains some
3343 debug stmts or labels, at most one __builtin_stack_restore
3344 call, and has an incoming EH edge. */
3345 for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3346 {
3347 gimple stmt = gsi_stmt (gsi);
3348 if (is_gimple_debug (stmt))
3349 continue;
3350 if (gimple_clobber_p (stmt))
3351 {
3352 any_clobbers = true;
3353 continue;
3354 }
3355 if (!seen_stack_restore
3356 && gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
3357 {
3358 seen_stack_restore = true;
3359 continue;
3360 }
3361 if (gimple_code (stmt) == GIMPLE_LABEL)
3362 break;
3363 return;
3364 }
3365 if (!any_clobbers)
3366 return;
3367 FOR_EACH_EDGE (e, ei, bb->preds)
3368 if (e->flags & EDGE_EH)
3369 break;
3370 if (e == NULL)
3371 return;
3372 gsi = gsi_last_bb (bb);
3373 for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3374 {
3375 gimple stmt = gsi_stmt (gsi);
3376 if (!gimple_clobber_p (stmt))
3377 continue;
3378 unlink_stmt_vdef (stmt);
3379 gsi_remove (&gsi, true);
3380 release_defs (stmt);
3381 }
3382 }
3383
3384 /* Try to sink var = {v} {CLOBBER} stmts followed just by
3385 internal throw to successor BB. */
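
/* Schematically (a sketch): when a block of the form

     x ={v} {CLOBBER};
     resx 1

   throws to a landing pad within this function, the clobbers are
   moved across the EH edge to the start of the successor block,
   leaving the throwing block empty apart from the resx. */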
3386
3387 static int
3388 sink_clobbers (basic_block bb)
3389 {
3390 edge e;
3391 edge_iterator ei;
3392 gimple_stmt_iterator gsi, dgsi;
3393 basic_block succbb;
3394 bool any_clobbers = false;
3395 unsigned todo = 0;
3396
3397 /* Only optimize if BB has a single EH successor and
3398 all predecessor edges are EH too. */
3399 if (!single_succ_p (bb)
3400 || (single_succ_edge (bb)->flags & EDGE_EH) == 0)
3401 return 0;
3402
3403 FOR_EACH_EDGE (e, ei, bb->preds)
3404 {
3405 if ((e->flags & EDGE_EH) == 0)
3406 return 0;
3407 }
3408
3409 /* And BB contains only CLOBBER stmts before the final
3410 RESX. */
3411 gsi = gsi_last_bb (bb);
3412 for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3413 {
3414 gimple stmt = gsi_stmt (gsi);
3415 if (is_gimple_debug (stmt))
3416 continue;
3417 if (gimple_code (stmt) == GIMPLE_LABEL)
3418 break;
3419 if (!gimple_clobber_p (stmt))
3420 return 0;
3421 any_clobbers = true;
3422 }
3423 if (!any_clobbers)
3424 return 0;
3425
3426 edge succe = single_succ_edge (bb);
3427 succbb = succe->dest;
3428
3429 /* See if there is a virtual PHI node to take an updated virtual
3430 operand from. */
3431 gimple vphi = NULL;
3432 tree vuse = NULL_TREE;
3433 for (gsi = gsi_start_phis (succbb); !gsi_end_p (gsi); gsi_next (&gsi))
3434 {
3435 tree res = gimple_phi_result (gsi_stmt (gsi));
3436 if (virtual_operand_p (res))
3437 {
3438 vphi = gsi_stmt (gsi);
3439 vuse = res;
3440 break;
3441 }
3442 }
3443
3444 dgsi = gsi_after_labels (succbb);
3445 gsi = gsi_last_bb (bb);
3446 for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3447 {
3448 gimple stmt = gsi_stmt (gsi);
3449 tree lhs;
3450 if (is_gimple_debug (stmt))
3451 continue;
3452 if (gimple_code (stmt) == GIMPLE_LABEL)
3453 break;
3454 lhs = gimple_assign_lhs (stmt);
3455 /* Unfortunately we don't have dominance info updated at this
3456 point, so checking if
3457 dominated_by_p (CDI_DOMINATORS, succbb,
3458 gimple_bb (SSA_NAME_DEF_STMT (TREE_OPERAND (lhs, 0))))
3459 would be too costly. Thus, avoid sinking any clobbers that
3460 refer to non-(D) SSA_NAMEs. */
3461 if (TREE_CODE (lhs) == MEM_REF
3462 && TREE_CODE (TREE_OPERAND (lhs, 0)) == SSA_NAME
3463 && !SSA_NAME_IS_DEFAULT_DEF (TREE_OPERAND (lhs, 0)))
3464 {
3465 unlink_stmt_vdef (stmt);
3466 gsi_remove (&gsi, true);
3467 release_defs (stmt);
3468 continue;
3469 }
3470
3471 /* As we do not change stmt order when sinking across a
3472 forwarder edge we can keep virtual operands in place. */
3473 gsi_remove (&gsi, false);
3474 gsi_insert_before (&dgsi, stmt, GSI_NEW_STMT);
3475
3476 /* But adjust virtual operands if we sunk across a PHI node. */
3477 if (vuse)
3478 {
3479 gimple use_stmt;
3480 imm_use_iterator iter;
3481 use_operand_p use_p;
3482 FOR_EACH_IMM_USE_STMT (use_stmt, iter, vuse)
3483 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
3484 SET_USE (use_p, gimple_vdef (stmt));
3485 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (vuse))
3486 {
3487 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_vdef (stmt)) = 1;
3488 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (vuse) = 0;
3489 }
3490 /* Adjust the incoming virtual operand. */
3491 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (vphi, succe), gimple_vuse (stmt));
3492 SET_USE (gimple_vuse_op (stmt), vuse);
3493 }
3494 /* If there is no virtual PHI node and the successor has multiple
3495 predecessors, arrange for virtual operands to be renamed. */
3496 else if (gimple_vuse_op (stmt) != NULL_USE_OPERAND_P
3497 && !single_pred_p (succbb))
3498 {
3499 /* In this case there will be no use of the VDEF of this stmt.
3500 ??? Unless this is a secondary opportunity and we have not
3501 removed unreachable blocks yet, so we cannot assert this.
3502 Which also means we will end up renaming too many times. */
3503 SET_USE (gimple_vuse_op (stmt), gimple_vop (cfun));
3504 mark_virtual_operands_for_renaming (cfun);
3505 todo |= TODO_update_ssa_only_virtuals;
3506 }
3507 }
3508
3509 return todo;
3510 }
3511
3512 /* At the end of inlining, we can lower EH_DISPATCH. Return true when
3513 we have found some duplicate labels and removed some edges. */
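
/* An informal sketch of the ERT_TRY case below (FILTER_TMP and the
   filter values are illustrative):

     eh_dispatch 1

   for a region with catch labels L_A (filter 1) and L_B (filter 2)
   becomes roughly

     filter_tmp = __builtin_eh_filter (1);
     switch (filter_tmp) <case 1: L_A, case 2: L_B, default: fallthru>  */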
3514
3515 static bool
3516 lower_eh_dispatch (basic_block src, gimple stmt)
3517 {
3518 gimple_stmt_iterator gsi;
3519 int region_nr;
3520 eh_region r;
3521 tree filter, fn;
3522 gimple x;
3523 bool redirected = false;
3524
3525 region_nr = gimple_eh_dispatch_region (stmt);
3526 r = get_eh_region_from_number (region_nr);
3527
3528 gsi = gsi_last_bb (src);
3529
3530 switch (r->type)
3531 {
3532 case ERT_TRY:
3533 {
3534 auto_vec<tree> labels;
3535 tree default_label = NULL;
3536 eh_catch c;
3537 edge_iterator ei;
3538 edge e;
3539 hash_set<tree> seen_values;
3540
3541 /* Collect the labels for a switch. Zero each handler's label field
3542 because we'll no longer have anything keeping these labels
3543 in existence and the optimizer will be free to merge these
3544 blocks at will. */
3545 for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
3546 {
3547 tree tp_node, flt_node, lab = c->label;
3548 bool have_label = false;
3549
3550 c->label = NULL;
3551 tp_node = c->type_list;
3552 flt_node = c->filter_list;
3553
3554 if (tp_node == NULL)
3555 {
3556 default_label = lab;
3557 break;
3558 }
3559 do
3560 {
3561 /* Filter out duplicate labels that arise when this handler
3562 is shadowed by an earlier one. When no labels are
3563 attached to the handler anymore, we remove
3564 the corresponding edge and then we delete unreachable
3565 blocks at the end of this pass. */
3566 if (! seen_values.contains (TREE_VALUE (flt_node)))
3567 {
3568 tree t = build_case_label (TREE_VALUE (flt_node),
3569 NULL, lab);
3570 labels.safe_push (t);
3571 seen_values.add (TREE_VALUE (flt_node));
3572 have_label = true;
3573 }
3574
3575 tp_node = TREE_CHAIN (tp_node);
3576 flt_node = TREE_CHAIN (flt_node);
3577 }
3578 while (tp_node);
3579 if (! have_label)
3580 {
3581 remove_edge (find_edge (src, label_to_block (lab)));
3582 redirected = true;
3583 }
3584 }
3585
3586 /* Clean up the edge flags. */
3587 FOR_EACH_EDGE (e, ei, src->succs)
3588 {
3589 if (e->flags & EDGE_FALLTHRU)
3590 {
3591 /* If there was no catch-all, use the fallthru edge. */
3592 if (default_label == NULL)
3593 default_label = gimple_block_label (e->dest);
3594 e->flags &= ~EDGE_FALLTHRU;
3595 }
3596 }
3597 gcc_assert (default_label != NULL);
3598
3599 /* Don't generate a switch if there's only a default case.
3600 This is common in the form of try { A; } catch (...) { B; }. */
3601 if (!labels.exists ())
3602 {
3603 e = single_succ_edge (src);
3604 e->flags |= EDGE_FALLTHRU;
3605 }
3606 else
3607 {
3608 fn = builtin_decl_implicit (BUILT_IN_EH_FILTER);
3609 x = gimple_build_call (fn, 1, build_int_cst (integer_type_node,
3610 region_nr));
3611 filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)), NULL);
3612 filter = make_ssa_name (filter, x);
3613 gimple_call_set_lhs (x, filter);
3614 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3615
3616 /* Turn the default label into a default case. */
3617 default_label = build_case_label (NULL, NULL, default_label);
3618 sort_case_labels (labels);
3619
3620 x = gimple_build_switch (filter, default_label, labels);
3621 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3622 }
3623 }
3624 break;
3625
3626 case ERT_ALLOWED_EXCEPTIONS:
3627 {
3628 edge b_e = BRANCH_EDGE (src);
3629 edge f_e = FALLTHRU_EDGE (src);
3630
3631 fn = builtin_decl_implicit (BUILT_IN_EH_FILTER);
3632 x = gimple_build_call (fn, 1, build_int_cst (integer_type_node,
3633 region_nr));
3634 filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)), NULL);
3635 filter = make_ssa_name (filter, x);
3636 gimple_call_set_lhs (x, filter);
3637 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3638
3639 r->u.allowed.label = NULL;
3640 x = gimple_build_cond (EQ_EXPR, filter,
3641 build_int_cst (TREE_TYPE (filter),
3642 r->u.allowed.filter),
3643 NULL_TREE, NULL_TREE);
3644 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3645
3646 b_e->flags = b_e->flags | EDGE_TRUE_VALUE;
3647 f_e->flags = (f_e->flags & ~EDGE_FALLTHRU) | EDGE_FALSE_VALUE;
3648 }
3649 break;
3650
3651 default:
3652 gcc_unreachable ();
3653 }
3654
3655 /* Replace the EH_DISPATCH with the SWITCH or COND generated above. */
3656 gsi_remove (&gsi, true);
3657 return redirected;
3658 }
3659
3660 namespace {
3661
3662 const pass_data pass_data_lower_eh_dispatch =
3663 {
3664 GIMPLE_PASS, /* type */
3665 "ehdisp", /* name */
3666 OPTGROUP_NONE, /* optinfo_flags */
3667 TV_TREE_EH, /* tv_id */
3668 PROP_gimple_lcf, /* properties_required */
3669 0, /* properties_provided */
3670 0, /* properties_destroyed */
3671 0, /* todo_flags_start */
3672 0, /* todo_flags_finish */
3673 };
3674
3675 class pass_lower_eh_dispatch : public gimple_opt_pass
3676 {
3677 public:
3678 pass_lower_eh_dispatch (gcc::context *ctxt)
3679 : gimple_opt_pass (pass_data_lower_eh_dispatch, ctxt)
3680 {}
3681
3682 /* opt_pass methods: */
3683 virtual bool gate (function *fun) { return fun->eh->region_tree != NULL; }
3684 virtual unsigned int execute (function *);
3685
3686 }; // class pass_lower_eh_dispatch
3687
3688 unsigned
3689 pass_lower_eh_dispatch::execute (function *fun)
3690 {
3691 basic_block bb;
3692 int flags = 0;
3693 bool redirected = false;
3694
3695 assign_filter_values ();
3696
3697 FOR_EACH_BB_FN (bb, fun)
3698 {
3699 gimple last = last_stmt (bb);
3700 if (last == NULL)
3701 continue;
3702 if (gimple_code (last) == GIMPLE_EH_DISPATCH)
3703 {
3704 redirected |= lower_eh_dispatch (bb, last);
3705 flags |= TODO_update_ssa_only_virtuals;
3706 }
3707 else if (gimple_code (last) == GIMPLE_RESX)
3708 {
3709 if (stmt_can_throw_external (last))
3710 optimize_clobbers (bb);
3711 else
3712 flags |= sink_clobbers (bb);
3713 }
3714 }
3715
3716 if (redirected)
3717 delete_unreachable_blocks ();
3718 return flags;
3719 }
3720
3721 } // anon namespace
3722
3723 gimple_opt_pass *
3724 make_pass_lower_eh_dispatch (gcc::context *ctxt)
3725 {
3726 return new pass_lower_eh_dispatch (ctxt);
3727 }
3728 \f
3729 /* Walk statements, see what regions and, optionally, landing pads
3730 are really referenced.
3731
3732 Returns in R_REACHABLEP an sbitmap with bits set for reachable regions,
3733 and in LP_REACHABLEP an sbitmap with bits set for reachable landing pads.
3734
3735 Passing NULL for LP_REACHABLEP is valid; in this case only reachable
3736 regions are marked.
3737
3738 The caller is responsible for freeing the returned sbitmaps. */
3739
3740 static void
3741 mark_reachable_handlers (sbitmap *r_reachablep, sbitmap *lp_reachablep)
3742 {
3743 sbitmap r_reachable, lp_reachable;
3744 basic_block bb;
3745 bool mark_landing_pads = (lp_reachablep != NULL);
3746 gcc_checking_assert (r_reachablep != NULL);
3747
3748 r_reachable = sbitmap_alloc (cfun->eh->region_array->length ());
3749 bitmap_clear (r_reachable);
3750 *r_reachablep = r_reachable;
3751
3752 if (mark_landing_pads)
3753 {
3754 lp_reachable = sbitmap_alloc (cfun->eh->lp_array->length ());
3755 bitmap_clear (lp_reachable);
3756 *lp_reachablep = lp_reachable;
3757 }
3758 else
3759 lp_reachable = NULL;
3760
3761 FOR_EACH_BB_FN (bb, cfun)
3762 {
3763 gimple_stmt_iterator gsi;
3764
3765 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3766 {
3767 gimple stmt = gsi_stmt (gsi);
3768
3769 if (mark_landing_pads)
3770 {
3771 int lp_nr = lookup_stmt_eh_lp (stmt);
3772
3773 /* Negative LP numbers are MUST_NOT_THROW regions which
3774 are not considered BB enders. */
3775 if (lp_nr < 0)
3776 bitmap_set_bit (r_reachable, -lp_nr);
3777
3778 /* Positive LP numbers are real landing pads, and BB enders. */
3779 else if (lp_nr > 0)
3780 {
3781 gcc_assert (gsi_one_before_end_p (gsi));
3782 eh_region region = get_eh_region_from_lp_number (lp_nr);
3783 bitmap_set_bit (r_reachable, region->index);
3784 bitmap_set_bit (lp_reachable, lp_nr);
3785 }
3786 }
3787
3788 /* Avoid removing regions referenced from RESX/EH_DISPATCH. */
3789 switch (gimple_code (stmt))
3790 {
3791 case GIMPLE_RESX:
3792 bitmap_set_bit (r_reachable, gimple_resx_region (stmt));
3793 break;
3794 case GIMPLE_EH_DISPATCH:
3795 bitmap_set_bit (r_reachable, gimple_eh_dispatch_region (stmt));
3796 break;
3797 default:
3798 break;
3799 }
3800 }
3801 }
3802 }
3803
3804 /* Remove unreachable handlers and unreachable landing pads. */
3805
3806 static void
3807 remove_unreachable_handlers (void)
3808 {
3809 sbitmap r_reachable, lp_reachable;
3810 eh_region region;
3811 eh_landing_pad lp;
3812 unsigned i;
3813
3814 mark_reachable_handlers (&r_reachable, &lp_reachable);
3815
3816 if (dump_file)
3817 {
3818 fprintf (dump_file, "Before removal of unreachable regions:\n");
3819 dump_eh_tree (dump_file, cfun);
3820 fprintf (dump_file, "Reachable regions: ");
3821 dump_bitmap_file (dump_file, r_reachable);
3822 fprintf (dump_file, "Reachable landing pads: ");
3823 dump_bitmap_file (dump_file, lp_reachable);
3824 }
3825
3826 if (dump_file)
3827 {
3828 FOR_EACH_VEC_SAFE_ELT (cfun->eh->region_array, i, region)
3829 if (region && !bitmap_bit_p (r_reachable, region->index))
3830 fprintf (dump_file,
3831 "Removing unreachable region %d\n",
3832 region->index);
3833 }
3834
3835 remove_unreachable_eh_regions (r_reachable);
3836
3837 FOR_EACH_VEC_SAFE_ELT (cfun->eh->lp_array, i, lp)
3838 if (lp && !bitmap_bit_p (lp_reachable, lp->index))
3839 {
3840 if (dump_file)
3841 fprintf (dump_file,
3842 "Removing unreachable landing pad %d\n",
3843 lp->index);
3844 remove_eh_landing_pad (lp);
3845 }
3846
3847 if (dump_file)
3848 {
3849 fprintf (dump_file, "\n\nAfter removal of unreachable regions:\n");
3850 dump_eh_tree (dump_file, cfun);
3851 fprintf (dump_file, "\n\n");
3852 }
3853
3854 sbitmap_free (r_reachable);
3855 sbitmap_free (lp_reachable);
3856
3857 #ifdef ENABLE_CHECKING
3858 verify_eh_tree (cfun);
3859 #endif
3860 }
3861
3862 /* Remove unreachable handlers if any landing pads have been removed after
3863 the last ehcleanup pass (due to gimple_purge_dead_eh_edges). */
3864
3865 void
3866 maybe_remove_unreachable_handlers (void)
3867 {
3868 eh_landing_pad lp;
3869 unsigned i;
3870
3871 if (cfun->eh == NULL)
3872 return;
3873
3874 FOR_EACH_VEC_SAFE_ELT (cfun->eh->lp_array, i, lp)
3875 if (lp && lp->post_landing_pad)
3876 {
3877 if (label_to_block (lp->post_landing_pad) == NULL)
3878 {
3879 remove_unreachable_handlers ();
3880 return;
3881 }
3882 }
3883 }
3884
3885 /* Remove regions that do not have landing pads. This assumes
3886 that remove_unreachable_handlers has already been run, and
3887 that we've just manipulated the landing pads since then.
3888
3889 Preserve regions with landing pads and regions that prevent
3890 exceptions from propagating further, even if these regions
3891 are not reachable. */
3892
3893 static void
3894 remove_unreachable_handlers_no_lp (void)
3895 {
3896 eh_region region;
3897 sbitmap r_reachable;
3898 unsigned i;
3899
3900 mark_reachable_handlers (&r_reachable, /*lp_reachablep=*/NULL);
3901
3902 FOR_EACH_VEC_SAFE_ELT (cfun->eh->region_array, i, region)
3903 {
3904 if (! region)
3905 continue;
3906
3907 if (region->landing_pads != NULL
3908 || region->type == ERT_MUST_NOT_THROW)
3909 bitmap_set_bit (r_reachable, region->index);
3910
3911 if (dump_file
3912 && !bitmap_bit_p (r_reachable, region->index))
3913 fprintf (dump_file,
3914 "Removing unreachable region %d\n",
3915 region->index);
3916 }
3917
3918 remove_unreachable_eh_regions (r_reachable);
3919
3920 sbitmap_free (r_reachable);
3921 }
3922
3923 /* Undo critical edge splitting on an EH landing pad. Earlier, we
3924 optimistically split all sorts of edges, including EH edges. The
3925 optimization passes in between may not have needed them; if not,
3926 we should undo the split.
3927
3928 Recognize this case by having one EH edge incoming to the BB and
3929 one normal edge outgoing; BB should be empty apart from the
3930 post_landing_pad label.
3931
3932 Note that this is slightly different from the empty handler case
3933 handled by cleanup_empty_eh, in that the handler itself may still
3934 contain real code, while the landing pad has been separated from the
3935 handler. As such, cleanup_empty_eh relies on this transformation
3936 having been done first. */
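/* Pictorially (an illustration, not taken from a dump), unsplit_eh
   transforms

     <throwing stmt>
          | EH
     <post_landing_pad>:   (otherwise empty)
          | fallthru
     <handler code>

   into

     <throwing stmt>
          | EH
     <handler code>

   after which the now-unreachable pad block can be deleted.  */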
3937
3938 static bool
3939 unsplit_eh (eh_landing_pad lp)
3940 {
3941 basic_block bb = label_to_block (lp->post_landing_pad);
3942 gimple_stmt_iterator gsi;
3943 edge e_in, e_out;
3944
3945 /* Quickly check the edge counts on BB for singularity. */
3946 if (!single_pred_p (bb) || !single_succ_p (bb))
3947 return false;
3948 e_in = single_pred_edge (bb);
3949 e_out = single_succ_edge (bb);
3950
3951 /* Input edge must be EH and output edge must be normal. */
3952 if ((e_in->flags & EDGE_EH) == 0 || (e_out->flags & EDGE_EH) != 0)
3953 return false;
3954
3955 /* The block must be empty except for the labels and debug insns. */
3956 gsi = gsi_after_labels (bb);
3957 if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
3958 gsi_next_nondebug (&gsi);
3959 if (!gsi_end_p (gsi))
3960 return false;
3961
3962 /* The destination block must not already have a landing pad
3963 for a different region. */
3964 for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
3965 {
3966 gimple stmt = gsi_stmt (gsi);
3967 tree lab;
3968 int lp_nr;
3969
3970 if (gimple_code (stmt) != GIMPLE_LABEL)
3971 break;
3972 lab = gimple_label_label (stmt);
3973 lp_nr = EH_LANDING_PAD_NR (lab);
3974 if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region)
3975 return false;
3976 }
3977
3978 /* The new destination block must not already be a destination of
3979 the source block, lest we merge fallthru and EH edges and get
3980 all sorts of confusion. */
3981 if (find_edge (e_in->src, e_out->dest))
3982 return false;
3983
3984 /* ??? We can get degenerate phis due to cfg cleanups. I would have
3985 thought this should have been cleaned up by a phicprop pass, but
3986 that doesn't appear to handle virtuals. Propagate by hand. */
3987 if (!gimple_seq_empty_p (phi_nodes (bb)))
3988 {
3989 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); )
3990 {
3991 gimple use_stmt, phi = gsi_stmt (gsi);
3992 tree lhs = gimple_phi_result (phi);
3993 tree rhs = gimple_phi_arg_def (phi, 0);
3994 use_operand_p use_p;
3995 imm_use_iterator iter;
3996
3997 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
3998 {
3999 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
4000 SET_USE (use_p, rhs);
4001 }
4002
4003 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
4004 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1;
4005
4006 remove_phi_node (&gsi, true);
4007 }
4008 }
4009
4010 if (dump_file && (dump_flags & TDF_DETAILS))
4011 fprintf (dump_file, "Unsplit EH landing pad %d to block %i.\n",
4012 lp->index, e_out->dest->index);
4013
4014 /* Redirect the edge. Since redirect_eh_edge_1 expects to be moving
4015 a successor edge, humor it. But do the real CFG change with the
4016 predecessor of E_OUT in order to preserve the ordering of arguments
4017 to the PHI nodes in E_OUT->DEST. */
4018 redirect_eh_edge_1 (e_in, e_out->dest, false);
4019 redirect_edge_pred (e_out, e_in->src);
4020 e_out->flags = e_in->flags;
4021 e_out->probability = e_in->probability;
4022 e_out->count = e_in->count;
4023 remove_edge (e_in);
4024
4025 return true;
4026 }
4027
4028 /* Examine each landing pad block and see if it matches unsplit_eh. */
4029
4030 static bool
4031 unsplit_all_eh (void)
4032 {
4033 bool changed = false;
4034 eh_landing_pad lp;
4035 int i;
4036
4037 for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
4038 if (lp)
4039 changed |= unsplit_eh (lp);
4040
4041 return changed;
4042 }
4043
4044 /* A subroutine of cleanup_empty_eh. Redirect all EH edges incoming
4045 to OLD_BB to NEW_BB; return true on success, false on failure.
4046
4047 OLD_BB_OUT is the edge into NEW_BB from OLD_BB, so if we miss any
4048 PHI variables from OLD_BB we can pick them up from OLD_BB_OUT.
4049 Virtual PHIs may be deleted and marked for renaming; CHANGE_REGION is simply forwarded to redirect_eh_edge_1. */
4050
4051 static bool
4052 cleanup_empty_eh_merge_phis (basic_block new_bb, basic_block old_bb,
4053 edge old_bb_out, bool change_region)
4054 {
4055 gimple_stmt_iterator ngsi, ogsi;
4056 edge_iterator ei;
4057 edge e;
4058 bitmap ophi_handled;
4059
4060 /* The destination block must not be a regular successor for any
4061 of the preds of the landing pad. Thus, avoid turning
4062 <..>
4063 | \ EH
4064 | <..>
4065 | /
4066 <..>
4067 into
4068 <..>
4069 | | EH
4070 <..>
4071 which CFG verification would choke on. See PR45172 and PR51089. */
4072 FOR_EACH_EDGE (e, ei, old_bb->preds)
4073 if (find_edge (e->src, new_bb))
4074 return false;
4075
4076 FOR_EACH_EDGE (e, ei, old_bb->preds)
4077 redirect_edge_var_map_clear (e);
4078
4079 ophi_handled = BITMAP_ALLOC (NULL);
4080
4081 /* First, iterate through the PHIs on NEW_BB and set up the edge_var_map
4082 for the edges we're going to move. */
4083 for (ngsi = gsi_start_phis (new_bb); !gsi_end_p (ngsi); gsi_next (&ngsi))
4084 {
4085 gimple ophi, nphi = gsi_stmt (ngsi);
4086 tree nresult, nop;
4087
4088 nresult = gimple_phi_result (nphi);
4089 nop = gimple_phi_arg_def (nphi, old_bb_out->dest_idx);
4090
4091 /* Find the corresponding PHI in OLD_BB so we can forward-propagate
4092 the source ssa_name. */
4093 ophi = NULL;
4094 for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi))
4095 {
4096 ophi = gsi_stmt (ogsi);
4097 if (gimple_phi_result (ophi) == nop)
4098 break;
4099 ophi = NULL;
4100 }
4101
4102 /* If we did find the corresponding PHI, copy those inputs. */
4103 if (ophi)
4104 {
4105 /* If NOP is used anywhere other than in PHIs of NEW_BB, give up. */
4106 if (!has_single_use (nop))
4107 {
4108 imm_use_iterator imm_iter;
4109 use_operand_p use_p;
4110
4111 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, nop)
4112 {
4113 if (!gimple_debug_bind_p (USE_STMT (use_p))
4114 && (gimple_code (USE_STMT (use_p)) != GIMPLE_PHI
4115 || gimple_bb (USE_STMT (use_p)) != new_bb))
4116 goto fail;
4117 }
4118 }
4119 bitmap_set_bit (ophi_handled, SSA_NAME_VERSION (nop));
4120 FOR_EACH_EDGE (e, ei, old_bb->preds)
4121 {
4122 location_t oloc;
4123 tree oop;
4124
4125 if ((e->flags & EDGE_EH) == 0)
4126 continue;
4127 oop = gimple_phi_arg_def (ophi, e->dest_idx);
4128 oloc = gimple_phi_arg_location (ophi, e->dest_idx);
4129 redirect_edge_var_map_add (e, nresult, oop, oloc);
4130 }
4131 }
4132 /* If we didn't find the PHI, then whether it's a real variable or a VOP,
4133 we know from the fact that OLD_BB is tree_empty_eh_handler_p that the
4134 variable is unchanged from input to the block, and we can simply
4135 re-use the input to NEW_BB from the OLD_BB_OUT edge. */
4136 else
4137 {
4138 location_t nloc
4139 = gimple_phi_arg_location (nphi, old_bb_out->dest_idx);
4140 FOR_EACH_EDGE (e, ei, old_bb->preds)
4141 redirect_edge_var_map_add (e, nresult, nop, nloc);
4142 }
4143 }
4144
4145 /* Second, verify that all PHIs from OLD_BB have been handled. If not,
4146 we don't know what values from the other edges into NEW_BB to use. */
4147 for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi))
4148 {
4149 gimple ophi = gsi_stmt (ogsi);
4150 tree oresult = gimple_phi_result (ophi);
4151 if (!bitmap_bit_p (ophi_handled, SSA_NAME_VERSION (oresult)))
4152 goto fail;
4153 }
4154
4155 /* Finally, move the edges and update the PHIs. */
4156 for (ei = ei_start (old_bb->preds); (e = ei_safe_edge (ei)); )
4157 if (e->flags & EDGE_EH)
4158 {
4159 /* ??? CFG manipulation routines do not try to update loop
4160 form on edge redirection. Do so manually here for now. */
4161 /* If we redirect a loop entry or latch edge, that will either create
4162 a multiple-entry loop or rotate the loop. If the loops merge,
4163 we may have created a loop with multiple latches.
4164 None of this is easily fixed, thus cancel the affected loop
4165 and mark the other loop as possibly having multiple latches. */
4166 if (e->dest == e->dest->loop_father->header)
4167 {
4168 mark_loop_for_removal (e->dest->loop_father);
4169 new_bb->loop_father->latch = NULL;
4170 loops_state_set (LOOPS_MAY_HAVE_MULTIPLE_LATCHES);
4171 }
4172 redirect_eh_edge_1 (e, new_bb, change_region);
4173 redirect_edge_succ (e, new_bb);
4174 flush_pending_stmts (e);
4175 }
4176 else
4177 ei_next (&ei);
4178
4179 BITMAP_FREE (ophi_handled);
4180 return true;
4181
4182 fail:
4183 FOR_EACH_EDGE (e, ei, old_bb->preds)
4184 redirect_edge_var_map_clear (e);
4185 BITMAP_FREE (ophi_handled);
4186 return false;
4187 }
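/* To make the PHI juggling above concrete (a hypothetical example):
   if OLD_BB contains

     x_1 = PHI <x_2(E1), x_3(E2)>

   and NEW_BB contains

     y_4 = PHI <..., x_1(OLD_BB_OUT)>

   then, after E1 and E2 have been moved onto NEW_BB, y_4 receives
   x_2 and x_3 directly on those edges, just as if the empty handler
   block had never been interposed.  */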
4188
4189 /* A subroutine of cleanup_empty_eh. Move a landing pad LP from its
4190 old region to NEW_REGION at BB. */
4191
4192 static void
4193 cleanup_empty_eh_move_lp (basic_block bb, edge e_out,
4194 eh_landing_pad lp, eh_region new_region)
4195 {
4196 gimple_stmt_iterator gsi;
4197 eh_landing_pad *pp;
4198
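/* Unlink LP by walking its region's singly-linked list of landing
   pads to find the pointer that refers to it.  */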
4199 for (pp = &lp->region->landing_pads; *pp != lp; pp = &(*pp)->next_lp)
4200 continue;
4201 *pp = lp->next_lp;
4202
4203 lp->region = new_region;
4204 lp->next_lp = new_region->landing_pads;
4205 new_region->landing_pads = lp;
4206
4207 /* Delete the RESX that was matched within the empty handler block. */
4208 gsi = gsi_last_bb (bb);
4209 unlink_stmt_vdef (gsi_stmt (gsi));
4210 gsi_remove (&gsi, true);
4211
4212 /* Clean up E_OUT for the fallthru. */
4213 e_out->flags = (e_out->flags & ~EDGE_EH) | EDGE_FALLTHRU;
4214 e_out->probability = REG_BR_PROB_BASE;
4215 }
4216
4217 /* A subroutine of cleanup_empty_eh. Handle more complex cases of
4218 unsplitting than unsplit_eh was prepared to handle, e.g. when
4219 multiple incoming edges and phis are involved. */
4220
4221 static bool
4222 cleanup_empty_eh_unsplit (basic_block bb, edge e_out, eh_landing_pad lp)
4223 {
4224 gimple_stmt_iterator gsi;
4225 tree lab;
4226
4227 /* We really ought not to have totally lost everything following
4228 a landing pad label. Given that BB is empty, there had better
4229 be a successor. */
4230 gcc_assert (e_out != NULL);
4231
4232 /* The destination block must not already have a landing pad
4233 for a different region. */
4234 lab = NULL;
4235 for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
4236 {
4237 gimple stmt = gsi_stmt (gsi);
4238 int lp_nr;
4239
4240 if (gimple_code (stmt) != GIMPLE_LABEL)
4241 break;
4242 lab = gimple_label_label (stmt);
4243 lp_nr = EH_LANDING_PAD_NR (lab);
4244 if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region)
4245 return false;
4246 }
4247
4248 /* Attempt to move the PHIs into the successor block. */
4249 if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, false))
4250 {
4251 if (dump_file && (dump_flags & TDF_DETAILS))
4252 fprintf (dump_file,
4253 "Unsplit EH landing pad %d to block %i "
4254 "(via cleanup_empty_eh).\n",
4255 lp->index, e_out->dest->index);
4256 return true;
4257 }
4258
4259 return false;
4260 }
4261
4262 /* Return true if edge E_FIRST is part of an empty infinite loop
4263 or leads to such a loop through a series of single successor
4264 empty bbs. */
4265
4266 static bool
4267 infinite_empty_loop_p (edge e_first)
4268 {
4269 bool inf_loop = false;
4270 edge e;
4271
4272 if (e_first->dest == e_first->src)
4273 return true;
4274
4275 e_first->src->aux = (void *) 1;
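/* Follow the chain of empty single-successor blocks, marking each
   visited block through its aux field; reaching an already-marked
   block means we have found a cycle.  */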
4276 for (e = e_first; single_succ_p (e->dest); e = single_succ_edge (e->dest))
4277 {
4278 gimple_stmt_iterator gsi;
4279 if (e->dest->aux)
4280 {
4281 inf_loop = true;
4282 break;
4283 }
4284 e->dest->aux = (void *) 1;
4285 gsi = gsi_after_labels (e->dest);
4286 if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
4287 gsi_next_nondebug (&gsi);
4288 if (!gsi_end_p (gsi))
4289 break;
4290 }
4291 e_first->src->aux = NULL;
4292 for (e = e_first; e->dest->aux; e = single_succ_edge (e->dest))
4293 e->dest->aux = NULL;
4294
4295 return inf_loop;
4296 }
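/* By way of illustration (block numbers invented), the degenerate
   shapes detected above are a self-loop,

     <bb 3>:
       goto <bb 3>;

   or a cycle of empty forwarder blocks,

     <bb 3>:
       goto <bb 4>;

     <bb 4>:
       goto <bb 3>;

   neither of which we want to redirect EH edges into.  */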
4297
4298 /* Examine the block associated with LP to determine if it's an empty
4299 handler for its EH region. If so, attempt to redirect EH edges to
4300 an outer region. Return true if the CFG was updated in any way. This
4301 is similar to jump forwarding, just across EH edges. */
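/* As a sketch of that analogy, consider a pad block of the rough
   GIMPLE shape

     <landing pad label>:
       resx;

   It does no useful work. If the RESX is itself inside the region of
   an outer landing pad, the incoming EH edges can be retargeted
   there; if the exception would instead leave the function or hit a
   MUST_NOT_THROW region, the throwing statements are unregistered or
   re-registered accordingly and the pad dies as unreachable.  */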
4302
4303 static bool
4304 cleanup_empty_eh (eh_landing_pad lp)
4305 {
4306 basic_block bb = label_to_block (lp->post_landing_pad);
4307 gimple_stmt_iterator gsi;
4308 gimple resx;
4309 eh_region new_region;
4310 edge_iterator ei;
4311 edge e, e_out;
4312 bool has_non_eh_pred;
4313 bool ret = false;
4314 int new_lp_nr;
4315
4316 /* There can be zero or one edges out of BB. This is the quickest test. */
4317 switch (EDGE_COUNT (bb->succs))
4318 {
4319 case 0:
4320 e_out = NULL;
4321 break;
4322 case 1:
4323 e_out = single_succ_edge (bb);
4324 break;
4325 default:
4326 return false;
4327 }
4328
4329 resx = last_stmt (bb);
4330 if (resx && is_gimple_resx (resx))
4331 {
4332 if (stmt_can_throw_external (resx))
4333 optimize_clobbers (bb);
4334 else if (sink_clobbers (bb))
4335 ret = true;
4336 }
4337
4338 gsi = gsi_after_labels (bb);
4339
4340 /* Make sure to skip debug statements. */
4341 if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
4342 gsi_next_nondebug (&gsi);
4343
4344 /* If the block is totally empty, look for more unsplitting cases. */
4345 if (gsi_end_p (gsi))
4346 {
4347 /* For the degenerate case of an infinite loop, bail out.
4348 If BB has no successors and is totally empty, which can happen e.g.
4349 because of an incorrect noreturn attribute, bail out too. */
4350 if (e_out == NULL
4351 || infinite_empty_loop_p (e_out))
4352 return ret;
4353
4354 return ret | cleanup_empty_eh_unsplit (bb, e_out, lp);
4355 }
4356
4357 /* The block should consist only of a single RESX statement, modulo a
4358 preceding call to __builtin_stack_restore if there is no outgoing
4359 edge, since the call can be eliminated in this case. */
4360 resx = gsi_stmt (gsi);
4361 if (!e_out && gimple_call_builtin_p (resx, BUILT_IN_STACK_RESTORE))
4362 {
4363 gsi_next (&gsi);
4364 resx = gsi_stmt (gsi);
4365 }
4366 if (!is_gimple_resx (resx))
4367 return ret;
4368 gcc_assert (gsi_one_before_end_p (gsi));
4369
4370 /* Determine if there are non-EH edges, or resx edges into the handler. */
4371 has_non_eh_pred = false;
4372 FOR_EACH_EDGE (e, ei, bb->preds)
4373 if (!(e->flags & EDGE_EH))
4374 has_non_eh_pred = true;
4375
4376 /* Find the handler enclosing the empty handler by looking at
4377 where the RESX instruction was vectored. */
4378 new_lp_nr = lookup_stmt_eh_lp (resx);
4379 new_region = get_eh_region_from_lp_number (new_lp_nr);
4380
4381 /* If there's no destination region within the current function,
4382 redirection is trivial via removing the throwing statements from
4383 the EH region, removing the EH edges, and allowing the block
4384 to go unreachable. */
4385 if (new_region == NULL)
4386 {
4387 gcc_assert (e_out == NULL);
4388 for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
4389 if (e->flags & EDGE_EH)
4390 {
4391 gimple stmt = last_stmt (e->src);
4392 remove_stmt_from_eh_lp (stmt);
4393 remove_edge (e);
4394 }
4395 else
4396 ei_next (&ei);
4397 goto succeed;
4398 }
4399
4400 /* If the destination region is a MUST_NOT_THROW, allow the runtime
4401 to handle the abort and allow the blocks to go unreachable. */
4402 if (new_region->type == ERT_MUST_NOT_THROW)
4403 {
4404 for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
4405 if (e->flags & EDGE_EH)
4406 {
4407 gimple stmt = last_stmt (e->src);
4408 remove_stmt_from_eh_lp (stmt);
4409 add_stmt_to_eh_lp (stmt, new_lp_nr);
4410 remove_edge (e);
4411 }
4412 else
4413 ei_next (&ei);
4414 goto succeed;
4415 }
4416
4417 /* Try to redirect the EH edges and merge the PHIs into the destination
4418 landing pad block. If the merge succeeds, we'll already have redirected
4419 all the EH edges. The handler itself will go unreachable if there were
4420 no normal edges. */
4421 if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, true))
4422 goto succeed;
4423
4424 /* Finally, if all input edges are EH edges, then we can (potentially)
4425 reduce the number of transfers from the runtime by moving the landing
4426 pad from the original region to the new region. This is a win when
4427 we remove the last CLEANUP region along a particular exception
4428 propagation path. Since nothing changes except for the region with
4429 which the landing pad is associated, the PHI nodes do not need to be
4430 adjusted at all. */
4431 if (!has_non_eh_pred)
4432 {
4433 cleanup_empty_eh_move_lp (bb, e_out, lp, new_region);
4434 if (dump_file && (dump_flags & TDF_DETAILS))
4435 fprintf (dump_file, "Empty EH handler %i moved to EH region %i.\n",
4436 lp->index, new_region->index);
4437
4438 /* ??? The CFG didn't change, but we may have rendered the
4439 old EH region unreachable. Trigger a cleanup there. */
4440 return true;
4441 }
4442
4443 return ret;
4444
4445 succeed:
4446 if (dump_file && (dump_flags & TDF_DETAILS))
4447 fprintf (dump_file, "Empty EH handler %i removed.\n", lp->index);
4448 remove_eh_landing_pad (lp);
4449 return true;
4450 }
4451
4452 /* Do a post-order traversal of the EH region tree. Examine each
4453 post_landing_pad block and see if we can eliminate it as empty. */
4454
4455 static bool
4456 cleanup_all_empty_eh (void)
4457 {
4458 bool changed = false;
4459 eh_landing_pad lp;
4460 int i;
4461
4462 for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
4463 if (lp)
4464 changed |= cleanup_empty_eh (lp);
4465
4466 return changed;
4467 }
4468
4469 /* Perform cleanups and lowering of exception handling:
4470 1) cleanup regions with handlers doing nothing are optimized out (sketched below)
4471 2) MUST_NOT_THROW regions that became dead because of 1) are optimized out
4472 3) info about regions containing instructions, and regions reachable
4473 via local EH edges, is collected
4474 4) the EH tree is pruned for regions no longer necessary.
4475
4476 TODO: Push MUST_NOT_THROW regions to the root of the EH tree.
4477 Unify those that have the same failure decl and locus.
4478 */
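/* A rough sketch of case 1) using a hypothetical C++ fragment:

     struct S { ~S () { } };
     void g ();
     void f () { S s; g (); }

   The cleanup region guarding the call to g () exists only to run
   S's destructor. Once the empty destructor body has been inlined,
   the handler does nothing useful, so the region can be deleted; if
   nothing remains, the whole EH tree vanishes and
   pass_cleanup_eh::execute below can also drop the now-unneeded
   personality routine.  */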
4479
4480 static unsigned int
4481 execute_cleanup_eh_1 (void)
4482 {
4483 /* Do this first: unsplit_all_eh and cleanup_all_empty_eh can die
4484 looking up unreachable landing pads. */
4485 remove_unreachable_handlers ();
4486
4487 /* Watch out for the region tree vanishing because all of its regions were unreachable. */
4488 if (cfun->eh->region_tree)
4489 {
4490 bool changed = false;
4491
4492 if (optimize)
4493 changed |= unsplit_all_eh ();
4494 changed |= cleanup_all_empty_eh ();
4495
4496 if (changed)
4497 {
4498 free_dominance_info (CDI_DOMINATORS);
4499 free_dominance_info (CDI_POST_DOMINATORS);
4500
4501 /* We delayed all basic block deletion, as we may have performed
4502 cleanups on EH edges while non-EH edges were still present. */
4503 delete_unreachable_blocks ();
4504
4505 /* We manipulated the landing pads. Remove any region that no
4506 longer has a landing pad. */
4507 remove_unreachable_handlers_no_lp ();
4508
4509 return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
4510 }
4511 }
4512
4513 return 0;
4514 }
4515
4516 namespace {
4517
4518 const pass_data pass_data_cleanup_eh =
4519 {
4520 GIMPLE_PASS, /* type */
4521 "ehcleanup", /* name */
4522 OPTGROUP_NONE, /* optinfo_flags */
4523 TV_TREE_EH, /* tv_id */
4524 PROP_gimple_lcf, /* properties_required */
4525 0, /* properties_provided */
4526 0, /* properties_destroyed */
4527 0, /* todo_flags_start */
4528 0, /* todo_flags_finish */
4529 };
4530
4531 class pass_cleanup_eh : public gimple_opt_pass
4532 {
4533 public:
4534 pass_cleanup_eh (gcc::context *ctxt)
4535 : gimple_opt_pass (pass_data_cleanup_eh, ctxt)
4536 {}
4537
4538 /* opt_pass methods: */
4539 opt_pass * clone () { return new pass_cleanup_eh (m_ctxt); }
4540 virtual bool gate (function *fun)
4541 {
4542 return fun->eh != NULL && fun->eh->region_tree != NULL;
4543 }
4544
4545 virtual unsigned int execute (function *);
4546
4547 }; // class pass_cleanup_eh
4548
4549 unsigned int
4550 pass_cleanup_eh::execute (function *fun)
4551 {
4552 int ret = execute_cleanup_eh_1 ();
4553
4554 /* If the function no longer needs an EH personality routine,
4555 clear it. This exposes cross-language inlining opportunities
4556 and avoids references to a never defined personality routine. */
4557 if (DECL_FUNCTION_PERSONALITY (current_function_decl)
4558 && function_needs_eh_personality (fun) != eh_personality_lang)
4559 DECL_FUNCTION_PERSONALITY (current_function_decl) = NULL_TREE;
4560
4561 return ret;
4562 }
4563
4564 } // anon namespace
4565
4566 gimple_opt_pass *
4567 make_pass_cleanup_eh (gcc::context *ctxt)
4568 {
4569 return new pass_cleanup_eh (ctxt);
4570 }
4571 \f
4572 /* Verify that BB, which contains STMT as its last statement, has precisely
4573 the edges that make_eh_edges would create. */
4574
4575 DEBUG_FUNCTION bool
4576 verify_eh_edges (gimple stmt)
4577 {
4578 basic_block bb = gimple_bb (stmt);
4579 eh_landing_pad lp = NULL;
4580 int lp_nr;
4581 edge_iterator ei;
4582 edge e, eh_edge;
4583
4584 lp_nr = lookup_stmt_eh_lp (stmt);
4585 if (lp_nr > 0)
4586 lp = get_eh_landing_pad_from_number (lp_nr);
4587
4588 eh_edge = NULL;
4589 FOR_EACH_EDGE (e, ei, bb->succs)
4590 {
4591 if (e->flags & EDGE_EH)
4592 {
4593 if (eh_edge)
4594 {
4595 error ("BB %i has multiple EH edges", bb->index);
4596 return true;
4597 }
4598 else
4599 eh_edge = e;
4600 }
4601 }
4602
4603 if (lp == NULL)
4604 {
4605 if (eh_edge)
4606 {
4607 error ("BB %i cannot throw but has an EH edge", bb->index);
4608 return true;
4609 }
4610 return false;
4611 }
4612
4613 if (!stmt_could_throw_p (stmt))
4614 {
4615 error ("BB %i last statement has incorrectly set lp", bb->index);
4616 return true;
4617 }
4618
4619 if (eh_edge == NULL)
4620 {
4621 error ("BB %i is missing an EH edge", bb->index);
4622 return true;
4623 }
4624
4625 if (eh_edge->dest != label_to_block (lp->post_landing_pad))
4626 {
4627 error ("Incorrect EH edge %i->%i", bb->index, eh_edge->dest->index);
4628 return true;
4629 }
4630
4631 return false;
4632 }
4633
4634 /* Similarly, but handle GIMPLE_EH_DISPATCH specifically. */
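/* For instance (an assumed C++ example, not from the testsuite), for

     try { ... } catch (int) { ... } catch (...) { ... }

   the GIMPLE_EH_DISPATCH of the try region must have exactly one edge
   to each catch label and, because of the catch-all, no fallthru
   edge.  */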
4635
4636 DEBUG_FUNCTION bool
4637 verify_eh_dispatch_edge (gimple stmt)
4638 {
4639 eh_region r;
4640 eh_catch c;
4641 basic_block src, dst;
4642 bool want_fallthru = true;
4643 edge_iterator ei;
4644 edge e, fall_edge;
4645
4646 r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
4647 src = gimple_bb (stmt);
4648
4649 FOR_EACH_EDGE (e, ei, src->succs)
4650 gcc_assert (e->aux == NULL);
4651
4652 switch (r->type)
4653 {
4654 case ERT_TRY:
4655 for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
4656 {
4657 dst = label_to_block (c->label);
4658 e = find_edge (src, dst);
4659 if (e == NULL)
4660 {
4661 error ("BB %i is missing an edge", src->index);
4662 return true;
4663 }
4664 e->aux = (void *)e;
4665
4666 /* A catch-all handler doesn't have a fallthru. */
4667 if (c->type_list == NULL)
4668 {
4669 want_fallthru = false;
4670 break;
4671 }
4672 }
4673 break;
4674
4675 case ERT_ALLOWED_EXCEPTIONS:
4676 dst = label_to_block (r->u.allowed.label);
4677 e = find_edge (src, dst);
4678 if (e == NULL)
4679 {
4680 error ("BB %i is missing an edge", src->index);
4681 return true;
4682 }
4683 e->aux = (void *)e;
4684 break;
4685
4686 default:
4687 gcc_unreachable ();
4688 }
4689
4690 fall_edge = NULL;
4691 FOR_EACH_EDGE (e, ei, src->succs)
4692 {
4693 if (e->flags & EDGE_FALLTHRU)
4694 {
4695 if (fall_edge != NULL)
4696 {
4697 error ("BB %i has too many fallthru edges", src->index);
4698 return true;
4699 }
4700 fall_edge = e;
4701 }
4702 else if (e->aux)
4703 e->aux = NULL;
4704 else
4705 {
4706 error ("BB %i has incorrect edge", src->index);
4707 return true;
4708 }
4709 }
4710 if ((fall_edge != NULL) ^ want_fallthru)
4711 {
4712 error ("BB %i has incorrect fallthru edge", src->index);
4713 return true;
4714 }
4715
4716 return false;
4717 }