/* Exception handling semantics and decomposition for trees.
   Copyright (C) 2003-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "hash-table.h"
#include "tm.h"
#include "hash-set.h"
#include "machmode.h"
#include "vec.h"
#include "double-int.h"
#include "input.h"
#include "alias.h"
#include "symtab.h"
#include "wide-int.h"
#include "inchash.h"
#include "tree.h"
#include "fold-const.h"
#include "hashtab.h"
#include "hard-reg-set.h"
#include "function.h"
#include "rtl.h"
#include "flags.h"
#include "statistics.h"
#include "real.h"
#include "fixed-value.h"
#include "insn-config.h"
#include "expmed.h"
#include "dojump.h"
#include "explow.h"
#include "calls.h"
#include "emit-rtl.h"
#include "varasm.h"
#include "stmt.h"
#include "expr.h"
#include "except.h"
#include "predict.h"
#include "dominance.h"
#include "cfg.h"
#include "cfganal.h"
#include "cfgcleanup.h"
#include "basic-block.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "tree-eh.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "hash-map.h"
#include "plugin-api.h"
#include "ipa-ref.h"
#include "cgraph.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-into-ssa.h"
#include "tree-ssa.h"
#include "tree-inline.h"
#include "tree-pass.h"
#include "langhooks.h"
#include "diagnostic-core.h"
#include "target.h"
#include "cfgloop.h"
#include "gimple-low.h"

/* In some instances a tree and a gimple need to be stored in the same
   table, i.e. in hash tables.  This is a structure to do this.  */
typedef union {tree *tp; tree t; gimple g;} treemple;

/* Misc functions used in this file.  */

/* Remember and lookup EH landing pad data for arbitrary statements.
   Really this means any statement that could_throw_p.  We could
   stuff this information into the stmt_ann data structure, but:

   (1) We absolutely rely on this information being kept until
   we get to rtl.  Once we're done with lowering here, if we lose
   the information there's no way to recover it!

   (2) There are many more statements that *cannot* throw as
   compared to those that can.  We should be saving some amount
   of space by only allocating memory for those that can throw.  */

/* Add statement T in function IFUN to landing pad NUM.  */

static void
add_stmt_to_eh_lp_fn (struct function *ifun, gimple t, int num)
{
  gcc_assert (num != 0);

  if (!get_eh_throw_stmt_table (ifun))
    set_eh_throw_stmt_table (ifun, hash_map<gimple, int>::create_ggc (31));

  gcc_assert (!get_eh_throw_stmt_table (ifun)->put (t, num));
}

/* Add statement T in the current function (cfun) to EH landing pad NUM.  */

void
add_stmt_to_eh_lp (gimple t, int num)
{
  add_stmt_to_eh_lp_fn (cfun, t, num);
}

/* Add statement T to the single EH landing pad in REGION.  */

static void
record_stmt_eh_region (eh_region region, gimple t)
{
  if (region == NULL)
    return;
  if (region->type == ERT_MUST_NOT_THROW)
    add_stmt_to_eh_lp_fn (cfun, t, -region->index);
  else
    {
      eh_landing_pad lp = region->landing_pads;
      if (lp == NULL)
        lp = gen_eh_landing_pad (region);
      else
        gcc_assert (lp->next_lp == NULL);
      add_stmt_to_eh_lp_fn (cfun, t, lp->index);
    }
}


/* Remove statement T in function IFUN from its EH landing pad.  */

bool
remove_stmt_from_eh_lp_fn (struct function *ifun, gimple t)
{
  if (!get_eh_throw_stmt_table (ifun))
    return false;

  if (!get_eh_throw_stmt_table (ifun)->get (t))
    return false;

  get_eh_throw_stmt_table (ifun)->remove (t);
  return true;
}


/* Remove statement T in the current function (cfun) from its
   EH landing pad.  */

bool
remove_stmt_from_eh_lp (gimple t)
{
  return remove_stmt_from_eh_lp_fn (cfun, t);
}

/* Determine if statement T is inside an EH region in function IFUN.
   Positive numbers indicate a landing pad index; negative numbers
   indicate a MUST_NOT_THROW region index; zero indicates that the
   statement is not recorded in the region table.  */

int
lookup_stmt_eh_lp_fn (struct function *ifun, gimple t)
{
  if (ifun->eh->throw_stmt_table == NULL)
    return 0;

  int *lp_nr = ifun->eh->throw_stmt_table->get (t);
  return lp_nr ? *lp_nr : 0;
}

/* Likewise, but always use the current function.  */

int
lookup_stmt_eh_lp (gimple t)
{
  /* We can get called from initialized data when -fnon-call-exceptions
     is on; prevent crash.  */
  if (!cfun)
    return 0;
  return lookup_stmt_eh_lp_fn (cfun, t);
}
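
/* A usage sketch (illustrative only, not called in this file): the
   tri-state result is typically consumed like so, where make_eh_edges
   stands in for whatever the caller does with a throwing statement.

     int lp_nr = lookup_stmt_eh_lp (stmt);
     if (lp_nr > 0)
       make_eh_edges (stmt);    <- unwinds to landing pad LP_NR
     else if (lp_nr < 0)
       ...                      <- MUST_NOT_THROW region -LP_NR;
                                   a throw calls terminate
     else
       ...                      <- not recorded; no EH edges needed  */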

/* First pass of EH node decomposition.  Build up a tree of GIMPLE_TRY_FINALLY
   nodes and LABEL_DECL nodes.  We will use this during the second phase to
   determine if a goto leaves the body of a TRY_FINALLY_EXPR node.  */

struct finally_tree_node
{
  /* When storing a GIMPLE_TRY, we have to record a gimple.  However
     when deciding whether a GOTO to a certain LABEL_DECL (which is a
     tree) leaves the TRY block, it's necessary to record a tree in
     this field.  Thus a treemple is used.  */
  treemple child;
  gtry *parent;
};

/* Hashtable helpers.  */

struct finally_tree_hasher : typed_free_remove <finally_tree_node>
{
  typedef finally_tree_node value_type;
  typedef finally_tree_node compare_type;
  static inline hashval_t hash (const value_type *);
  static inline bool equal (const value_type *, const compare_type *);
};

inline hashval_t
finally_tree_hasher::hash (const value_type *v)
{
  return (intptr_t)v->child.t >> 4;
}

inline bool
finally_tree_hasher::equal (const value_type *v, const compare_type *c)
{
  return v->child.t == c->child.t;
}

/* Note that this table is *not* marked GTY.  It is short-lived.  */
static hash_table<finally_tree_hasher> *finally_tree;

static void
record_in_finally_tree (treemple child, gtry *parent)
{
  struct finally_tree_node *n;
  finally_tree_node **slot;

  n = XNEW (struct finally_tree_node);
  n->child = child;
  n->parent = parent;

  slot = finally_tree->find_slot (n, INSERT);
  gcc_assert (!*slot);
  *slot = n;
}
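
/* For example, given

     try { L1: body; } finally { cleanup; }

   collect_finally_tree below records both the GIMPLE_TRY node itself and
   the LABEL_DECL L1 against the enclosing gtry, so that either kind of
   key can later be looked up through the same treemple-based table.  */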

static void
collect_finally_tree (gimple stmt, gtry *region);

/* Go through the gimple sequence.  Works with collect_finally_tree to
   record all GIMPLE_LABEL and GIMPLE_TRY statements.  */

static void
collect_finally_tree_1 (gimple_seq seq, gtry *region)
{
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start (seq); !gsi_end_p (gsi); gsi_next (&gsi))
    collect_finally_tree (gsi_stmt (gsi), region);
}

static void
collect_finally_tree (gimple stmt, gtry *region)
{
  treemple temp;

  switch (gimple_code (stmt))
    {
    case GIMPLE_LABEL:
      temp.t = gimple_label_label (as_a <glabel *> (stmt));
      record_in_finally_tree (temp, region);
      break;

    case GIMPLE_TRY:
      if (gimple_try_kind (stmt) == GIMPLE_TRY_FINALLY)
        {
          temp.g = stmt;
          record_in_finally_tree (temp, region);
          collect_finally_tree_1 (gimple_try_eval (stmt),
                                  as_a <gtry *> (stmt));
          collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
        }
      else if (gimple_try_kind (stmt) == GIMPLE_TRY_CATCH)
        {
          collect_finally_tree_1 (gimple_try_eval (stmt), region);
          collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
        }
      break;

    case GIMPLE_CATCH:
      collect_finally_tree_1 (gimple_catch_handler (as_a <gcatch *> (stmt)),
                              region);
      break;

    case GIMPLE_EH_FILTER:
      collect_finally_tree_1 (gimple_eh_filter_failure (stmt), region);
      break;

    case GIMPLE_EH_ELSE:
      {
        geh_else *eh_else_stmt = as_a <geh_else *> (stmt);
        collect_finally_tree_1 (gimple_eh_else_n_body (eh_else_stmt), region);
        collect_finally_tree_1 (gimple_eh_else_e_body (eh_else_stmt), region);
      }
      break;

    default:
      /* A type, a decl, or some kind of statement that we're not
         interested in.  Don't walk them.  */
      break;
    }
}


/* Use the finally tree to determine if a jump from START to TARGET
   would leave the try_finally node that START lives in.  */

static bool
outside_finally_tree (treemple start, gimple target)
{
  struct finally_tree_node n, *p;

  do
    {
      n.child = start;
      p = finally_tree->find (&n);
      if (!p)
        return true;
      start.g = p->parent;
    }
  while (start.g != target);

  return false;
}
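
/* Worked example: in

     try { goto out; } finally { A; }
     out:

   the walk from OUT runs off the root of the finally tree without ever
   reaching this GIMPLE_TRY_FINALLY, so the function returns true: the
   goto escapes the try and A must run on the way to OUT.  */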

/* Second pass of EH node decomposition.  Actually transform the GIMPLE_TRY
   nodes into a set of gotos, magic labels, and eh regions.
   The eh region creation is straightforward, but frobbing all the gotos
   and such into shape isn't.  */

/* The sequence into which we record all EH stuff.  This will be
   placed at the end of the function when we're all done.  */
static gimple_seq eh_seq;

/* Record whether an EH region contains something that can throw,
   indexed by EH region number.  */
static bitmap eh_region_may_contain_throw_map;

/* The GOTO_QUEUE is an array of GIMPLE_GOTO and GIMPLE_RETURN
   statements that are seen to escape this GIMPLE_TRY_FINALLY node.
   The idea is to record a gimple statement for everything except for
   the conditionals, which get their labels recorded.  Since labels are
   of type 'tree', we need this node to store both gimple and tree
   objects.  REPL_STMT is the sequence used to replace the goto/return
   statement.  CONT_STMT is used to store the statement that allows
   the return/goto to jump to the original destination.  */

struct goto_queue_node
{
  treemple stmt;
  location_t location;
  gimple_seq repl_stmt;
  gimple cont_stmt;
  int index;
  /* This is used when index >= 0 to indicate that stmt is a label (as
     opposed to a goto stmt).  */
  int is_label;
};

/* State of the world while lowering.  */

struct leh_state
{
  /* What's "current" while constructing the eh region tree.  These
     correspond to variables of the same name in cfun->eh, which we
     don't have easy access to.  */
  eh_region cur_region;

  /* What's "current" for the purposes of __builtin_eh_pointer.  For
     a CATCH, this is the associated TRY.  For an EH_FILTER, this is
     the associated ALLOWED_EXCEPTIONS, etc.  */
  eh_region ehp_region;

  /* Processing of TRY_FINALLY requires a bit more state.  This is
     split out into a separate structure so that we don't have to
     copy so much when processing other nodes.  */
  struct leh_tf_state *tf;
};

struct leh_tf_state
{
  /* Pointer to the GIMPLE_TRY_FINALLY node under discussion.  The
     try_finally_expr is the original GIMPLE_TRY_FINALLY.  We need to retain
     this so that outside_finally_tree can reliably reference the tree used
     in the collect_finally_tree data structures.  */
  gtry *try_finally_expr;
  gtry *top_p;

  /* While lowering a top_p, it is usually expanded into multiple statements,
     so we need the following field to store them.  */
  gimple_seq top_p_seq;

  /* The state outside this try_finally node.  */
  struct leh_state *outer;

  /* The exception region created for it.  */
  eh_region region;

  /* The goto queue.  */
  struct goto_queue_node *goto_queue;
  size_t goto_queue_size;
  size_t goto_queue_active;

  /* Pointer map to help in searching goto_queue when it is large.  */
  hash_map<gimple, goto_queue_node *> *goto_queue_map;

  /* The set of unique labels seen as entries in the goto queue.  */
  vec<tree> dest_array;

  /* A label to be added at the end of the completed transformed
     sequence.  It will be set if may_fallthru was true *at one time*,
     though subsequent transformations may have cleared that flag.  */
  tree fallthru_label;

  /* True if it is possible to fall out the bottom of the try block.
     Cleared if the fallthru is converted to a goto.  */
  bool may_fallthru;

  /* True if any entry in goto_queue is a GIMPLE_RETURN.  */
  bool may_return;

  /* True if the finally block can receive an exception edge.
     Cleared if the exception case is handled by code duplication.  */
  bool may_throw;
};

static gimple_seq lower_eh_must_not_throw (struct leh_state *, gtry *);

/* Search for STMT in the goto queue.  Return the replacement,
   or null if the statement isn't in the queue.  */

#define LARGE_GOTO_QUEUE 20

static void lower_eh_constructs_1 (struct leh_state *state, gimple_seq *seq);

static gimple_seq
find_goto_replacement (struct leh_tf_state *tf, treemple stmt)
{
  unsigned int i;

  if (tf->goto_queue_active < LARGE_GOTO_QUEUE)
    {
      for (i = 0; i < tf->goto_queue_active; i++)
        if (tf->goto_queue[i].stmt.g == stmt.g)
          return tf->goto_queue[i].repl_stmt;
      return NULL;
    }

  /* If we have a large number of entries in the goto_queue, create a
     pointer map and use that for searching.  */

  if (!tf->goto_queue_map)
    {
      tf->goto_queue_map = new hash_map<gimple, goto_queue_node *>;
      for (i = 0; i < tf->goto_queue_active; i++)
        {
          bool existed = tf->goto_queue_map->put (tf->goto_queue[i].stmt.g,
                                                  &tf->goto_queue[i]);
          gcc_assert (!existed);
        }
    }

  goto_queue_node **slot = tf->goto_queue_map->get (stmt.g);
  if (slot != NULL)
    return ((*slot)->repl_stmt);

  return NULL;
}
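
/* The LARGE_GOTO_QUEUE cutoff above trades a one-time hash_map build
   against repeated linear scans: with Q active entries and S searches,
   scanning costs O(Q*S) while the map costs O(Q) to build plus O(S)
   lookups.  The value 20 is just a small-queue heuristic.  */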

/* A subroutine of replace_goto_queue_1.  Handles the sub-clauses of a
   lowered GIMPLE_COND.  If, by chance, the replacement is a simple goto,
   then we can just splat it in, otherwise we add the new stmts immediately
   after the GIMPLE_COND and redirect.  */

static void
replace_goto_queue_cond_clause (tree *tp, struct leh_tf_state *tf,
                                gimple_stmt_iterator *gsi)
{
  tree label;
  gimple_seq new_seq;
  treemple temp;
  location_t loc = gimple_location (gsi_stmt (*gsi));

  temp.tp = tp;
  new_seq = find_goto_replacement (tf, temp);
  if (!new_seq)
    return;

  if (gimple_seq_singleton_p (new_seq)
      && gimple_code (gimple_seq_first_stmt (new_seq)) == GIMPLE_GOTO)
    {
      *tp = gimple_goto_dest (gimple_seq_first_stmt (new_seq));
      return;
    }

  label = create_artificial_label (loc);
  /* Set the new label for the GIMPLE_COND.  */
  *tp = label;

  gsi_insert_after (gsi, gimple_build_label (label), GSI_CONTINUE_LINKING);
  gsi_insert_seq_after (gsi, gimple_seq_copy (new_seq), GSI_CONTINUE_LINKING);
}

/* The real work of replace_goto_queue.  Returns with GSI updated to
   point to the next statement.  */

static void replace_goto_queue_stmt_list (gimple_seq *, struct leh_tf_state *);

static void
replace_goto_queue_1 (gimple stmt, struct leh_tf_state *tf,
                      gimple_stmt_iterator *gsi)
{
  gimple_seq seq;
  treemple temp;
  temp.g = NULL;

  switch (gimple_code (stmt))
    {
    case GIMPLE_GOTO:
    case GIMPLE_RETURN:
      temp.g = stmt;
      seq = find_goto_replacement (tf, temp);
      if (seq)
        {
          gsi_insert_seq_before (gsi, gimple_seq_copy (seq), GSI_SAME_STMT);
          gsi_remove (gsi, false);
          return;
        }
      break;

    case GIMPLE_COND:
      replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 2), tf, gsi);
      replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 3), tf, gsi);
      break;

    case GIMPLE_TRY:
      replace_goto_queue_stmt_list (gimple_try_eval_ptr (stmt), tf);
      replace_goto_queue_stmt_list (gimple_try_cleanup_ptr (stmt), tf);
      break;
    case GIMPLE_CATCH:
      replace_goto_queue_stmt_list (gimple_catch_handler_ptr (
                                      as_a <gcatch *> (stmt)),
                                    tf);
      break;
    case GIMPLE_EH_FILTER:
      replace_goto_queue_stmt_list (gimple_eh_filter_failure_ptr (stmt), tf);
      break;
    case GIMPLE_EH_ELSE:
      {
        geh_else *eh_else_stmt = as_a <geh_else *> (stmt);
        replace_goto_queue_stmt_list (gimple_eh_else_n_body_ptr (eh_else_stmt),
                                      tf);
        replace_goto_queue_stmt_list (gimple_eh_else_e_body_ptr (eh_else_stmt),
                                      tf);
      }
      break;

    default:
      /* These won't have gotos in them.  */
      break;
    }

  gsi_next (gsi);
}

/* A subroutine of replace_goto_queue.  Handles GIMPLE_SEQ.  */

static void
replace_goto_queue_stmt_list (gimple_seq *seq, struct leh_tf_state *tf)
{
  gimple_stmt_iterator gsi = gsi_start (*seq);

  while (!gsi_end_p (gsi))
    replace_goto_queue_1 (gsi_stmt (gsi), tf, &gsi);
}

/* Replace all goto queue members.  */

static void
replace_goto_queue (struct leh_tf_state *tf)
{
  if (tf->goto_queue_active == 0)
    return;
  replace_goto_queue_stmt_list (&tf->top_p_seq, tf);
  replace_goto_queue_stmt_list (&eh_seq, tf);
}

/* Add a new record to the goto queue contained in TF.  NEW_STMT is the
   data to be added, IS_LABEL indicates whether NEW_STMT is a label or
   a gimple return.  */

static void
record_in_goto_queue (struct leh_tf_state *tf,
                      treemple new_stmt,
                      int index,
                      bool is_label,
                      location_t location)
{
  size_t active, size;
  struct goto_queue_node *q;

  gcc_assert (!tf->goto_queue_map);

  active = tf->goto_queue_active;
  size = tf->goto_queue_size;
  if (active >= size)
    {
      size = (size ? size * 2 : 32);
      tf->goto_queue_size = size;
      tf->goto_queue
        = XRESIZEVEC (struct goto_queue_node, tf->goto_queue, size);
    }

  q = &tf->goto_queue[active];
  tf->goto_queue_active = active + 1;

  memset (q, 0, sizeof (*q));
  q->stmt = new_stmt;
  q->index = index;
  q->location = location;
  q->is_label = is_label;
}
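
/* The queue doubles on demand (32, 64, 128, ...), so pushing N entries
   costs O(N) amortized.  Note the gcc_assert above: once
   find_goto_replacement has built goto_queue_map, the queue must not
   grow any further, since the map would become stale.  */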

/* Record label LABEL in the goto queue contained in TF.
   TF is not null.  */

static void
record_in_goto_queue_label (struct leh_tf_state *tf, treemple stmt, tree label,
                            location_t location)
{
  int index;
  treemple temp, new_stmt;

  if (!label)
    return;

  /* Computed and non-local gotos do not get processed.  Given
     their nature we can neither tell whether we've escaped the
     finally block nor redirect them if we knew.  */
  if (TREE_CODE (label) != LABEL_DECL)
    return;

  /* No need to record gotos that don't leave the try block.  */
  temp.t = label;
  if (!outside_finally_tree (temp, tf->try_finally_expr))
    return;

  if (! tf->dest_array.exists ())
    {
      tf->dest_array.create (10);
      tf->dest_array.quick_push (label);
      index = 0;
    }
  else
    {
      int n = tf->dest_array.length ();
      for (index = 0; index < n; ++index)
        if (tf->dest_array[index] == label)
          break;
      if (index == n)
        tf->dest_array.safe_push (label);
    }

  /* In the case of a GOTO we want to record the destination label,
     since with a GIMPLE_COND we have an easy access to the then/else
     labels.  */
  new_stmt = stmt;
  record_in_goto_queue (tf, new_stmt, index, true, location);
}

/* For any GIMPLE_GOTO or GIMPLE_RETURN, decide whether it leaves a try_finally
   node, and if so record that fact in the goto queue associated with that
   try_finally node.  */

static void
maybe_record_in_goto_queue (struct leh_state *state, gimple stmt)
{
  struct leh_tf_state *tf = state->tf;
  treemple new_stmt;

  if (!tf)
    return;

  switch (gimple_code (stmt))
    {
    case GIMPLE_COND:
      {
        gcond *cond_stmt = as_a <gcond *> (stmt);
        new_stmt.tp = gimple_op_ptr (cond_stmt, 2);
        record_in_goto_queue_label (tf, new_stmt,
                                    gimple_cond_true_label (cond_stmt),
                                    EXPR_LOCATION (*new_stmt.tp));
        new_stmt.tp = gimple_op_ptr (cond_stmt, 3);
        record_in_goto_queue_label (tf, new_stmt,
                                    gimple_cond_false_label (cond_stmt),
                                    EXPR_LOCATION (*new_stmt.tp));
      }
      break;
    case GIMPLE_GOTO:
      new_stmt.g = stmt;
      record_in_goto_queue_label (tf, new_stmt, gimple_goto_dest (stmt),
                                  gimple_location (stmt));
      break;

    case GIMPLE_RETURN:
      tf->may_return = true;
      new_stmt.g = stmt;
      record_in_goto_queue (tf, new_stmt, -1, false, gimple_location (stmt));
      break;

    default:
      gcc_unreachable ();
    }
}


#ifdef ENABLE_CHECKING
/* We do not process GIMPLE_SWITCHes for now.  As long as the original source
   was in fact structured, and we've not yet done jump threading, then none
   of the labels will leave outer GIMPLE_TRY_FINALLY nodes.  Verify this.  */

static void
verify_norecord_switch_expr (struct leh_state *state,
                             gswitch *switch_expr)
{
  struct leh_tf_state *tf = state->tf;
  size_t i, n;

  if (!tf)
    return;

  n = gimple_switch_num_labels (switch_expr);

  for (i = 0; i < n; ++i)
    {
      treemple temp;
      tree lab = CASE_LABEL (gimple_switch_label (switch_expr, i));
      temp.t = lab;
      gcc_assert (!outside_finally_tree (temp, tf->try_finally_expr));
    }
}
#else
#define verify_norecord_switch_expr(state, switch_expr)
#endif

/* Redirect a RETURN_EXPR pointed to by Q to FINLAB.  If MOD is
   non-null, insert it before the new branch.  */

static void
do_return_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod)
{
  gimple x;

  /* In the case of a return, the queue node must be a gimple statement.  */
  gcc_assert (!q->is_label);

  /* Note that the return value may have already been computed, e.g.,

       int x;
       int foo (void)
       {
         x = 0;
         try {
           return x;
         } finally {
           x++;
         }
       }

     should return 0, not 1.  We don't have to do anything to make
     this happen because the return value has been placed in the
     RESULT_DECL already.  */

  q->cont_stmt = q->stmt.g;

  if (mod)
    gimple_seq_add_seq (&q->repl_stmt, mod);

  x = gimple_build_goto (finlab);
  gimple_set_location (x, q->location);
  gimple_seq_add_stmt (&q->repl_stmt, x);
}

/* Similar, but easier, for GIMPLE_GOTO.  */

static void
do_goto_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod,
                     struct leh_tf_state *tf)
{
  ggoto *x;

  gcc_assert (q->is_label);

  q->cont_stmt = gimple_build_goto (tf->dest_array[q->index]);

  if (mod)
    gimple_seq_add_seq (&q->repl_stmt, mod);

  x = gimple_build_goto (finlab);
  gimple_set_location (x, q->location);
  gimple_seq_add_stmt (&q->repl_stmt, x);
}

/* Emit a standard landing pad sequence into SEQ for REGION.  */

static void
emit_post_landing_pad (gimple_seq *seq, eh_region region)
{
  eh_landing_pad lp = region->landing_pads;
  glabel *x;

  if (lp == NULL)
    lp = gen_eh_landing_pad (region);

  lp->post_landing_pad = create_artificial_label (UNKNOWN_LOCATION);
  EH_LANDING_PAD_NR (lp->post_landing_pad) = lp->index;

  x = gimple_build_label (lp->post_landing_pad);
  gimple_seq_add_stmt (seq, x);
}

/* Emit a RESX statement into SEQ for REGION.  */

static void
emit_resx (gimple_seq *seq, eh_region region)
{
  gresx *x = gimple_build_resx (region->index);
  gimple_seq_add_stmt (seq, x);
  if (region->outer)
    record_stmt_eh_region (region->outer, x);
}

/* Emit an EH_DISPATCH statement into SEQ for REGION.  */

static void
emit_eh_dispatch (gimple_seq *seq, eh_region region)
{
  geh_dispatch *x = gimple_build_eh_dispatch (region->index);
  gimple_seq_add_stmt (seq, x);
}

/* Note that the current EH region may contain a throw, or a
   call to a function which itself may contain a throw.  */

static void
note_eh_region_may_contain_throw (eh_region region)
{
  while (bitmap_set_bit (eh_region_may_contain_throw_map, region->index))
    {
      if (region->type == ERT_MUST_NOT_THROW)
        break;
      region = region->outer;
      if (region == NULL)
        break;
    }
}
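
/* For example, a throw noted in a cleanup nested two regions deep marks
   the cleanup, its parent, and so on outward.  The walk stops early
   either at an ERT_MUST_NOT_THROW region (unwinding cannot escape it)
   or at the first region already marked, since bitmap_set_bit returns
   false when the bit was previously set.  */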

/* Check if REGION has been marked as containing a throw.  If REGION is
   NULL, this predicate is false.  */

static inline bool
eh_region_may_contain_throw (eh_region r)
{
  return r && bitmap_bit_p (eh_region_may_contain_throw_map, r->index);
}

/* We want to transform
       try { body; } catch { stuff; }
   to
       normal_sequence:
         body;
         over:
       eh_sequence:
         landing_pad:
         stuff;
         goto over;

   TP is a GIMPLE_TRY node.  REGION is the region whose post_landing_pad
   should be placed before the second operand, or NULL.  OVER is
   an existing label that should be put at the exit, or NULL.  */

static gimple_seq
frob_into_branch_around (gtry *tp, eh_region region, tree over)
{
  gimple x;
  gimple_seq cleanup, result;
  location_t loc = gimple_location (tp);

  cleanup = gimple_try_cleanup (tp);
  result = gimple_try_eval (tp);

  if (region)
    emit_post_landing_pad (&eh_seq, region);

  if (gimple_seq_may_fallthru (cleanup))
    {
      if (!over)
        over = create_artificial_label (loc);
      x = gimple_build_goto (over);
      gimple_set_location (x, loc);
      gimple_seq_add_stmt (&cleanup, x);
    }
  gimple_seq_add_seq (&eh_seq, cleanup);

  if (over)
    {
      x = gimple_build_label (over);
      gimple_seq_add_stmt (&result, x);
    }
  return result;
}

/* A subroutine of lower_try_finally.  Duplicate the tree rooted at T.
   Make sure to record all new labels found.  */

static gimple_seq
lower_try_finally_dup_block (gimple_seq seq, struct leh_state *outer_state,
                             location_t loc)
{
  gtry *region = NULL;
  gimple_seq new_seq;
  gimple_stmt_iterator gsi;

  new_seq = copy_gimple_seq_and_replace_locals (seq);

  for (gsi = gsi_start (new_seq); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      if (LOCATION_LOCUS (gimple_location (stmt)) == UNKNOWN_LOCATION)
        {
          tree block = gimple_block (stmt);
          gimple_set_location (stmt, loc);
          gimple_set_block (stmt, block);
        }
    }

  if (outer_state->tf)
    region = outer_state->tf->try_finally_expr;
  collect_finally_tree_1 (new_seq, region);

  return new_seq;
}

/* A subroutine of lower_try_finally.  Create a fallthru label for
   the given try_finally state.  The only tricky bit here is that
   we have to make sure to record the label in our outer context.  */

static tree
lower_try_finally_fallthru_label (struct leh_tf_state *tf)
{
  tree label = tf->fallthru_label;
  treemple temp;

  if (!label)
    {
      label = create_artificial_label (gimple_location (tf->try_finally_expr));
      tf->fallthru_label = label;
      if (tf->outer->tf)
        {
          temp.t = label;
          record_in_finally_tree (temp, tf->outer->tf->try_finally_expr);
        }
    }
  return label;
}

/* A subroutine of lower_try_finally.  If FINALLY consists of a
   GIMPLE_EH_ELSE node, return it.  */

static inline geh_else *
get_eh_else (gimple_seq finally)
{
  gimple x = gimple_seq_first_stmt (finally);
  if (gimple_code (x) == GIMPLE_EH_ELSE)
    {
      gcc_assert (gimple_seq_singleton_p (finally));
      return as_a <geh_else *> (x);
    }
  return NULL;
}

/* A subroutine of lower_try_finally.  If the eh_protect_cleanup_actions
   langhook returns non-null, then the language requires that the exception
   path out of a try_finally be treated specially.  To wit: the code within
   the finally block may not itself throw an exception.  We have two choices
   here.  First we can duplicate the finally block and wrap it in a
   must_not_throw region.  Second, we can generate code like

     try {
       finally_block;
     } catch {
       if (fintmp == eh_edge)
         protect_cleanup_actions;
     }

   where "fintmp" is the temporary used in the switch statement generation
   alternative considered below.  For the nonce, we always choose the first
   option.

   THIS_STATE may be null if this is a try-cleanup, not a try-finally.  */

static void
honor_protect_cleanup_actions (struct leh_state *outer_state,
                               struct leh_state *this_state,
                               struct leh_tf_state *tf)
{
  tree protect_cleanup_actions;
  gimple_stmt_iterator gsi;
  bool finally_may_fallthru;
  gimple_seq finally;
  gimple x;
  geh_mnt *eh_mnt;
  gtry *try_stmt;
  geh_else *eh_else;

  /* First check for nothing to do.  */
  if (lang_hooks.eh_protect_cleanup_actions == NULL)
    return;
  protect_cleanup_actions = lang_hooks.eh_protect_cleanup_actions ();
  if (protect_cleanup_actions == NULL)
    return;

  finally = gimple_try_cleanup (tf->top_p);
  eh_else = get_eh_else (finally);

  /* Duplicate the FINALLY block.  Only need to do this for try-finally,
     and not for cleanups.  If we've got an EH_ELSE, extract it now.  */
  if (eh_else)
    {
      finally = gimple_eh_else_e_body (eh_else);
      gimple_try_set_cleanup (tf->top_p, gimple_eh_else_n_body (eh_else));
    }
  else if (this_state)
    finally = lower_try_finally_dup_block (finally, outer_state,
                                           gimple_location (tf->try_finally_expr));
  finally_may_fallthru = gimple_seq_may_fallthru (finally);

  /* If this cleanup consists of a TRY_CATCH_EXPR with TRY_CATCH_IS_CLEANUP
     set, the handler of the TRY_CATCH_EXPR is another cleanup which ought
     to be in an enclosing scope, but needs to be implemented at this level
     to avoid a nesting violation (see wrap_temporary_cleanups in
     cp/decl.c).  Since it's logically at an outer level, we should call
     terminate before we get to it, so strip it away before adding the
     MUST_NOT_THROW filter.  */
  gsi = gsi_start (finally);
  x = gsi_stmt (gsi);
  if (gimple_code (x) == GIMPLE_TRY
      && gimple_try_kind (x) == GIMPLE_TRY_CATCH
      && gimple_try_catch_is_cleanup (x))
    {
      gsi_insert_seq_before (&gsi, gimple_try_eval (x), GSI_SAME_STMT);
      gsi_remove (&gsi, false);
    }

  /* Wrap the block with protect_cleanup_actions as the action.  */
  eh_mnt = gimple_build_eh_must_not_throw (protect_cleanup_actions);
  try_stmt = gimple_build_try (finally, gimple_seq_alloc_with_stmt (eh_mnt),
                               GIMPLE_TRY_CATCH);
  finally = lower_eh_must_not_throw (outer_state, try_stmt);

  /* Drop all of this into the exception sequence.  */
  emit_post_landing_pad (&eh_seq, tf->region);
  gimple_seq_add_seq (&eh_seq, finally);
  if (finally_may_fallthru)
    emit_resx (&eh_seq, tf->region);

  /* Having now been handled, EH isn't to be considered with
     the rest of the outgoing edges.  */
  tf->may_throw = false;
}

/* A subroutine of lower_try_finally.  We have determined that there is
   no fallthru edge out of the finally block.  This means that there is
   no outgoing edge corresponding to any incoming edge.  Restructure the
   try_finally node for this special case.  */

static void
lower_try_finally_nofallthru (struct leh_state *state,
                              struct leh_tf_state *tf)
{
  tree lab;
  gimple x;
  geh_else *eh_else;
  gimple_seq finally;
  struct goto_queue_node *q, *qe;

  lab = create_artificial_label (gimple_location (tf->try_finally_expr));

  /* We expect that tf->top_p is a GIMPLE_TRY.  */
  finally = gimple_try_cleanup (tf->top_p);
  tf->top_p_seq = gimple_try_eval (tf->top_p);

  x = gimple_build_label (lab);
  gimple_seq_add_stmt (&tf->top_p_seq, x);

  q = tf->goto_queue;
  qe = q + tf->goto_queue_active;
  for (; q < qe; ++q)
    if (q->index < 0)
      do_return_redirection (q, lab, NULL);
    else
      do_goto_redirection (q, lab, NULL, tf);

  replace_goto_queue (tf);

  /* Emit the finally block into the stream.  Lower EH_ELSE at this time.  */
  eh_else = get_eh_else (finally);
  if (eh_else)
    {
      finally = gimple_eh_else_n_body (eh_else);
      lower_eh_constructs_1 (state, &finally);
      gimple_seq_add_seq (&tf->top_p_seq, finally);

      if (tf->may_throw)
        {
          finally = gimple_eh_else_e_body (eh_else);
          lower_eh_constructs_1 (state, &finally);

          emit_post_landing_pad (&eh_seq, tf->region);
          gimple_seq_add_seq (&eh_seq, finally);
        }
    }
  else
    {
      lower_eh_constructs_1 (state, &finally);
      gimple_seq_add_seq (&tf->top_p_seq, finally);

      if (tf->may_throw)
        {
          emit_post_landing_pad (&eh_seq, tf->region);

          x = gimple_build_goto (lab);
          gimple_set_location (x, gimple_location (tf->try_finally_expr));
          gimple_seq_add_stmt (&eh_seq, x);
        }
    }
}
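
/* A sketch of the nofallthru case, assuming no EH_ELSE and an
   illustrative spin () that never returns:

     try { ...; return; } finally { for (;;) spin (); }

   lowers to roughly

     ...; goto LAB;         <- the return, redirected by the queue
   LAB:
     for (;;) spin ();      <- never falls through

   and, if the region may throw, the EH sequence additionally gets
   "post-landing-pad: goto LAB;" so that every incoming edge funnels
   into the single copy of the finally block.  */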

/* A subroutine of lower_try_finally.  We have determined that there is
   exactly one destination of the finally block.  Restructure the
   try_finally node for this special case.  */

static void
lower_try_finally_onedest (struct leh_state *state, struct leh_tf_state *tf)
{
  struct goto_queue_node *q, *qe;
  geh_else *eh_else;
  glabel *label_stmt;
  gimple x;
  gimple_seq finally;
  gimple_stmt_iterator gsi;
  tree finally_label;
  location_t loc = gimple_location (tf->try_finally_expr);

  finally = gimple_try_cleanup (tf->top_p);
  tf->top_p_seq = gimple_try_eval (tf->top_p);

  /* Since there's only one destination, and the destination edge can only
     either be EH or non-EH, that implies that all of our incoming edges
     are of the same type.  Therefore we can lower EH_ELSE immediately.  */
  eh_else = get_eh_else (finally);
  if (eh_else)
    {
      if (tf->may_throw)
        finally = gimple_eh_else_e_body (eh_else);
      else
        finally = gimple_eh_else_n_body (eh_else);
    }

  lower_eh_constructs_1 (state, &finally);

  for (gsi = gsi_start (finally); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      if (LOCATION_LOCUS (gimple_location (stmt)) == UNKNOWN_LOCATION)
        {
          tree block = gimple_block (stmt);
          gimple_set_location (stmt, gimple_location (tf->try_finally_expr));
          gimple_set_block (stmt, block);
        }
    }

  if (tf->may_throw)
    {
      /* Only reachable via the exception edge.  Add the given label to
         the head of the FINALLY block.  Append a RESX at the end.  */
      emit_post_landing_pad (&eh_seq, tf->region);
      gimple_seq_add_seq (&eh_seq, finally);
      emit_resx (&eh_seq, tf->region);
      return;
    }

  if (tf->may_fallthru)
    {
      /* Only reachable via the fallthru edge.  Do nothing but let
         the two blocks run together; we'll fall out the bottom.  */
      gimple_seq_add_seq (&tf->top_p_seq, finally);
      return;
    }

  finally_label = create_artificial_label (loc);
  label_stmt = gimple_build_label (finally_label);
  gimple_seq_add_stmt (&tf->top_p_seq, label_stmt);

  gimple_seq_add_seq (&tf->top_p_seq, finally);

  q = tf->goto_queue;
  qe = q + tf->goto_queue_active;

  if (tf->may_return)
    {
      /* Reachable by return expressions only.  Redirect them.  */
      for (; q < qe; ++q)
        do_return_redirection (q, finally_label, NULL);
      replace_goto_queue (tf);
    }
  else
    {
      /* Reachable by goto expressions only.  Redirect them.  */
      for (; q < qe; ++q)
        do_goto_redirection (q, finally_label, NULL, tf);
      replace_goto_queue (tf);

      if (tf->dest_array[0] == tf->fallthru_label)
        {
          /* Reachable by goto to fallthru label only.  Redirect it
             to the new label (already created, sadly), and do not
             emit the final branch out, or the fallthru label.  */
          tf->fallthru_label = NULL;
          return;
        }
    }

  /* Place the original return/goto to the original destination
     immediately after the finally block.  */
  x = tf->goto_queue[0].cont_stmt;
  gimple_seq_add_stmt (&tf->top_p_seq, x);
  maybe_record_in_goto_queue (state, x);
}
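
/* A sketch of the onedest case where the one destination is a return:

     try { return x; } finally { f (); }

   lowers to roughly

     goto FINALLY_LABEL;    <- the redirected return; x already
                               sits in the RESULT_DECL
   FINALLY_LABEL:
     f ();
     return;                <- CONT_STMT, the original return
                               re-emitted after the finally  */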

/* A subroutine of lower_try_finally.  There are multiple edges incoming
   and outgoing from the finally block.  Implement this by duplicating the
   finally block for every destination.  */

static void
lower_try_finally_copy (struct leh_state *state, struct leh_tf_state *tf)
{
  gimple_seq finally;
  gimple_seq new_stmt;
  gimple_seq seq;
  gimple x;
  geh_else *eh_else;
  tree tmp;
  location_t tf_loc = gimple_location (tf->try_finally_expr);

  finally = gimple_try_cleanup (tf->top_p);

  /* Notice EH_ELSE, and simplify some of the remaining code
     by considering FINALLY to be the normal return path only.  */
  eh_else = get_eh_else (finally);
  if (eh_else)
    finally = gimple_eh_else_n_body (eh_else);

  tf->top_p_seq = gimple_try_eval (tf->top_p);
  new_stmt = NULL;

  if (tf->may_fallthru)
    {
      seq = lower_try_finally_dup_block (finally, state, tf_loc);
      lower_eh_constructs_1 (state, &seq);
      gimple_seq_add_seq (&new_stmt, seq);

      tmp = lower_try_finally_fallthru_label (tf);
      x = gimple_build_goto (tmp);
      gimple_set_location (x, tf_loc);
      gimple_seq_add_stmt (&new_stmt, x);
    }

  if (tf->may_throw)
    {
      /* We don't need to copy the EH path of EH_ELSE,
         since it is only emitted once.  */
      if (eh_else)
        seq = gimple_eh_else_e_body (eh_else);
      else
        seq = lower_try_finally_dup_block (finally, state, tf_loc);
      lower_eh_constructs_1 (state, &seq);

      emit_post_landing_pad (&eh_seq, tf->region);
      gimple_seq_add_seq (&eh_seq, seq);
      emit_resx (&eh_seq, tf->region);
    }

  if (tf->goto_queue)
    {
      struct goto_queue_node *q, *qe;
      int return_index, index;
      struct labels_s
      {
        struct goto_queue_node *q;
        tree label;
      } *labels;

      return_index = tf->dest_array.length ();
      labels = XCNEWVEC (struct labels_s, return_index + 1);

      q = tf->goto_queue;
      qe = q + tf->goto_queue_active;
      for (; q < qe; q++)
        {
          index = q->index < 0 ? return_index : q->index;

          if (!labels[index].q)
            labels[index].q = q;
        }

      for (index = 0; index < return_index + 1; index++)
        {
          tree lab;

          q = labels[index].q;
          if (! q)
            continue;

          lab = labels[index].label
            = create_artificial_label (tf_loc);

          if (index == return_index)
            do_return_redirection (q, lab, NULL);
          else
            do_goto_redirection (q, lab, NULL, tf);

          x = gimple_build_label (lab);
          gimple_seq_add_stmt (&new_stmt, x);

          seq = lower_try_finally_dup_block (finally, state, q->location);
          lower_eh_constructs_1 (state, &seq);
          gimple_seq_add_seq (&new_stmt, seq);

          gimple_seq_add_stmt (&new_stmt, q->cont_stmt);
          maybe_record_in_goto_queue (state, q->cont_stmt);
        }

      for (q = tf->goto_queue; q < qe; q++)
        {
          tree lab;

          index = q->index < 0 ? return_index : q->index;

          if (labels[index].q == q)
            continue;

          lab = labels[index].label;

          if (index == return_index)
            do_return_redirection (q, lab, NULL);
          else
            do_goto_redirection (q, lab, NULL, tf);
        }

      replace_goto_queue (tf);
      free (labels);
    }

  /* Need to link new stmts after running replace_goto_queue due
     to not wanting to process the same goto stmts twice.  */
  gimple_seq_add_seq (&tf->top_p_seq, new_stmt);
}
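
/* A sketch of the copy strategy with two destinations:

     try { if (p) goto L; } finally { f (); }

   lowers to roughly

     if (p) goto L1;    <- the queued goto, redirected
     f ();              <- copy 1, the fallthru path
     goto FALLTHRU;
   L1:
     f ();              <- copy 2, the goto path
     goto L;            <- CONT_STMT, the original destination

   plus one more copy on the EH path whenever the region may throw.  */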

/* A subroutine of lower_try_finally.  There are multiple edges incoming
   and outgoing from the finally block.  Implement this by instrumenting
   each incoming edge and creating a switch statement at the end of the
   finally block that branches to the appropriate destination.  */

static void
lower_try_finally_switch (struct leh_state *state, struct leh_tf_state *tf)
{
  struct goto_queue_node *q, *qe;
  tree finally_tmp, finally_label;
  int return_index, eh_index, fallthru_index;
  int nlabels, ndests, j, last_case_index;
  tree last_case;
  vec<tree> case_label_vec;
  gimple_seq switch_body = NULL;
  gimple x;
  geh_else *eh_else;
  tree tmp;
  gimple switch_stmt;
  gimple_seq finally;
  hash_map<tree, gimple> *cont_map = NULL;
  /* The location of the TRY_FINALLY stmt.  */
  location_t tf_loc = gimple_location (tf->try_finally_expr);
  /* The location of the finally block.  */
  location_t finally_loc;

  finally = gimple_try_cleanup (tf->top_p);
  eh_else = get_eh_else (finally);

  /* Mash the TRY block to the head of the chain.  */
  tf->top_p_seq = gimple_try_eval (tf->top_p);

  /* The location of the finally is either the last stmt in the finally
     block or the location of the TRY_FINALLY itself.  */
  x = gimple_seq_last_stmt (finally);
  finally_loc = x ? gimple_location (x) : tf_loc;

  /* Prepare for switch statement generation.  */
  nlabels = tf->dest_array.length ();
  return_index = nlabels;
  eh_index = return_index + tf->may_return;
  fallthru_index = eh_index + (tf->may_throw && !eh_else);
  ndests = fallthru_index + tf->may_fallthru;

  finally_tmp = create_tmp_var (integer_type_node, "finally_tmp");
  finally_label = create_artificial_label (finally_loc);

  /* We use vec::quick_push on case_label_vec throughout this function,
     since we know the size in advance and allocate precisely as much
     space as needed.  */
  case_label_vec.create (ndests);
  last_case = NULL;
  last_case_index = 0;

  /* Begin inserting code for getting to the finally block.  Things
     are done in this order to correspond to the sequence the code is
     laid out.  */

  if (tf->may_fallthru)
    {
      x = gimple_build_assign (finally_tmp,
                               build_int_cst (integer_type_node,
                                              fallthru_index));
      gimple_seq_add_stmt (&tf->top_p_seq, x);

      tmp = build_int_cst (integer_type_node, fallthru_index);
      last_case = build_case_label (tmp, NULL,
                                    create_artificial_label (tf_loc));
      case_label_vec.quick_push (last_case);
      last_case_index++;

      x = gimple_build_label (CASE_LABEL (last_case));
      gimple_seq_add_stmt (&switch_body, x);

      tmp = lower_try_finally_fallthru_label (tf);
      x = gimple_build_goto (tmp);
      gimple_set_location (x, tf_loc);
      gimple_seq_add_stmt (&switch_body, x);
    }

  /* For EH_ELSE, emit the exception path (plus resx) now, then
     subsequently we only need consider the normal path.  */
  if (eh_else)
    {
      if (tf->may_throw)
        {
          finally = gimple_eh_else_e_body (eh_else);
          lower_eh_constructs_1 (state, &finally);

          emit_post_landing_pad (&eh_seq, tf->region);
          gimple_seq_add_seq (&eh_seq, finally);
          emit_resx (&eh_seq, tf->region);
        }

      finally = gimple_eh_else_n_body (eh_else);
    }
  else if (tf->may_throw)
    {
      emit_post_landing_pad (&eh_seq, tf->region);

      x = gimple_build_assign (finally_tmp,
                               build_int_cst (integer_type_node, eh_index));
      gimple_seq_add_stmt (&eh_seq, x);

      x = gimple_build_goto (finally_label);
      gimple_set_location (x, tf_loc);
      gimple_seq_add_stmt (&eh_seq, x);

      tmp = build_int_cst (integer_type_node, eh_index);
      last_case = build_case_label (tmp, NULL,
                                    create_artificial_label (tf_loc));
      case_label_vec.quick_push (last_case);
      last_case_index++;

      x = gimple_build_label (CASE_LABEL (last_case));
      gimple_seq_add_stmt (&eh_seq, x);
      emit_resx (&eh_seq, tf->region);
    }

  x = gimple_build_label (finally_label);
  gimple_seq_add_stmt (&tf->top_p_seq, x);

  lower_eh_constructs_1 (state, &finally);
  gimple_seq_add_seq (&tf->top_p_seq, finally);

  /* Redirect each incoming goto edge.  */
  q = tf->goto_queue;
  qe = q + tf->goto_queue_active;
  j = last_case_index + tf->may_return;
  /* Prepare the assignments to finally_tmp that are executed upon the
     entrance through a particular edge.  */
  for (; q < qe; ++q)
    {
      gimple_seq mod = NULL;
      int switch_id;
      unsigned int case_index;

      if (q->index < 0)
        {
          x = gimple_build_assign (finally_tmp,
                                   build_int_cst (integer_type_node,
                                                  return_index));
          gimple_seq_add_stmt (&mod, x);
          do_return_redirection (q, finally_label, mod);
          switch_id = return_index;
        }
      else
        {
          x = gimple_build_assign (finally_tmp,
                                   build_int_cst (integer_type_node, q->index));
          gimple_seq_add_stmt (&mod, x);
          do_goto_redirection (q, finally_label, mod, tf);
          switch_id = q->index;
        }

      case_index = j + q->index;
      if (case_label_vec.length () <= case_index || !case_label_vec[case_index])
        {
          tree case_lab;
          tmp = build_int_cst (integer_type_node, switch_id);
          case_lab = build_case_label (tmp, NULL,
                                       create_artificial_label (tf_loc));
          /* We store the cont_stmt in the pointer map, so that we can recover
             it in the loop below.  */
          if (!cont_map)
            cont_map = new hash_map<tree, gimple>;
          cont_map->put (case_lab, q->cont_stmt);
          case_label_vec.quick_push (case_lab);
        }
    }
  for (j = last_case_index; j < last_case_index + nlabels; j++)
    {
      gimple cont_stmt;

      last_case = case_label_vec[j];

      gcc_assert (last_case);
      gcc_assert (cont_map);

      cont_stmt = *cont_map->get (last_case);

      x = gimple_build_label (CASE_LABEL (last_case));
      gimple_seq_add_stmt (&switch_body, x);
      gimple_seq_add_stmt (&switch_body, cont_stmt);
      maybe_record_in_goto_queue (state, cont_stmt);
    }
  if (cont_map)
    delete cont_map;

  replace_goto_queue (tf);

  /* Make sure that the last case is the default label, as one is required.
     Then sort the labels, which is also required in GIMPLE.  */
  CASE_LOW (last_case) = NULL;
  tree tem = case_label_vec.pop ();
  gcc_assert (tem == last_case);
  sort_case_labels (case_label_vec);

  /* Build the switch statement, setting last_case to be the default
     label.  */
  switch_stmt = gimple_build_switch (finally_tmp, last_case,
                                     case_label_vec);
  gimple_set_location (switch_stmt, finally_loc);

  /* Need to link SWITCH_STMT after running replace_goto_queue
     due to not wanting to process the same goto stmts twice.  */
  gimple_seq_add_stmt (&tf->top_p_seq, switch_stmt);
  gimple_seq_add_seq (&tf->top_p_seq, switch_body);
}
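
/* A sketch of the switch strategy for

     try { if (p) goto L; return; } finally { f (); }

   which lowers to roughly

     if (p) { finally_tmp = 0; goto FINALLY; }
     finally_tmp = 1; goto FINALLY;    <- the redirected return
   FINALLY:
     f ();                             <- single copy of the finally
     switch (finally_tmp)
       {
       case 0: goto L;
       default: return;                <- last case doubles as default
       }

   The index values here are illustrative; the real ones come from
   dest_array, return_index, eh_index and fallthru_index above.  */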

/* Decide whether or not we are going to duplicate the finally block.
   There are several considerations.

   First, if this is Java, then the finally block contains code
   written by the user.  It has line numbers associated with it,
   so duplicating the block means it's difficult to set a breakpoint.
   Since controlling code generation via -g is verboten, we simply
   never duplicate code without optimization.

   Second, we'd like to prevent egregious code growth.  One way to
   do this is to estimate the size of the finally block, multiply
   that by the number of copies we'd need to make, and compare against
   the estimate of the size of the switch machinery we'd have to add.  */

static bool
decide_copy_try_finally (int ndests, bool may_throw, gimple_seq finally)
{
  int f_estimate, sw_estimate;
  geh_else *eh_else;

  /* If there's an EH_ELSE involved, the exception path is separate
     and really doesn't come into play for this computation.  */
  eh_else = get_eh_else (finally);
  if (eh_else)
    {
      ndests -= may_throw;
      finally = gimple_eh_else_n_body (eh_else);
    }

  if (!optimize)
    {
      gimple_stmt_iterator gsi;

      if (ndests == 1)
        return true;

      for (gsi = gsi_start (finally); !gsi_end_p (gsi); gsi_next (&gsi))
        {
          gimple stmt = gsi_stmt (gsi);
          if (!is_gimple_debug (stmt) && !gimple_clobber_p (stmt))
            return false;
        }
      return true;
    }

  /* Finally estimate N times, plus N gotos.  */
  f_estimate = count_insns_seq (finally, &eni_size_weights);
  f_estimate = (f_estimate + 1) * ndests;

  /* Switch statement (cost 10), N variable assignments, N gotos.  */
  sw_estimate = 10 + 2 * ndests;

  /* Optimize for size clearly wants our best guess.  */
  if (optimize_function_for_size_p (cfun))
    return f_estimate < sw_estimate;

  /* ??? These numbers are completely made up so far.  */
  if (optimize > 1)
    return f_estimate < 100 || f_estimate < sw_estimate * 2;
  else
    return f_estimate < 40 || f_estimate * 2 < sw_estimate * 3;
}
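
/* Worked example: a finally block measuring 9 size units with three
   destinations gives f_estimate = (9 + 1) * 3 = 30 against
   sw_estimate = 10 + 2 * 3 = 16, so a size-optimized function takes
   the switch; at -O2 the same block is still copied, since 30 < 100.  */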
1659
1660 /* REG is the enclosing region for a possible cleanup region, or the region
1661 itself. Returns TRUE if such a region would be unreachable.
1662
1663 Cleanup regions within a must-not-throw region aren't actually reachable
1664 even if there are throwing stmts within them, because the personality
1665 routine will call terminate before unwinding. */
1666
1667 static bool
1668 cleanup_is_dead_in (eh_region reg)
1669 {
1670 while (reg && reg->type == ERT_CLEANUP)
1671 reg = reg->outer;
1672 return (reg && reg->type == ERT_MUST_NOT_THROW);
1673 }
1674
1675 /* A subroutine of lower_eh_constructs_1. Lower a GIMPLE_TRY_FINALLY nodes
1676 to a sequence of labels and blocks, plus the exception region trees
1677 that record all the magic. This is complicated by the need to
1678 arrange for the FINALLY block to be executed on all exits. */
1679
1680 static gimple_seq
1681 lower_try_finally (struct leh_state *state, gtry *tp)
1682 {
1683 struct leh_tf_state this_tf;
1684 struct leh_state this_state;
1685 int ndests;
1686 gimple_seq old_eh_seq;
1687
1688 /* Process the try block. */
1689
1690 memset (&this_tf, 0, sizeof (this_tf));
1691 this_tf.try_finally_expr = tp;
1692 this_tf.top_p = tp;
1693 this_tf.outer = state;
1694 if (using_eh_for_cleanups_p () && !cleanup_is_dead_in (state->cur_region))
1695 {
1696 this_tf.region = gen_eh_region_cleanup (state->cur_region);
1697 this_state.cur_region = this_tf.region;
1698 }
1699 else
1700 {
1701 this_tf.region = NULL;
1702 this_state.cur_region = state->cur_region;
1703 }
1704
1705 this_state.ehp_region = state->ehp_region;
1706 this_state.tf = &this_tf;
1707
1708 old_eh_seq = eh_seq;
1709 eh_seq = NULL;
1710
1711 lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1712
1713 /* Determine if the try block is escaped through the bottom. */
1714 this_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp));
1715
1716 /* Determine if any exceptions are possible within the try block. */
1717 if (this_tf.region)
1718 this_tf.may_throw = eh_region_may_contain_throw (this_tf.region);
1719 if (this_tf.may_throw)
1720 honor_protect_cleanup_actions (state, &this_state, &this_tf);
1721
1722 /* Determine how many edges (still) reach the finally block. Or rather,
1723 how many destinations are reached by the finally block. Use this to
1724 determine how we process the finally block itself. */
1725
1726 ndests = this_tf.dest_array.length ();
1727 ndests += this_tf.may_fallthru;
1728 ndests += this_tf.may_return;
1729 ndests += this_tf.may_throw;
1730
1731 /* If the FINALLY block is not reachable, dike it out. */
1732 if (ndests == 0)
1733 {
1734 gimple_seq_add_seq (&this_tf.top_p_seq, gimple_try_eval (tp));
1735 gimple_try_set_cleanup (tp, NULL);
1736 }
1737 /* If the finally block doesn't fall through, then any destination
1738 we might try to impose there isn't reached either. There may be
1739 some minor amount of cleanup and redirection still needed. */
1740 else if (!gimple_seq_may_fallthru (gimple_try_cleanup (tp)))
1741 lower_try_finally_nofallthru (state, &this_tf);
1742
1743 /* We can easily special-case redirection to a single destination. */
1744 else if (ndests == 1)
1745 lower_try_finally_onedest (state, &this_tf);
1746 else if (decide_copy_try_finally (ndests, this_tf.may_throw,
1747 gimple_try_cleanup (tp)))
1748 lower_try_finally_copy (state, &this_tf);
1749 else
1750 lower_try_finally_switch (state, &this_tf);
1751
1752 /* If someone requested we add a label at the end of the transformed
1753 block, do so. */
1754 if (this_tf.fallthru_label)
1755 {
1756 /* This must be reached only if ndests == 0. */
1757 gimple x = gimple_build_label (this_tf.fallthru_label);
1758 gimple_seq_add_stmt (&this_tf.top_p_seq, x);
1759 }
1760
1761 this_tf.dest_array.release ();
1762 free (this_tf.goto_queue);
1763 if (this_tf.goto_queue_map)
1764 delete this_tf.goto_queue_map;
1765
1766 /* If there was an old (aka outer) eh_seq, append the current eh_seq.
1767 If there was no old eh_seq, then the append is trivially already done. */
1768 if (old_eh_seq)
1769 {
1770 if (eh_seq == NULL)
1771 eh_seq = old_eh_seq;
1772 else
1773 {
1774 gimple_seq new_eh_seq = eh_seq;
1775 eh_seq = old_eh_seq;
1776 gimple_seq_add_seq (&eh_seq, new_eh_seq);
1777 }
1778 }
1779
1780 return this_tf.top_p_seq;
1781 }
1782
1783 /* A subroutine of lower_eh_constructs_1. Lower a GIMPLE_TRY_CATCH with a
1784 list of GIMPLE_CATCH to a sequence of labels and blocks, plus the
1785 exception region trees that records all the magic. */
1786
1787 static gimple_seq
1788 lower_catch (struct leh_state *state, gtry *tp)
1789 {
1790 eh_region try_region = NULL;
1791 struct leh_state this_state = *state;
1792 gimple_stmt_iterator gsi;
1793 tree out_label;
1794 gimple_seq new_seq, cleanup;
1795 gimple x;
1796 location_t try_catch_loc = gimple_location (tp);
1797
1798 if (flag_exceptions)
1799 {
1800 try_region = gen_eh_region_try (state->cur_region);
1801 this_state.cur_region = try_region;
1802 }
1803
1804 lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1805
1806 if (!eh_region_may_contain_throw (try_region))
1807 return gimple_try_eval (tp);
1808
1809 new_seq = NULL;
1810 emit_eh_dispatch (&new_seq, try_region);
1811 emit_resx (&new_seq, try_region);
1812
1813 this_state.cur_region = state->cur_region;
1814 this_state.ehp_region = try_region;
1815
1816 out_label = NULL;
1817 cleanup = gimple_try_cleanup (tp);
1818 for (gsi = gsi_start (cleanup);
1819 !gsi_end_p (gsi);
1820 gsi_next (&gsi))
1821 {
1822 eh_catch c;
1823 gcatch *catch_stmt;
1824 gimple_seq handler;
1825
1826 catch_stmt = as_a <gcatch *> (gsi_stmt (gsi));
1827 c = gen_eh_region_catch (try_region, gimple_catch_types (catch_stmt));
1828
1829 handler = gimple_catch_handler (catch_stmt);
1830 lower_eh_constructs_1 (&this_state, &handler);
1831
1832 c->label = create_artificial_label (UNKNOWN_LOCATION);
1833 x = gimple_build_label (c->label);
1834 gimple_seq_add_stmt (&new_seq, x);
1835
1836 gimple_seq_add_seq (&new_seq, handler);
1837
1838 if (gimple_seq_may_fallthru (new_seq))
1839 {
1840 if (!out_label)
1841 out_label = create_artificial_label (try_catch_loc);
1842
1843 x = gimple_build_goto (out_label);
1844 gimple_seq_add_stmt (&new_seq, x);
1845 }
1846 if (!c->type_list)
1847 break;
1848 }
1849
1850 gimple_try_set_cleanup (tp, new_seq);
1851
1852 return frob_into_branch_around (tp, try_region, out_label);
1853 }
1854
1855 /* A subroutine of lower_eh_constructs_1. Lower a GIMPLE_TRY with a
1856 GIMPLE_EH_FILTER to a sequence of labels and blocks, plus the exception
1857 region trees that record all the magic. */
1858
1859 static gimple_seq
1860 lower_eh_filter (struct leh_state *state, gtry *tp)
1861 {
1862 struct leh_state this_state = *state;
1863 eh_region this_region = NULL;
1864 gimple inner, x;
1865 gimple_seq new_seq;
1866
1867 inner = gimple_seq_first_stmt (gimple_try_cleanup (tp));
1868
1869 if (flag_exceptions)
1870 {
1871 this_region = gen_eh_region_allowed (state->cur_region,
1872 gimple_eh_filter_types (inner));
1873 this_state.cur_region = this_region;
1874 }
1875
1876 lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1877
1878 if (!eh_region_may_contain_throw (this_region))
1879 return gimple_try_eval (tp);
1880
1881 new_seq = NULL;
1882 this_state.cur_region = state->cur_region;
1883 this_state.ehp_region = this_region;
1884
1885 emit_eh_dispatch (&new_seq, this_region);
1886 emit_resx (&new_seq, this_region);
1887
1888 this_region->u.allowed.label = create_artificial_label (UNKNOWN_LOCATION);
1889 x = gimple_build_label (this_region->u.allowed.label);
1890 gimple_seq_add_stmt (&new_seq, x);
1891
1892 lower_eh_constructs_1 (&this_state, gimple_eh_filter_failure_ptr (inner));
1893 gimple_seq_add_seq (&new_seq, gimple_eh_filter_failure (inner));
1894
1895 gimple_try_set_cleanup (tp, new_seq);
1896
1897 return frob_into_branch_around (tp, this_region, NULL);
1898 }
1899
1900 /* A subroutine of lower_eh_constructs_1. Lower a GIMPLE_TRY with
1901 a GIMPLE_EH_MUST_NOT_THROW to a sequence of labels and blocks,
1902 plus the exception region trees that record all the magic. */
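
/* For instance (a sketch of front-end behavior), a region in which
   exceptions must not propagate, such as a destructor run during
   unwinding, arrives here roughly as

     try { body; } eh_mnt { terminate; }

   and this lowering merely records a MUST_NOT_THROW region whose
   failure decl is the terminate routine; the actual failure call is
   built later, in lower_resx, if an exception can really reach it.  */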
1903
1904 static gimple_seq
1905 lower_eh_must_not_throw (struct leh_state *state, gtry *tp)
1906 {
1907 struct leh_state this_state = *state;
1908
1909 if (flag_exceptions)
1910 {
1911 gimple inner = gimple_seq_first_stmt (gimple_try_cleanup (tp));
1912 eh_region this_region;
1913
1914 this_region = gen_eh_region_must_not_throw (state->cur_region);
1915 this_region->u.must_not_throw.failure_decl
1916 = gimple_eh_must_not_throw_fndecl (
1917 as_a <geh_mnt *> (inner));
1918 this_region->u.must_not_throw.failure_loc
1919 = LOCATION_LOCUS (gimple_location (tp));
1920
1921 /* In order to get mangling applied to this decl, we must mark it
1922 used now. Otherwise, pass_ipa_free_lang_data won't realize that
1923 mangling needs to happen. */
1924 TREE_USED (this_region->u.must_not_throw.failure_decl) = 1;
1925
1926 this_state.cur_region = this_region;
1927 }
1928
1929 lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1930
1931 return gimple_try_eval (tp);
1932 }
1933
1934 /* Implement a cleanup expression. This is similar to try-finally,
1935 except that we only execute the cleanup block for exception edges. */
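
/* For instance (a sketch), when the constructor of a later member can
   throw, the front end protects the remaining construction roughly as

     try { construct rest; } cleanup { destroy member1; }

   so that member1 is destroyed only on the exception path; the normal
   path skips the cleanup entirely, unlike a try-finally.  */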
1936
1937 static gimple_seq
1938 lower_cleanup (struct leh_state *state, gtry *tp)
1939 {
1940 struct leh_state this_state = *state;
1941 eh_region this_region = NULL;
1942 struct leh_tf_state fake_tf;
1943 gimple_seq result;
1944 bool cleanup_dead = cleanup_is_dead_in (state->cur_region);
1945
1946 if (flag_exceptions && !cleanup_dead)
1947 {
1948 this_region = gen_eh_region_cleanup (state->cur_region);
1949 this_state.cur_region = this_region;
1950 }
1951
1952 lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1953
1954 if (cleanup_dead || !eh_region_may_contain_throw (this_region))
1955 return gimple_try_eval (tp);
1956
1957 /* Build enough of a try-finally state so that we can reuse
1958 honor_protect_cleanup_actions. */
1959 memset (&fake_tf, 0, sizeof (fake_tf));
1960 fake_tf.top_p = fake_tf.try_finally_expr = tp;
1961 fake_tf.outer = state;
1962 fake_tf.region = this_region;
1963 fake_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp));
1964 fake_tf.may_throw = true;
1965
1966 honor_protect_cleanup_actions (state, NULL, &fake_tf);
1967
1968 if (fake_tf.may_throw)
1969 {
1970 /* In this case honor_protect_cleanup_actions had nothing to do,
1971 and we should process this normally. */
1972 lower_eh_constructs_1 (state, gimple_try_cleanup_ptr (tp));
1973 result = frob_into_branch_around (tp, this_region,
1974 fake_tf.fallthru_label);
1975 }
1976 else
1977 {
1978 /* In this case honor_protect_cleanup_actions did nearly all of
1979 the work. All we have left is to append the fallthru_label. */
1980
1981 result = gimple_try_eval (tp);
1982 if (fake_tf.fallthru_label)
1983 {
1984 gimple x = gimple_build_label (fake_tf.fallthru_label);
1985 gimple_seq_add_stmt (&result, x);
1986 }
1987 }
1988 return result;
1989 }
1990
1991 /* Main loop for lowering eh constructs. Also moves gsi to the next
1992 statement. */
1993
1994 static void
1995 lower_eh_constructs_2 (struct leh_state *state, gimple_stmt_iterator *gsi)
1996 {
1997 gimple_seq replace;
1998 gimple x;
1999 gimple stmt = gsi_stmt (*gsi);
2000
2001 switch (gimple_code (stmt))
2002 {
2003 case GIMPLE_CALL:
2004 {
2005 tree fndecl = gimple_call_fndecl (stmt);
2006 tree rhs, lhs;
2007
2008 if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
2009 switch (DECL_FUNCTION_CODE (fndecl))
2010 {
2011 case BUILT_IN_EH_POINTER:
2012 /* The front end may have generated a call to
2013 __builtin_eh_pointer (0) within a catch region. Replace
2014 this zero argument with the current catch region number. */
2015 if (state->ehp_region)
2016 {
2017 tree nr = build_int_cst (integer_type_node,
2018 state->ehp_region->index);
2019 gimple_call_set_arg (stmt, 0, nr);
2020 }
2021 else
2022 {
2023 /* The user has done something silly. Remove it. */
2024 rhs = null_pointer_node;
2025 goto do_replace;
2026 }
2027 break;
2028
2029 case BUILT_IN_EH_FILTER:
2030 /* ??? This should never appear, but since it's a builtin it
2031 is accessible to abuse by users. Just remove it and
2032 replace the use with the arbitrary value zero. */
2033 rhs = build_int_cst (TREE_TYPE (TREE_TYPE (fndecl)), 0);
2034 do_replace:
2035 lhs = gimple_call_lhs (stmt);
2036 x = gimple_build_assign (lhs, rhs);
2037 gsi_insert_before (gsi, x, GSI_SAME_STMT);
2038 /* FALLTHRU */
2039
2040 case BUILT_IN_EH_COPY_VALUES:
2041 /* Likewise this should not appear. Remove it. */
2042 gsi_remove (gsi, true);
2043 return;
2044
2045 default:
2046 break;
2047 }
2048 }
2049 /* FALLTHRU */
2050
2051 case GIMPLE_ASSIGN:
2052 /* If the stmt can throw, use a new temporary for the assignment
2053 to a LHS. This makes sure the old value of the LHS is
2054 available on the EH edge. Only do so for statements that
2055 potentially fall through (no noreturn calls e.g.), otherwise
2056 this new assignment might create fake fallthru regions. */
2057 if (stmt_could_throw_p (stmt)
2058 && gimple_has_lhs (stmt)
2059 && gimple_stmt_may_fallthru (stmt)
2060 && !tree_could_throw_p (gimple_get_lhs (stmt))
2061 && is_gimple_reg_type (TREE_TYPE (gimple_get_lhs (stmt))))
2062 {
2063 tree lhs = gimple_get_lhs (stmt);
2064 tree tmp = create_tmp_var (TREE_TYPE (lhs));
2065 gimple s = gimple_build_assign (lhs, tmp);
2066 gimple_set_location (s, gimple_location (stmt));
2067 gimple_set_block (s, gimple_block (stmt));
2068 gimple_set_lhs (stmt, tmp);
2069 if (TREE_CODE (TREE_TYPE (tmp)) == COMPLEX_TYPE
2070 || TREE_CODE (TREE_TYPE (tmp)) == VECTOR_TYPE)
2071 DECL_GIMPLE_REG_P (tmp) = 1;
2072 gsi_insert_after (gsi, s, GSI_SAME_STMT);
2073 }
2074 /* Look for things that can throw exceptions, and record them. */
2075 if (state->cur_region && stmt_could_throw_p (stmt))
2076 {
2077 record_stmt_eh_region (state->cur_region, stmt);
2078 note_eh_region_may_contain_throw (state->cur_region);
2079 }
2080 break;
2081
2082 case GIMPLE_COND:
2083 case GIMPLE_GOTO:
2084 case GIMPLE_RETURN:
2085 maybe_record_in_goto_queue (state, stmt);
2086 break;
2087
2088 case GIMPLE_SWITCH:
2089 verify_norecord_switch_expr (state, as_a <gswitch *> (stmt));
2090 break;
2091
2092 case GIMPLE_TRY:
2093 {
2094 gtry *try_stmt = as_a <gtry *> (stmt);
2095 if (gimple_try_kind (try_stmt) == GIMPLE_TRY_FINALLY)
2096 replace = lower_try_finally (state, try_stmt);
2097 else
2098 {
2099 x = gimple_seq_first_stmt (gimple_try_cleanup (try_stmt));
2100 if (!x)
2101 {
2102 replace = gimple_try_eval (try_stmt);
2103 lower_eh_constructs_1 (state, &replace);
2104 }
2105 else
2106 switch (gimple_code (x))
2107 {
2108 case GIMPLE_CATCH:
2109 replace = lower_catch (state, try_stmt);
2110 break;
2111 case GIMPLE_EH_FILTER:
2112 replace = lower_eh_filter (state, try_stmt);
2113 break;
2114 case GIMPLE_EH_MUST_NOT_THROW:
2115 replace = lower_eh_must_not_throw (state, try_stmt);
2116 break;
2117 case GIMPLE_EH_ELSE:
2118 /* This code is only valid with GIMPLE_TRY_FINALLY. */
2119 gcc_unreachable ();
2120 default:
2121 replace = lower_cleanup (state, try_stmt);
2122 break;
2123 }
2124 }
2125 }
2126
2127 /* Remove the old stmt and insert the transformed sequence
2128 instead. */
2129 gsi_insert_seq_before (gsi, replace, GSI_SAME_STMT);
2130 gsi_remove (gsi, true);
2131
2132 /* Return since we don't want gsi_next (). */
2133 return;
2134
2135 case GIMPLE_EH_ELSE:
2136 /* We should be eliminating this in lower_try_finally et al. */
2137 gcc_unreachable ();
2138
2139 default:
2140 /* A type, a decl, or some kind of statement that we're not
2141 interested in. Don't walk them. */
2142 break;
2143 }
2144
2145 gsi_next (gsi);
2146 }
2147
2148 /* A helper to unwrap a gimple_seq and feed stmts to lower_eh_constructs_2. */
2149
2150 static void
2151 lower_eh_constructs_1 (struct leh_state *state, gimple_seq *pseq)
2152 {
2153 gimple_stmt_iterator gsi;
2154 for (gsi = gsi_start (*pseq); !gsi_end_p (gsi);)
2155 lower_eh_constructs_2 (state, &gsi);
2156 }
2157
2158 namespace {
2159
2160 const pass_data pass_data_lower_eh =
2161 {
2162 GIMPLE_PASS, /* type */
2163 "eh", /* name */
2164 OPTGROUP_NONE, /* optinfo_flags */
2165 TV_TREE_EH, /* tv_id */
2166 PROP_gimple_lcf, /* properties_required */
2167 PROP_gimple_leh, /* properties_provided */
2168 0, /* properties_destroyed */
2169 0, /* todo_flags_start */
2170 0, /* todo_flags_finish */
2171 };
2172
2173 class pass_lower_eh : public gimple_opt_pass
2174 {
2175 public:
2176 pass_lower_eh (gcc::context *ctxt)
2177 : gimple_opt_pass (pass_data_lower_eh, ctxt)
2178 {}
2179
2180 /* opt_pass methods: */
2181 virtual unsigned int execute (function *);
2182
2183 }; // class pass_lower_eh
2184
2185 unsigned int
2186 pass_lower_eh::execute (function *fun)
2187 {
2188 struct leh_state null_state;
2189 gimple_seq bodyp;
2190
2191 bodyp = gimple_body (current_function_decl);
2192 if (bodyp == NULL)
2193 return 0;
2194
2195 finally_tree = new hash_table<finally_tree_hasher> (31);
2196 eh_region_may_contain_throw_map = BITMAP_ALLOC (NULL);
2197 memset (&null_state, 0, sizeof (null_state));
2198
2199 collect_finally_tree_1 (bodyp, NULL);
2200 lower_eh_constructs_1 (&null_state, &bodyp);
2201 gimple_set_body (current_function_decl, bodyp);
2202
2203 /* We assume there's a return statement, or something, at the end of
2204 the function, and thus plopping the EH sequence afterward won't
2205 change anything. */
2206 gcc_assert (!gimple_seq_may_fallthru (bodyp));
2207 gimple_seq_add_seq (&bodyp, eh_seq);
2208
2209 /* We assume that since BODYP already existed, adding EH_SEQ to it
2210 didn't change its value, and we don't have to re-set the function. */
2211 gcc_assert (bodyp == gimple_body (current_function_decl));
2212
2213 delete finally_tree;
2214 finally_tree = NULL;
2215 BITMAP_FREE (eh_region_may_contain_throw_map);
2216 eh_seq = NULL;
2217
2218 /* If this function needs a language specific EH personality routine
2219 and the frontend didn't already set one, do so now. */
2220 if (function_needs_eh_personality (fun) == eh_personality_lang
2221 && !DECL_FUNCTION_PERSONALITY (current_function_decl))
2222 DECL_FUNCTION_PERSONALITY (current_function_decl)
2223 = lang_hooks.eh_personality ();
2224
2225 return 0;
2226 }
2227
2228 } // anon namespace
2229
2230 gimple_opt_pass *
2231 make_pass_lower_eh (gcc::context *ctxt)
2232 {
2233 return new pass_lower_eh (ctxt);
2234 }
2235 \f
2236 /* Create the multiple edges from an EH_DISPATCH statement to all of
2237 the possible handlers for its EH region. Return true if there's
2238 no fallthru edge; false if there is. */
2239
2240 bool
2241 make_eh_dispatch_edges (geh_dispatch *stmt)
2242 {
2243 eh_region r;
2244 eh_catch c;
2245 basic_block src, dst;
2246
2247 r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
2248 src = gimple_bb (stmt);
2249
2250 switch (r->type)
2251 {
2252 case ERT_TRY:
2253 for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
2254 {
2255 dst = label_to_block (c->label);
2256 make_edge (src, dst, 0);
2257
2258 /* A catch-all handler doesn't have a fallthru. */
2259 if (c->type_list == NULL)
2260 return false;
2261 }
2262 break;
2263
2264 case ERT_ALLOWED_EXCEPTIONS:
2265 dst = label_to_block (r->u.allowed.label);
2266 make_edge (src, dst, 0);
2267 break;
2268
2269 default:
2270 gcc_unreachable ();
2271 }
2272
2273 return true;
2274 }
2275
2276 /* Create the single EH edge from STMT to its nearest landing pad,
2277 if there is such a landing pad within the current function. */
2278
2279 void
2280 make_eh_edges (gimple stmt)
2281 {
2282 basic_block src, dst;
2283 eh_landing_pad lp;
2284 int lp_nr;
2285
2286 lp_nr = lookup_stmt_eh_lp (stmt);
2287 if (lp_nr <= 0)
2288 return;
2289
2290 lp = get_eh_landing_pad_from_number (lp_nr);
2291 gcc_assert (lp != NULL);
2292
2293 src = gimple_bb (stmt);
2294 dst = label_to_block (lp->post_landing_pad);
2295 make_edge (src, dst, EDGE_EH);
2296 }
2297
2298 /* Do the work in redirecting EDGE_IN to NEW_BB within the EH region tree;
2299 do not actually perform the final edge redirection.
2300
2301 CHANGE_REGION is true when we're being called from cleanup_empty_eh and
2302 we intend to change the destination EH region as well; this means
2303 EH_LANDING_PAD_NR must already be set on the destination block label.
2304 If false, we're being called from generic cfg manipulation code and we
2305 should preserve our place within the region tree. */
2306
2307 static void
2308 redirect_eh_edge_1 (edge edge_in, basic_block new_bb, bool change_region)
2309 {
2310 eh_landing_pad old_lp, new_lp;
2311 basic_block old_bb;
2312 gimple throw_stmt;
2313 int old_lp_nr, new_lp_nr;
2314 tree old_label, new_label;
2315 edge_iterator ei;
2316 edge e;
2317
2318 old_bb = edge_in->dest;
2319 old_label = gimple_block_label (old_bb);
2320 old_lp_nr = EH_LANDING_PAD_NR (old_label);
2321 gcc_assert (old_lp_nr > 0);
2322 old_lp = get_eh_landing_pad_from_number (old_lp_nr);
2323
2324 throw_stmt = last_stmt (edge_in->src);
2325 gcc_assert (lookup_stmt_eh_lp (throw_stmt) == old_lp_nr);
2326
2327 new_label = gimple_block_label (new_bb);
2328
2329 /* Look for an existing region that might be using NEW_BB already. */
2330 new_lp_nr = EH_LANDING_PAD_NR (new_label);
2331 if (new_lp_nr)
2332 {
2333 new_lp = get_eh_landing_pad_from_number (new_lp_nr);
2334 gcc_assert (new_lp);
2335
2336 /* Unless CHANGE_REGION is true, the new and old landing pad
2337 had better be associated with the same EH region. */
2338 gcc_assert (change_region || new_lp->region == old_lp->region);
2339 }
2340 else
2341 {
2342 new_lp = NULL;
2343 gcc_assert (!change_region);
2344 }
2345
2346 /* Notice when we redirect the last EH edge away from OLD_BB. */
2347 FOR_EACH_EDGE (e, ei, old_bb->preds)
2348 if (e != edge_in && (e->flags & EDGE_EH))
2349 break;
2350
2351 if (new_lp)
2352 {
2353 /* NEW_LP already exists. If there are still edges into OLD_LP,
2354 there's nothing to do with the EH tree. If there are no more
2355 edges into OLD_LP, then we want to remove OLD_LP as it is unused.
2356 If CHANGE_REGION is true, then our caller is expecting to remove
2357 the landing pad. */
2358 if (e == NULL && !change_region)
2359 remove_eh_landing_pad (old_lp);
2360 }
2361 else
2362 {
2363 /* No correct landing pad exists. If there are no more edges
2364 into OLD_LP, then we can simply re-use the existing landing pad.
2365 Otherwise, we have to create a new landing pad. */
2366 if (e == NULL)
2367 {
2368 EH_LANDING_PAD_NR (old_lp->post_landing_pad) = 0;
2369 new_lp = old_lp;
2370 }
2371 else
2372 new_lp = gen_eh_landing_pad (old_lp->region);
2373 new_lp->post_landing_pad = new_label;
2374 EH_LANDING_PAD_NR (new_label) = new_lp->index;
2375 }
2376
2377 /* Maybe move the throwing statement to the new region. */
2378 if (old_lp != new_lp)
2379 {
2380 remove_stmt_from_eh_lp (throw_stmt);
2381 add_stmt_to_eh_lp (throw_stmt, new_lp->index);
2382 }
2383 }
2384
2385 /* Redirect EH edge EDGE_IN to NEW_BB. */
2386
2387 edge
2388 redirect_eh_edge (edge edge_in, basic_block new_bb)
2389 {
2390 redirect_eh_edge_1 (edge_in, new_bb, false);
2391 return ssa_redirect_edge (edge_in, new_bb);
2392 }
2393
2394 /* This is a subroutine of gimple_redirect_edge_and_branch. Update the
2395 labels for redirecting a non-fallthru EH_DISPATCH edge E to NEW_BB.
2396 The actual edge update will happen in the caller. */
2397
2398 void
2399 redirect_eh_dispatch_edge (geh_dispatch *stmt, edge e, basic_block new_bb)
2400 {
2401 tree new_lab = gimple_block_label (new_bb);
2402 bool any_changed = false;
2403 basic_block old_bb;
2404 eh_region r;
2405 eh_catch c;
2406
2407 r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
2408 switch (r->type)
2409 {
2410 case ERT_TRY:
2411 for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
2412 {
2413 old_bb = label_to_block (c->label);
2414 if (old_bb == e->dest)
2415 {
2416 c->label = new_lab;
2417 any_changed = true;
2418 }
2419 }
2420 break;
2421
2422 case ERT_ALLOWED_EXCEPTIONS:
2423 old_bb = label_to_block (r->u.allowed.label);
2424 gcc_assert (old_bb == e->dest);
2425 r->u.allowed.label = new_lab;
2426 any_changed = true;
2427 break;
2428
2429 default:
2430 gcc_unreachable ();
2431 }
2432
2433 gcc_assert (any_changed);
2434 }
2435 \f
2436 /* Helper function for operation_could_trap_p and stmt_could_throw_p. */
2437
2438 bool
2439 operation_could_trap_helper_p (enum tree_code op,
2440 bool fp_operation,
2441 bool honor_trapv,
2442 bool honor_nans,
2443 bool honor_snans,
2444 tree divisor,
2445 bool *handled)
2446 {
2447 *handled = true;
2448 switch (op)
2449 {
2450 case TRUNC_DIV_EXPR:
2451 case CEIL_DIV_EXPR:
2452 case FLOOR_DIV_EXPR:
2453 case ROUND_DIV_EXPR:
2454 case EXACT_DIV_EXPR:
2455 case CEIL_MOD_EXPR:
2456 case FLOOR_MOD_EXPR:
2457 case ROUND_MOD_EXPR:
2458 case TRUNC_MOD_EXPR:
2459 case RDIV_EXPR:
2460 if (honor_snans || honor_trapv)
2461 return true;
2462 if (fp_operation)
2463 return flag_trapping_math;
2464 if (!TREE_CONSTANT (divisor) || integer_zerop (divisor))
2465 return true;
2466 return false;
2467
2468 case LT_EXPR:
2469 case LE_EXPR:
2470 case GT_EXPR:
2471 case GE_EXPR:
2472 case LTGT_EXPR:
2473 /* Some floating point comparisons may trap. */
2474 return honor_nans;
2475
2476 case EQ_EXPR:
2477 case NE_EXPR:
2478 case UNORDERED_EXPR:
2479 case ORDERED_EXPR:
2480 case UNLT_EXPR:
2481 case UNLE_EXPR:
2482 case UNGT_EXPR:
2483 case UNGE_EXPR:
2484 case UNEQ_EXPR:
2485 return honor_snans;
2486
2487 case NEGATE_EXPR:
2488 case ABS_EXPR:
2489 case CONJ_EXPR:
2490 /* These operations don't trap with floating point. */
2491 if (honor_trapv)
2492 return true;
2493 return false;
2494
2495 case PLUS_EXPR:
2496 case MINUS_EXPR:
2497 case MULT_EXPR:
2498 /* Any floating arithmetic may trap. */
2499 if (fp_operation && flag_trapping_math)
2500 return true;
2501 if (honor_trapv)
2502 return true;
2503 return false;
2504
2505 case COMPLEX_EXPR:
2506 case CONSTRUCTOR:
2507 /* Constructing an object cannot trap. */
2508 return false;
2509
2510 default:
2511 /* Any floating arithmetic may trap. */
2512 if (fp_operation && flag_trapping_math)
2513 return true;
2514
2515 *handled = false;
2516 return false;
2517 }
2518 }
2519
2520 /* Return true if operation OP may trap. FP_OPERATION is true if OP is applied
2521 on floating-point values. HONOR_TRAPV is true if OP is applied on integer
2522 type operands that may trap. If OP is a division operator, DIVISOR contains
2523 the value of the divisor. */
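
/* A minimal illustration (hypothetical stand-alone calls):

     operation_could_trap_p (TRUNC_DIV_EXPR, false, false,
                             integer_zero_node)

   returns true, since integer division by a divisor not known to be
   non-zero may trap, whereas

     operation_could_trap_p (PLUS_EXPR, false, false, NULL_TREE)

   returns false unless -ftrapv or trapping FP math is in effect.  */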
2524
2525 bool
2526 operation_could_trap_p (enum tree_code op, bool fp_operation, bool honor_trapv,
2527 tree divisor)
2528 {
2529 bool honor_nans = (fp_operation && flag_trapping_math
2530 && !flag_finite_math_only);
2531 bool honor_snans = fp_operation && flag_signaling_nans != 0;
2532 bool handled;
2533
2534 if (TREE_CODE_CLASS (op) != tcc_comparison
2535 && TREE_CODE_CLASS (op) != tcc_unary
2536 && TREE_CODE_CLASS (op) != tcc_binary)
2537 return false;
2538
2539 return operation_could_trap_helper_p (op, fp_operation, honor_trapv,
2540 honor_nans, honor_snans, divisor,
2541 &handled);
2542 }
2543
2544
2545 /* Returns true if it is possible to prove that the index of
2546 an array access REF (an ARRAY_REF expression) falls into the
2547 array bounds. */
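
/* For example (sketch), given

     int a[10];

   the reference a[3] has a constant index within [0, 9] and is
   provably in bounds, while a[i] with a variable index, or a[12],
   is not.  */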
2548
2549 static bool
2550 in_array_bounds_p (tree ref)
2551 {
2552 tree idx = TREE_OPERAND (ref, 1);
2553 tree min, max;
2554
2555 if (TREE_CODE (idx) != INTEGER_CST)
2556 return false;
2557
2558 min = array_ref_low_bound (ref);
2559 max = array_ref_up_bound (ref);
2560 if (!min
2561 || !max
2562 || TREE_CODE (min) != INTEGER_CST
2563 || TREE_CODE (max) != INTEGER_CST)
2564 return false;
2565
2566 if (tree_int_cst_lt (idx, min)
2567 || tree_int_cst_lt (max, idx))
2568 return false;
2569
2570 return true;
2571 }
2572
2573 /* Returns true if it is possible to prove that the range of
2574 an array access REF (an ARRAY_RANGE_REF expression) falls
2575 into the array bounds. */
2576
2577 static bool
2578 range_in_array_bounds_p (tree ref)
2579 {
2580 tree domain_type = TYPE_DOMAIN (TREE_TYPE (ref));
2581 tree range_min, range_max, min, max;
2582
2583 range_min = TYPE_MIN_VALUE (domain_type);
2584 range_max = TYPE_MAX_VALUE (domain_type);
2585 if (!range_min
2586 || !range_max
2587 || TREE_CODE (range_min) != INTEGER_CST
2588 || TREE_CODE (range_max) != INTEGER_CST)
2589 return false;
2590
2591 min = array_ref_low_bound (ref);
2592 max = array_ref_up_bound (ref);
2593 if (!min
2594 || !max
2595 || TREE_CODE (min) != INTEGER_CST
2596 || TREE_CODE (max) != INTEGER_CST)
2597 return false;
2598
2599 if (tree_int_cst_lt (range_min, min)
2600 || tree_int_cst_lt (max, range_max))
2601 return false;
2602
2603 return true;
2604 }
2605
2606 /* Return true if EXPR can trap, as in dereferencing an invalid pointer
2607 location or floating point arithmetic. Cf. the RTL version, may_trap_p.
2608 This routine expects only GIMPLE lhs or rhs input. */
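
/* Illustrative cases (a sketch, not an exhaustive list): *p may trap
   unless TREE_THIS_NOTRAP is set on the dereference; a[i] may trap
   when the index cannot be proven in bounds; x / y may trap for an
   integer y not known to be non-zero; a call through a weak,
   possibly undefined function is conservatively assumed to trap.  */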
2609
2610 bool
2611 tree_could_trap_p (tree expr)
2612 {
2613 enum tree_code code;
2614 bool fp_operation = false;
2615 bool honor_trapv = false;
2616 tree t, base, div = NULL_TREE;
2617
2618 if (!expr)
2619 return false;
2620
2621 code = TREE_CODE (expr);
2622 t = TREE_TYPE (expr);
2623
2624 if (t)
2625 {
2626 if (COMPARISON_CLASS_P (expr))
2627 fp_operation = FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 0)));
2628 else
2629 fp_operation = FLOAT_TYPE_P (t);
2630 honor_trapv = INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t);
2631 }
2632
2633 if (TREE_CODE_CLASS (code) == tcc_binary)
2634 div = TREE_OPERAND (expr, 1);
2635 if (operation_could_trap_p (code, fp_operation, honor_trapv, div))
2636 return true;
2637
2638 restart:
2639 switch (code)
2640 {
2641 case COMPONENT_REF:
2642 case REALPART_EXPR:
2643 case IMAGPART_EXPR:
2644 case BIT_FIELD_REF:
2645 case VIEW_CONVERT_EXPR:
2646 case WITH_SIZE_EXPR:
2647 expr = TREE_OPERAND (expr, 0);
2648 code = TREE_CODE (expr);
2649 goto restart;
2650
2651 case ARRAY_RANGE_REF:
2652 base = TREE_OPERAND (expr, 0);
2653 if (tree_could_trap_p (base))
2654 return true;
2655 if (TREE_THIS_NOTRAP (expr))
2656 return false;
2657 return !range_in_array_bounds_p (expr);
2658
2659 case ARRAY_REF:
2660 base = TREE_OPERAND (expr, 0);
2661 if (tree_could_trap_p (base))
2662 return true;
2663 if (TREE_THIS_NOTRAP (expr))
2664 return false;
2665 return !in_array_bounds_p (expr);
2666
2667 case TARGET_MEM_REF:
2668 case MEM_REF:
2669 if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR
2670 && tree_could_trap_p (TREE_OPERAND (TREE_OPERAND (expr, 0), 0)))
2671 return true;
2672 if (TREE_THIS_NOTRAP (expr))
2673 return false;
2674 /* We cannot prove that the access is in-bounds when we have
2675 variable-index TARGET_MEM_REFs. */
2676 if (code == TARGET_MEM_REF
2677 && (TMR_INDEX (expr) || TMR_INDEX2 (expr)))
2678 return true;
2679 if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR)
2680 {
2681 tree base = TREE_OPERAND (TREE_OPERAND (expr, 0), 0);
2682 offset_int off = mem_ref_offset (expr);
2683 if (wi::neg_p (off, SIGNED))
2684 return true;
2685 if (TREE_CODE (base) == STRING_CST)
2686 return wi::leu_p (TREE_STRING_LENGTH (base), off);
2687 else if (DECL_SIZE_UNIT (base) == NULL_TREE
2688 || TREE_CODE (DECL_SIZE_UNIT (base)) != INTEGER_CST
2689 || wi::leu_p (wi::to_offset (DECL_SIZE_UNIT (base)), off))
2690 return true;
2691 /* Now we are sure the first byte of the access is inside
2692 the object. */
2693 return false;
2694 }
2695 return true;
2696
2697 case INDIRECT_REF:
2698 return !TREE_THIS_NOTRAP (expr);
2699
2700 case ASM_EXPR:
2701 return TREE_THIS_VOLATILE (expr);
2702
2703 case CALL_EXPR:
2704 t = get_callee_fndecl (expr);
2705 /* Assume that calls to weak functions may trap. */
2706 if (!t || !DECL_P (t))
2707 return true;
2708 if (DECL_WEAK (t))
2709 return tree_could_trap_p (t);
2710 return false;
2711
2712 case FUNCTION_DECL:
2713 /* Assume that accesses to weak functions may trap, unless we know
2714 they are certainly defined in the current TU or in some other
2715 LTO partition. */
2716 if (DECL_WEAK (expr) && !DECL_COMDAT (expr) && DECL_EXTERNAL (expr))
2717 {
2718 cgraph_node *node = cgraph_node::get (expr);
2719 if (node)
2720 node = node->function_symbol ();
2721 return !(node && node->in_other_partition);
2722 }
2723 return false;
2724
2725 case VAR_DECL:
2726 /* Assume that accesses to weak vars may trap, unless we know
2727 they are certainly defined in the current TU or in some other
2728 LTO partition. */
2729 if (DECL_WEAK (expr) && !DECL_COMDAT (expr) && DECL_EXTERNAL (expr))
2730 {
2731 varpool_node *node = varpool_node::get (expr);
2732 if (node)
2733 node = node->ultimate_alias_target ();
2734 return !(node && node->in_other_partition);
2735 }
2736 return false;
2737
2738 default:
2739 return false;
2740 }
2741 }
2742
2743
2744 /* Helper for stmt_could_throw_p. Return true if STMT (assumed to be
2745 an assignment or a conditional) may throw. */
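
/* E.g. (sketch) with -fnon-call-exceptions, the assignment

     x = y / z;

   may throw because integer division can trap, and

     x = *p;

   may throw via the operand scan below, while x = y + z on plain,
   non-trapping integers cannot.  */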
2746
2747 static bool
2748 stmt_could_throw_1_p (gimple stmt)
2749 {
2750 enum tree_code code = gimple_expr_code (stmt);
2751 bool honor_nans = false;
2752 bool honor_snans = false;
2753 bool fp_operation = false;
2754 bool honor_trapv = false;
2755 tree t;
2756 size_t i;
2757 bool handled, ret;
2758
2759 if (TREE_CODE_CLASS (code) == tcc_comparison
2760 || TREE_CODE_CLASS (code) == tcc_unary
2761 || TREE_CODE_CLASS (code) == tcc_binary)
2762 {
2763 if (is_gimple_assign (stmt)
2764 && TREE_CODE_CLASS (code) == tcc_comparison)
2765 t = TREE_TYPE (gimple_assign_rhs1 (stmt));
2766 else if (gimple_code (stmt) == GIMPLE_COND)
2767 t = TREE_TYPE (gimple_cond_lhs (stmt));
2768 else
2769 t = gimple_expr_type (stmt);
2770 fp_operation = FLOAT_TYPE_P (t);
2771 if (fp_operation)
2772 {
2773 honor_nans = flag_trapping_math && !flag_finite_math_only;
2774 honor_snans = flag_signaling_nans != 0;
2775 }
2776 else if (INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t))
2777 honor_trapv = true;
2778 }
2779
2780 /* Check if the main expression may trap. */
2781 t = is_gimple_assign (stmt) ? gimple_assign_rhs2 (stmt) : NULL;
2782 ret = operation_could_trap_helper_p (code, fp_operation, honor_trapv,
2783 honor_nans, honor_snans, t,
2784 &handled);
2785 if (handled)
2786 return ret;
2787
2788 /* If the expression does not trap, see if any of the individual operands may
2789 trap. */
2790 for (i = 0; i < gimple_num_ops (stmt); i++)
2791 if (tree_could_trap_p (gimple_op (stmt, i)))
2792 return true;
2793
2794 return false;
2795 }
2796
2797
2798 /* Return true if statement STMT could throw an exception. */
2799
2800 bool
2801 stmt_could_throw_p (gimple stmt)
2802 {
2803 if (!flag_exceptions)
2804 return false;
2805
2806 /* The only statements that can throw an exception are assignments,
2807 conditionals, calls, resx, and asms. */
2808 switch (gimple_code (stmt))
2809 {
2810 case GIMPLE_RESX:
2811 return true;
2812
2813 case GIMPLE_CALL:
2814 return !gimple_call_nothrow_p (as_a <gcall *> (stmt));
2815
2816 case GIMPLE_ASSIGN:
2817 case GIMPLE_COND:
2818 if (!cfun->can_throw_non_call_exceptions)
2819 return false;
2820 return stmt_could_throw_1_p (stmt);
2821
2822 case GIMPLE_ASM:
2823 if (!cfun->can_throw_non_call_exceptions)
2824 return false;
2825 return gimple_asm_volatile_p (as_a <gasm *> (stmt));
2826
2827 default:
2828 return false;
2829 }
2830 }
2831
2832
2833 /* Return true if expression T could throw an exception. */
2834
2835 bool
2836 tree_could_throw_p (tree t)
2837 {
2838 if (!flag_exceptions)
2839 return false;
2840 if (TREE_CODE (t) == MODIFY_EXPR)
2841 {
2842 if (cfun->can_throw_non_call_exceptions
2843 && tree_could_trap_p (TREE_OPERAND (t, 0)))
2844 return true;
2845 t = TREE_OPERAND (t, 1);
2846 }
2847
2848 if (TREE_CODE (t) == WITH_SIZE_EXPR)
2849 t = TREE_OPERAND (t, 0);
2850 if (TREE_CODE (t) == CALL_EXPR)
2851 return (call_expr_flags (t) & ECF_NOTHROW) == 0;
2852 if (cfun->can_throw_non_call_exceptions)
2853 return tree_could_trap_p (t);
2854 return false;
2855 }
2856
2857 /* Return true if STMT can throw an exception that is not caught within
2858 the current function (CFUN). */
2859
2860 bool
2861 stmt_can_throw_external (gimple stmt)
2862 {
2863 int lp_nr;
2864
2865 if (!stmt_could_throw_p (stmt))
2866 return false;
2867
2868 lp_nr = lookup_stmt_eh_lp (stmt);
2869 return lp_nr == 0;
2870 }
2871
2872 /* Return true if STMT can throw an exception that is caught within
2873 the current function (CFUN). */
2874
2875 bool
2876 stmt_can_throw_internal (gimple stmt)
2877 {
2878 int lp_nr;
2879
2880 if (!stmt_could_throw_p (stmt))
2881 return false;
2882
2883 lp_nr = lookup_stmt_eh_lp (stmt);
2884 return lp_nr > 0;
2885 }
2886
2887 /* Given a statement STMT in IFUN, if STMT can no longer throw, then
2888 remove any entry it might have from the EH table. Return true if
2889 any change was made. */
2890
2891 bool
2892 maybe_clean_eh_stmt_fn (struct function *ifun, gimple stmt)
2893 {
2894 if (stmt_could_throw_p (stmt))
2895 return false;
2896 return remove_stmt_from_eh_lp_fn (ifun, stmt);
2897 }
2898
2899 /* Likewise, but always use the current function. */
2900
2901 bool
2902 maybe_clean_eh_stmt (gimple stmt)
2903 {
2904 return maybe_clean_eh_stmt_fn (cfun, stmt);
2905 }
2906
2907 /* Given a statement OLD_STMT and a new statement NEW_STMT that has replaced
2908 OLD_STMT in the function, remove OLD_STMT from the EH table and put NEW_STMT
2909 in the table if it should be in there. Return TRUE if a replacement was
2910 done that may require an EH edge purge. */
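
/* A typical use in a pass that folds statements (a sketch, assuming
   the caller purges dead EH edges afterwards):

     if (maybe_clean_or_replace_eh_stmt (old_stmt, new_stmt))
       gimple_purge_dead_eh_edges (gimple_bb (new_stmt));

   so that EH edges whose source can no longer throw are removed.  */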
2911
2912 bool
2913 maybe_clean_or_replace_eh_stmt (gimple old_stmt, gimple new_stmt)
2914 {
2915 int lp_nr = lookup_stmt_eh_lp (old_stmt);
2916
2917 if (lp_nr != 0)
2918 {
2919 bool new_stmt_could_throw = stmt_could_throw_p (new_stmt);
2920
2921 if (new_stmt == old_stmt && new_stmt_could_throw)
2922 return false;
2923
2924 remove_stmt_from_eh_lp (old_stmt);
2925 if (new_stmt_could_throw)
2926 {
2927 add_stmt_to_eh_lp (new_stmt, lp_nr);
2928 return false;
2929 }
2930 else
2931 return true;
2932 }
2933
2934 return false;
2935 }
2936
2937 /* Given a statement OLD_STMT in OLD_FUN and a duplicate statement NEW_STMT
2938 in NEW_FUN, copy the EH table data from OLD_STMT to NEW_STMT. The MAP
2939 operand is the return value of duplicate_eh_regions. */
2940
2941 bool
2942 maybe_duplicate_eh_stmt_fn (struct function *new_fun, gimple new_stmt,
2943 struct function *old_fun, gimple old_stmt,
2944 hash_map<void *, void *> *map,
2945 int default_lp_nr)
2946 {
2947 int old_lp_nr, new_lp_nr;
2948
2949 if (!stmt_could_throw_p (new_stmt))
2950 return false;
2951
2952 old_lp_nr = lookup_stmt_eh_lp_fn (old_fun, old_stmt);
2953 if (old_lp_nr == 0)
2954 {
2955 if (default_lp_nr == 0)
2956 return false;
2957 new_lp_nr = default_lp_nr;
2958 }
2959 else if (old_lp_nr > 0)
2960 {
2961 eh_landing_pad old_lp, new_lp;
2962
2963 old_lp = (*old_fun->eh->lp_array)[old_lp_nr];
2964 new_lp = static_cast<eh_landing_pad> (*map->get (old_lp));
2965 new_lp_nr = new_lp->index;
2966 }
2967 else
2968 {
2969 eh_region old_r, new_r;
2970
2971 old_r = (*old_fun->eh->region_array)[-old_lp_nr];
2972 new_r = static_cast<eh_region> (*map->get (old_r));
2973 new_lp_nr = -new_r->index;
2974 }
2975
2976 add_stmt_to_eh_lp_fn (new_fun, new_stmt, new_lp_nr);
2977 return true;
2978 }
2979
2980 /* Similar, but both OLD_STMT and NEW_STMT are within the current function,
2981 and thus no remapping is required. */
2982
2983 bool
2984 maybe_duplicate_eh_stmt (gimple new_stmt, gimple old_stmt)
2985 {
2986 int lp_nr;
2987
2988 if (!stmt_could_throw_p (new_stmt))
2989 return false;
2990
2991 lp_nr = lookup_stmt_eh_lp (old_stmt);
2992 if (lp_nr == 0)
2993 return false;
2994
2995 add_stmt_to_eh_lp (new_stmt, lp_nr);
2996 return true;
2997 }
2998 \f
2999 /* Returns TRUE if oneh and twoh are exception handlers (gimple_try_cleanup of
3000 GIMPLE_TRY) that are similar enough to be considered the same. Currently
3001 this only handles handlers consisting of a single call, as that's the
3002 important case for C++: a destructor call for a particular object showing
3003 up in multiple handlers. */
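
/* E.g. (sketch) two handlers that each consist of the single call

     Obj::~Obj (&o);

   with identical arguments compare equal here, which lets
   optimize_double_finally below rewrite the nested tries.  */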
3004
3005 static bool
3006 same_handler_p (gimple_seq oneh, gimple_seq twoh)
3007 {
3008 gimple_stmt_iterator gsi;
3009 gimple ones, twos;
3010 unsigned int ai;
3011
3012 gsi = gsi_start (oneh);
3013 if (!gsi_one_before_end_p (gsi))
3014 return false;
3015 ones = gsi_stmt (gsi);
3016
3017 gsi = gsi_start (twoh);
3018 if (!gsi_one_before_end_p (gsi))
3019 return false;
3020 twos = gsi_stmt (gsi);
3021
3022 if (!is_gimple_call (ones)
3023 || !is_gimple_call (twos)
3024 || gimple_call_lhs (ones)
3025 || gimple_call_lhs (twos)
3026 || gimple_call_chain (ones)
3027 || gimple_call_chain (twos)
3028 || !gimple_call_same_target_p (ones, twos)
3029 || gimple_call_num_args (ones) != gimple_call_num_args (twos))
3030 return false;
3031
3032 for (ai = 0; ai < gimple_call_num_args (ones); ++ai)
3033 if (!operand_equal_p (gimple_call_arg (ones, ai),
3034 gimple_call_arg (twos, ai), 0))
3035 return false;
3036
3037 return true;
3038 }
3039
3040 /* Optimize
3041 try { A() } finally { try { ~B() } catch { ~A() } }
3042 try { ... } finally { ~A() }
3043 into
3044 try { A() } catch { ~B() }
3045 try { ~B() ... } finally { ~A() }
3046
3047 This occurs frequently in C++, where A is a local variable and B is a
3048 temporary used in the initializer for A. */
3049
3050 static void
3051 optimize_double_finally (gtry *one, gtry *two)
3052 {
3053 gimple oneh;
3054 gimple_stmt_iterator gsi;
3055 gimple_seq cleanup;
3056
3057 cleanup = gimple_try_cleanup (one);
3058 gsi = gsi_start (cleanup);
3059 if (!gsi_one_before_end_p (gsi))
3060 return;
3061
3062 oneh = gsi_stmt (gsi);
3063 if (gimple_code (oneh) != GIMPLE_TRY
3064 || gimple_try_kind (oneh) != GIMPLE_TRY_CATCH)
3065 return;
3066
3067 if (same_handler_p (gimple_try_cleanup (oneh), gimple_try_cleanup (two)))
3068 {
3069 gimple_seq seq = gimple_try_eval (oneh);
3070
3071 gimple_try_set_cleanup (one, seq);
3072 gimple_try_set_kind (one, GIMPLE_TRY_CATCH);
3073 seq = copy_gimple_seq_and_replace_locals (seq);
3074 gimple_seq_add_seq (&seq, gimple_try_eval (two));
3075 gimple_try_set_eval (two, seq);
3076 }
3077 }
3078
3079 /* Perform EH refactoring optimizations that are simpler to do when code
3080 flow has been lowered but EH structures haven't. */
3081
3082 static void
3083 refactor_eh_r (gimple_seq seq)
3084 {
3085 gimple_stmt_iterator gsi;
3086 gimple one, two;
3087
3088 one = NULL;
3089 two = NULL;
3090 gsi = gsi_start (seq);
3091 while (1)
3092 {
3093 one = two;
3094 if (gsi_end_p (gsi))
3095 two = NULL;
3096 else
3097 two = gsi_stmt (gsi);
3098 if (one && two)
3099 if (gtry *try_one = dyn_cast <gtry *> (one))
3100 if (gtry *try_two = dyn_cast <gtry *> (two))
3101 if (gimple_try_kind (try_one) == GIMPLE_TRY_FINALLY
3102 && gimple_try_kind (try_two) == GIMPLE_TRY_FINALLY)
3103 optimize_double_finally (try_one, try_two);
3104 if (one)
3105 switch (gimple_code (one))
3106 {
3107 case GIMPLE_TRY:
3108 refactor_eh_r (gimple_try_eval (one));
3109 refactor_eh_r (gimple_try_cleanup (one));
3110 break;
3111 case GIMPLE_CATCH:
3112 refactor_eh_r (gimple_catch_handler (as_a <gcatch *> (one)));
3113 break;
3114 case GIMPLE_EH_FILTER:
3115 refactor_eh_r (gimple_eh_filter_failure (one));
3116 break;
3117 case GIMPLE_EH_ELSE:
3118 {
3119 geh_else *eh_else_stmt = as_a <geh_else *> (one);
3120 refactor_eh_r (gimple_eh_else_n_body (eh_else_stmt));
3121 refactor_eh_r (gimple_eh_else_e_body (eh_else_stmt));
3122 }
3123 break;
3124 default:
3125 break;
3126 }
3127 if (two)
3128 gsi_next (&gsi);
3129 else
3130 break;
3131 }
3132 }
3133
3134 namespace {
3135
3136 const pass_data pass_data_refactor_eh =
3137 {
3138 GIMPLE_PASS, /* type */
3139 "ehopt", /* name */
3140 OPTGROUP_NONE, /* optinfo_flags */
3141 TV_TREE_EH, /* tv_id */
3142 PROP_gimple_lcf, /* properties_required */
3143 0, /* properties_provided */
3144 0, /* properties_destroyed */
3145 0, /* todo_flags_start */
3146 0, /* todo_flags_finish */
3147 };
3148
3149 class pass_refactor_eh : public gimple_opt_pass
3150 {
3151 public:
3152 pass_refactor_eh (gcc::context *ctxt)
3153 : gimple_opt_pass (pass_data_refactor_eh, ctxt)
3154 {}
3155
3156 /* opt_pass methods: */
3157 virtual bool gate (function *) { return flag_exceptions != 0; }
3158 virtual unsigned int execute (function *)
3159 {
3160 refactor_eh_r (gimple_body (current_function_decl));
3161 return 0;
3162 }
3163
3164 }; // class pass_refactor_eh
3165
3166 } // anon namespace
3167
3168 gimple_opt_pass *
3169 make_pass_refactor_eh (gcc::context *ctxt)
3170 {
3171 return new pass_refactor_eh (ctxt);
3172 }
3173 \f
3174 /* At the end of gimple optimization, we can lower RESX. */
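
/* E.g. (sketch) a RESX whose exception leaves the function becomes

     _1 = __builtin_eh_pointer (src_nr);
     _Unwind_Resume (_1);

   (or a __cxa_end_cleanup call under the ARM EABI), while a RESX
   resuming into another region of the same function copies the
   exception values with __builtin_eh_copy_values and falls through
   to the destination landing pad.  */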
3175
3176 static bool
3177 lower_resx (basic_block bb, gresx *stmt,
3178 hash_map<eh_region, tree> *mnt_map)
3179 {
3180 int lp_nr;
3181 eh_region src_r, dst_r;
3182 gimple_stmt_iterator gsi;
3183 gimple x;
3184 tree fn, src_nr;
3185 bool ret = false;
3186
3187 lp_nr = lookup_stmt_eh_lp (stmt);
3188 if (lp_nr != 0)
3189 dst_r = get_eh_region_from_lp_number (lp_nr);
3190 else
3191 dst_r = NULL;
3192
3193 src_r = get_eh_region_from_number (gimple_resx_region (stmt));
3194 gsi = gsi_last_bb (bb);
3195
3196 if (src_r == NULL)
3197 {
3198 /* We can wind up with no source region when pass_cleanup_eh shows
3199 that there are no entries into an eh region and deletes it, but
3200 then the block that contains the resx isn't removed. This can
3201 happen without optimization when the switch statement created by
3202 lower_try_finally_switch isn't simplified to remove the eh case.
3203
3204 Resolve this by expanding the resx node to an abort. */
3205
3206 fn = builtin_decl_implicit (BUILT_IN_TRAP);
3207 x = gimple_build_call (fn, 0);
3208 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3209
3210 while (EDGE_COUNT (bb->succs) > 0)
3211 remove_edge (EDGE_SUCC (bb, 0));
3212 }
3213 else if (dst_r)
3214 {
3215 /* When we have a destination region, we resolve this by copying
3216 the excptr and filter values into place, and changing the edge
3217 to immediately after the landing pad. */
3218 edge e;
3219
3220 if (lp_nr < 0)
3221 {
3222 basic_block new_bb;
3223 tree lab;
3224
3225 /* We are resuming into a MUST_NOT_THROW region. Expand a call to
3226 the failure decl into a new block, if needed. */
3227 gcc_assert (dst_r->type == ERT_MUST_NOT_THROW);
3228
3229 tree *slot = mnt_map->get (dst_r);
3230 if (slot == NULL)
3231 {
3232 gimple_stmt_iterator gsi2;
3233
3234 new_bb = create_empty_bb (bb);
3235 add_bb_to_loop (new_bb, bb->loop_father);
3236 lab = gimple_block_label (new_bb);
3237 gsi2 = gsi_start_bb (new_bb);
3238
3239 fn = dst_r->u.must_not_throw.failure_decl;
3240 x = gimple_build_call (fn, 0);
3241 gimple_set_location (x, dst_r->u.must_not_throw.failure_loc);
3242 gsi_insert_after (&gsi2, x, GSI_CONTINUE_LINKING);
3243
3244 mnt_map->put (dst_r, lab);
3245 }
3246 else
3247 {
3248 lab = *slot;
3249 new_bb = label_to_block (lab);
3250 }
3251
3252 gcc_assert (EDGE_COUNT (bb->succs) == 0);
3253 e = make_edge (bb, new_bb, EDGE_FALLTHRU);
3254 e->count = bb->count;
3255 e->probability = REG_BR_PROB_BASE;
3256 }
3257 else
3258 {
3259 edge_iterator ei;
3260 tree dst_nr = build_int_cst (integer_type_node, dst_r->index);
3261
3262 fn = builtin_decl_implicit (BUILT_IN_EH_COPY_VALUES);
3263 src_nr = build_int_cst (integer_type_node, src_r->index);
3264 x = gimple_build_call (fn, 2, dst_nr, src_nr);
3265 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3266
3267 /* Update the flags for the outgoing edge. */
3268 e = single_succ_edge (bb);
3269 gcc_assert (e->flags & EDGE_EH);
3270 e->flags = (e->flags & ~EDGE_EH) | EDGE_FALLTHRU;
3271
3272 /* If there are no more EH users of the landing pad, delete it. */
3273 FOR_EACH_EDGE (e, ei, e->dest->preds)
3274 if (e->flags & EDGE_EH)
3275 break;
3276 if (e == NULL)
3277 {
3278 eh_landing_pad lp = get_eh_landing_pad_from_number (lp_nr);
3279 remove_eh_landing_pad (lp);
3280 }
3281 }
3282
3283 ret = true;
3284 }
3285 else
3286 {
3287 tree var;
3288
3289 /* When we don't have a destination region, this exception escapes
3290 up the call chain. We resolve this by generating a call to the
3291 _Unwind_Resume library function. */
3292
3293 /* The ARM EABI redefines _Unwind_Resume as __cxa_end_cleanup
3294 with no arguments for C++ and Java. Check for that. */
3295 if (src_r->use_cxa_end_cleanup)
3296 {
3297 fn = builtin_decl_implicit (BUILT_IN_CXA_END_CLEANUP);
3298 x = gimple_build_call (fn, 0);
3299 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3300 }
3301 else
3302 {
3303 fn = builtin_decl_implicit (BUILT_IN_EH_POINTER);
3304 src_nr = build_int_cst (integer_type_node, src_r->index);
3305 x = gimple_build_call (fn, 1, src_nr);
3306 var = create_tmp_var (ptr_type_node);
3307 var = make_ssa_name (var, x);
3308 gimple_call_set_lhs (x, var);
3309 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3310
3311 fn = builtin_decl_implicit (BUILT_IN_UNWIND_RESUME);
3312 x = gimple_build_call (fn, 1, var);
3313 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3314 }
3315
3316 gcc_assert (EDGE_COUNT (bb->succs) == 0);
3317 }
3318
3319 gsi_remove (&gsi, true);
3320
3321 return ret;
3322 }
3323
3324 namespace {
3325
3326 const pass_data pass_data_lower_resx =
3327 {
3328 GIMPLE_PASS, /* type */
3329 "resx", /* name */
3330 OPTGROUP_NONE, /* optinfo_flags */
3331 TV_TREE_EH, /* tv_id */
3332 PROP_gimple_lcf, /* properties_required */
3333 0, /* properties_provided */
3334 0, /* properties_destroyed */
3335 0, /* todo_flags_start */
3336 0, /* todo_flags_finish */
3337 };
3338
3339 class pass_lower_resx : public gimple_opt_pass
3340 {
3341 public:
3342 pass_lower_resx (gcc::context *ctxt)
3343 : gimple_opt_pass (pass_data_lower_resx, ctxt)
3344 {}
3345
3346 /* opt_pass methods: */
3347 virtual bool gate (function *) { return flag_exceptions != 0; }
3348 virtual unsigned int execute (function *);
3349
3350 }; // class pass_lower_resx
3351
3352 unsigned
3353 pass_lower_resx::execute (function *fun)
3354 {
3355 basic_block bb;
3356 bool dominance_invalidated = false;
3357 bool any_rewritten = false;
3358
3359 hash_map<eh_region, tree> mnt_map;
3360
3361 FOR_EACH_BB_FN (bb, fun)
3362 {
3363 gimple last = last_stmt (bb);
3364 if (last && is_gimple_resx (last))
3365 {
3366 dominance_invalidated |=
3367 lower_resx (bb, as_a <gresx *> (last), &mnt_map);
3368 any_rewritten = true;
3369 }
3370 }
3371
3372 if (dominance_invalidated)
3373 {
3374 free_dominance_info (CDI_DOMINATORS);
3375 free_dominance_info (CDI_POST_DOMINATORS);
3376 }
3377
3378 return any_rewritten ? TODO_update_ssa_only_virtuals : 0;
3379 }
3380
3381 } // anon namespace
3382
3383 gimple_opt_pass *
3384 make_pass_lower_resx (gcc::context *ctxt)
3385 {
3386 return new pass_lower_resx (ctxt);
3387 }
3388
3389 /* Try to optimize var = {v} {CLOBBER} stmts followed just by
3390 external throw. */
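
/* E.g. (sketch) in a block such as

     x ={v} {CLOBBER};
     y ={v} {CLOBBER};
     resx 1;

   where the resx rethrows out of the function, the frame is dead on
   that path and the clobbers can simply be removed.  */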
3391
3392 static void
3393 optimize_clobbers (basic_block bb)
3394 {
3395 gimple_stmt_iterator gsi = gsi_last_bb (bb);
3396 bool any_clobbers = false;
3397 bool seen_stack_restore = false;
3398 edge_iterator ei;
3399 edge e;
3400
3401 /* Only optimize anything if the bb contains at least one clobber,
3402 ends with resx (checked by caller), optionally contains some
3403 debug stmts or labels and at most one __builtin_stack_restore
3404 call, and has an incoming EH edge. */
3405 for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3406 {
3407 gimple stmt = gsi_stmt (gsi);
3408 if (is_gimple_debug (stmt))
3409 continue;
3410 if (gimple_clobber_p (stmt))
3411 {
3412 any_clobbers = true;
3413 continue;
3414 }
3415 if (!seen_stack_restore
3416 && gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
3417 {
3418 seen_stack_restore = true;
3419 continue;
3420 }
3421 if (gimple_code (stmt) == GIMPLE_LABEL)
3422 break;
3423 return;
3424 }
3425 if (!any_clobbers)
3426 return;
3427 FOR_EACH_EDGE (e, ei, bb->preds)
3428 if (e->flags & EDGE_EH)
3429 break;
3430 if (e == NULL)
3431 return;
3432 gsi = gsi_last_bb (bb);
3433 for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3434 {
3435 gimple stmt = gsi_stmt (gsi);
3436 if (!gimple_clobber_p (stmt))
3437 continue;
3438 unlink_stmt_vdef (stmt);
3439 gsi_remove (&gsi, true);
3440 release_defs (stmt);
3441 }
3442 }
3443
3444 /* Try to sink var = {v} {CLOBBER} stmts followed just by
3445 internal throw to successor BB. */
3446
3447 static int
3448 sink_clobbers (basic_block bb)
3449 {
3450 edge e;
3451 edge_iterator ei;
3452 gimple_stmt_iterator gsi, dgsi;
3453 basic_block succbb;
3454 bool any_clobbers = false;
3455 unsigned todo = 0;
3456
3457 /* Only optimize if BB has a single EH successor and
3458 all predecessor edges are EH too. */
3459 if (!single_succ_p (bb)
3460 || (single_succ_edge (bb)->flags & EDGE_EH) == 0)
3461 return 0;
3462
3463 FOR_EACH_EDGE (e, ei, bb->preds)
3464 {
3465 if ((e->flags & EDGE_EH) == 0)
3466 return 0;
3467 }
3468
3469 /* And BB contains only CLOBBER stmts before the final
3470 RESX. */
3471 gsi = gsi_last_bb (bb);
3472 for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3473 {
3474 gimple stmt = gsi_stmt (gsi);
3475 if (is_gimple_debug (stmt))
3476 continue;
3477 if (gimple_code (stmt) == GIMPLE_LABEL)
3478 break;
3479 if (!gimple_clobber_p (stmt))
3480 return 0;
3481 any_clobbers = true;
3482 }
3483 if (!any_clobbers)
3484 return 0;
3485
3486 edge succe = single_succ_edge (bb);
3487 succbb = succe->dest;
3488
3489 /* See if there is a virtual PHI node to take an updated virtual
3490 operand from. */
3491 gphi *vphi = NULL;
3492 tree vuse = NULL_TREE;
3493 for (gphi_iterator gpi = gsi_start_phis (succbb);
3494 !gsi_end_p (gpi); gsi_next (&gpi))
3495 {
3496 tree res = gimple_phi_result (gpi.phi ());
3497 if (virtual_operand_p (res))
3498 {
3499 vphi = gpi.phi ();
3500 vuse = res;
3501 break;
3502 }
3503 }
3504
3505 dgsi = gsi_after_labels (succbb);
3506 gsi = gsi_last_bb (bb);
3507 for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3508 {
3509 gimple stmt = gsi_stmt (gsi);
3510 tree lhs;
3511 if (is_gimple_debug (stmt))
3512 continue;
3513 if (gimple_code (stmt) == GIMPLE_LABEL)
3514 break;
3515 lhs = gimple_assign_lhs (stmt);
3516 /* Unfortunately we don't have dominance info updated at this
3517 point, so checking if
3518 dominated_by_p (CDI_DOMINATORS, succbb,
3519 gimple_bb (SSA_NAME_DEF_STMT (TREE_OPERAND (lhs, 0)))
3520 would be too costly. Thus, avoid sinking any clobbers that
3521 refer to non-(D) SSA_NAMEs. */
3522 if (TREE_CODE (lhs) == MEM_REF
3523 && TREE_CODE (TREE_OPERAND (lhs, 0)) == SSA_NAME
3524 && !SSA_NAME_IS_DEFAULT_DEF (TREE_OPERAND (lhs, 0)))
3525 {
3526 unlink_stmt_vdef (stmt);
3527 gsi_remove (&gsi, true);
3528 release_defs (stmt);
3529 continue;
3530 }
3531
3532 /* As we do not change stmt order when sinking across a
3533 forwarder edge we can keep virtual operands in place. */
3534 gsi_remove (&gsi, false);
3535 gsi_insert_before (&dgsi, stmt, GSI_NEW_STMT);
3536
3537 /* But adjust virtual operands if we sunk across a PHI node. */
3538 if (vuse)
3539 {
3540 gimple use_stmt;
3541 imm_use_iterator iter;
3542 use_operand_p use_p;
3543 FOR_EACH_IMM_USE_STMT (use_stmt, iter, vuse)
3544 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
3545 SET_USE (use_p, gimple_vdef (stmt));
3546 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (vuse))
3547 {
3548 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_vdef (stmt)) = 1;
3549 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (vuse) = 0;
3550 }
3551 /* Adjust the incoming virtual operand. */
3552 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (vphi, succe), gimple_vuse (stmt));
3553 SET_USE (gimple_vuse_op (stmt), vuse);
3554 }
3555 /* If the successor has multiple predecessors but no virtual PHI
3556 node, arrange for virtual operands to be renamed. */
3557 else if (gimple_vuse_op (stmt) != NULL_USE_OPERAND_P
3558 && !single_pred_p (succbb))
3559 {
3560 /* In this case there will be no use of the VDEF of this stmt.
3561 ??? Unless this is a secondary opportunity and we have not
3562 removed unreachable blocks yet, so we cannot assert this.
3563 Which also means we will end up renaming too many times. */
3564 SET_USE (gimple_vuse_op (stmt), gimple_vop (cfun));
3565 mark_virtual_operands_for_renaming (cfun);
3566 todo |= TODO_update_ssa_only_virtuals;
3567 }
3568 }
3569
3570 return todo;
3571 }
3572
3573 /* At the end of inlining, we can lower EH_DISPATCH. Return true when
3574 we have found some duplicate labels and removed some edges. */
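
/* E.g. (sketch) a dispatch for a try with two typed handlers becomes

     _1 = __builtin_eh_filter (region_nr);
     switch (_1) <default: Ld, case 1: L1, case 2: L2>

   while an ERT_ALLOWED_EXCEPTIONS region becomes a conditional
   comparing _1 against the region's allowed filter value.  */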
3575
3576 static bool
3577 lower_eh_dispatch (basic_block src, geh_dispatch *stmt)
3578 {
3579 gimple_stmt_iterator gsi;
3580 int region_nr;
3581 eh_region r;
3582 tree filter, fn;
3583 gimple x;
3584 bool redirected = false;
3585
3586 region_nr = gimple_eh_dispatch_region (stmt);
3587 r = get_eh_region_from_number (region_nr);
3588
3589 gsi = gsi_last_bb (src);
3590
3591 switch (r->type)
3592 {
3593 case ERT_TRY:
3594 {
3595 auto_vec<tree> labels;
3596 tree default_label = NULL;
3597 eh_catch c;
3598 edge_iterator ei;
3599 edge e;
3600 hash_set<tree> seen_values;
3601
3602 /* Collect the labels for a switch. Zero the post_landing_pad
3603 field because we'll no longer have anything keeping these labels
3604 in existence and the optimizer will be free to merge these
3605 blocks at will. */
3606 for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
3607 {
3608 tree tp_node, flt_node, lab = c->label;
3609 bool have_label = false;
3610
3611 c->label = NULL;
3612 tp_node = c->type_list;
3613 flt_node = c->filter_list;
3614
3615 if (tp_node == NULL)
3616 {
3617 default_label = lab;
3618 break;
3619 }
3620 do
3621 {
3622 /* Filter out duplicate labels that arise when this handler
3623 is shadowed by an earlier one. When no labels are
3624 attached to the handler anymore, we remove
3625 the corresponding edge and then we delete unreachable
3626 blocks at the end of this pass. */
3627 if (! seen_values.contains (TREE_VALUE (flt_node)))
3628 {
3629 tree t = build_case_label (TREE_VALUE (flt_node),
3630 NULL, lab);
3631 labels.safe_push (t);
3632 seen_values.add (TREE_VALUE (flt_node));
3633 have_label = true;
3634 }
3635
3636 tp_node = TREE_CHAIN (tp_node);
3637 flt_node = TREE_CHAIN (flt_node);
3638 }
3639 while (tp_node);
3640 if (! have_label)
3641 {
3642 remove_edge (find_edge (src, label_to_block (lab)));
3643 redirected = true;
3644 }
3645 }
3646
3647 /* Clean up the edge flags. */
3648 FOR_EACH_EDGE (e, ei, src->succs)
3649 {
3650 if (e->flags & EDGE_FALLTHRU)
3651 {
3652 /* If there was no catch-all, use the fallthru edge. */
3653 if (default_label == NULL)
3654 default_label = gimple_block_label (e->dest);
3655 e->flags &= ~EDGE_FALLTHRU;
3656 }
3657 }
3658 gcc_assert (default_label != NULL);
3659
3660 /* Don't generate a switch if there's only a default case.
3661 This is common in the form of try { A; } catch (...) { B; }. */
3662 if (!labels.exists ())
3663 {
3664 e = single_succ_edge (src);
3665 e->flags |= EDGE_FALLTHRU;
3666 }
3667 else
3668 {
3669 fn = builtin_decl_implicit (BUILT_IN_EH_FILTER);
3670 x = gimple_build_call (fn, 1, build_int_cst (integer_type_node,
3671 region_nr));
3672 filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)));
3673 filter = make_ssa_name (filter, x);
3674 gimple_call_set_lhs (x, filter);
3675 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3676
3677 /* Turn the default label into a default case. */
3678 default_label = build_case_label (NULL, NULL, default_label);
3679 sort_case_labels (labels);
3680
3681 x = gimple_build_switch (filter, default_label, labels);
3682 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3683 }
3684 }
3685 break;
3686
3687 case ERT_ALLOWED_EXCEPTIONS:
3688 {
3689 edge b_e = BRANCH_EDGE (src);
3690 edge f_e = FALLTHRU_EDGE (src);
3691
3692 fn = builtin_decl_implicit (BUILT_IN_EH_FILTER);
3693 x = gimple_build_call (fn, 1, build_int_cst (integer_type_node,
3694 region_nr));
3695 filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)));
3696 filter = make_ssa_name (filter, x);
3697 gimple_call_set_lhs (x, filter);
3698 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3699
3700 r->u.allowed.label = NULL;
3701 x = gimple_build_cond (EQ_EXPR, filter,
3702 build_int_cst (TREE_TYPE (filter),
3703 r->u.allowed.filter),
3704 NULL_TREE, NULL_TREE);
3705 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3706
3707 b_e->flags = b_e->flags | EDGE_TRUE_VALUE;
3708 f_e->flags = (f_e->flags & ~EDGE_FALLTHRU) | EDGE_FALSE_VALUE;
3709 }
3710 break;
3711
3712 default:
3713 gcc_unreachable ();
3714 }
3715
3716 /* Replace the EH_DISPATCH with the SWITCH or COND generated above. */
3717 gsi_remove (&gsi, true);
3718 return redirected;
3719 }
3720
3721 namespace {
3722
3723 const pass_data pass_data_lower_eh_dispatch =
3724 {
3725 GIMPLE_PASS, /* type */
3726 "ehdisp", /* name */
3727 OPTGROUP_NONE, /* optinfo_flags */
3728 TV_TREE_EH, /* tv_id */
3729 PROP_gimple_lcf, /* properties_required */
3730 0, /* properties_provided */
3731 0, /* properties_destroyed */
3732 0, /* todo_flags_start */
3733 0, /* todo_flags_finish */
3734 };
3735
3736 class pass_lower_eh_dispatch : public gimple_opt_pass
3737 {
3738 public:
3739 pass_lower_eh_dispatch (gcc::context *ctxt)
3740 : gimple_opt_pass (pass_data_lower_eh_dispatch, ctxt)
3741 {}
3742
3743 /* opt_pass methods: */
3744 virtual bool gate (function *fun) { return fun->eh->region_tree != NULL; }
3745 virtual unsigned int execute (function *);
3746
3747 }; // class pass_lower_eh_dispatch
3748
3749 unsigned
3750 pass_lower_eh_dispatch::execute (function *fun)
3751 {
3752 basic_block bb;
3753 int flags = 0;
3754 bool redirected = false;
3755
3756 assign_filter_values ();
3757
3758 FOR_EACH_BB_FN (bb, fun)
3759 {
3760 gimple last = last_stmt (bb);
3761 if (last == NULL)
3762 continue;
3763 if (gimple_code (last) == GIMPLE_EH_DISPATCH)
3764 {
3765 redirected |= lower_eh_dispatch (bb,
3766 as_a <geh_dispatch *> (last));
3767 flags |= TODO_update_ssa_only_virtuals;
3768 }
3769 else if (gimple_code (last) == GIMPLE_RESX)
3770 {
3771 if (stmt_can_throw_external (last))
3772 optimize_clobbers (bb);
3773 else
3774 flags |= sink_clobbers (bb);
3775 }
3776 }
3777
3778 if (redirected)
3779 delete_unreachable_blocks ();
3780 return flags;
3781 }
3782
3783 } // anon namespace
3784
3785 gimple_opt_pass *
3786 make_pass_lower_eh_dispatch (gcc::context *ctxt)
3787 {
3788 return new pass_lower_eh_dispatch (ctxt);
3789 }
3790 \f
3791 /* Walk statements, see what regions and, optionally, landing pads
3792 are really referenced.
3793
3794 Returns in R_REACHABLEP an sbitmap with bits set for reachable regions,
3795 and in LP_REACHABLEP an sbitmap with bits set for reachable landing pads.
3796
3797 Passing NULL for LP_REACHABLEP is valid, in which case only reachable
3798 regions are marked.
3799
3800 The caller is responsible for freeing the returned sbitmaps. */
3801
3802 static void
3803 mark_reachable_handlers (sbitmap *r_reachablep, sbitmap *lp_reachablep)
3804 {
3805 sbitmap r_reachable, lp_reachable;
3806 basic_block bb;
3807 bool mark_landing_pads = (lp_reachablep != NULL);
3808 gcc_checking_assert (r_reachablep != NULL);
3809
3810 r_reachable = sbitmap_alloc (cfun->eh->region_array->length ());
3811 bitmap_clear (r_reachable);
3812 *r_reachablep = r_reachable;
3813
3814 if (mark_landing_pads)
3815 {
3816 lp_reachable = sbitmap_alloc (cfun->eh->lp_array->length ());
3817 bitmap_clear (lp_reachable);
3818 *lp_reachablep = lp_reachable;
3819 }
3820 else
3821 lp_reachable = NULL;
3822
3823 FOR_EACH_BB_FN (bb, cfun)
3824 {
3825 gimple_stmt_iterator gsi;
3826
3827 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3828 {
3829 gimple stmt = gsi_stmt (gsi);
3830
3831 if (mark_landing_pads)
3832 {
3833 int lp_nr = lookup_stmt_eh_lp (stmt);
3834
3835 /* Negative LP numbers are MUST_NOT_THROW regions which
3836 are not considered BB enders. */
3837 if (lp_nr < 0)
3838 bitmap_set_bit (r_reachable, -lp_nr);
3839
3840 /* Positive LP numbers are real landing pads, and BB enders. */
3841 else if (lp_nr > 0)
3842 {
3843 gcc_assert (gsi_one_before_end_p (gsi));
3844 eh_region region = get_eh_region_from_lp_number (lp_nr);
3845 bitmap_set_bit (r_reachable, region->index);
3846 bitmap_set_bit (lp_reachable, lp_nr);
3847 }
3848 }
3849
3850 /* Avoid removing regions referenced from RESX/EH_DISPATCH. */
3851 switch (gimple_code (stmt))
3852 {
3853 case GIMPLE_RESX:
3854 bitmap_set_bit (r_reachable,
3855 gimple_resx_region (as_a <gresx *> (stmt)));
3856 break;
3857 case GIMPLE_EH_DISPATCH:
3858 bitmap_set_bit (r_reachable,
3859 gimple_eh_dispatch_region (
3860 as_a <geh_dispatch *> (stmt)));
3861 break;
3862 default:
3863 break;
3864 }
3865 }
3866 }
3867 }
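/* A minimal usage sketch for mark_reachable_handlers, mirroring its
   callers below (illustrative only):

	sbitmap r_reachable, lp_reachable;
	mark_reachable_handlers (&r_reachable, &lp_reachable);
	if (!bitmap_bit_p (r_reachable, region->index))
	  ... REGION is referenced by no statement and may be removed ...
	sbitmap_free (r_reachable);
	sbitmap_free (lp_reachable);

   Both sbitmaps are allocated here and must be freed by the caller,
   as remove_unreachable_handlers does.  */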
3868
3869 /* Remove unreachable handlers and unreachable landing pads. */
3870
3871 static void
3872 remove_unreachable_handlers (void)
3873 {
3874 sbitmap r_reachable, lp_reachable;
3875 eh_region region;
3876 eh_landing_pad lp;
3877 unsigned i;
3878
3879 mark_reachable_handlers (&r_reachable, &lp_reachable);
3880
3881 if (dump_file)
3882 {
3883 fprintf (dump_file, "Before removal of unreachable regions:\n");
3884 dump_eh_tree (dump_file, cfun);
3885 fprintf (dump_file, "Reachable regions: ");
3886 dump_bitmap_file (dump_file, r_reachable);
3887 fprintf (dump_file, "Reachable landing pads: ");
3888 dump_bitmap_file (dump_file, lp_reachable);
3889 }
3890
3891 if (dump_file)
3892 {
3893 FOR_EACH_VEC_SAFE_ELT (cfun->eh->region_array, i, region)
3894 if (region && !bitmap_bit_p (r_reachable, region->index))
3895 fprintf (dump_file,
3896 "Removing unreachable region %d\n",
3897 region->index);
3898 }
3899
3900 remove_unreachable_eh_regions (r_reachable);
3901
3902 FOR_EACH_VEC_SAFE_ELT (cfun->eh->lp_array, i, lp)
3903 if (lp && !bitmap_bit_p (lp_reachable, lp->index))
3904 {
3905 if (dump_file)
3906 fprintf (dump_file,
3907 "Removing unreachable landing pad %d\n",
3908 lp->index);
3909 remove_eh_landing_pad (lp);
3910 }
3911
3912 if (dump_file)
3913 {
3914 fprintf (dump_file, "\n\nAfter removal of unreachable regions:\n");
3915 dump_eh_tree (dump_file, cfun);
3916 fprintf (dump_file, "\n\n");
3917 }
3918
3919 sbitmap_free (r_reachable);
3920 sbitmap_free (lp_reachable);
3921
3922 #ifdef ENABLE_CHECKING
3923 verify_eh_tree (cfun);
3924 #endif
3925 }
3926
3927 /* Remove unreachable handlers if any landing pads have been removed after
3928 the last ehcleanup pass (due to gimple_purge_dead_eh_edges). */
3929
3930 void
3931 maybe_remove_unreachable_handlers (void)
3932 {
3933 eh_landing_pad lp;
3934 unsigned i;
3935
3936 if (cfun->eh == NULL)
3937 return;
3938
3939 FOR_EACH_VEC_SAFE_ELT (cfun->eh->lp_array, i, lp)
3940 if (lp && lp->post_landing_pad)
3941 {
3942 if (label_to_block (lp->post_landing_pad) == NULL)
3943 {
3944 remove_unreachable_handlers ();
3945 return;
3946 }
3947 }
3948 }
3949
3950 /* Remove regions that do not have landing pads. This assumes
3951 that remove_unreachable_handlers has already been run, and
3952 that the landing pads have since been manipulated.
3953
3954 Preserve regions with landing pads and regions that prevent
3955 exceptions from propagating further, even if these regions
3956 are not reachable. */
3957
3958 static void
3959 remove_unreachable_handlers_no_lp (void)
3960 {
3961 eh_region region;
3962 sbitmap r_reachable;
3963 unsigned i;
3964
3965 mark_reachable_handlers (&r_reachable, /*lp_reachablep=*/NULL);
3966
3967 FOR_EACH_VEC_SAFE_ELT (cfun->eh->region_array, i, region)
3968 {
3969 if (! region)
3970 continue;
3971
3972 if (region->landing_pads != NULL
3973 || region->type == ERT_MUST_NOT_THROW)
3974 bitmap_set_bit (r_reachable, region->index);
3975
3976 if (dump_file
3977 && !bitmap_bit_p (r_reachable, region->index))
3978 fprintf (dump_file,
3979 "Removing unreachable region %d\n",
3980 region->index);
3981 }
3982
3983 remove_unreachable_eh_regions (r_reachable);
3984
3985 sbitmap_free (r_reachable);
3986 }
3987
3988 /* Undo critical edge splitting on an EH landing pad. Earlier, we
3989 optimistically split all sorts of edges, including EH edges. The
3990 optimization passes in between may not have needed them; if not,
3991 we should undo the split.
3992
3993 Recognize this case by having one EH edge incoming to the BB and
3994 one normal edge outgoing; BB should be empty apart from the
3995 post_landing_pad label.
3996
3997 Note that this is slightly different from the empty handler case
3998 handled by cleanup_empty_eh, in that the handler itself may still
3999 contain code, but the landing pad has been separated from the
4000 handler. As such, cleanup_empty_eh relies on this transformation
4001 having been done first. */
4002
4003 static bool
4004 unsplit_eh (eh_landing_pad lp)
4005 {
4006 basic_block bb = label_to_block (lp->post_landing_pad);
4007 gimple_stmt_iterator gsi;
4008 edge e_in, e_out;
4009
4010 /* Quickly check the edge counts on BB for singularity. */
4011 if (!single_pred_p (bb) || !single_succ_p (bb))
4012 return false;
4013 e_in = single_pred_edge (bb);
4014 e_out = single_succ_edge (bb);
4015
4016 /* Input edge must be EH and output edge must be normal. */
4017 if ((e_in->flags & EDGE_EH) == 0 || (e_out->flags & EDGE_EH) != 0)
4018 return false;
4019
4020 /* The block must be empty except for the labels and debug insns. */
4021 gsi = gsi_after_labels (bb);
4022 if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
4023 gsi_next_nondebug (&gsi);
4024 if (!gsi_end_p (gsi))
4025 return false;
4026
4027 /* The destination block must not already have a landing pad
4028 for a different region. */
4029 for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
4030 {
4031 glabel *label_stmt = dyn_cast <glabel *> (gsi_stmt (gsi));
4032 tree lab;
4033 int lp_nr;
4034
4035 if (!label_stmt)
4036 break;
4037 lab = gimple_label_label (label_stmt);
4038 lp_nr = EH_LANDING_PAD_NR (lab);
4039 if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region)
4040 return false;
4041 }
4042
4043 /* The new destination block must not already be a destination of
4044 the source block, lest we merge fallthru and EH edges and end up
4045 with all sorts of confusion. */
4046 if (find_edge (e_in->src, e_out->dest))
4047 return false;
4048
4049 /* ??? We can get degenerate phis due to cfg cleanups. I would have
4050 thought this should have been cleaned up by a phicprop pass, but
4051 that doesn't appear to handle virtuals. Propagate by hand. */
4052 if (!gimple_seq_empty_p (phi_nodes (bb)))
4053 {
4054 for (gphi_iterator gpi = gsi_start_phis (bb); !gsi_end_p (gpi); )
4055 {
4056 gimple use_stmt;
4057 gphi *phi = gpi.phi ();
4058 tree lhs = gimple_phi_result (phi);
4059 tree rhs = gimple_phi_arg_def (phi, 0);
4060 use_operand_p use_p;
4061 imm_use_iterator iter;
4062
4063 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
4064 {
4065 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
4066 SET_USE (use_p, rhs);
4067 }
4068
4069 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
4070 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1;
4071
4072 remove_phi_node (&gpi, true);
4073 }
4074 }
4075
4076 if (dump_file && (dump_flags & TDF_DETAILS))
4077 fprintf (dump_file, "Unsplit EH landing pad %d to block %i.\n",
4078 lp->index, e_out->dest->index);
4079
4080 /* Redirect the edge. Since redirect_eh_edge_1 expects to be moving
4081 a successor edge, humor it. But do the real CFG change with the
4082 predecessor of E_OUT in order to preserve the ordering of arguments
4083 to the PHI nodes in E_OUT->DEST. */
4084 redirect_eh_edge_1 (e_in, e_out->dest, false);
4085 redirect_edge_pred (e_out, e_in->src);
4086 e_out->flags = e_in->flags;
4087 e_out->probability = e_in->probability;
4088 e_out->count = e_in->count;
4089 remove_edge (e_in);
4090
4091 return true;
4092 }
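/* An illustrative sketch of the unsplit_eh transformation (block
   numbers are hypothetical).  Given a landing pad block that only
   forwards control,

	<bb 3>  [EH edge from bb 2]
	<L_lp>:
	  goto <bb 4>;

   the EH edge out of bb 2 is retargeted to bb 4 directly and bb 3
   becomes unreachable.  The singularity and emptiness tests above
   guarantee the landing pad block carries no code of its own, so
   retargeting the edge preserves semantics.  */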
4093
4094 /* Examine each landing pad block and see if it matches unsplit_eh. */
4095
4096 static bool
4097 unsplit_all_eh (void)
4098 {
4099 bool changed = false;
4100 eh_landing_pad lp;
4101 int i;
4102
4103 for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
4104 if (lp)
4105 changed |= unsplit_eh (lp);
4106
4107 return changed;
4108 }
4109
4110 /* A subroutine of cleanup_empty_eh. Redirect all EH edges incoming
4111 to OLD_BB to NEW_BB; return true on success, false on failure.
4112
4113 OLD_BB_OUT is the edge into NEW_BB from OLD_BB, so if we miss any
4114 PHI variables from OLD_BB we can pick them up from OLD_BB_OUT.
4115 Virtual PHIs may be deleted and marked for renaming. */
4116
4117 static bool
4118 cleanup_empty_eh_merge_phis (basic_block new_bb, basic_block old_bb,
4119 edge old_bb_out, bool change_region)
4120 {
4121 gphi_iterator ngsi, ogsi;
4122 edge_iterator ei;
4123 edge e;
4124 bitmap ophi_handled;
4125
4126 /* The destination block must not be a regular successor for any
4127 of the preds of the landing pad. Thus, avoid turning
4128 <..>
4129 | \ EH
4130 | <..>
4131 | /
4132 <..>
4133 into
4134 <..>
4135 | | EH
4136 <..>
4137 which CFG verification would choke on. See PR45172 and PR51089. */
4138 FOR_EACH_EDGE (e, ei, old_bb->preds)
4139 if (find_edge (e->src, new_bb))
4140 return false;
4141
4142 FOR_EACH_EDGE (e, ei, old_bb->preds)
4143 redirect_edge_var_map_clear (e);
4144
4145 ophi_handled = BITMAP_ALLOC (NULL);
4146
4147 /* First, iterate through the PHIs on NEW_BB and set up the edge_var_map
4148 for the edges we're going to move. */
4149 for (ngsi = gsi_start_phis (new_bb); !gsi_end_p (ngsi); gsi_next (&ngsi))
4150 {
4151 gphi *ophi, *nphi = ngsi.phi ();
4152 tree nresult, nop;
4153
4154 nresult = gimple_phi_result (nphi);
4155 nop = gimple_phi_arg_def (nphi, old_bb_out->dest_idx);
4156
4157 /* Find the corresponding PHI in OLD_BB so we can forward-propagate
4158 the source ssa_name. */
4159 ophi = NULL;
4160 for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi))
4161 {
4162 ophi = ogsi.phi ();
4163 if (gimple_phi_result (ophi) == nop)
4164 break;
4165 ophi = NULL;
4166 }
4167
4168 /* If we did find the corresponding PHI, copy those inputs. */
4169 if (ophi)
4170 {
4171 /* If NOP is used somewhere else beyond phis in new_bb, give up. */
4172 if (!has_single_use (nop))
4173 {
4174 imm_use_iterator imm_iter;
4175 use_operand_p use_p;
4176
4177 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, nop)
4178 {
4179 if (!gimple_debug_bind_p (USE_STMT (use_p))
4180 && (gimple_code (USE_STMT (use_p)) != GIMPLE_PHI
4181 || gimple_bb (USE_STMT (use_p)) != new_bb))
4182 goto fail;
4183 }
4184 }
4185 bitmap_set_bit (ophi_handled, SSA_NAME_VERSION (nop));
4186 FOR_EACH_EDGE (e, ei, old_bb->preds)
4187 {
4188 location_t oloc;
4189 tree oop;
4190
4191 if ((e->flags & EDGE_EH) == 0)
4192 continue;
4193 oop = gimple_phi_arg_def (ophi, e->dest_idx);
4194 oloc = gimple_phi_arg_location (ophi, e->dest_idx);
4195 redirect_edge_var_map_add (e, nresult, oop, oloc);
4196 }
4197 }
4198 /* If we didn't find the PHI, then whether it's a real variable or a VOP,
4199 we know from the fact that OLD_BB is tree_empty_eh_handler_p that the
4200 variable is unchanged from input to the block and we can simply
4201 re-use the input to NEW_BB from the OLD_BB_OUT edge. */
4202 else
4203 {
4204 location_t nloc
4205 = gimple_phi_arg_location (nphi, old_bb_out->dest_idx);
4206 FOR_EACH_EDGE (e, ei, old_bb->preds)
4207 redirect_edge_var_map_add (e, nresult, nop, nloc);
4208 }
4209 }
4210
4211 /* Second, verify that all PHIs from OLD_BB have been handled. If not,
4212 we don't know what values from the other edges into NEW_BB to use. */
4213 for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi))
4214 {
4215 gphi *ophi = ogsi.phi ();
4216 tree oresult = gimple_phi_result (ophi);
4217 if (!bitmap_bit_p (ophi_handled, SSA_NAME_VERSION (oresult)))
4218 goto fail;
4219 }
4220
4221 /* Finally, move the edges and update the PHIs. */
4222 for (ei = ei_start (old_bb->preds); (e = ei_safe_edge (ei)); )
4223 if (e->flags & EDGE_EH)
4224 {
4225 /* ??? CFG manipulation routines do not try to update loop
4226 form on edge redirection. Do so manually here for now. */
4227 /* Redirecting a loop entry or latch edge will either create
4228 a multiple-entry loop or rotate the loop. If the loops merge
4229 we may have created a loop with multiple latches.
4230 None of this is easily fixed, thus cancel the affected loop
4231 and mark the other loop as possibly having multiple latches. */
4232 if (e->dest == e->dest->loop_father->header)
4233 {
4234 mark_loop_for_removal (e->dest->loop_father);
4235 new_bb->loop_father->latch = NULL;
4236 loops_state_set (LOOPS_MAY_HAVE_MULTIPLE_LATCHES);
4237 }
4238 redirect_eh_edge_1 (e, new_bb, change_region);
4239 redirect_edge_succ (e, new_bb);
4240 flush_pending_stmts (e);
4241 }
4242 else
4243 ei_next (&ei);
4244
4245 BITMAP_FREE (ophi_handled);
4246 return true;
4247
4248 fail:
4249 FOR_EACH_EDGE (e, ei, old_bb->preds)
4250 redirect_edge_var_map_clear (e);
4251 BITMAP_FREE (ophi_handled);
4252 return false;
4253 }
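/* An illustrative sketch of the PHI merging above (SSA names are
   hypothetical).  With OLD_BB containing

	x_1 = PHI <x_5(EH pred 1), x_6(EH pred 2)>

   and NEW_BB containing

	x_2 = PHI <x_1(OLD_BB), x_7(other pred)>

   the EH predecessor edges are redirected straight to NEW_BB, and the
   edge_var_map entries recorded above let flush_pending_stmts expand
   NEW_BB's PHI into

	x_2 = PHI <x_5(EH pred 1), x_6(EH pred 2), x_7(other pred)>.  */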
4254
4255 /* A subroutine of cleanup_empty_eh. Move a landing pad LP from its
4256 old region to NEW_REGION at BB. */
4257
4258 static void
4259 cleanup_empty_eh_move_lp (basic_block bb, edge e_out,
4260 eh_landing_pad lp, eh_region new_region)
4261 {
4262 gimple_stmt_iterator gsi;
4263 eh_landing_pad *pp;
4264
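  /* Unlink LP from its old region's singly-linked list of landing
     pads, then splice it onto NEW_REGION's list.  */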
4265 for (pp = &lp->region->landing_pads; *pp != lp; pp = &(*pp)->next_lp)
4266 continue;
4267 *pp = lp->next_lp;
4268
4269 lp->region = new_region;
4270 lp->next_lp = new_region->landing_pads;
4271 new_region->landing_pads = lp;
4272
4273 /* Delete the RESX that was matched within the empty handler block. */
4274 gsi = gsi_last_bb (bb);
4275 unlink_stmt_vdef (gsi_stmt (gsi));
4276 gsi_remove (&gsi, true);
4277
4278 /* Clean up E_OUT for the fallthru. */
4279 e_out->flags = (e_out->flags & ~EDGE_EH) | EDGE_FALLTHRU;
4280 e_out->probability = REG_BR_PROB_BASE;
4281 }
4282
4283 /* A subroutine of cleanup_empty_eh. Handle more complex cases of
4284 unsplitting than unsplit_eh was prepared to handle, e.g. when
4285 multiple incoming edges and phis are involved. */
4286
4287 static bool
4288 cleanup_empty_eh_unsplit (basic_block bb, edge e_out, eh_landing_pad lp)
4289 {
4290 gimple_stmt_iterator gsi;
4291 tree lab;
4292
4293 /* We really ought not have totally lost everything following
4294 a landing pad label. Given that BB is empty, there had better
4295 be a successor. */
4296 gcc_assert (e_out != NULL);
4297
4298 /* The destination block must not already have a landing pad
4299 for a different region. */
4300 lab = NULL;
4301 for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
4302 {
4303 glabel *stmt = dyn_cast <glabel *> (gsi_stmt (gsi));
4304 int lp_nr;
4305
4306 if (!stmt)
4307 break;
4308 lab = gimple_label_label (stmt);
4309 lp_nr = EH_LANDING_PAD_NR (lab);
4310 if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region)
4311 return false;
4312 }
4313
4314 /* Attempt to move the PHIs into the successor block. */
4315 if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, false))
4316 {
4317 if (dump_file && (dump_flags & TDF_DETAILS))
4318 fprintf (dump_file,
4319 "Unsplit EH landing pad %d to block %i "
4320 "(via cleanup_empty_eh).\n",
4321 lp->index, e_out->dest->index);
4322 return true;
4323 }
4324
4325 return false;
4326 }
4327
4328 /* Return true if edge E_FIRST is part of an empty infinite loop
4329 or leads to such a loop through a series of single successor
4330 empty bbs. */
4331
4332 static bool
4333 infinite_empty_loop_p (edge e_first)
4334 {
4335 bool inf_loop = false;
4336 edge e;
4337
4338 if (e_first->dest == e_first->src)
4339 return true;
4340
4341 e_first->src->aux = (void *) 1;
4342 for (e = e_first; single_succ_p (e->dest); e = single_succ_edge (e->dest))
4343 {
4344 gimple_stmt_iterator gsi;
4345 if (e->dest->aux)
4346 {
4347 inf_loop = true;
4348 break;
4349 }
4350 e->dest->aux = (void *) 1;
4351 gsi = gsi_after_labels (e->dest);
4352 if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
4353 gsi_next_nondebug (&gsi);
4354 if (!gsi_end_p (gsi))
4355 break;
4356 }
4357 e_first->src->aux = NULL;
4358 for (e = e_first; e->dest->aux; e = single_succ_edge (e->dest))
4359 e->dest->aux = NULL;
4360
4361 return inf_loop;
4362 }
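/* E.g. (illustrative) infinite_empty_loop_p returns true for both

	<bb 5>:				<bb 5>:
	  goto <bb 5>;			  goto <bb 6>;
					<bb 6>:
					  goto <bb 5>;

   since every block on the path is empty and has a single successor
   leading back into the cycle.  */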
4363
4364 /* Examine the block associated with LP to determine if it's an empty
4365 handler for its EH region. If so, attempt to redirect EH edges to
4366 an outer region. Return true if the CFG was updated in any way. This
4367 is similar to jump forwarding, just across EH edges. */
4368
4369 static bool
4370 cleanup_empty_eh (eh_landing_pad lp)
4371 {
4372 basic_block bb = label_to_block (lp->post_landing_pad);
4373 gimple_stmt_iterator gsi;
4374 gimple resx;
4375 eh_region new_region;
4376 edge_iterator ei;
4377 edge e, e_out;
4378 bool has_non_eh_pred;
4379 bool ret = false;
4380 int new_lp_nr;
4381
4382 /* There can be zero or one edges out of BB. This is the quickest test. */
4383 switch (EDGE_COUNT (bb->succs))
4384 {
4385 case 0:
4386 e_out = NULL;
4387 break;
4388 case 1:
4389 e_out = single_succ_edge (bb);
4390 break;
4391 default:
4392 return false;
4393 }
4394
4395 resx = last_stmt (bb);
4396 if (resx && is_gimple_resx (resx))
4397 {
4398 if (stmt_can_throw_external (resx))
4399 optimize_clobbers (bb);
4400 else if (sink_clobbers (bb))
4401 ret = true;
4402 }
4403
4404 gsi = gsi_after_labels (bb);
4405
4406 /* Make sure to skip debug statements. */
4407 if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
4408 gsi_next_nondebug (&gsi);
4409
4410 /* If the block is totally empty, look for more unsplitting cases. */
4411 if (gsi_end_p (gsi))
4412 {
4413 /* Bail out for the degenerate case of an infinite loop.
4414 Also bail out if BB has no successors and is totally empty, which
4415 can happen e.g. because of an incorrect noreturn attribute. */
4416 if (e_out == NULL
4417 || infinite_empty_loop_p (e_out))
4418 return ret;
4419
4420 return ret | cleanup_empty_eh_unsplit (bb, e_out, lp);
4421 }
4422
4423 /* The block should consist only of a single RESX statement, modulo a
4424 preceding call to __builtin_stack_restore if there is no outgoing
4425 edge, since the call can be eliminated in this case. */
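  /* E.g. (illustrative) both of the following block shapes are
     accepted, the latter only when E_OUT is NULL:

	  <L_lp>:			<L_lp>:
	    resx 2;			  __builtin_stack_restore (sv_3);
					  resx 2;
  */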
4426 resx = gsi_stmt (gsi);
4427 if (!e_out && gimple_call_builtin_p (resx, BUILT_IN_STACK_RESTORE))
4428 {
4429 gsi_next (&gsi);
4430 resx = gsi_stmt (gsi);
4431 }
4432 if (!is_gimple_resx (resx))
4433 return ret;
4434 gcc_assert (gsi_one_before_end_p (gsi));
4435
4436 /* Determine if there are non-EH edges or resx edges into the handler. */
4437 has_non_eh_pred = false;
4438 FOR_EACH_EDGE (e, ei, bb->preds)
4439 if (!(e->flags & EDGE_EH))
4440 has_non_eh_pred = true;
4441
4442 /* Find the handler that's outer of the empty handler by looking at
4443 where the RESX instruction was vectored. */
4444 new_lp_nr = lookup_stmt_eh_lp (resx);
4445 new_region = get_eh_region_from_lp_number (new_lp_nr);
4446
4447 /* If there's no destination region within the current function,
4448 redirection is trivial via removing the throwing statements from
4449 the EH region, removing the EH edges, and allowing the block
4450 to go unreachable. */
4451 if (new_region == NULL)
4452 {
4453 gcc_assert (e_out == NULL);
4454 for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
4455 if (e->flags & EDGE_EH)
4456 {
4457 gimple stmt = last_stmt (e->src);
4458 remove_stmt_from_eh_lp (stmt);
4459 remove_edge (e);
4460 }
4461 else
4462 ei_next (&ei);
4463 goto succeed;
4464 }
4465
4466 /* If the destination region is a MUST_NOT_THROW, allow the runtime
4467 to handle the abort and allow the blocks to go unreachable. */
4468 if (new_region->type == ERT_MUST_NOT_THROW)
4469 {
4470 for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
4471 if (e->flags & EDGE_EH)
4472 {
4473 gimple stmt = last_stmt (e->src);
4474 remove_stmt_from_eh_lp (stmt);
4475 add_stmt_to_eh_lp (stmt, new_lp_nr);
4476 remove_edge (e);
4477 }
4478 else
4479 ei_next (&ei);
4480 goto succeed;
4481 }
4482
4483 /* Try to redirect the EH edges and merge the PHIs into the destination
4484 landing pad block. If the merge succeeds, we'll already have redirected
4485 all the EH edges. The handler itself will go unreachable if there were
4486 no normal edges. */
4487 if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, true))
4488 goto succeed;
4489
4490 /* Finally, if all input edges are EH edges, then we can (potentially)
4491 reduce the number of transfers from the runtime by moving the landing
4492 pad from the original region to the new region. This is a win when
4493 we remove the last CLEANUP region along a particular exception
4494 propagation path. Since nothing changes except for the region with
4495 which the landing pad is associated, the PHI nodes do not need to be
4496 adjusted at all. */
4497 if (!has_non_eh_pred)
4498 {
4499 cleanup_empty_eh_move_lp (bb, e_out, lp, new_region);
4500 if (dump_file && (dump_flags & TDF_DETAILS))
4501 fprintf (dump_file, "Empty EH handler %i moved to EH region %i.\n",
4502 lp->index, new_region->index);
4503
4504 /* ??? The CFG didn't change, but we may have rendered the
4505 old EH region unreachable. Trigger a cleanup there. */
4506 return true;
4507 }
4508
4509 return ret;
4510
4511 succeed:
4512 if (dump_file && (dump_flags & TDF_DETAILS))
4513 fprintf (dump_file, "Empty EH handler %i removed.\n", lp->index);
4514 remove_eh_landing_pad (lp);
4515 return true;
4516 }
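/* An illustrative end-to-end example (region and landing pad numbers
   are hypothetical).  Given an empty cleanup whose handler only
   re-raises,

	<L_lp1>:	landing pad 1, region 1
	  resx 1;	vectored to outer landing pad 2

   every throwing statement currently routed to landing pad 1 is
   rerouted to landing pad 2 (or its EH edges are simply removed for
   MUST_NOT_THROW and external destinations), after which landing
   pad 1 itself is deleted.  */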
4517
4518 /* Do a post-order traversal of the EH region tree. Examine each
4519 post_landing_pad block and see if we can eliminate it as empty. */
4520
4521 static bool
4522 cleanup_all_empty_eh (void)
4523 {
4524 bool changed = false;
4525 eh_landing_pad lp;
4526 int i;
4527
4528 for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
4529 if (lp)
4530 changed |= cleanup_empty_eh (lp);
4531
4532 return changed;
4533 }
4534
4535 /* Perform cleanups and lowering of exception handling:
4536 1) cleanup regions with handlers doing nothing are optimized out
4537 2) MUST_NOT_THROW regions that became dead because of 1) are optimized out
4538 3) info about regions that contain instructions, and regions
4539 reachable via local EH edges, is collected
4540 4) the EH tree is pruned of regions that are no longer necessary.
4541
4542 TODO: Push MUST_NOT_THROW regions to the root of the EH tree.
4543 Unify those that have the same failure decl and locus.
4544 */
4545
4546 static unsigned int
4547 execute_cleanup_eh_1 (void)
4548 {
4549 /* Do this first: unsplit_all_eh and cleanup_all_empty_eh can die
4550 looking up unreachable landing pads. */
4551 remove_unreachable_handlers ();
4552
4553 /* Watch out for the region tree vanishing because everything was unreachable. */
4554 if (cfun->eh->region_tree)
4555 {
4556 bool changed = false;
4557
4558 if (optimize)
4559 changed |= unsplit_all_eh ();
4560 changed |= cleanup_all_empty_eh ();
4561
4562 if (changed)
4563 {
4564 free_dominance_info (CDI_DOMINATORS);
4565 free_dominance_info (CDI_POST_DOMINATORS);
4566
4567 /* We delayed all basic block deletion, as we may have performed
4568 cleanups on EH edges while non-EH edges were still present. */
4569 delete_unreachable_blocks ();
4570
4571 /* We manipulated the landing pads. Remove any region that no
4572 longer has a landing pad. */
4573 remove_unreachable_handlers_no_lp ();
4574
4575 return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
4576 }
4577 }
4578
4579 return 0;
4580 }
4581
4582 namespace {
4583
4584 const pass_data pass_data_cleanup_eh =
4585 {
4586 GIMPLE_PASS, /* type */
4587 "ehcleanup", /* name */
4588 OPTGROUP_NONE, /* optinfo_flags */
4589 TV_TREE_EH, /* tv_id */
4590 PROP_gimple_lcf, /* properties_required */
4591 0, /* properties_provided */
4592 0, /* properties_destroyed */
4593 0, /* todo_flags_start */
4594 0, /* todo_flags_finish */
4595 };
4596
4597 class pass_cleanup_eh : public gimple_opt_pass
4598 {
4599 public:
4600 pass_cleanup_eh (gcc::context *ctxt)
4601 : gimple_opt_pass (pass_data_cleanup_eh, ctxt)
4602 {}
4603
4604 /* opt_pass methods: */
4605 opt_pass * clone () { return new pass_cleanup_eh (m_ctxt); }
4606 virtual bool gate (function *fun)
4607 {
4608 return fun->eh != NULL && fun->eh->region_tree != NULL;
4609 }
4610
4611 virtual unsigned int execute (function *);
4612
4613 }; // class pass_cleanup_eh
4614
4615 unsigned int
4616 pass_cleanup_eh::execute (function *fun)
4617 {
4618 int ret = execute_cleanup_eh_1 ();
4619
4620 /* If the function no longer needs an EH personality routine
4621 clear it. This exposes cross-language inlining opportunities
4622 and avoids references to a never defined personality routine. */
4623 if (DECL_FUNCTION_PERSONALITY (current_function_decl)
4624 && function_needs_eh_personality (fun) != eh_personality_lang)
4625 DECL_FUNCTION_PERSONALITY (current_function_decl) = NULL_TREE;
4626
4627 return ret;
4628 }
4629
4630 } // anon namespace
4631
4632 gimple_opt_pass *
4633 make_pass_cleanup_eh (gcc::context *ctxt)
4634 {
4635 return new pass_cleanup_eh (ctxt);
4636 }
4637 \f
4638 /* Verify that BB, which contains STMT as its last statement, has precisely
4639 the edges that make_eh_edges would create. */
4640
4641 DEBUG_FUNCTION bool
4642 verify_eh_edges (gimple stmt)
4643 {
4644 basic_block bb = gimple_bb (stmt);
4645 eh_landing_pad lp = NULL;
4646 int lp_nr;
4647 edge_iterator ei;
4648 edge e, eh_edge;
4649
4650 lp_nr = lookup_stmt_eh_lp (stmt);
4651 if (lp_nr > 0)
4652 lp = get_eh_landing_pad_from_number (lp_nr);
4653
4654 eh_edge = NULL;
4655 FOR_EACH_EDGE (e, ei, bb->succs)
4656 {
4657 if (e->flags & EDGE_EH)
4658 {
4659 if (eh_edge)
4660 {
4661 error ("BB %i has multiple EH edges", bb->index);
4662 return true;
4663 }
4664 else
4665 eh_edge = e;
4666 }
4667 }
4668
4669 if (lp == NULL)
4670 {
4671 if (eh_edge)
4672 {
4673 error ("BB %i can not throw but has an EH edge", bb->index);
4674 return true;
4675 }
4676 return false;
4677 }
4678
4679 if (!stmt_could_throw_p (stmt))
4680 {
4681 error ("BB %i last statement has incorrectly set lp", bb->index);
4682 return true;
4683 }
4684
4685 if (eh_edge == NULL)
4686 {
4687 error ("BB %i is missing an EH edge", bb->index);
4688 return true;
4689 }
4690
4691 if (eh_edge->dest != label_to_block (lp->post_landing_pad))
4692 {
4693 error ("Incorrect EH edge %i->%i", bb->index, eh_edge->dest->index);
4694 return true;
4695 }
4696
4697 return false;
4698 }
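/* Like other DEBUG_FUNCTIONs, this can also be invoked by hand from a
   debugger while stepping through a pass, e.g.:

	(gdb) call verify_eh_edges (last_stmt (bb))

   A true return value indicates that an error () diagnostic was
   emitted because the CFG disagrees with the EH tables.  */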
4699
4700 /* Similarly, but handle GIMPLE_EH_DISPATCH specifically. */
4701
4702 DEBUG_FUNCTION bool
4703 verify_eh_dispatch_edge (geh_dispatch *stmt)
4704 {
4705 eh_region r;
4706 eh_catch c;
4707 basic_block src, dst;
4708 bool want_fallthru = true;
4709 edge_iterator ei;
4710 edge e, fall_edge;
4711
4712 r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
4713 src = gimple_bb (stmt);
4714
4715 FOR_EACH_EDGE (e, ei, src->succs)
4716 gcc_assert (e->aux == NULL);
4717
4718 switch (r->type)
4719 {
4720 case ERT_TRY:
4721 for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
4722 {
4723 dst = label_to_block (c->label);
4724 e = find_edge (src, dst);
4725 if (e == NULL)
4726 {
4727 error ("BB %i is missing an edge", src->index);
4728 return true;
4729 }
4730 e->aux = (void *)e;
4731
4732 /* A catch-all handler doesn't have a fallthru. */
4733 if (c->type_list == NULL)
4734 {
4735 want_fallthru = false;
4736 break;
4737 }
4738 }
4739 break;
4740
4741 case ERT_ALLOWED_EXCEPTIONS:
4742 dst = label_to_block (r->u.allowed.label);
4743 e = find_edge (src, dst);
4744 if (e == NULL)
4745 {
4746 error ("BB %i is missing an edge", src->index);
4747 return true;
4748 }
4749 e->aux = (void *)e;
4750 break;
4751
4752 default:
4753 gcc_unreachable ();
4754 }
4755
4756 fall_edge = NULL;
4757 FOR_EACH_EDGE (e, ei, src->succs)
4758 {
4759 if (e->flags & EDGE_FALLTHRU)
4760 {
4761 if (fall_edge != NULL)
4762 {
4763 error ("BB %i too many fallthru edges", src->index);
4764 return true;
4765 }
4766 fall_edge = e;
4767 }
4768 else if (e->aux)
4769 e->aux = NULL;
4770 else
4771 {
4772 error ("BB %i has incorrect edge", src->index);
4773 return true;
4774 }
4775 }
4776 if ((fall_edge != NULL) ^ want_fallthru)
4777 {
4778 error ("BB %i has incorrect fallthru edge", src->index);
4779 return true;
4780 }
4781
4782 return false;
4783 }
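/* E.g. (illustrative) for

	try { ... } catch (A &) { ... } catch (...) { ... }

   the eh_dispatch block must have exactly one edge to each catch
   label and, because the final handler is a catch-all (NULL
   type_list), no fallthru edge at all.  */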