/* Exception handling semantics and decomposition for trees.
   Copyright (C) 2003-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "rtl.h"
#include "ssa.h"
#include "alias.h"
#include "fold-const.h"
#include "flags.h"
#include "insn-config.h"
#include "expmed.h"
#include "dojump.h"
#include "explow.h"
#include "calls.h"
#include "emit-rtl.h"
#include "varasm.h"
#include "stmt.h"
#include "expr.h"
#include "except.h"
#include "cfganal.h"
#include "cfgcleanup.h"
#include "internal-fn.h"
#include "tree-eh.h"
#include "gimple-iterator.h"
#include "cgraph.h"
#include "tree-cfg.h"
#include "tree-into-ssa.h"
#include "tree-ssa.h"
#include "tree-inline.h"
#include "tree-pass.h"
#include "langhooks.h"
#include "diagnostic-core.h"
#include "target.h"
#include "cfgloop.h"
#include "gimple-low.h"

/* In some instances a tree and a gimple statement need to be stored in
   the same table, e.g. in hash tables.  This union makes that possible.  */
typedef union {tree *tp; tree t; gimple g;} treemple;
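
/* For example, collect_finally_tree below stores a GIMPLE_TRY through
   the 'g' member, while a goto destination (a LABEL_DECL) is stored
   through 't'; both kinds of entry end up in the same finally_tree
   hash table.  */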

/* Misc functions used in this file.  */

/* Remember and lookup EH landing pad data for arbitrary statements.
   Really this means any statement that could_throw_p.  We could
   stuff this information into the stmt_ann data structure, but:

   (1) We absolutely rely on this information being kept until
   we get to rtl.  Once we're done with lowering here, if we lose
   the information there's no way to recover it!

   (2) There are many more statements that *cannot* throw as
   compared to those that can.  We should be saving some amount
   of space by only allocating memory for those that can throw.  */

/* Add statement T in function IFUN to landing pad NUM.  */

static void
add_stmt_to_eh_lp_fn (struct function *ifun, gimple t, int num)
{
  gcc_assert (num != 0);

  if (!get_eh_throw_stmt_table (ifun))
    set_eh_throw_stmt_table (ifun, hash_map<gimple, int>::create_ggc (31));

  gcc_assert (!get_eh_throw_stmt_table (ifun)->put (t, num));
}

/* Add statement T in the current function (cfun) to EH landing pad NUM.  */

void
add_stmt_to_eh_lp (gimple t, int num)
{
  add_stmt_to_eh_lp_fn (cfun, t, num);
}

/* Add statement T to the single EH landing pad in REGION.  */

static void
record_stmt_eh_region (eh_region region, gimple t)
{
  if (region == NULL)
    return;
  if (region->type == ERT_MUST_NOT_THROW)
    add_stmt_to_eh_lp_fn (cfun, t, -region->index);
  else
    {
      eh_landing_pad lp = region->landing_pads;
      if (lp == NULL)
        lp = gen_eh_landing_pad (region);
      else
        gcc_assert (lp->next_lp == NULL);
      add_stmt_to_eh_lp_fn (cfun, t, lp->index);
    }
}


/* Remove statement T in function IFUN from its EH landing pad.  */

bool
remove_stmt_from_eh_lp_fn (struct function *ifun, gimple t)
{
  if (!get_eh_throw_stmt_table (ifun))
    return false;

  if (!get_eh_throw_stmt_table (ifun)->get (t))
    return false;

  get_eh_throw_stmt_table (ifun)->remove (t);
  return true;
}


/* Remove statement T in the current function (cfun) from its
   EH landing pad.  */

bool
remove_stmt_from_eh_lp (gimple t)
{
  return remove_stmt_from_eh_lp_fn (cfun, t);
}

/* Determine if statement T is inside an EH region in function IFUN.
   Positive numbers indicate a landing pad index; negative numbers
   indicate a MUST_NOT_THROW region index; zero indicates that the
   statement is not recorded in the region table.  */

int
lookup_stmt_eh_lp_fn (struct function *ifun, gimple t)
{
  if (ifun->eh->throw_stmt_table == NULL)
    return 0;

  int *lp_nr = ifun->eh->throw_stmt_table->get (t);
  return lp_nr ? *lp_nr : 0;
}
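
/* For illustration (a sketch, not code from this file): a caller
   typically interprets the result as

     int lp_nr = lookup_stmt_eh_lp (stmt);
     if (lp_nr > 0)
       ...      stmt may throw; it unwinds to landing pad lp_nr
     else if (lp_nr < 0)
       ...      stmt lies inside the MUST_NOT_THROW region -lp_nr
     else
       ...      stmt is not recorded as potentially throwing
*/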

/* Likewise, but always use the current function.  */

int
lookup_stmt_eh_lp (gimple t)
{
  /* We can get called from initialized data when -fnon-call-exceptions
     is on; prevent crash.  */
  if (!cfun)
    return 0;
  return lookup_stmt_eh_lp_fn (cfun, t);
}

/* First pass of EH node decomposition.  Build up a tree of GIMPLE_TRY_FINALLY
   nodes and LABEL_DECL nodes.  We will use this during the second phase to
   determine if a goto leaves the body of a TRY_FINALLY_EXPR node.  */

struct finally_tree_node
{
  /* When storing a GIMPLE_TRY, we have to record a gimple.  However
     when deciding whether a GOTO to a certain LABEL_DECL (which is a
     tree) leaves the TRY block, it's necessary to record a tree in
     this field.  Thus a treemple is used.  */
  treemple child;
  gtry *parent;
};

/* Hashtable helpers.  */

struct finally_tree_hasher : free_ptr_hash <finally_tree_node>
{
  static inline hashval_t hash (const finally_tree_node *);
  static inline bool equal (const finally_tree_node *,
                            const finally_tree_node *);
};

inline hashval_t
finally_tree_hasher::hash (const finally_tree_node *v)
{
  return (intptr_t)v->child.t >> 4;
}

inline bool
finally_tree_hasher::equal (const finally_tree_node *v,
                            const finally_tree_node *c)
{
  return v->child.t == c->child.t;
}

/* Note that this table is *not* marked GTY.  It is short-lived.  */
static hash_table<finally_tree_hasher> *finally_tree;

static void
record_in_finally_tree (treemple child, gtry *parent)
{
  struct finally_tree_node *n;
  finally_tree_node **slot;

  n = XNEW (struct finally_tree_node);
  n->child = child;
  n->parent = parent;

  slot = finally_tree->find_slot (n, INSERT);
  gcc_assert (!*slot);
  *slot = n;
}

static void
collect_finally_tree (gimple stmt, gtry *region);

/* Go through the gimple sequence.  Works with collect_finally_tree to
   record all GIMPLE_LABEL and GIMPLE_TRY statements.  */

static void
collect_finally_tree_1 (gimple_seq seq, gtry *region)
{
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start (seq); !gsi_end_p (gsi); gsi_next (&gsi))
    collect_finally_tree (gsi_stmt (gsi), region);
}

static void
collect_finally_tree (gimple stmt, gtry *region)
{
  treemple temp;

  switch (gimple_code (stmt))
    {
    case GIMPLE_LABEL:
      temp.t = gimple_label_label (as_a <glabel *> (stmt));
      record_in_finally_tree (temp, region);
      break;

    case GIMPLE_TRY:
      if (gimple_try_kind (stmt) == GIMPLE_TRY_FINALLY)
        {
          temp.g = stmt;
          record_in_finally_tree (temp, region);
          collect_finally_tree_1 (gimple_try_eval (stmt),
                                  as_a <gtry *> (stmt));
          collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
        }
      else if (gimple_try_kind (stmt) == GIMPLE_TRY_CATCH)
        {
          collect_finally_tree_1 (gimple_try_eval (stmt), region);
          collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
        }
      break;

    case GIMPLE_CATCH:
      collect_finally_tree_1 (gimple_catch_handler (as_a <gcatch *> (stmt)),
                              region);
      break;

    case GIMPLE_EH_FILTER:
      collect_finally_tree_1 (gimple_eh_filter_failure (stmt), region);
      break;

    case GIMPLE_EH_ELSE:
      {
        geh_else *eh_else_stmt = as_a <geh_else *> (stmt);
        collect_finally_tree_1 (gimple_eh_else_n_body (eh_else_stmt), region);
        collect_finally_tree_1 (gimple_eh_else_e_body (eh_else_stmt), region);
      }
      break;

    default:
      /* A type, a decl, or some kind of statement that we're not
         interested in.  Don't walk them.  */
      break;
    }
}


/* Use the finally tree to determine if a jump from START to TARGET
   would leave the try_finally node that START lives in.  */

static bool
outside_finally_tree (treemple start, gimple target)
{
  struct finally_tree_node n, *p;

  do
    {
      n.child = start;
      p = finally_tree->find (&n);
      if (!p)
        return true;
      start.g = p->parent;
    }
  while (start.g != target);

  return false;
}
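
/* For example (illustrative only): in

     try
       {
         goto done;
       }
     finally
       {
         cleanup ();
       }
     done: ;

   the label DONE was recorded with a parent outside the GIMPLE_TRY_FINALLY,
   so the walk up from DONE never reaches that node and we return true:
   the goto escapes and must be routed through the finally block.  */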

/* Second pass of EH node decomposition.  Actually transform the GIMPLE_TRY
   nodes into a set of gotos, magic labels, and eh regions.
   The eh region creation is straightforward, but frobbing all the gotos
   and such into shape isn't.  */

/* The sequence into which we record all EH stuff.  This will be
   placed at the end of the function when we're all done.  */
static gimple_seq eh_seq;

/* Record whether an EH region contains something that can throw,
   indexed by EH region number.  */
static bitmap eh_region_may_contain_throw_map;

/* The GOTO_QUEUE is an array of GIMPLE_GOTO and GIMPLE_RETURN
   statements that are seen to escape this GIMPLE_TRY_FINALLY node.
   The idea is to record a gimple statement for everything except for
   the conditionals, which get their labels recorded.  Since labels are
   of type 'tree', we need this node to store both gimple and tree
   objects.  REPL_STMT is the sequence used to replace the goto/return
   statement.  CONT_STMT is used to store the statement that allows
   the return/goto to jump to the original destination.  */

struct goto_queue_node
{
  treemple stmt;
  location_t location;
  gimple_seq repl_stmt;
  gimple cont_stmt;
  int index;
  /* This is used when index >= 0 to indicate that stmt is a label (as
     opposed to a goto stmt).  */
  int is_label;
};

/* State of the world while lowering.  */

struct leh_state
{
  /* What's "current" while constructing the eh region tree.  These
     correspond to variables of the same name in cfun->eh, which we
     don't have easy access to.  */
  eh_region cur_region;

  /* What's "current" for the purposes of __builtin_eh_pointer.  For
     a CATCH, this is the associated TRY.  For an EH_FILTER, this is
     the associated ALLOWED_EXCEPTIONS, etc.  */
  eh_region ehp_region;

  /* Processing of TRY_FINALLY requires a bit more state.  This is
     split out into a separate structure so that we don't have to
     copy so much when processing other nodes.  */
  struct leh_tf_state *tf;
};

struct leh_tf_state
{
  /* Pointer to the GIMPLE_TRY_FINALLY node under discussion.  The
     try_finally_expr is the original GIMPLE_TRY_FINALLY.  We need to retain
     this so that outside_finally_tree can reliably reference the tree used
     in the collect_finally_tree data structures.  */
  gtry *try_finally_expr;
  gtry *top_p;

  /* While lowering, TOP_P is usually expanded into multiple statements,
     so we need the following field to store them.  */
  gimple_seq top_p_seq;

  /* The state outside this try_finally node.  */
  struct leh_state *outer;

  /* The exception region created for it.  */
  eh_region region;

  /* The goto queue.  */
  struct goto_queue_node *goto_queue;
  size_t goto_queue_size;
  size_t goto_queue_active;

  /* Pointer map to help in searching goto_queue when it is large.  */
  hash_map<gimple, goto_queue_node *> *goto_queue_map;

  /* The set of unique labels seen as entries in the goto queue.  */
  vec<tree> dest_array;

  /* A label to be added at the end of the completed transformed
     sequence.  It will be set if may_fallthru was true *at one time*,
     though subsequent transformations may have cleared that flag.  */
  tree fallthru_label;

  /* True if it is possible to fall out the bottom of the try block.
     Cleared if the fallthru is converted to a goto.  */
  bool may_fallthru;

  /* True if any entry in goto_queue is a GIMPLE_RETURN.  */
  bool may_return;

  /* True if the finally block can receive an exception edge.
     Cleared if the exception case is handled by code duplication.  */
  bool may_throw;
};

static gimple_seq lower_eh_must_not_throw (struct leh_state *, gtry *);

/* Search for STMT in the goto queue.  Return the replacement,
   or null if the statement isn't in the queue.  */

#define LARGE_GOTO_QUEUE 20

static void lower_eh_constructs_1 (struct leh_state *state, gimple_seq *seq);

static gimple_seq
find_goto_replacement (struct leh_tf_state *tf, treemple stmt)
{
  unsigned int i;

  if (tf->goto_queue_active < LARGE_GOTO_QUEUE)
    {
      for (i = 0; i < tf->goto_queue_active; i++)
        if (tf->goto_queue[i].stmt.g == stmt.g)
          return tf->goto_queue[i].repl_stmt;
      return NULL;
    }

  /* If we have a large number of entries in the goto_queue, create a
     pointer map and use that for searching.  */

  if (!tf->goto_queue_map)
    {
      tf->goto_queue_map = new hash_map<gimple, goto_queue_node *>;
      for (i = 0; i < tf->goto_queue_active; i++)
        {
          bool existed = tf->goto_queue_map->put (tf->goto_queue[i].stmt.g,
                                                  &tf->goto_queue[i]);
          gcc_assert (!existed);
        }
    }

  goto_queue_node **slot = tf->goto_queue_map->get (stmt.g);
  if (slot != NULL)
    return ((*slot)->repl_stmt);

  return NULL;
}

/* A subroutine of replace_goto_queue_1.  Handles the sub-clauses of a
   lowered GIMPLE_COND.  If, by chance, the replacement is a simple goto,
   then we can just splat it in, otherwise we add the new stmts immediately
   after the GIMPLE_COND and redirect.  */

static void
replace_goto_queue_cond_clause (tree *tp, struct leh_tf_state *tf,
                                gimple_stmt_iterator *gsi)
{
  tree label;
  gimple_seq new_seq;
  treemple temp;
  location_t loc = gimple_location (gsi_stmt (*gsi));

  temp.tp = tp;
  new_seq = find_goto_replacement (tf, temp);
  if (!new_seq)
    return;

  if (gimple_seq_singleton_p (new_seq)
      && gimple_code (gimple_seq_first_stmt (new_seq)) == GIMPLE_GOTO)
    {
      *tp = gimple_goto_dest (gimple_seq_first_stmt (new_seq));
      return;
    }

  label = create_artificial_label (loc);
  /* Set the new label for the GIMPLE_COND.  */
  *tp = label;

  gsi_insert_after (gsi, gimple_build_label (label), GSI_CONTINUE_LINKING);
  gsi_insert_seq_after (gsi, gimple_seq_copy (new_seq), GSI_CONTINUE_LINKING);
}

/* The real work of replace_goto_queue.  Returns with GSI updated to
   point to the next statement.  */

static void replace_goto_queue_stmt_list (gimple_seq *, struct leh_tf_state *);

static void
replace_goto_queue_1 (gimple stmt, struct leh_tf_state *tf,
                      gimple_stmt_iterator *gsi)
{
  gimple_seq seq;
  treemple temp;
  temp.g = NULL;

  switch (gimple_code (stmt))
    {
    case GIMPLE_GOTO:
    case GIMPLE_RETURN:
      temp.g = stmt;
      seq = find_goto_replacement (tf, temp);
      if (seq)
        {
          gsi_insert_seq_before (gsi, gimple_seq_copy (seq), GSI_SAME_STMT);
          gsi_remove (gsi, false);
          return;
        }
      break;

    case GIMPLE_COND:
      replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 2), tf, gsi);
      replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 3), tf, gsi);
      break;

    case GIMPLE_TRY:
      replace_goto_queue_stmt_list (gimple_try_eval_ptr (stmt), tf);
      replace_goto_queue_stmt_list (gimple_try_cleanup_ptr (stmt), tf);
      break;
    case GIMPLE_CATCH:
      replace_goto_queue_stmt_list (gimple_catch_handler_ptr (
                                      as_a <gcatch *> (stmt)),
                                    tf);
      break;
    case GIMPLE_EH_FILTER:
      replace_goto_queue_stmt_list (gimple_eh_filter_failure_ptr (stmt), tf);
      break;
    case GIMPLE_EH_ELSE:
      {
        geh_else *eh_else_stmt = as_a <geh_else *> (stmt);
        replace_goto_queue_stmt_list (gimple_eh_else_n_body_ptr (eh_else_stmt),
                                      tf);
        replace_goto_queue_stmt_list (gimple_eh_else_e_body_ptr (eh_else_stmt),
                                      tf);
      }
      break;

    default:
      /* These won't have gotos in them.  */
      break;
    }

  gsi_next (gsi);
}

/* A subroutine of replace_goto_queue.  Handles GIMPLE_SEQ.  */

static void
replace_goto_queue_stmt_list (gimple_seq *seq, struct leh_tf_state *tf)
{
  gimple_stmt_iterator gsi = gsi_start (*seq);

  while (!gsi_end_p (gsi))
    replace_goto_queue_1 (gsi_stmt (gsi), tf, &gsi);
}

/* Replace all goto queue members.  */

static void
replace_goto_queue (struct leh_tf_state *tf)
{
  if (tf->goto_queue_active == 0)
    return;
  replace_goto_queue_stmt_list (&tf->top_p_seq, tf);
  replace_goto_queue_stmt_list (&eh_seq, tf);
}

/* Add a new record to the goto queue contained in TF.  NEW_STMT is the
   data to be added, IS_LABEL indicates whether NEW_STMT is a label or
   a gimple return.  */

static void
record_in_goto_queue (struct leh_tf_state *tf,
                      treemple new_stmt,
                      int index,
                      bool is_label,
                      location_t location)
{
  size_t active, size;
  struct goto_queue_node *q;

  gcc_assert (!tf->goto_queue_map);

  active = tf->goto_queue_active;
  size = tf->goto_queue_size;
  if (active >= size)
    {
      size = (size ? size * 2 : 32);
      tf->goto_queue_size = size;
      tf->goto_queue
        = XRESIZEVEC (struct goto_queue_node, tf->goto_queue, size);
    }

  q = &tf->goto_queue[active];
  tf->goto_queue_active = active + 1;

  memset (q, 0, sizeof (*q));
  q->stmt = new_stmt;
  q->index = index;
  q->location = location;
  q->is_label = is_label;
}

/* Record the label LABEL in the goto queue contained in TF.
   TF is not null.  */

static void
record_in_goto_queue_label (struct leh_tf_state *tf, treemple stmt, tree label,
                            location_t location)
{
  int index;
  treemple temp, new_stmt;

  if (!label)
    return;

  /* Computed and non-local gotos do not get processed.  Given
     their nature we can neither tell whether we've escaped the
     finally block nor redirect them if we could.  */
  if (TREE_CODE (label) != LABEL_DECL)
    return;

  /* No need to record gotos that don't leave the try block.  */
  temp.t = label;
  if (!outside_finally_tree (temp, tf->try_finally_expr))
    return;

  if (! tf->dest_array.exists ())
    {
      tf->dest_array.create (10);
      tf->dest_array.quick_push (label);
      index = 0;
    }
  else
    {
      int n = tf->dest_array.length ();
      for (index = 0; index < n; ++index)
        if (tf->dest_array[index] == label)
          break;
      if (index == n)
        tf->dest_array.safe_push (label);
    }

  /* In the case of a GOTO we want to record the destination label,
     since with a GIMPLE_COND we have an easy access to the then/else
     labels.  */
  new_stmt = stmt;
  record_in_goto_queue (tf, new_stmt, index, true, location);
}

/* For any GIMPLE_GOTO or GIMPLE_RETURN, decide whether it leaves a try_finally
   node, and if so record that fact in the goto queue associated with that
   try_finally node.  */

static void
maybe_record_in_goto_queue (struct leh_state *state, gimple stmt)
{
  struct leh_tf_state *tf = state->tf;
  treemple new_stmt;

  if (!tf)
    return;

  switch (gimple_code (stmt))
    {
    case GIMPLE_COND:
      {
        gcond *cond_stmt = as_a <gcond *> (stmt);
        new_stmt.tp = gimple_op_ptr (cond_stmt, 2);
        record_in_goto_queue_label (tf, new_stmt,
                                    gimple_cond_true_label (cond_stmt),
                                    EXPR_LOCATION (*new_stmt.tp));
        new_stmt.tp = gimple_op_ptr (cond_stmt, 3);
        record_in_goto_queue_label (tf, new_stmt,
                                    gimple_cond_false_label (cond_stmt),
                                    EXPR_LOCATION (*new_stmt.tp));
      }
      break;
    case GIMPLE_GOTO:
      new_stmt.g = stmt;
      record_in_goto_queue_label (tf, new_stmt, gimple_goto_dest (stmt),
                                  gimple_location (stmt));
      break;

    case GIMPLE_RETURN:
      tf->may_return = true;
      new_stmt.g = stmt;
      record_in_goto_queue (tf, new_stmt, -1, false, gimple_location (stmt));
      break;

    default:
      gcc_unreachable ();
    }
}


#ifdef ENABLE_CHECKING
/* We do not process GIMPLE_SWITCHes for now.  As long as the original source
   was in fact structured, and we've not yet done jump threading, then none
   of the labels will leave outer GIMPLE_TRY_FINALLY nodes.  Verify this.  */

static void
verify_norecord_switch_expr (struct leh_state *state,
                             gswitch *switch_expr)
{
  struct leh_tf_state *tf = state->tf;
  size_t i, n;

  if (!tf)
    return;

  n = gimple_switch_num_labels (switch_expr);

  for (i = 0; i < n; ++i)
    {
      treemple temp;
      tree lab = CASE_LABEL (gimple_switch_label (switch_expr, i));
      temp.t = lab;
      gcc_assert (!outside_finally_tree (temp, tf->try_finally_expr));
    }
}
#else
#define verify_norecord_switch_expr(state, switch_expr)
#endif

/* Redirect a RETURN_EXPR pointed to by Q to FINLAB.  If MOD is
   non-null, insert it before the new branch.  */

static void
do_return_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod)
{
  gimple x;

  /* In the case of a return, the queue node must be a gimple statement.  */
  gcc_assert (!q->is_label);

  /* Note that the return value may have already been computed, e.g.,

       int x;
       int foo (void)
       {
         x = 0;
         try {
           return x;
         } finally {
           x++;
         }
       }

     should return 0, not 1.  We don't have to do anything to make
     this happen because the return value has been placed in the
     RESULT_DECL already.  */

  q->cont_stmt = q->stmt.g;

  if (mod)
    gimple_seq_add_seq (&q->repl_stmt, mod);

  x = gimple_build_goto (finlab);
  gimple_set_location (x, q->location);
  gimple_seq_add_stmt (&q->repl_stmt, x);
}

/* Similar, but easier, for GIMPLE_GOTO.  */

static void
do_goto_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod,
                     struct leh_tf_state *tf)
{
  ggoto *x;

  gcc_assert (q->is_label);

  q->cont_stmt = gimple_build_goto (tf->dest_array[q->index]);

  if (mod)
    gimple_seq_add_seq (&q->repl_stmt, mod);

  x = gimple_build_goto (finlab);
  gimple_set_location (x, q->location);
  gimple_seq_add_stmt (&q->repl_stmt, x);
}

/* Emit a standard landing pad sequence into SEQ for REGION.  */

static void
emit_post_landing_pad (gimple_seq *seq, eh_region region)
{
  eh_landing_pad lp = region->landing_pads;
  glabel *x;

  if (lp == NULL)
    lp = gen_eh_landing_pad (region);

  lp->post_landing_pad = create_artificial_label (UNKNOWN_LOCATION);
  EH_LANDING_PAD_NR (lp->post_landing_pad) = lp->index;

  x = gimple_build_label (lp->post_landing_pad);
  gimple_seq_add_stmt (seq, x);
}

/* Emit a RESX statement into SEQ for REGION.  */

static void
emit_resx (gimple_seq *seq, eh_region region)
{
  gresx *x = gimple_build_resx (region->index);
  gimple_seq_add_stmt (seq, x);
  if (region->outer)
    record_stmt_eh_region (region->outer, x);
}

/* Emit an EH_DISPATCH statement into SEQ for REGION.  */

static void
emit_eh_dispatch (gimple_seq *seq, eh_region region)
{
  geh_dispatch *x = gimple_build_eh_dispatch (region->index);
  gimple_seq_add_stmt (seq, x);
}

/* Note that the current EH region may contain a throw, or a
   call to a function which itself may contain a throw.  */

static void
note_eh_region_may_contain_throw (eh_region region)
{
  while (bitmap_set_bit (eh_region_may_contain_throw_map, region->index))
    {
      if (region->type == ERT_MUST_NOT_THROW)
        break;
      region = region->outer;
      if (region == NULL)
        break;
    }
}

/* Check if REGION has been marked as containing a throw.  If REGION is
   NULL, this predicate is false.  */

static inline bool
eh_region_may_contain_throw (eh_region r)
{
  return r && bitmap_bit_p (eh_region_may_contain_throw_map, r->index);
}

/* We want to transform
       try { body; } catch { stuff; }
   to
       normal_sequence:
         body;
         over:
       eh_sequence:
         landing_pad:
           stuff;
           goto over;

   TP is a GIMPLE_TRY node.  REGION is the region whose post_landing_pad
   should be placed before the second operand, or NULL.  OVER is
   an existing label that should be put at the exit, or NULL.  */

static gimple_seq
frob_into_branch_around (gtry *tp, eh_region region, tree over)
{
  gimple x;
  gimple_seq cleanup, result;
  location_t loc = gimple_location (tp);

  cleanup = gimple_try_cleanup (tp);
  result = gimple_try_eval (tp);

  if (region)
    emit_post_landing_pad (&eh_seq, region);

  if (gimple_seq_may_fallthru (cleanup))
    {
      if (!over)
        over = create_artificial_label (loc);
      x = gimple_build_goto (over);
      gimple_set_location (x, loc);
      gimple_seq_add_stmt (&cleanup, x);
    }
  gimple_seq_add_seq (&eh_seq, cleanup);

  if (over)
    {
      x = gimple_build_label (over);
      gimple_seq_add_stmt (&result, x);
    }
  return result;
}

/* A subroutine of lower_try_finally.  Duplicate the statement sequence
   SEQ.  Make sure to record all new labels found.  */

static gimple_seq
lower_try_finally_dup_block (gimple_seq seq, struct leh_state *outer_state,
                             location_t loc)
{
  gtry *region = NULL;
  gimple_seq new_seq;
  gimple_stmt_iterator gsi;

  new_seq = copy_gimple_seq_and_replace_locals (seq);

  for (gsi = gsi_start (new_seq); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      if (LOCATION_LOCUS (gimple_location (stmt)) == UNKNOWN_LOCATION)
        {
          tree block = gimple_block (stmt);
          gimple_set_location (stmt, loc);
          gimple_set_block (stmt, block);
        }
    }

  if (outer_state->tf)
    region = outer_state->tf->try_finally_expr;
  collect_finally_tree_1 (new_seq, region);

  return new_seq;
}

/* A subroutine of lower_try_finally.  Create a fallthru label for
   the given try_finally state.  The only tricky bit here is that
   we have to make sure to record the label in our outer context.  */

static tree
lower_try_finally_fallthru_label (struct leh_tf_state *tf)
{
  tree label = tf->fallthru_label;
  treemple temp;

  if (!label)
    {
      label = create_artificial_label (gimple_location (tf->try_finally_expr));
      tf->fallthru_label = label;
      if (tf->outer->tf)
        {
          temp.t = label;
          record_in_finally_tree (temp, tf->outer->tf->try_finally_expr);
        }
    }
  return label;
}

/* A subroutine of lower_try_finally.  If FINALLY consists of a
   GIMPLE_EH_ELSE node, return it.  */

static inline geh_else *
get_eh_else (gimple_seq finally)
{
  gimple x = gimple_seq_first_stmt (finally);
  if (gimple_code (x) == GIMPLE_EH_ELSE)
    {
      gcc_assert (gimple_seq_singleton_p (finally));
      return as_a <geh_else *> (x);
    }
  return NULL;
}

/* A subroutine of lower_try_finally.  If the eh_protect_cleanup_actions
   langhook returns non-null, then the language requires that the exception
   path out of a try_finally be treated specially.  To wit: the code within
   the finally block may not itself throw an exception.  We have two choices
   here.  First we can duplicate the finally block and wrap it in a
   must_not_throw region.  Second, we can generate code like

       try {
         finally_block;
       } catch {
         if (fintmp == eh_edge)
           protect_cleanup_actions;
       }

   where "fintmp" is the temporary used in the switch statement generation
   alternative considered below.  For the nonce, we always choose the first
   option.

   THIS_STATE may be null if this is a try-cleanup, not a try-finally.  */

static void
honor_protect_cleanup_actions (struct leh_state *outer_state,
                               struct leh_state *this_state,
                               struct leh_tf_state *tf)
{
  tree protect_cleanup_actions;
  gimple_stmt_iterator gsi;
  bool finally_may_fallthru;
  gimple_seq finally;
  gimple x;
  geh_mnt *eh_mnt;
  gtry *try_stmt;
  geh_else *eh_else;

  /* First check for nothing to do.  */
  if (lang_hooks.eh_protect_cleanup_actions == NULL)
    return;
  protect_cleanup_actions = lang_hooks.eh_protect_cleanup_actions ();
  if (protect_cleanup_actions == NULL)
    return;

  finally = gimple_try_cleanup (tf->top_p);
  eh_else = get_eh_else (finally);

  /* Duplicate the FINALLY block.  Only need to do this for try-finally,
     and not for cleanups.  If we've got an EH_ELSE, extract it now.  */
  if (eh_else)
    {
      finally = gimple_eh_else_e_body (eh_else);
      gimple_try_set_cleanup (tf->top_p, gimple_eh_else_n_body (eh_else));
    }
  else if (this_state)
    finally = lower_try_finally_dup_block (finally, outer_state,
                                           gimple_location (tf->try_finally_expr));
  finally_may_fallthru = gimple_seq_may_fallthru (finally);

  /* If this cleanup consists of a TRY_CATCH_EXPR with TRY_CATCH_IS_CLEANUP
     set, the handler of the TRY_CATCH_EXPR is another cleanup which ought
     to be in an enclosing scope, but needs to be implemented at this level
     to avoid a nesting violation (see wrap_temporary_cleanups in
     cp/decl.c).  Since it's logically at an outer level, we should call
     terminate before we get to it, so strip it away before adding the
     MUST_NOT_THROW filter.  */
  gsi = gsi_start (finally);
  x = gsi_stmt (gsi);
  if (gimple_code (x) == GIMPLE_TRY
      && gimple_try_kind (x) == GIMPLE_TRY_CATCH
      && gimple_try_catch_is_cleanup (x))
    {
      gsi_insert_seq_before (&gsi, gimple_try_eval (x), GSI_SAME_STMT);
      gsi_remove (&gsi, false);
    }

  /* Wrap the block with protect_cleanup_actions as the action.  */
  eh_mnt = gimple_build_eh_must_not_throw (protect_cleanup_actions);
  try_stmt = gimple_build_try (finally, gimple_seq_alloc_with_stmt (eh_mnt),
                               GIMPLE_TRY_CATCH);
  finally = lower_eh_must_not_throw (outer_state, try_stmt);

  /* Drop all of this into the exception sequence.  */
  emit_post_landing_pad (&eh_seq, tf->region);
  gimple_seq_add_seq (&eh_seq, finally);
  if (finally_may_fallthru)
    emit_resx (&eh_seq, tf->region);

  /* Having now been handled, EH isn't to be considered with
     the rest of the outgoing edges.  */
  tf->may_throw = false;
}

/* A subroutine of lower_try_finally.  We have determined that there is
   no fallthru edge out of the finally block.  This means that there is
   no outgoing edge corresponding to any incoming edge.  Restructure the
   try_finally node for this special case.  */

static void
lower_try_finally_nofallthru (struct leh_state *state,
                              struct leh_tf_state *tf)
{
  tree lab;
  gimple x;
  geh_else *eh_else;
  gimple_seq finally;
  struct goto_queue_node *q, *qe;

  lab = create_artificial_label (gimple_location (tf->try_finally_expr));

  /* We expect that tf->top_p is a GIMPLE_TRY.  */
  finally = gimple_try_cleanup (tf->top_p);
  tf->top_p_seq = gimple_try_eval (tf->top_p);

  x = gimple_build_label (lab);
  gimple_seq_add_stmt (&tf->top_p_seq, x);

  q = tf->goto_queue;
  qe = q + tf->goto_queue_active;
  for (; q < qe; ++q)
    if (q->index < 0)
      do_return_redirection (q, lab, NULL);
    else
      do_goto_redirection (q, lab, NULL, tf);

  replace_goto_queue (tf);

  /* Emit the finally block into the stream.  Lower EH_ELSE at this time.  */
  eh_else = get_eh_else (finally);
  if (eh_else)
    {
      finally = gimple_eh_else_n_body (eh_else);
      lower_eh_constructs_1 (state, &finally);
      gimple_seq_add_seq (&tf->top_p_seq, finally);

      if (tf->may_throw)
        {
          finally = gimple_eh_else_e_body (eh_else);
          lower_eh_constructs_1 (state, &finally);

          emit_post_landing_pad (&eh_seq, tf->region);
          gimple_seq_add_seq (&eh_seq, finally);
        }
    }
  else
    {
      lower_eh_constructs_1 (state, &finally);
      gimple_seq_add_seq (&tf->top_p_seq, finally);

      if (tf->may_throw)
        {
          emit_post_landing_pad (&eh_seq, tf->region);

          x = gimple_build_goto (lab);
          gimple_set_location (x, gimple_location (tf->try_finally_expr));
          gimple_seq_add_stmt (&eh_seq, x);
        }
    }
}

/* A subroutine of lower_try_finally.  We have determined that there is
   exactly one destination of the finally block.  Restructure the
   try_finally node for this special case.  */

static void
lower_try_finally_onedest (struct leh_state *state, struct leh_tf_state *tf)
{
  struct goto_queue_node *q, *qe;
  geh_else *eh_else;
  glabel *label_stmt;
  gimple x;
  gimple_seq finally;
  gimple_stmt_iterator gsi;
  tree finally_label;
  location_t loc = gimple_location (tf->try_finally_expr);

  finally = gimple_try_cleanup (tf->top_p);
  tf->top_p_seq = gimple_try_eval (tf->top_p);

  /* Since there's only one destination, and the destination edge can only
     either be EH or non-EH, that implies that all of our incoming edges
     are of the same type.  Therefore we can lower EH_ELSE immediately.  */
  eh_else = get_eh_else (finally);
  if (eh_else)
    {
      if (tf->may_throw)
        finally = gimple_eh_else_e_body (eh_else);
      else
        finally = gimple_eh_else_n_body (eh_else);
    }

  lower_eh_constructs_1 (state, &finally);

  for (gsi = gsi_start (finally); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      if (LOCATION_LOCUS (gimple_location (stmt)) == UNKNOWN_LOCATION)
        {
          tree block = gimple_block (stmt);
          gimple_set_location (stmt, gimple_location (tf->try_finally_expr));
          gimple_set_block (stmt, block);
        }
    }

  if (tf->may_throw)
    {
      /* Only reachable via the exception edge.  Add the given label to
         the head of the FINALLY block.  Append a RESX at the end.  */
      emit_post_landing_pad (&eh_seq, tf->region);
      gimple_seq_add_seq (&eh_seq, finally);
      emit_resx (&eh_seq, tf->region);
      return;
    }

  if (tf->may_fallthru)
    {
      /* Only reachable via the fallthru edge.  Do nothing but let
         the two blocks run together; we'll fall out the bottom.  */
      gimple_seq_add_seq (&tf->top_p_seq, finally);
      return;
    }

  finally_label = create_artificial_label (loc);
  label_stmt = gimple_build_label (finally_label);
  gimple_seq_add_stmt (&tf->top_p_seq, label_stmt);

  gimple_seq_add_seq (&tf->top_p_seq, finally);

  q = tf->goto_queue;
  qe = q + tf->goto_queue_active;

  if (tf->may_return)
    {
      /* Reachable by return expressions only.  Redirect them.  */
      for (; q < qe; ++q)
        do_return_redirection (q, finally_label, NULL);
      replace_goto_queue (tf);
    }
  else
    {
      /* Reachable by goto expressions only.  Redirect them.  */
      for (; q < qe; ++q)
        do_goto_redirection (q, finally_label, NULL, tf);
      replace_goto_queue (tf);

      if (tf->dest_array[0] == tf->fallthru_label)
        {
          /* Reachable by goto to fallthru label only.  Redirect it
             to the new label (already created, sadly), and do not
             emit the final branch out, or the fallthru label.  */
          tf->fallthru_label = NULL;
          return;
        }
    }

  /* Place the original return/goto to the original destination
     immediately after the finally block.  */
  x = tf->goto_queue[0].cont_stmt;
  gimple_seq_add_stmt (&tf->top_p_seq, x);
  maybe_record_in_goto_queue (state, x);
}

/* A subroutine of lower_try_finally.  There are multiple edges incoming
   and outgoing from the finally block.  Implement this by duplicating the
   finally block for every destination.  */
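
/* Schematically (a rough sketch, assuming a fallthru edge and one goto
   to label L escaping the construct): the finally block F is emitted
   once per destination,

       <try body>
       F                       copy for the fallthru edge
       goto fallthru_label;
     L1:                       where the original goto was redirected
       F                       copy for the goto edge
       goto L;

   with the original "goto L" in the try body rewritten as "goto L1".  */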

static void
lower_try_finally_copy (struct leh_state *state, struct leh_tf_state *tf)
{
  gimple_seq finally;
  gimple_seq new_stmt;
  gimple_seq seq;
  gimple x;
  geh_else *eh_else;
  tree tmp;
  location_t tf_loc = gimple_location (tf->try_finally_expr);

  finally = gimple_try_cleanup (tf->top_p);

  /* Notice EH_ELSE, and simplify some of the remaining code
     by considering FINALLY to be the normal return path only.  */
  eh_else = get_eh_else (finally);
  if (eh_else)
    finally = gimple_eh_else_n_body (eh_else);

  tf->top_p_seq = gimple_try_eval (tf->top_p);
  new_stmt = NULL;

  if (tf->may_fallthru)
    {
      seq = lower_try_finally_dup_block (finally, state, tf_loc);
      lower_eh_constructs_1 (state, &seq);
      gimple_seq_add_seq (&new_stmt, seq);

      tmp = lower_try_finally_fallthru_label (tf);
      x = gimple_build_goto (tmp);
      gimple_set_location (x, tf_loc);
      gimple_seq_add_stmt (&new_stmt, x);
    }

  if (tf->may_throw)
    {
      /* We don't need to copy the EH path of EH_ELSE,
         since it is only emitted once.  */
      if (eh_else)
        seq = gimple_eh_else_e_body (eh_else);
      else
        seq = lower_try_finally_dup_block (finally, state, tf_loc);
      lower_eh_constructs_1 (state, &seq);

      emit_post_landing_pad (&eh_seq, tf->region);
      gimple_seq_add_seq (&eh_seq, seq);
      emit_resx (&eh_seq, tf->region);
    }

  if (tf->goto_queue)
    {
      struct goto_queue_node *q, *qe;
      int return_index, index;
      struct labels_s
      {
        struct goto_queue_node *q;
        tree label;
      } *labels;

      return_index = tf->dest_array.length ();
      labels = XCNEWVEC (struct labels_s, return_index + 1);

      q = tf->goto_queue;
      qe = q + tf->goto_queue_active;
      for (; q < qe; q++)
        {
          index = q->index < 0 ? return_index : q->index;

          if (!labels[index].q)
            labels[index].q = q;
        }

      for (index = 0; index < return_index + 1; index++)
        {
          tree lab;

          q = labels[index].q;
          if (! q)
            continue;

          lab = labels[index].label
            = create_artificial_label (tf_loc);

          if (index == return_index)
            do_return_redirection (q, lab, NULL);
          else
            do_goto_redirection (q, lab, NULL, tf);

          x = gimple_build_label (lab);
          gimple_seq_add_stmt (&new_stmt, x);

          seq = lower_try_finally_dup_block (finally, state, q->location);
          lower_eh_constructs_1 (state, &seq);
          gimple_seq_add_seq (&new_stmt, seq);

          gimple_seq_add_stmt (&new_stmt, q->cont_stmt);
          maybe_record_in_goto_queue (state, q->cont_stmt);
        }

      for (q = tf->goto_queue; q < qe; q++)
        {
          tree lab;

          index = q->index < 0 ? return_index : q->index;

          if (labels[index].q == q)
            continue;

          lab = labels[index].label;

          if (index == return_index)
            do_return_redirection (q, lab, NULL);
          else
            do_goto_redirection (q, lab, NULL, tf);
        }

      replace_goto_queue (tf);
      free (labels);
    }

  /* Need to link new stmts after running replace_goto_queue due
     to not wanting to process the same goto stmts twice.  */
  gimple_seq_add_seq (&tf->top_p_seq, new_stmt);
}

/* A subroutine of lower_try_finally.  There are multiple edges incoming
   and outgoing from the finally block.  Implement this by instrumenting
   each incoming edge and creating a switch statement at the end of the
   finally block that branches to the appropriate destination.  */
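
/* Schematically (a rough sketch, not literal output; indices are
   illustrative), with a fallthru edge and one escaping return, each
   incoming edge sets finally_tmp and branches to the shared copy of
   the finally block:

       finally_tmp = 0;  goto finally_label;   in place of the fallthru
       finally_tmp = 1;  goto finally_label;   in place of the return
     finally_label:
       <finally block>
       switch (finally_tmp)
         {
         case 0:  goto fallthru_label;
         default: return;                      the original return
         }
*/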

static void
lower_try_finally_switch (struct leh_state *state, struct leh_tf_state *tf)
{
  struct goto_queue_node *q, *qe;
  tree finally_tmp, finally_label;
  int return_index, eh_index, fallthru_index;
  int nlabels, ndests, j, last_case_index;
  tree last_case;
  vec<tree> case_label_vec;
  gimple_seq switch_body = NULL;
  gimple x;
  geh_else *eh_else;
  tree tmp;
  gimple switch_stmt;
  gimple_seq finally;
  hash_map<tree, gimple> *cont_map = NULL;
  /* The location of the TRY_FINALLY stmt.  */
  location_t tf_loc = gimple_location (tf->try_finally_expr);
  /* The location of the finally block.  */
  location_t finally_loc;

  finally = gimple_try_cleanup (tf->top_p);
  eh_else = get_eh_else (finally);

  /* Mash the TRY block to the head of the chain.  */
  tf->top_p_seq = gimple_try_eval (tf->top_p);

  /* The location of the finally is either the last stmt in the finally
     block or the location of the TRY_FINALLY itself.  */
  x = gimple_seq_last_stmt (finally);
  finally_loc = x ? gimple_location (x) : tf_loc;

  /* Prepare for switch statement generation.  */
  nlabels = tf->dest_array.length ();
  return_index = nlabels;
  eh_index = return_index + tf->may_return;
  fallthru_index = eh_index + (tf->may_throw && !eh_else);
  ndests = fallthru_index + tf->may_fallthru;

  finally_tmp = create_tmp_var (integer_type_node, "finally_tmp");
  finally_label = create_artificial_label (finally_loc);

  /* We use vec::quick_push on case_label_vec throughout this function,
     since we know the size in advance and allocate precisely as much
     space as needed.  */
  case_label_vec.create (ndests);
  last_case = NULL;
  last_case_index = 0;

  /* Begin inserting code for getting to the finally block.  Things
     are done in this order to correspond to the sequence the code is
     laid out.  */

  if (tf->may_fallthru)
    {
      x = gimple_build_assign (finally_tmp,
                               build_int_cst (integer_type_node,
                                              fallthru_index));
      gimple_seq_add_stmt (&tf->top_p_seq, x);

      tmp = build_int_cst (integer_type_node, fallthru_index);
      last_case = build_case_label (tmp, NULL,
                                    create_artificial_label (tf_loc));
      case_label_vec.quick_push (last_case);
      last_case_index++;

      x = gimple_build_label (CASE_LABEL (last_case));
      gimple_seq_add_stmt (&switch_body, x);

      tmp = lower_try_finally_fallthru_label (tf);
      x = gimple_build_goto (tmp);
      gimple_set_location (x, tf_loc);
      gimple_seq_add_stmt (&switch_body, x);
    }

  /* For EH_ELSE, emit the exception path (plus resx) now, then
     subsequently we only need consider the normal path.  */
  if (eh_else)
    {
      if (tf->may_throw)
        {
          finally = gimple_eh_else_e_body (eh_else);
          lower_eh_constructs_1 (state, &finally);

          emit_post_landing_pad (&eh_seq, tf->region);
          gimple_seq_add_seq (&eh_seq, finally);
          emit_resx (&eh_seq, tf->region);
        }

      finally = gimple_eh_else_n_body (eh_else);
    }
  else if (tf->may_throw)
    {
      emit_post_landing_pad (&eh_seq, tf->region);

      x = gimple_build_assign (finally_tmp,
                               build_int_cst (integer_type_node, eh_index));
      gimple_seq_add_stmt (&eh_seq, x);

      x = gimple_build_goto (finally_label);
      gimple_set_location (x, tf_loc);
      gimple_seq_add_stmt (&eh_seq, x);

      tmp = build_int_cst (integer_type_node, eh_index);
      last_case = build_case_label (tmp, NULL,
                                    create_artificial_label (tf_loc));
      case_label_vec.quick_push (last_case);
      last_case_index++;

      x = gimple_build_label (CASE_LABEL (last_case));
      gimple_seq_add_stmt (&eh_seq, x);
      emit_resx (&eh_seq, tf->region);
    }

  x = gimple_build_label (finally_label);
  gimple_seq_add_stmt (&tf->top_p_seq, x);

  lower_eh_constructs_1 (state, &finally);
  gimple_seq_add_seq (&tf->top_p_seq, finally);

  /* Redirect each incoming goto edge.  */
  q = tf->goto_queue;
  qe = q + tf->goto_queue_active;
  j = last_case_index + tf->may_return;
  /* Prepare the assignments to finally_tmp that are executed upon the
     entrance through a particular edge.  */
  for (; q < qe; ++q)
    {
      gimple_seq mod = NULL;
      int switch_id;
      unsigned int case_index;

      if (q->index < 0)
        {
          x = gimple_build_assign (finally_tmp,
                                   build_int_cst (integer_type_node,
                                                  return_index));
          gimple_seq_add_stmt (&mod, x);
          do_return_redirection (q, finally_label, mod);
          switch_id = return_index;
        }
      else
        {
          x = gimple_build_assign (finally_tmp,
                                   build_int_cst (integer_type_node, q->index));
          gimple_seq_add_stmt (&mod, x);
          do_goto_redirection (q, finally_label, mod, tf);
          switch_id = q->index;
        }

      case_index = j + q->index;
      if (case_label_vec.length () <= case_index || !case_label_vec[case_index])
        {
          tree case_lab;
          tmp = build_int_cst (integer_type_node, switch_id);
          case_lab = build_case_label (tmp, NULL,
                                       create_artificial_label (tf_loc));
          /* We store the cont_stmt in the pointer map, so that we can recover
             it in the loop below.  */
          if (!cont_map)
            cont_map = new hash_map<tree, gimple>;
          cont_map->put (case_lab, q->cont_stmt);
          case_label_vec.quick_push (case_lab);
        }
    }
  for (j = last_case_index; j < last_case_index + nlabels; j++)
    {
      gimple cont_stmt;

      last_case = case_label_vec[j];

      gcc_assert (last_case);
      gcc_assert (cont_map);

      cont_stmt = *cont_map->get (last_case);

      x = gimple_build_label (CASE_LABEL (last_case));
      gimple_seq_add_stmt (&switch_body, x);
      gimple_seq_add_stmt (&switch_body, cont_stmt);
      maybe_record_in_goto_queue (state, cont_stmt);
    }
  if (cont_map)
    delete cont_map;

  replace_goto_queue (tf);

  /* Make sure that the last case is the default label, as one is required.
     Then sort the labels, which is also required in GIMPLE.  */
  CASE_LOW (last_case) = NULL;
  tree tem = case_label_vec.pop ();
  gcc_assert (tem == last_case);
  sort_case_labels (case_label_vec);

  /* Build the switch statement, setting last_case to be the default
     label.  */
  switch_stmt = gimple_build_switch (finally_tmp, last_case,
                                     case_label_vec);
  gimple_set_location (switch_stmt, finally_loc);

  /* Need to link SWITCH_STMT after running replace_goto_queue
     due to not wanting to process the same goto stmts twice.  */
  gimple_seq_add_stmt (&tf->top_p_seq, switch_stmt);
  gimple_seq_add_seq (&tf->top_p_seq, switch_body);
}

/* Decide whether or not we are going to duplicate the finally block.
   There are several considerations.

   First, if this is Java, then the finally block contains code
   written by the user.  It has line numbers associated with it,
   so duplicating the block means it's difficult to set a breakpoint.
   Since controlling code generation via -g is verboten, we simply
   never duplicate code without optimization.

   Second, we'd like to prevent egregious code growth.  One way to
   do this is to estimate the size of the finally block, multiply
   that by the number of copies we'd need to make, and compare against
   the estimate of the size of the switch machinery we'd have to add.  */
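
/* For example (the numbers are illustrative only): a finally block of
   estimated size 9 with 3 destinations costs (9 + 1) * 3 = 30 units
   when duplicated versus 10 + 2 * 3 = 16 for the switch form, so when
   optimizing for size the switch wins; a block of size 3 would cost
   (3 + 1) * 3 = 12 and be duplicated instead.  */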

static bool
decide_copy_try_finally (int ndests, bool may_throw, gimple_seq finally)
{
  int f_estimate, sw_estimate;
  geh_else *eh_else;

  /* If there's an EH_ELSE involved, the exception path is separate
     and really doesn't come into play for this computation.  */
  eh_else = get_eh_else (finally);
  if (eh_else)
    {
      ndests -= may_throw;
      finally = gimple_eh_else_n_body (eh_else);
    }

  if (!optimize)
    {
      gimple_stmt_iterator gsi;

      if (ndests == 1)
        return true;

      for (gsi = gsi_start (finally); !gsi_end_p (gsi); gsi_next (&gsi))
        {
          gimple stmt = gsi_stmt (gsi);
          if (!is_gimple_debug (stmt) && !gimple_clobber_p (stmt))
            return false;
        }
      return true;
    }

  /* Finally estimate N times, plus N gotos.  */
  f_estimate = count_insns_seq (finally, &eni_size_weights);
  f_estimate = (f_estimate + 1) * ndests;

  /* Switch statement (cost 10), N variable assignments, N gotos.  */
  sw_estimate = 10 + 2 * ndests;

  /* Optimize for size clearly wants our best guess.  */
  if (optimize_function_for_size_p (cfun))
    return f_estimate < sw_estimate;

  /* ??? These numbers are completely made up so far.  */
  if (optimize > 1)
    return f_estimate < 100 || f_estimate < sw_estimate * 2;
  else
    return f_estimate < 40 || f_estimate * 2 < sw_estimate * 3;
}

/* REG is the enclosing region for a possible cleanup region, or the region
   itself.  Returns TRUE if such a region would be unreachable.

   Cleanup regions within a must-not-throw region aren't actually reachable
   even if there are throwing stmts within them, because the personality
   routine will call terminate before unwinding.  */

static bool
cleanup_is_dead_in (eh_region reg)
{
  while (reg && reg->type == ERT_CLEANUP)
    reg = reg->outer;
  return (reg && reg->type == ERT_MUST_NOT_THROW);
}
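
/* For instance (an illustrative, front-end-dependent C++ sketch): in

     void f () noexcept
     {
       Obj o;       cleanup region for o's destructor
       g ();        may throw
     }

   the cleanup for 'o' sits inside the MUST_NOT_THROW region created for
   the noexcept function; an exception from g() calls terminate before
   any unwinding, so the cleanup region itself is unreachable.  */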

/* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY_FINALLY node
   to a sequence of labels and blocks, plus the exception region trees
   that record all the magic.  This is complicated by the need to
   arrange for the FINALLY block to be executed on all exits.  */

static gimple_seq
lower_try_finally (struct leh_state *state, gtry *tp)
{
  struct leh_tf_state this_tf;
  struct leh_state this_state;
  int ndests;
  gimple_seq old_eh_seq;

  /* Process the try block.  */

  memset (&this_tf, 0, sizeof (this_tf));
  this_tf.try_finally_expr = tp;
  this_tf.top_p = tp;
  this_tf.outer = state;
  if (using_eh_for_cleanups_p () && !cleanup_is_dead_in (state->cur_region))
    {
      this_tf.region = gen_eh_region_cleanup (state->cur_region);
      this_state.cur_region = this_tf.region;
    }
  else
    {
      this_tf.region = NULL;
      this_state.cur_region = state->cur_region;
    }

  this_state.ehp_region = state->ehp_region;
  this_state.tf = &this_tf;

  old_eh_seq = eh_seq;
  eh_seq = NULL;

  lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));

  /* Determine if the try block is escaped through the bottom.  */
  this_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp));

  /* Determine if any exceptions are possible within the try block.  */
  if (this_tf.region)
    this_tf.may_throw = eh_region_may_contain_throw (this_tf.region);
  if (this_tf.may_throw)
    honor_protect_cleanup_actions (state, &this_state, &this_tf);

  /* Determine how many edges (still) reach the finally block.  Or rather,
     how many destinations are reached by the finally block.  Use this to
     determine how we process the finally block itself.  */

  ndests = this_tf.dest_array.length ();
  ndests += this_tf.may_fallthru;
  ndests += this_tf.may_return;
  ndests += this_tf.may_throw;

  /* If the FINALLY block is not reachable, dike it out.  */
  if (ndests == 0)
    {
      gimple_seq_add_seq (&this_tf.top_p_seq, gimple_try_eval (tp));
      gimple_try_set_cleanup (tp, NULL);
    }
  /* If the finally block doesn't fall through, then any destination
     we might try to impose there isn't reached either.  There may be
     some minor amount of cleanup and redirection still needed.  */
  else if (!gimple_seq_may_fallthru (gimple_try_cleanup (tp)))
    lower_try_finally_nofallthru (state, &this_tf);

  /* We can easily special-case redirection to a single destination.  */
  else if (ndests == 1)
    lower_try_finally_onedest (state, &this_tf);
  else if (decide_copy_try_finally (ndests, this_tf.may_throw,
                                    gimple_try_cleanup (tp)))
    lower_try_finally_copy (state, &this_tf);
  else
    lower_try_finally_switch (state, &this_tf);

  /* If someone requested we add a label at the end of the transformed
     block, do so.  */
  if (this_tf.fallthru_label)
    {
      /* This must be reached only if ndests == 0.  */
      gimple x = gimple_build_label (this_tf.fallthru_label);
      gimple_seq_add_stmt (&this_tf.top_p_seq, x);
    }

  this_tf.dest_array.release ();
  free (this_tf.goto_queue);
  if (this_tf.goto_queue_map)
    delete this_tf.goto_queue_map;

  /* If there was an old (aka outer) eh_seq, append the current eh_seq.
     If there was no old eh_seq, then the append is trivially already done.  */
  if (old_eh_seq)
    {
      if (eh_seq == NULL)
        eh_seq = old_eh_seq;
      else
        {
          gimple_seq new_eh_seq = eh_seq;
          eh_seq = old_eh_seq;
          gimple_seq_add_seq (&eh_seq, new_eh_seq);
        }
    }

  return this_tf.top_p_seq;
}

/* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY_CATCH with a
   list of GIMPLE_CATCH to a sequence of labels and blocks, plus the
   exception region trees that record all the magic.  */
1757
1758 static gimple_seq
1759 lower_catch (struct leh_state *state, gtry *tp)
1760 {
1761 eh_region try_region = NULL;
1762 struct leh_state this_state = *state;
1763 gimple_stmt_iterator gsi;
1764 tree out_label;
1765 gimple_seq new_seq, cleanup;
1766 gimple x;
1767 location_t try_catch_loc = gimple_location (tp);
1768
1769 if (flag_exceptions)
1770 {
1771 try_region = gen_eh_region_try (state->cur_region);
1772 this_state.cur_region = try_region;
1773 }
1774
1775 lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1776
1777 if (!eh_region_may_contain_throw (try_region))
1778 return gimple_try_eval (tp);
1779
1780 new_seq = NULL;
1781 emit_eh_dispatch (&new_seq, try_region);
1782 emit_resx (&new_seq, try_region);
1783
1784 this_state.cur_region = state->cur_region;
1785 this_state.ehp_region = try_region;
1786
1787 /* Add eh_seq from lowering EH in the cleanup sequence after the cleanup
1788 itself, so that e.g. for coverage purposes the nested cleanups don't
1789 appear before the cleanup body. See PR64634 for details. */
1790 gimple_seq old_eh_seq = eh_seq;
1791 eh_seq = NULL;
1792
1793 out_label = NULL;
1794 cleanup = gimple_try_cleanup (tp);
1795 for (gsi = gsi_start (cleanup);
1796 !gsi_end_p (gsi);
1797 gsi_next (&gsi))
1798 {
1799 eh_catch c;
1800 gcatch *catch_stmt;
1801 gimple_seq handler;
1802
1803 catch_stmt = as_a <gcatch *> (gsi_stmt (gsi));
1804 c = gen_eh_region_catch (try_region, gimple_catch_types (catch_stmt));
1805
1806 handler = gimple_catch_handler (catch_stmt);
1807 lower_eh_constructs_1 (&this_state, &handler);
1808
1809 c->label = create_artificial_label (UNKNOWN_LOCATION);
1810 x = gimple_build_label (c->label);
1811 gimple_seq_add_stmt (&new_seq, x);
1812
1813 gimple_seq_add_seq (&new_seq, handler);
1814
1815 if (gimple_seq_may_fallthru (new_seq))
1816 {
1817 if (!out_label)
1818 out_label = create_artificial_label (try_catch_loc);
1819
1820 x = gimple_build_goto (out_label);
1821 gimple_seq_add_stmt (&new_seq, x);
1822 }
1823 if (!c->type_list)
1824 break;
1825 }
1826
1827 gimple_try_set_cleanup (tp, new_seq);
1828
1829 gimple_seq new_eh_seq = eh_seq;
1830 eh_seq = old_eh_seq;
1831 gimple_seq ret_seq = frob_into_branch_around (tp, try_region, out_label);
1832 gimple_seq_add_seq (&eh_seq, new_eh_seq);
1833 return ret_seq;
1834 }
1835
1836 /* A subroutine of lower_eh_constructs_1. Lower a GIMPLE_TRY with a
1837 GIMPLE_EH_FILTER to a sequence of labels and blocks, plus the exception
1838 region trees that record all the magic. */
1839
1840 static gimple_seq
1841 lower_eh_filter (struct leh_state *state, gtry *tp)
1842 {
1843 struct leh_state this_state = *state;
1844 eh_region this_region = NULL;
1845 gimple inner, x;
1846 gimple_seq new_seq;
1847
1848 inner = gimple_seq_first_stmt (gimple_try_cleanup (tp));
1849
1850 if (flag_exceptions)
1851 {
1852 this_region = gen_eh_region_allowed (state->cur_region,
1853 gimple_eh_filter_types (inner));
1854 this_state.cur_region = this_region;
1855 }
1856
1857 lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1858
1859 if (!eh_region_may_contain_throw (this_region))
1860 return gimple_try_eval (tp);
1861
1862 new_seq = NULL;
1863 this_state.cur_region = state->cur_region;
1864 this_state.ehp_region = this_region;
1865
1866 emit_eh_dispatch (&new_seq, this_region);
1867 emit_resx (&new_seq, this_region);
1868
1869 this_region->u.allowed.label = create_artificial_label (UNKNOWN_LOCATION);
1870 x = gimple_build_label (this_region->u.allowed.label);
1871 gimple_seq_add_stmt (&new_seq, x);
1872
1873 lower_eh_constructs_1 (&this_state, gimple_eh_filter_failure_ptr (inner));
1874 gimple_seq_add_seq (&new_seq, gimple_eh_filter_failure (inner));
1875
1876 gimple_try_set_cleanup (tp, new_seq);
1877
1878 return frob_into_branch_around (tp, this_region, NULL);
1879 }
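/* Illustrative sketch: for a function with a C++98 exception
   specification such as "void f () throw (T)", the GIMPLE_EH_FILTER
   cleanup lowers to roughly

     BODY
     <on the EH path:>
     EH_DISPATCH <allowed-region>;
     RESX <allowed-region>;
     L_FAIL: <the filter's failure sequence, supplied by the front end>

   with the dispatch branching to L_FAIL when the thrown exception is
   not in the allowed set.  */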
1880
1881 /* A subroutine of lower_eh_constructs_1. Lower a GIMPLE_TRY with
1882 a GIMPLE_EH_MUST_NOT_THROW to a sequence of labels and blocks,
1883 plus the exception region trees that record all the magic. */
1884
1885 static gimple_seq
1886 lower_eh_must_not_throw (struct leh_state *state, gtry *tp)
1887 {
1888 struct leh_state this_state = *state;
1889
1890 if (flag_exceptions)
1891 {
1892 gimple inner = gimple_seq_first_stmt (gimple_try_cleanup (tp));
1893 eh_region this_region;
1894
1895 this_region = gen_eh_region_must_not_throw (state->cur_region);
1896 this_region->u.must_not_throw.failure_decl
1897 = gimple_eh_must_not_throw_fndecl (
1898 as_a <geh_mnt *> (inner));
1899 this_region->u.must_not_throw.failure_loc
1900 = LOCATION_LOCUS (gimple_location (tp));
1901
1902 /* In order to get mangling applied to this decl, we must mark it
1903 used now. Otherwise, pass_ipa_free_lang_data won't think the
1904 mangling needs to happen. */
1905 TREE_USED (this_region->u.must_not_throw.failure_decl) = 1;
1906
1907 this_state.cur_region = this_region;
1908 }
1909
1910 lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1911
1912 return gimple_try_eval (tp);
1913 }
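/* For example, a C++ destructor executed during unwinding sits in a
   MUST_NOT_THROW region: if an exception tries to escape the body,
   the saved failure_decl (std::terminate in C++) is called at the
   recorded failure_loc instead of resuming unwinding.  */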
1914
1915 /* Implement a cleanup expression. This is similar to try-finally,
1916 except that we only execute the cleanup block for exception edges. */
1917
1918 static gimple_seq
1919 lower_cleanup (struct leh_state *state, gtry *tp)
1920 {
1921 struct leh_state this_state = *state;
1922 eh_region this_region = NULL;
1923 struct leh_tf_state fake_tf;
1924 gimple_seq result;
1925 bool cleanup_dead = cleanup_is_dead_in (state->cur_region);
1926
1927 if (flag_exceptions && !cleanup_dead)
1928 {
1929 this_region = gen_eh_region_cleanup (state->cur_region);
1930 this_state.cur_region = this_region;
1931 }
1932
1933 lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1934
1935 if (cleanup_dead || !eh_region_may_contain_throw (this_region))
1936 return gimple_try_eval (tp);
1937
1938 /* Build enough of a try-finally state so that we can reuse
1939 honor_protect_cleanup_actions. */
1940 memset (&fake_tf, 0, sizeof (fake_tf));
1941 fake_tf.top_p = fake_tf.try_finally_expr = tp;
1942 fake_tf.outer = state;
1943 fake_tf.region = this_region;
1944 fake_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp));
1945 fake_tf.may_throw = true;
1946
1947 honor_protect_cleanup_actions (state, NULL, &fake_tf);
1948
1949 if (fake_tf.may_throw)
1950 {
1951 /* In this case honor_protect_cleanup_actions had nothing to do,
1952 and we should process this normally. */
1953 lower_eh_constructs_1 (state, gimple_try_cleanup_ptr (tp));
1954 result = frob_into_branch_around (tp, this_region,
1955 fake_tf.fallthru_label);
1956 }
1957 else
1958 {
1959 /* In this case honor_protect_cleanup_actions did nearly all of
1960 the work. All we have left is to append the fallthru_label. */
1961
1962 result = gimple_try_eval (tp);
1963 if (fake_tf.fallthru_label)
1964 {
1965 gimple x = gimple_build_label (fake_tf.fallthru_label);
1966 gimple_seq_add_stmt (&result, x);
1967 }
1968 }
1969 return result;
1970 }
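/* Illustrative sketch: for

     try { BODY } cleanup { ~A (&a); }

   the destructor call ends up only on the exception path; normal
   fallthru from BODY branches around it.  Contrast this with a
   try/finally, where the cleanup also runs on the fallthru and
   goto/return paths.  */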
1971
1972 /* Main loop for lowering eh constructs. Also moves gsi to the next
1973 statement. */
1974
1975 static void
1976 lower_eh_constructs_2 (struct leh_state *state, gimple_stmt_iterator *gsi)
1977 {
1978 gimple_seq replace;
1979 gimple x;
1980 gimple stmt = gsi_stmt (*gsi);
1981
1982 switch (gimple_code (stmt))
1983 {
1984 case GIMPLE_CALL:
1985 {
1986 tree fndecl = gimple_call_fndecl (stmt);
1987 tree rhs, lhs;
1988
1989 if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
1990 switch (DECL_FUNCTION_CODE (fndecl))
1991 {
1992 case BUILT_IN_EH_POINTER:
1993 /* The front end may have generated a call to
1994 __builtin_eh_pointer (0) within a catch region. Replace
1995 this zero argument with the current catch region number. */
1996 if (state->ehp_region)
1997 {
1998 tree nr = build_int_cst (integer_type_node,
1999 state->ehp_region->index);
2000 gimple_call_set_arg (stmt, 0, nr);
2001 }
2002 else
2003 {
2004 /* The user has done something silly. Remove it. */
2005 rhs = null_pointer_node;
2006 goto do_replace;
2007 }
2008 break;
2009
2010 case BUILT_IN_EH_FILTER:
2011 /* ??? This should never appear, but since it's a builtin it
2012 is accessible to abuse by users. Just remove it and
2013 replace the use with the arbitrary value zero. */
2014 rhs = build_int_cst (TREE_TYPE (TREE_TYPE (fndecl)), 0);
2015 do_replace:
2016 lhs = gimple_call_lhs (stmt);
2017 x = gimple_build_assign (lhs, rhs);
2018 gsi_insert_before (gsi, x, GSI_SAME_STMT);
2019 /* FALLTHRU */
2020
2021 case BUILT_IN_EH_COPY_VALUES:
2022 /* Likewise this should not appear. Remove it. */
2023 gsi_remove (gsi, true);
2024 return;
2025
2026 default:
2027 break;
2028 }
2029 }
2030 /* FALLTHRU */
2031
2032 case GIMPLE_ASSIGN:
2033 /* If the stmt can throw use a new temporary for the assignment
2034 to a LHS. This makes sure the old value of the LHS is
2035 available on the EH edge. Only do so for statements that
2036 potentially fall through (no noreturn calls e.g.), otherwise
2037 this new assignment might create fake fallthru regions. */
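/* For example, with -fnon-call-exceptions a possibly-trapping
   "x = a / b" is rewritten as "tmp = a / b; x = tmp;" so that x
   still holds its old value if the division traps.  */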
2038 if (stmt_could_throw_p (stmt)
2039 && gimple_has_lhs (stmt)
2040 && gimple_stmt_may_fallthru (stmt)
2041 && !tree_could_throw_p (gimple_get_lhs (stmt))
2042 && is_gimple_reg_type (TREE_TYPE (gimple_get_lhs (stmt))))
2043 {
2044 tree lhs = gimple_get_lhs (stmt);
2045 tree tmp = create_tmp_var (TREE_TYPE (lhs));
2046 gimple s = gimple_build_assign (lhs, tmp);
2047 gimple_set_location (s, gimple_location (stmt));
2048 gimple_set_block (s, gimple_block (stmt));
2049 gimple_set_lhs (stmt, tmp);
2050 if (TREE_CODE (TREE_TYPE (tmp)) == COMPLEX_TYPE
2051 || TREE_CODE (TREE_TYPE (tmp)) == VECTOR_TYPE)
2052 DECL_GIMPLE_REG_P (tmp) = 1;
2053 gsi_insert_after (gsi, s, GSI_SAME_STMT);
2054 }
2055 /* Look for things that can throw exceptions, and record them. */
2056 if (state->cur_region && stmt_could_throw_p (stmt))
2057 {
2058 record_stmt_eh_region (state->cur_region, stmt);
2059 note_eh_region_may_contain_throw (state->cur_region);
2060 }
2061 break;
2062
2063 case GIMPLE_COND:
2064 case GIMPLE_GOTO:
2065 case GIMPLE_RETURN:
2066 maybe_record_in_goto_queue (state, stmt);
2067 break;
2068
2069 case GIMPLE_SWITCH:
2070 verify_norecord_switch_expr (state, as_a <gswitch *> (stmt));
2071 break;
2072
2073 case GIMPLE_TRY:
2074 {
2075 gtry *try_stmt = as_a <gtry *> (stmt);
2076 if (gimple_try_kind (try_stmt) == GIMPLE_TRY_FINALLY)
2077 replace = lower_try_finally (state, try_stmt);
2078 else
2079 {
2080 x = gimple_seq_first_stmt (gimple_try_cleanup (try_stmt));
2081 if (!x)
2082 {
2083 replace = gimple_try_eval (try_stmt);
2084 lower_eh_constructs_1 (state, &replace);
2085 }
2086 else
2087 switch (gimple_code (x))
2088 {
2089 case GIMPLE_CATCH:
2090 replace = lower_catch (state, try_stmt);
2091 break;
2092 case GIMPLE_EH_FILTER:
2093 replace = lower_eh_filter (state, try_stmt);
2094 break;
2095 case GIMPLE_EH_MUST_NOT_THROW:
2096 replace = lower_eh_must_not_throw (state, try_stmt);
2097 break;
2098 case GIMPLE_EH_ELSE:
2099 /* This code is only valid with GIMPLE_TRY_FINALLY. */
2100 gcc_unreachable ();
2101 default:
2102 replace = lower_cleanup (state, try_stmt);
2103 break;
2104 }
2105 }
2106 }
2107
2108 /* Remove the old stmt and insert the transformed sequence
2109 instead. */
2110 gsi_insert_seq_before (gsi, replace, GSI_SAME_STMT);
2111 gsi_remove (gsi, true);
2112
2113 /* Return since we don't want gsi_next (). */
2114 return;
2115
2116 case GIMPLE_EH_ELSE:
2117 /* We should be eliminating this in lower_try_finally et al. */
2118 gcc_unreachable ();
2119
2120 default:
2121 /* A type, a decl, or some kind of statement that we're not
2122 interested in. Don't walk them. */
2123 break;
2124 }
2125
2126 gsi_next (gsi);
2127 }
2128
2129 /* A helper to unwrap a gimple_seq and feed stmts to lower_eh_constructs_2. */
2130
2131 static void
2132 lower_eh_constructs_1 (struct leh_state *state, gimple_seq *pseq)
2133 {
2134 gimple_stmt_iterator gsi;
2135 for (gsi = gsi_start (*pseq); !gsi_end_p (gsi);)
2136 lower_eh_constructs_2 (state, &gsi);
2137 }
2138
2139 namespace {
2140
2141 const pass_data pass_data_lower_eh =
2142 {
2143 GIMPLE_PASS, /* type */
2144 "eh", /* name */
2145 OPTGROUP_NONE, /* optinfo_flags */
2146 TV_TREE_EH, /* tv_id */
2147 PROP_gimple_lcf, /* properties_required */
2148 PROP_gimple_leh, /* properties_provided */
2149 0, /* properties_destroyed */
2150 0, /* todo_flags_start */
2151 0, /* todo_flags_finish */
2152 };
2153
2154 class pass_lower_eh : public gimple_opt_pass
2155 {
2156 public:
2157 pass_lower_eh (gcc::context *ctxt)
2158 : gimple_opt_pass (pass_data_lower_eh, ctxt)
2159 {}
2160
2161 /* opt_pass methods: */
2162 virtual unsigned int execute (function *);
2163
2164 }; // class pass_lower_eh
2165
2166 unsigned int
2167 pass_lower_eh::execute (function *fun)
2168 {
2169 struct leh_state null_state;
2170 gimple_seq bodyp;
2171
2172 bodyp = gimple_body (current_function_decl);
2173 if (bodyp == NULL)
2174 return 0;
2175
2176 finally_tree = new hash_table<finally_tree_hasher> (31);
2177 eh_region_may_contain_throw_map = BITMAP_ALLOC (NULL);
2178 memset (&null_state, 0, sizeof (null_state));
2179
2180 collect_finally_tree_1 (bodyp, NULL);
2181 lower_eh_constructs_1 (&null_state, &bodyp);
2182 gimple_set_body (current_function_decl, bodyp);
2183
2184 /* We assume there's a return statement, or something, at the end of
2185 the function, and thus plopping the EH sequence afterward won't
2186 change anything. */
2187 gcc_assert (!gimple_seq_may_fallthru (bodyp));
2188 gimple_seq_add_seq (&bodyp, eh_seq);
2189
2190 /* We assume that since BODYP already existed, adding EH_SEQ to it
2191 didn't change its value, and we don't have to re-set the function. */
2192 gcc_assert (bodyp == gimple_body (current_function_decl));
2193
2194 delete finally_tree;
2195 finally_tree = NULL;
2196 BITMAP_FREE (eh_region_may_contain_throw_map);
2197 eh_seq = NULL;
2198
2199 /* If this function needs a language specific EH personality routine
2200 and the frontend didn't already set one, do so now. */
2201 if (function_needs_eh_personality (fun) == eh_personality_lang
2202 && !DECL_FUNCTION_PERSONALITY (current_function_decl))
2203 DECL_FUNCTION_PERSONALITY (current_function_decl)
2204 = lang_hooks.eh_personality ();
2205
2206 return 0;
2207 }
2208
2209 } // anon namespace
2210
2211 gimple_opt_pass *
2212 make_pass_lower_eh (gcc::context *ctxt)
2213 {
2214 return new pass_lower_eh (ctxt);
2215 }
2216 \f
2217 /* Create the multiple edges from an EH_DISPATCH statement to all of
2218 the possible handlers for its EH region. Return true if there's
2219 no fallthru edge; false if there is. */
2220
2221 bool
2222 make_eh_dispatch_edges (geh_dispatch *stmt)
2223 {
2224 eh_region r;
2225 eh_catch c;
2226 basic_block src, dst;
2227
2228 r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
2229 src = gimple_bb (stmt);
2230
2231 switch (r->type)
2232 {
2233 case ERT_TRY:
2234 for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
2235 {
2236 dst = label_to_block (c->label);
2237 make_edge (src, dst, 0);
2238
2239 /* A catch-all handler doesn't have a fallthru. */
2240 if (c->type_list == NULL)
2241 return false;
2242 }
2243 break;
2244
2245 case ERT_ALLOWED_EXCEPTIONS:
2246 dst = label_to_block (r->u.allowed.label);
2247 make_edge (src, dst, 0);
2248 break;
2249
2250 default:
2251 gcc_unreachable ();
2252 }
2253
2254 return true;
2255 }
2256
2257 /* Create the single EH edge from STMT to its nearest landing pad,
2258 if there is such a landing pad within the current function. */
2259
2260 void
2261 make_eh_edges (gimple stmt)
2262 {
2263 basic_block src, dst;
2264 eh_landing_pad lp;
2265 int lp_nr;
2266
2267 lp_nr = lookup_stmt_eh_lp (stmt);
2268 if (lp_nr <= 0)
2269 return;
2270
2271 lp = get_eh_landing_pad_from_number (lp_nr);
2272 gcc_assert (lp != NULL);
2273
2274 src = gimple_bb (stmt);
2275 dst = label_to_block (lp->post_landing_pad);
2276 make_edge (src, dst, EDGE_EH);
2277 }
2278
2279 /* Do the work in redirecting EDGE_IN to NEW_BB within the EH region tree;
2280 do not actually perform the final edge redirection.
2281
2282 CHANGE_REGION is true when we're being called from cleanup_empty_eh and
2283 we intend to change the destination EH region as well; this means
2284 EH_LANDING_PAD_NR must already be set on the destination block label.
2285 If false, we're being called from generic cfg manipulation code and we
2286 should preserve our place within the region tree. */
2287
2288 static void
2289 redirect_eh_edge_1 (edge edge_in, basic_block new_bb, bool change_region)
2290 {
2291 eh_landing_pad old_lp, new_lp;
2292 basic_block old_bb;
2293 gimple throw_stmt;
2294 int old_lp_nr, new_lp_nr;
2295 tree old_label, new_label;
2296 edge_iterator ei;
2297 edge e;
2298
2299 old_bb = edge_in->dest;
2300 old_label = gimple_block_label (old_bb);
2301 old_lp_nr = EH_LANDING_PAD_NR (old_label);
2302 gcc_assert (old_lp_nr > 0);
2303 old_lp = get_eh_landing_pad_from_number (old_lp_nr);
2304
2305 throw_stmt = last_stmt (edge_in->src);
2306 gcc_assert (lookup_stmt_eh_lp (throw_stmt) == old_lp_nr);
2307
2308 new_label = gimple_block_label (new_bb);
2309
2310 /* Look for an existing region that might be using NEW_BB already. */
2311 new_lp_nr = EH_LANDING_PAD_NR (new_label);
2312 if (new_lp_nr)
2313 {
2314 new_lp = get_eh_landing_pad_from_number (new_lp_nr);
2315 gcc_assert (new_lp);
2316
2317 /* Unless CHANGE_REGION is true, the new and old landing pad
2318 had better be associated with the same EH region. */
2319 gcc_assert (change_region || new_lp->region == old_lp->region);
2320 }
2321 else
2322 {
2323 new_lp = NULL;
2324 gcc_assert (!change_region);
2325 }
2326
2327 /* Notice when we redirect the last EH edge away from OLD_BB. */
2328 FOR_EACH_EDGE (e, ei, old_bb->preds)
2329 if (e != edge_in && (e->flags & EDGE_EH))
2330 break;
2331
2332 if (new_lp)
2333 {
2334 /* NEW_LP already exists. If there are still edges into OLD_LP,
2335 there's nothing to do with the EH tree. If there are no more
2336 edges into OLD_LP, then we want to remove OLD_LP as it is unused.
2337 If CHANGE_REGION is true, then our caller is expecting to remove
2338 the landing pad. */
2339 if (e == NULL && !change_region)
2340 remove_eh_landing_pad (old_lp);
2341 }
2342 else
2343 {
2344 /* No correct landing pad exists. If there are no more edges
2345 into OLD_LP, then we can simply re-use the existing landing pad.
2346 Otherwise, we have to create a new landing pad. */
2347 if (e == NULL)
2348 {
2349 EH_LANDING_PAD_NR (old_lp->post_landing_pad) = 0;
2350 new_lp = old_lp;
2351 }
2352 else
2353 new_lp = gen_eh_landing_pad (old_lp->region);
2354 new_lp->post_landing_pad = new_label;
2355 EH_LANDING_PAD_NR (new_label) = new_lp->index;
2356 }
2357
2358 /* Maybe move the throwing statement to the new region. */
2359 if (old_lp != new_lp)
2360 {
2361 remove_stmt_from_eh_lp (throw_stmt);
2362 add_stmt_to_eh_lp (throw_stmt, new_lp->index);
2363 }
2364 }
2365
2366 /* Redirect EH edge EDGE_IN to NEW_BB. */
2367
2368 edge
2369 redirect_eh_edge (edge edge_in, basic_block new_bb)
2370 {
2371 redirect_eh_edge_1 (edge_in, new_bb, false);
2372 return ssa_redirect_edge (edge_in, new_bb);
2373 }
2374
2375 /* This is a subroutine of gimple_redirect_edge_and_branch. Update the
2376 labels for redirecting a non-fallthru EH_DISPATCH edge E to NEW_BB.
2377 The actual edge update will happen in the caller. */
2378
2379 void
2380 redirect_eh_dispatch_edge (geh_dispatch *stmt, edge e, basic_block new_bb)
2381 {
2382 tree new_lab = gimple_block_label (new_bb);
2383 bool any_changed = false;
2384 basic_block old_bb;
2385 eh_region r;
2386 eh_catch c;
2387
2388 r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
2389 switch (r->type)
2390 {
2391 case ERT_TRY:
2392 for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
2393 {
2394 old_bb = label_to_block (c->label);
2395 if (old_bb == e->dest)
2396 {
2397 c->label = new_lab;
2398 any_changed = true;
2399 }
2400 }
2401 break;
2402
2403 case ERT_ALLOWED_EXCEPTIONS:
2404 old_bb = label_to_block (r->u.allowed.label);
2405 gcc_assert (old_bb == e->dest);
2406 r->u.allowed.label = new_lab;
2407 any_changed = true;
2408 break;
2409
2410 default:
2411 gcc_unreachable ();
2412 }
2413
2414 gcc_assert (any_changed);
2415 }
2416 \f
2417 /* Helper function for operation_could_trap_p and stmt_could_throw_p. */
2418
2419 bool
2420 operation_could_trap_helper_p (enum tree_code op,
2421 bool fp_operation,
2422 bool honor_trapv,
2423 bool honor_nans,
2424 bool honor_snans,
2425 tree divisor,
2426 bool *handled)
2427 {
2428 *handled = true;
2429 switch (op)
2430 {
2431 case TRUNC_DIV_EXPR:
2432 case CEIL_DIV_EXPR:
2433 case FLOOR_DIV_EXPR:
2434 case ROUND_DIV_EXPR:
2435 case EXACT_DIV_EXPR:
2436 case CEIL_MOD_EXPR:
2437 case FLOOR_MOD_EXPR:
2438 case ROUND_MOD_EXPR:
2439 case TRUNC_MOD_EXPR:
2440 case RDIV_EXPR:
2441 if (honor_snans || honor_trapv)
2442 return true;
2443 if (fp_operation)
2444 return flag_trapping_math;
2445 if (!TREE_CONSTANT (divisor) || integer_zerop (divisor))
2446 return true;
2447 return false;
2448
2449 case LT_EXPR:
2450 case LE_EXPR:
2451 case GT_EXPR:
2452 case GE_EXPR:
2453 case LTGT_EXPR:
2454 /* Some floating point comparisons may trap. */
2455 return honor_nans;
2456
2457 case EQ_EXPR:
2458 case NE_EXPR:
2459 case UNORDERED_EXPR:
2460 case ORDERED_EXPR:
2461 case UNLT_EXPR:
2462 case UNLE_EXPR:
2463 case UNGT_EXPR:
2464 case UNGE_EXPR:
2465 case UNEQ_EXPR:
2466 return honor_snans;
2467
2468 case NEGATE_EXPR:
2469 case ABS_EXPR:
2470 case CONJ_EXPR:
2471 /* These operations don't trap with floating point. */
2472 if (honor_trapv)
2473 return true;
2474 return false;
2475
2476 case PLUS_EXPR:
2477 case MINUS_EXPR:
2478 case MULT_EXPR:
2479 /* Any floating arithmetic may trap. */
2480 if (fp_operation && flag_trapping_math)
2481 return true;
2482 if (honor_trapv)
2483 return true;
2484 return false;
2485
2486 case COMPLEX_EXPR:
2487 case CONSTRUCTOR:
2488 /* Constructing an object cannot trap. */
2489 return false;
2490
2491 default:
2492 /* Any floating arithmetic may trap. */
2493 if (fp_operation && flag_trapping_math)
2494 return true;
2495
2496 *handled = false;
2497 return false;
2498 }
2499 }
2500
2501 /* Return true if operation OP may trap. FP_OPERATION is true if OP is applied
2502 to floating-point values. HONOR_TRAPV is true if OP is applied to integer
2503 operands whose type has trapping overflow. If OP is a division operator,
2504 DIVISOR contains the value of the divisor. */
2505
2506 bool
2507 operation_could_trap_p (enum tree_code op, bool fp_operation, bool honor_trapv,
2508 tree divisor)
2509 {
2510 bool honor_nans = (fp_operation && flag_trapping_math
2511 && !flag_finite_math_only);
2512 bool honor_snans = fp_operation && flag_signaling_nans != 0;
2513 bool handled;
2514
2515 if (TREE_CODE_CLASS (op) != tcc_comparison
2516 && TREE_CODE_CLASS (op) != tcc_unary
2517 && TREE_CODE_CLASS (op) != tcc_binary)
2518 return false;
2519
2520 return operation_could_trap_helper_p (op, fp_operation, honor_trapv,
2521 honor_nans, honor_snans, divisor,
2522 &handled);
2523 }
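/* Usage sketch (illustrative): with FP_OPERATION and HONOR_TRAPV both
   false,

     operation_could_trap_p (TRUNC_DIV_EXPR, false, false, divisor)

   returns true whenever DIVISOR is not a constant or is constant zero,
   while PLUS_EXPR on ordinary wrapping integers yields false.  */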
2524
2525
2526 /* Returns true if it is possible to prove that the index of
2527 an array access REF (an ARRAY_REF expression) falls into the
2528 array bounds. */
2529
2530 static bool
2531 in_array_bounds_p (tree ref)
2532 {
2533 tree idx = TREE_OPERAND (ref, 1);
2534 tree min, max;
2535
2536 if (TREE_CODE (idx) != INTEGER_CST)
2537 return false;
2538
2539 min = array_ref_low_bound (ref);
2540 max = array_ref_up_bound (ref);
2541 if (!min
2542 || !max
2543 || TREE_CODE (min) != INTEGER_CST
2544 || TREE_CODE (max) != INTEGER_CST)
2545 return false;
2546
2547 if (tree_int_cst_lt (idx, min)
2548 || tree_int_cst_lt (max, idx))
2549 return false;
2550
2551 return true;
2552 }
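/* E.g. given "int a[10];", the access a[3] has an INTEGER_CST index
   and bounds with 0 <= 3 <= 9, so this returns true; a variable index
   such as a[i] returns false since nothing can be proved.  */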
2553
2554 /* Returns true if it is possible to prove that the range of
2555 an array access REF (an ARRAY_RANGE_REF expression) falls
2556 into the array bounds. */
2557
2558 static bool
2559 range_in_array_bounds_p (tree ref)
2560 {
2561 tree domain_type = TYPE_DOMAIN (TREE_TYPE (ref));
2562 tree range_min, range_max, min, max;
2563
2564 range_min = TYPE_MIN_VALUE (domain_type);
2565 range_max = TYPE_MAX_VALUE (domain_type);
2566 if (!range_min
2567 || !range_max
2568 || TREE_CODE (range_min) != INTEGER_CST
2569 || TREE_CODE (range_max) != INTEGER_CST)
2570 return false;
2571
2572 min = array_ref_low_bound (ref);
2573 max = array_ref_up_bound (ref);
2574 if (!min
2575 || !max
2576 || TREE_CODE (min) != INTEGER_CST
2577 || TREE_CODE (max) != INTEGER_CST)
2578 return false;
2579
2580 if (tree_int_cst_lt (range_min, min)
2581 || tree_int_cst_lt (max, range_max))
2582 return false;
2583
2584 return true;
2585 }
2586
2587 /* Return true if EXPR can trap, as in dereferencing an invalid pointer
2588 location or floating point arithmetic. Cf. the rtl version, may_trap_p.
2589 This routine expects only GIMPLE lhs or rhs input. */
2590
2591 bool
2592 tree_could_trap_p (tree expr)
2593 {
2594 enum tree_code code;
2595 bool fp_operation = false;
2596 bool honor_trapv = false;
2597 tree t, base, div = NULL_TREE;
2598
2599 if (!expr)
2600 return false;
2601
2602 code = TREE_CODE (expr);
2603 t = TREE_TYPE (expr);
2604
2605 if (t)
2606 {
2607 if (COMPARISON_CLASS_P (expr))
2608 fp_operation = FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 0)));
2609 else
2610 fp_operation = FLOAT_TYPE_P (t);
2611 honor_trapv = INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t);
2612 }
2613
2614 if (TREE_CODE_CLASS (code) == tcc_binary)
2615 div = TREE_OPERAND (expr, 1);
2616 if (operation_could_trap_p (code, fp_operation, honor_trapv, div))
2617 return true;
2618
2619 restart:
2620 switch (code)
2621 {
2622 case COMPONENT_REF:
2623 case REALPART_EXPR:
2624 case IMAGPART_EXPR:
2625 case BIT_FIELD_REF:
2626 case VIEW_CONVERT_EXPR:
2627 case WITH_SIZE_EXPR:
2628 expr = TREE_OPERAND (expr, 0);
2629 code = TREE_CODE (expr);
2630 goto restart;
2631
2632 case ARRAY_RANGE_REF:
2633 base = TREE_OPERAND (expr, 0);
2634 if (tree_could_trap_p (base))
2635 return true;
2636 if (TREE_THIS_NOTRAP (expr))
2637 return false;
2638 return !range_in_array_bounds_p (expr);
2639
2640 case ARRAY_REF:
2641 base = TREE_OPERAND (expr, 0);
2642 if (tree_could_trap_p (base))
2643 return true;
2644 if (TREE_THIS_NOTRAP (expr))
2645 return false;
2646 return !in_array_bounds_p (expr);
2647
2648 case TARGET_MEM_REF:
2649 case MEM_REF:
2650 if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR
2651 && tree_could_trap_p (TREE_OPERAND (TREE_OPERAND (expr, 0), 0)))
2652 return true;
2653 if (TREE_THIS_NOTRAP (expr))
2654 return false;
2655 /* We cannot prove that the access is in-bounds when we have
2656 variable-index TARGET_MEM_REFs. */
2657 if (code == TARGET_MEM_REF
2658 && (TMR_INDEX (expr) || TMR_INDEX2 (expr)))
2659 return true;
2660 if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR)
2661 {
2662 tree base = TREE_OPERAND (TREE_OPERAND (expr, 0), 0);
2663 offset_int off = mem_ref_offset (expr);
2664 if (wi::neg_p (off, SIGNED))
2665 return true;
2666 if (TREE_CODE (base) == STRING_CST)
2667 return wi::leu_p (TREE_STRING_LENGTH (base), off);
2668 else if (DECL_SIZE_UNIT (base) == NULL_TREE
2669 || TREE_CODE (DECL_SIZE_UNIT (base)) != INTEGER_CST
2670 || wi::leu_p (wi::to_offset (DECL_SIZE_UNIT (base)), off))
2671 return true;
2672 /* Now we are sure the first byte of the access is inside
2673 the object. */
2674 return false;
2675 }
2676 return true;
2677
2678 case INDIRECT_REF:
2679 return !TREE_THIS_NOTRAP (expr);
2680
2681 case ASM_EXPR:
2682 return TREE_THIS_VOLATILE (expr);
2683
2684 case CALL_EXPR:
2685 t = get_callee_fndecl (expr);
2686 /* Assume that calls to weak functions may trap. */
2687 if (!t || !DECL_P (t))
2688 return true;
2689 if (DECL_WEAK (t))
2690 return tree_could_trap_p (t);
2691 return false;
2692
2693 case FUNCTION_DECL:
2694 /* Assume that accesses to weak functions may trap, unless we know
2695 they are certainly defined in current TU or in some other
2696 LTO partition. */
2697 if (DECL_WEAK (expr) && !DECL_COMDAT (expr) && DECL_EXTERNAL (expr))
2698 {
2699 cgraph_node *node = cgraph_node::get (expr);
2700 if (node)
2701 node = node->function_symbol ();
2702 return !(node && node->in_other_partition);
2703 }
2704 return false;
2705
2706 case VAR_DECL:
2707 /* Assume that accesses to weak vars may trap, unless we know
2708 they are certainly defined in current TU or in some other
2709 LTO partition. */
2710 if (DECL_WEAK (expr) && !DECL_COMDAT (expr) && DECL_EXTERNAL (expr))
2711 {
2712 varpool_node *node = varpool_node::get (expr);
2713 if (node)
2714 node = node->ultimate_alias_target ();
2715 return !(node && node->in_other_partition);
2716 }
2717 return false;
2718
2719 default:
2720 return false;
2721 }
2722 }
2723
2724
2725 /* Helper for stmt_could_throw_p. Return true if STMT (assumed to be
2726 an assignment or a conditional) may throw. */
2727
2728 static bool
2729 stmt_could_throw_1_p (gimple stmt)
2730 {
2731 enum tree_code code = gimple_expr_code (stmt);
2732 bool honor_nans = false;
2733 bool honor_snans = false;
2734 bool fp_operation = false;
2735 bool honor_trapv = false;
2736 tree t;
2737 size_t i;
2738 bool handled, ret;
2739
2740 if (TREE_CODE_CLASS (code) == tcc_comparison
2741 || TREE_CODE_CLASS (code) == tcc_unary
2742 || TREE_CODE_CLASS (code) == tcc_binary)
2743 {
2744 if (is_gimple_assign (stmt)
2745 && TREE_CODE_CLASS (code) == tcc_comparison)
2746 t = TREE_TYPE (gimple_assign_rhs1 (stmt));
2747 else if (gimple_code (stmt) == GIMPLE_COND)
2748 t = TREE_TYPE (gimple_cond_lhs (stmt));
2749 else
2750 t = gimple_expr_type (stmt);
2751 fp_operation = FLOAT_TYPE_P (t);
2752 if (fp_operation)
2753 {
2754 honor_nans = flag_trapping_math && !flag_finite_math_only;
2755 honor_snans = flag_signaling_nans != 0;
2756 }
2757 else if (INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t))
2758 honor_trapv = true;
2759 }
2760
2761 /* Check if the main expression may trap. */
2762 t = is_gimple_assign (stmt) ? gimple_assign_rhs2 (stmt) : NULL;
2763 ret = operation_could_trap_helper_p (code, fp_operation, honor_trapv,
2764 honor_nans, honor_snans, t,
2765 &handled);
2766 if (handled)
2767 return ret;
2768
2769 /* If the expression does not trap, see if any of the individual operands may
2770 trap. */
2771 for (i = 0; i < gimple_num_ops (stmt); i++)
2772 if (tree_could_trap_p (gimple_op (stmt, i)))
2773 return true;
2774
2775 return false;
2776 }
2777
2778
2779 /* Return true if statement STMT could throw an exception. */
2780
2781 bool
2782 stmt_could_throw_p (gimple stmt)
2783 {
2784 if (!flag_exceptions)
2785 return false;
2786
2787 /* The only statements that can throw an exception are assignments,
2788 conditionals, calls, resx, and asms. */
2789 switch (gimple_code (stmt))
2790 {
2791 case GIMPLE_RESX:
2792 return true;
2793
2794 case GIMPLE_CALL:
2795 return !gimple_call_nothrow_p (as_a <gcall *> (stmt));
2796
2797 case GIMPLE_ASSIGN:
2798 case GIMPLE_COND:
2799 if (!cfun->can_throw_non_call_exceptions)
2800 return false;
2801 return stmt_could_throw_1_p (stmt);
2802
2803 case GIMPLE_ASM:
2804 if (!cfun->can_throw_non_call_exceptions)
2805 return false;
2806 return gimple_asm_volatile_p (as_a <gasm *> (stmt));
2807
2808 default:
2809 return false;
2810 }
2811 }
2812
2813
2814 /* Return true if expression T could throw an exception. */
2815
2816 bool
2817 tree_could_throw_p (tree t)
2818 {
2819 if (!flag_exceptions)
2820 return false;
2821 if (TREE_CODE (t) == MODIFY_EXPR)
2822 {
2823 if (cfun->can_throw_non_call_exceptions
2824 && tree_could_trap_p (TREE_OPERAND (t, 0)))
2825 return true;
2826 t = TREE_OPERAND (t, 1);
2827 }
2828
2829 if (TREE_CODE (t) == WITH_SIZE_EXPR)
2830 t = TREE_OPERAND (t, 0);
2831 if (TREE_CODE (t) == CALL_EXPR)
2832 return (call_expr_flags (t) & ECF_NOTHROW) == 0;
2833 if (cfun->can_throw_non_call_exceptions)
2834 return tree_could_trap_p (t);
2835 return false;
2836 }
2837
2838 /* Return true if STMT can throw an exception that is not caught within
2839 the current function (CFUN). */
2840
2841 bool
2842 stmt_can_throw_external (gimple stmt)
2843 {
2844 int lp_nr;
2845
2846 if (!stmt_could_throw_p (stmt))
2847 return false;
2848
2849 lp_nr = lookup_stmt_eh_lp (stmt);
2850 return lp_nr == 0;
2851 }
2852
2853 /* Return true if STMT can throw an exception that is caught within
2854 the current function (CFUN). */
2855
2856 bool
2857 stmt_can_throw_internal (gimple stmt)
2858 {
2859 int lp_nr;
2860
2861 if (!stmt_could_throw_p (stmt))
2862 return false;
2863
2864 lp_nr = lookup_stmt_eh_lp (stmt);
2865 return lp_nr > 0;
2866 }
2867
2868 /* Given a statement STMT in IFUN, if STMT can no longer throw, then
2869 remove any entry it might have from the EH table. Return true if
2870 any change was made. */
2871
2872 bool
2873 maybe_clean_eh_stmt_fn (struct function *ifun, gimple stmt)
2874 {
2875 if (stmt_could_throw_p (stmt))
2876 return false;
2877 return remove_stmt_from_eh_lp_fn (ifun, stmt);
2878 }
2879
2880 /* Likewise, but always use the current function. */
2881
2882 bool
2883 maybe_clean_eh_stmt (gimple stmt)
2884 {
2885 return maybe_clean_eh_stmt_fn (cfun, stmt);
2886 }
2887
2888 /* Given a statement OLD_STMT and a new statement NEW_STMT that has replaced
2889 OLD_STMT in the function, remove OLD_STMT from the EH table and put NEW_STMT
2890 in the table if it should be in there. Return TRUE if a replacement was
2891 done that may require an EH edge purge. */
2892
2893 bool
2894 maybe_clean_or_replace_eh_stmt (gimple old_stmt, gimple new_stmt)
2895 {
2896 int lp_nr = lookup_stmt_eh_lp (old_stmt);
2897
2898 if (lp_nr != 0)
2899 {
2900 bool new_stmt_could_throw = stmt_could_throw_p (new_stmt);
2901
2902 if (new_stmt == old_stmt && new_stmt_could_throw)
2903 return false;
2904
2905 remove_stmt_from_eh_lp (old_stmt);
2906 if (new_stmt_could_throw)
2907 {
2908 add_stmt_to_eh_lp (new_stmt, lp_nr);
2909 return false;
2910 }
2911 else
2912 return true;
2913 }
2914
2915 return false;
2916 }
2917
2918 /* Given a statement OLD_STMT in OLD_FUN and a duplicate statement NEW_STMT
2919 in NEW_FUN, copy the EH table data from OLD_STMT to NEW_STMT. The MAP
2920 operand is the return value of duplicate_eh_regions. */
2921
2922 bool
2923 maybe_duplicate_eh_stmt_fn (struct function *new_fun, gimple new_stmt,
2924 struct function *old_fun, gimple old_stmt,
2925 hash_map<void *, void *> *map,
2926 int default_lp_nr)
2927 {
2928 int old_lp_nr, new_lp_nr;
2929
2930 if (!stmt_could_throw_p (new_stmt))
2931 return false;
2932
2933 old_lp_nr = lookup_stmt_eh_lp_fn (old_fun, old_stmt);
2934 if (old_lp_nr == 0)
2935 {
2936 if (default_lp_nr == 0)
2937 return false;
2938 new_lp_nr = default_lp_nr;
2939 }
2940 else if (old_lp_nr > 0)
2941 {
2942 eh_landing_pad old_lp, new_lp;
2943
2944 old_lp = (*old_fun->eh->lp_array)[old_lp_nr];
2945 new_lp = static_cast<eh_landing_pad> (*map->get (old_lp));
2946 new_lp_nr = new_lp->index;
2947 }
2948 else
2949 {
2950 eh_region old_r, new_r;
2951
2952 old_r = (*old_fun->eh->region_array)[-old_lp_nr];
2953 new_r = static_cast<eh_region> (*map->get (old_r));
2954 new_lp_nr = -new_r->index;
2955 }
2956
2957 add_stmt_to_eh_lp_fn (new_fun, new_stmt, new_lp_nr);
2958 return true;
2959 }
2960
2961 /* Similar, but both OLD_STMT and NEW_STMT are within the current function,
2962 and thus no remapping is required. */
2963
2964 bool
2965 maybe_duplicate_eh_stmt (gimple new_stmt, gimple old_stmt)
2966 {
2967 int lp_nr;
2968
2969 if (!stmt_could_throw_p (new_stmt))
2970 return false;
2971
2972 lp_nr = lookup_stmt_eh_lp (old_stmt);
2973 if (lp_nr == 0)
2974 return false;
2975
2976 add_stmt_to_eh_lp (new_stmt, lp_nr);
2977 return true;
2978 }
2979 \f
2980 /* Returns TRUE if oneh and twoh are exception handlers (gimple_try_cleanup of
2981 GIMPLE_TRY) that are similar enough to be considered the same. Currently
2982 this only handles handlers consisting of a single call, as that's the
2983 important case for C++: a destructor call for a particular object showing
2984 up in multiple handlers. */
2985
2986 static bool
2987 same_handler_p (gimple_seq oneh, gimple_seq twoh)
2988 {
2989 gimple_stmt_iterator gsi;
2990 gimple ones, twos;
2991 unsigned int ai;
2992
2993 gsi = gsi_start (oneh);
2994 if (!gsi_one_before_end_p (gsi))
2995 return false;
2996 ones = gsi_stmt (gsi);
2997
2998 gsi = gsi_start (twoh);
2999 if (!gsi_one_before_end_p (gsi))
3000 return false;
3001 twos = gsi_stmt (gsi);
3002
3003 if (!is_gimple_call (ones)
3004 || !is_gimple_call (twos)
3005 || gimple_call_lhs (ones)
3006 || gimple_call_lhs (twos)
3007 || gimple_call_chain (ones)
3008 || gimple_call_chain (twos)
3009 || !gimple_call_same_target_p (ones, twos)
3010 || gimple_call_num_args (ones) != gimple_call_num_args (twos))
3011 return false;
3012
3013 for (ai = 0; ai < gimple_call_num_args (ones); ++ai)
3014 if (!operand_equal_p (gimple_call_arg (ones, ai),
3015 gimple_call_arg (twos, ai), 0))
3016 return false;
3017
3018 return true;
3019 }
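/* E.g. two handlers that each consist of the single call
   "A::~A (&a)" with identical arguments are considered the same;
   any lhs, static chain, or differing argument disqualifies them.  */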
3020
3021 /* Optimize
3022 try { A() } finally { try { ~B() } catch { ~A() } }
3023 try { ... } finally { ~A() }
3024 into
3025 try { A() } catch { ~B() }
3026 try { ~B() ... } finally { ~A() }
3027
3028 This occurs frequently in C++, where A is a local variable and B is a
3029 temporary used in the initializer for A. */
3030
3031 static void
3032 optimize_double_finally (gtry *one, gtry *two)
3033 {
3034 gimple oneh;
3035 gimple_stmt_iterator gsi;
3036 gimple_seq cleanup;
3037
3038 cleanup = gimple_try_cleanup (one);
3039 gsi = gsi_start (cleanup);
3040 if (!gsi_one_before_end_p (gsi))
3041 return;
3042
3043 oneh = gsi_stmt (gsi);
3044 if (gimple_code (oneh) != GIMPLE_TRY
3045 || gimple_try_kind (oneh) != GIMPLE_TRY_CATCH)
3046 return;
3047
3048 if (same_handler_p (gimple_try_cleanup (oneh), gimple_try_cleanup (two)))
3049 {
3050 gimple_seq seq = gimple_try_eval (oneh);
3051
3052 gimple_try_set_cleanup (one, seq);
3053 gimple_try_set_kind (one, GIMPLE_TRY_CATCH);
3054 seq = copy_gimple_seq_and_replace_locals (seq);
3055 gimple_seq_add_seq (&seq, gimple_try_eval (two));
3056 gimple_try_set_eval (two, seq);
3057 }
3058 }
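/* An illustrative C++ shape that produces this pattern:

     A a = A (B ());

   where the temporary B must be destroyed whether or not A's
   construction throws, and ~A guards the statements that follow.  */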
3059
3060 /* Perform EH refactoring optimizations that are simpler to do when code
3061 flow has been lowered but EH structures haven't. */
3062
3063 static void
3064 refactor_eh_r (gimple_seq seq)
3065 {
3066 gimple_stmt_iterator gsi;
3067 gimple one, two;
3068
3069 one = NULL;
3070 two = NULL;
3071 gsi = gsi_start (seq);
3072 while (1)
3073 {
3074 one = two;
3075 if (gsi_end_p (gsi))
3076 two = NULL;
3077 else
3078 two = gsi_stmt (gsi);
3079 if (one && two)
3080 if (gtry *try_one = dyn_cast <gtry *> (one))
3081 if (gtry *try_two = dyn_cast <gtry *> (two))
3082 if (gimple_try_kind (try_one) == GIMPLE_TRY_FINALLY
3083 && gimple_try_kind (try_two) == GIMPLE_TRY_FINALLY)
3084 optimize_double_finally (try_one, try_two);
3085 if (one)
3086 switch (gimple_code (one))
3087 {
3088 case GIMPLE_TRY:
3089 refactor_eh_r (gimple_try_eval (one));
3090 refactor_eh_r (gimple_try_cleanup (one));
3091 break;
3092 case GIMPLE_CATCH:
3093 refactor_eh_r (gimple_catch_handler (as_a <gcatch *> (one)));
3094 break;
3095 case GIMPLE_EH_FILTER:
3096 refactor_eh_r (gimple_eh_filter_failure (one));
3097 break;
3098 case GIMPLE_EH_ELSE:
3099 {
3100 geh_else *eh_else_stmt = as_a <geh_else *> (one);
3101 refactor_eh_r (gimple_eh_else_n_body (eh_else_stmt));
3102 refactor_eh_r (gimple_eh_else_e_body (eh_else_stmt));
3103 }
3104 break;
3105 default:
3106 break;
3107 }
3108 if (two)
3109 gsi_next (&gsi);
3110 else
3111 break;
3112 }
3113 }
3114
3115 namespace {
3116
3117 const pass_data pass_data_refactor_eh =
3118 {
3119 GIMPLE_PASS, /* type */
3120 "ehopt", /* name */
3121 OPTGROUP_NONE, /* optinfo_flags */
3122 TV_TREE_EH, /* tv_id */
3123 PROP_gimple_lcf, /* properties_required */
3124 0, /* properties_provided */
3125 0, /* properties_destroyed */
3126 0, /* todo_flags_start */
3127 0, /* todo_flags_finish */
3128 };
3129
3130 class pass_refactor_eh : public gimple_opt_pass
3131 {
3132 public:
3133 pass_refactor_eh (gcc::context *ctxt)
3134 : gimple_opt_pass (pass_data_refactor_eh, ctxt)
3135 {}
3136
3137 /* opt_pass methods: */
3138 virtual bool gate (function *) { return flag_exceptions != 0; }
3139 virtual unsigned int execute (function *)
3140 {
3141 refactor_eh_r (gimple_body (current_function_decl));
3142 return 0;
3143 }
3144
3145 }; // class pass_refactor_eh
3146
3147 } // anon namespace
3148
3149 gimple_opt_pass *
3150 make_pass_refactor_eh (gcc::context *ctxt)
3151 {
3152 return new pass_refactor_eh (ctxt);
3153 }
3154 \f
3155 /* At the end of gimple optimization, we can lower RESX. */
3156
3157 static bool
3158 lower_resx (basic_block bb, gresx *stmt,
3159 hash_map<eh_region, tree> *mnt_map)
3160 {
3161 int lp_nr;
3162 eh_region src_r, dst_r;
3163 gimple_stmt_iterator gsi;
3164 gimple x;
3165 tree fn, src_nr;
3166 bool ret = false;
3167
3168 lp_nr = lookup_stmt_eh_lp (stmt);
3169 if (lp_nr != 0)
3170 dst_r = get_eh_region_from_lp_number (lp_nr);
3171 else
3172 dst_r = NULL;
3173
3174 src_r = get_eh_region_from_number (gimple_resx_region (stmt));
3175 gsi = gsi_last_bb (bb);
3176
3177 if (src_r == NULL)
3178 {
3179 /* We can wind up with no source region when pass_cleanup_eh shows
3180 that there are no entries into an eh region and deletes it, but
3181 then the block that contains the resx isn't removed. This can
3182 happen without optimization when the switch statement created by
3183 lower_try_finally_switch isn't simplified to remove the eh case.
3184
3185 Resolve this by expanding the resx node to an abort. */
3186
3187 fn = builtin_decl_implicit (BUILT_IN_TRAP);
3188 x = gimple_build_call (fn, 0);
3189 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3190
3191 while (EDGE_COUNT (bb->succs) > 0)
3192 remove_edge (EDGE_SUCC (bb, 0));
3193 }
3194 else if (dst_r)
3195 {
3196 /* When we have a destination region, we resolve this by copying
3197 the excptr and filter values into place, and changing the edge
3198 to immediately after the landing pad. */
3199 edge e;
3200
3201 if (lp_nr < 0)
3202 {
3203 basic_block new_bb;
3204 tree lab;
3205
3206 /* We are resuming into a MUST_NOT_THROW region. Expand a call to
3207 the failure decl into a new block, if needed. */
3208 gcc_assert (dst_r->type == ERT_MUST_NOT_THROW);
3209
3210 tree *slot = mnt_map->get (dst_r);
3211 if (slot == NULL)
3212 {
3213 gimple_stmt_iterator gsi2;
3214
3215 new_bb = create_empty_bb (bb);
3216 add_bb_to_loop (new_bb, bb->loop_father);
3217 lab = gimple_block_label (new_bb);
3218 gsi2 = gsi_start_bb (new_bb);
3219
3220 fn = dst_r->u.must_not_throw.failure_decl;
3221 x = gimple_build_call (fn, 0);
3222 gimple_set_location (x, dst_r->u.must_not_throw.failure_loc);
3223 gsi_insert_after (&gsi2, x, GSI_CONTINUE_LINKING);
3224
3225 mnt_map->put (dst_r, lab);
3226 }
3227 else
3228 {
3229 lab = *slot;
3230 new_bb = label_to_block (lab);
3231 }
3232
3233 gcc_assert (EDGE_COUNT (bb->succs) == 0);
3234 e = make_edge (bb, new_bb, EDGE_FALLTHRU);
3235 e->count = bb->count;
3236 e->probability = REG_BR_PROB_BASE;
3237 }
3238 else
3239 {
3240 edge_iterator ei;
3241 tree dst_nr = build_int_cst (integer_type_node, dst_r->index);
3242
3243 fn = builtin_decl_implicit (BUILT_IN_EH_COPY_VALUES);
3244 src_nr = build_int_cst (integer_type_node, src_r->index);
3245 x = gimple_build_call (fn, 2, dst_nr, src_nr);
3246 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3247
3248 /* Update the flags for the outgoing edge. */
3249 e = single_succ_edge (bb);
3250 gcc_assert (e->flags & EDGE_EH);
3251 e->flags = (e->flags & ~EDGE_EH) | EDGE_FALLTHRU;
3252
3253 /* If there are no more EH users of the landing pad, delete it. */
3254 FOR_EACH_EDGE (e, ei, e->dest->preds)
3255 if (e->flags & EDGE_EH)
3256 break;
3257 if (e == NULL)
3258 {
3259 eh_landing_pad lp = get_eh_landing_pad_from_number (lp_nr);
3260 remove_eh_landing_pad (lp);
3261 }
3262 }
3263
3264 ret = true;
3265 }
3266 else
3267 {
3268 tree var;
3269
3270 /* When we don't have a destination region, this exception escapes
3271 up the call chain. We resolve this by generating a call to the
3272 _Unwind_Resume library function. */
3273
3274 /* The ARM EABI redefines _Unwind_Resume as __cxa_end_cleanup
3275 with no arguments for C++ and Java. Check for that. */
3276 if (src_r->use_cxa_end_cleanup)
3277 {
3278 fn = builtin_decl_implicit (BUILT_IN_CXA_END_CLEANUP);
3279 x = gimple_build_call (fn, 0);
3280 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3281 }
3282 else
3283 {
3284 fn = builtin_decl_implicit (BUILT_IN_EH_POINTER);
3285 src_nr = build_int_cst (integer_type_node, src_r->index);
3286 x = gimple_build_call (fn, 1, src_nr);
3287 var = create_tmp_var (ptr_type_node);
3288 var = make_ssa_name (var, x);
3289 gimple_call_set_lhs (x, var);
3290 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3291
3292 fn = builtin_decl_implicit (BUILT_IN_UNWIND_RESUME);
3293 x = gimple_build_call (fn, 1, var);
3294 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3295 }
3296
3297 gcc_assert (EDGE_COUNT (bb->succs) == 0);
3298 }
3299
3300 gsi_remove (&gsi, true);
3301
3302 return ret;
3303 }
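/* In summary, a GIMPLE_RESX is replaced by one of the following
   (sketch):
   - no source region: a call to __builtin_trap;
   - destination is MUST_NOT_THROW: a call to the region's
     failure_decl (e.g. std::terminate);
   - other destination region: __builtin_eh_copy_values (dst, src)
     plus a fallthru edge past the landing pad;
   - exception escapes the function: _Unwind_Resume on the value of
     __builtin_eh_pointer (src), or __cxa_end_cleanup on ARM EABI.  */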
3304
3305 namespace {
3306
3307 const pass_data pass_data_lower_resx =
3308 {
3309 GIMPLE_PASS, /* type */
3310 "resx", /* name */
3311 OPTGROUP_NONE, /* optinfo_flags */
3312 TV_TREE_EH, /* tv_id */
3313 PROP_gimple_lcf, /* properties_required */
3314 0, /* properties_provided */
3315 0, /* properties_destroyed */
3316 0, /* todo_flags_start */
3317 0, /* todo_flags_finish */
3318 };
3319
3320 class pass_lower_resx : public gimple_opt_pass
3321 {
3322 public:
3323 pass_lower_resx (gcc::context *ctxt)
3324 : gimple_opt_pass (pass_data_lower_resx, ctxt)
3325 {}
3326
3327 /* opt_pass methods: */
3328 virtual bool gate (function *) { return flag_exceptions != 0; }
3329 virtual unsigned int execute (function *);
3330
3331 }; // class pass_lower_resx
3332
3333 unsigned
3334 pass_lower_resx::execute (function *fun)
3335 {
3336 basic_block bb;
3337 bool dominance_invalidated = false;
3338 bool any_rewritten = false;
3339
3340 hash_map<eh_region, tree> mnt_map;
3341
3342 FOR_EACH_BB_FN (bb, fun)
3343 {
3344 gimple last = last_stmt (bb);
3345 if (last && is_gimple_resx (last))
3346 {
3347 dominance_invalidated |=
3348 lower_resx (bb, as_a <gresx *> (last), &mnt_map);
3349 any_rewritten = true;
3350 }
3351 }
3352
3353 if (dominance_invalidated)
3354 {
3355 free_dominance_info (CDI_DOMINATORS);
3356 free_dominance_info (CDI_POST_DOMINATORS);
3357 }
3358
3359 return any_rewritten ? TODO_update_ssa_only_virtuals : 0;
3360 }
3361
3362 } // anon namespace
3363
3364 gimple_opt_pass *
3365 make_pass_lower_resx (gcc::context *ctxt)
3366 {
3367 return new pass_lower_resx (ctxt);
3368 }
3369
3370 /* Try to optimize var = {v} {CLOBBER} stmts followed just by
3371 external throw. */
3372
3373 static void
3374 optimize_clobbers (basic_block bb)
3375 {
3376 gimple_stmt_iterator gsi = gsi_last_bb (bb);
3377 bool any_clobbers = false;
3378 bool seen_stack_restore = false;
3379 edge_iterator ei;
3380 edge e;
3381
3382 /* Only optimize anything if the bb contains at least one clobber,
3383 ends with resx (checked by caller), optionally contains some
3384 debug stmts or labels and at most one __builtin_stack_restore
3385 call, and has an incoming EH edge. */
3386 for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3387 {
3388 gimple stmt = gsi_stmt (gsi);
3389 if (is_gimple_debug (stmt))
3390 continue;
3391 if (gimple_clobber_p (stmt))
3392 {
3393 any_clobbers = true;
3394 continue;
3395 }
3396 if (!seen_stack_restore
3397 && gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
3398 {
3399 seen_stack_restore = true;
3400 continue;
3401 }
3402 if (gimple_code (stmt) == GIMPLE_LABEL)
3403 break;
3404 return;
3405 }
3406 if (!any_clobbers)
3407 return;
3408 FOR_EACH_EDGE (e, ei, bb->preds)
3409 if (e->flags & EDGE_EH)
3410 break;
3411 if (e == NULL)
3412 return;
3413 gsi = gsi_last_bb (bb);
3414 for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3415 {
3416 gimple stmt = gsi_stmt (gsi);
3417 if (!gimple_clobber_p (stmt))
3418 continue;
3419 unlink_stmt_vdef (stmt);
3420 gsi_remove (&gsi, true);
3421 release_defs (stmt);
3422 }
3423 }
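/* E.g. a block of the form

     <landing pad>:
       a = {v} {CLOBBER};
       b = {v} {CLOBBER};
       resx N;

   where the resx throws externally keeps nothing live, so both
   clobbers are deleted outright.  */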
3424
3425 /* Try to sink var = {v} {CLOBBER} stmts followed just by
3426 internal throw to successor BB. */
3427
3428 static int
3429 sink_clobbers (basic_block bb)
3430 {
3431 edge e;
3432 edge_iterator ei;
3433 gimple_stmt_iterator gsi, dgsi;
3434 basic_block succbb;
3435 bool any_clobbers = false;
3436 unsigned todo = 0;
3437
3438 /* Only optimize if BB has a single EH successor and
3439 all predecessor edges are EH too. */
3440 if (!single_succ_p (bb)
3441 || (single_succ_edge (bb)->flags & EDGE_EH) == 0)
3442 return 0;
3443
3444 FOR_EACH_EDGE (e, ei, bb->preds)
3445 {
3446 if ((e->flags & EDGE_EH) == 0)
3447 return 0;
3448 }
3449
3450 /* And BB contains only CLOBBER stmts before the final
3451 RESX. */
3452 gsi = gsi_last_bb (bb);
3453 for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3454 {
3455 gimple stmt = gsi_stmt (gsi);
3456 if (is_gimple_debug (stmt))
3457 continue;
3458 if (gimple_code (stmt) == GIMPLE_LABEL)
3459 break;
3460 if (!gimple_clobber_p (stmt))
3461 return 0;
3462 any_clobbers = true;
3463 }
3464 if (!any_clobbers)
3465 return 0;
3466
3467 edge succe = single_succ_edge (bb);
3468 succbb = succe->dest;
3469
3470 /* See if there is a virtual PHI node to take an updated virtual
3471 operand from. */
3472 gphi *vphi = NULL;
3473 tree vuse = NULL_TREE;
3474 for (gphi_iterator gpi = gsi_start_phis (succbb);
3475 !gsi_end_p (gpi); gsi_next (&gpi))
3476 {
3477 tree res = gimple_phi_result (gpi.phi ());
3478 if (virtual_operand_p (res))
3479 {
3480 vphi = gpi.phi ();
3481 vuse = res;
3482 break;
3483 }
3484 }
3485
3486 dgsi = gsi_after_labels (succbb);
3487 gsi = gsi_last_bb (bb);
3488 for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3489 {
3490 gimple stmt = gsi_stmt (gsi);
3491 tree lhs;
3492 if (is_gimple_debug (stmt))
3493 continue;
3494 if (gimple_code (stmt) == GIMPLE_LABEL)
3495 break;
3496 lhs = gimple_assign_lhs (stmt);
3497 /* Unfortunately we don't have dominance info updated at this
3498 point, so checking if
3499 dominated_by_p (CDI_DOMINATORS, succbb,
3500 gimple_bb (SSA_NAME_DEF_STMT (TREE_OPERAND (lhs, 0))))
3501 would be too costly. Thus, avoid sinking any clobbers that
3502 refer to non-(D) SSA_NAMEs. */
3503 if (TREE_CODE (lhs) == MEM_REF
3504 && TREE_CODE (TREE_OPERAND (lhs, 0)) == SSA_NAME
3505 && !SSA_NAME_IS_DEFAULT_DEF (TREE_OPERAND (lhs, 0)))
3506 {
3507 unlink_stmt_vdef (stmt);
3508 gsi_remove (&gsi, true);
3509 release_defs (stmt);
3510 continue;
3511 }
3512
3513 /* As we do not change stmt order when sinking across a
3514 forwarder edge we can keep virtual operands in place. */
3515 gsi_remove (&gsi, false);
3516 gsi_insert_before (&dgsi, stmt, GSI_NEW_STMT);
3517
3518 /* But adjust virtual operands if we sunk across a PHI node. */
3519 if (vuse)
3520 {
3521 gimple use_stmt;
3522 imm_use_iterator iter;
3523 use_operand_p use_p;
3524 FOR_EACH_IMM_USE_STMT (use_stmt, iter, vuse)
3525 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
3526 SET_USE (use_p, gimple_vdef (stmt));
3527 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (vuse))
3528 {
3529 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_vdef (stmt)) = 1;
3530 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (vuse) = 0;
3531 }
3532 /* Adjust the incoming virtual operand. */
3533 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (vphi, succe), gimple_vuse (stmt));
3534 SET_USE (gimple_vuse_op (stmt), vuse);
3535 }
3536 /* If SUCCBB has multiple predecessors but no virtual PHI node,
3537 arrange for virtual operands to be renamed. */
3538 else if (gimple_vuse_op (stmt) != NULL_USE_OPERAND_P
3539 && !single_pred_p (succbb))
3540 {
3541 /* In this case there will be no use of the VDEF of this stmt.
3542 ??? Unless this is a secondary opportunity and we have not
3543 removed unreachable blocks yet, so we cannot assert this.
3544 Which also means we will end up renaming too many times. */
3545 SET_USE (gimple_vuse_op (stmt), gimple_vop (cfun));
3546 mark_virtual_operands_for_renaming (cfun);
3547 todo |= TODO_update_ssa_only_virtuals;
3548 }
3549 }
3550
3551 return todo;
3552 }
3553
3554 /* At the end of inlining, we can lower EH_DISPATCH. Return true when
3555 we have found some duplicate labels and removed some edges. */
3556
3557 static bool
3558 lower_eh_dispatch (basic_block src, geh_dispatch *stmt)
3559 {
3560 gimple_stmt_iterator gsi;
3561 int region_nr;
3562 eh_region r;
3563 tree filter, fn;
3564 gimple x;
3565 bool redirected = false;
3566
3567 region_nr = gimple_eh_dispatch_region (stmt);
3568 r = get_eh_region_from_number (region_nr);
3569
3570 gsi = gsi_last_bb (src);
3571
3572 switch (r->type)
3573 {
3574 case ERT_TRY:
3575 {
3576 auto_vec<tree> labels;
3577 tree default_label = NULL;
3578 eh_catch c;
3579 edge_iterator ei;
3580 edge e;
3581 hash_set<tree> seen_values;
3582
3583 /* Collect the labels for a switch. Zero the post_landing_pad
3584 field because we'll no longer have anything keeping these labels
3585 in existence and the optimizer will be free to merge these
3586 blocks at will. */
3587 for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
3588 {
3589 tree tp_node, flt_node, lab = c->label;
3590 bool have_label = false;
3591
3592 c->label = NULL;
3593 tp_node = c->type_list;
3594 flt_node = c->filter_list;
3595
3596 if (tp_node == NULL)
3597 {
3598 default_label = lab;
3599 break;
3600 }
3601 do
3602 {
3603 /* Filter out duplicate labels that arise when this handler
3604 is shadowed by an earlier one. When no labels are
3605 attached to the handler anymore, we remove
3606 the corresponding edge and then we delete unreachable
3607 blocks at the end of this pass. */
3608 if (! seen_values.contains (TREE_VALUE (flt_node)))
3609 {
3610 tree t = build_case_label (TREE_VALUE (flt_node),
3611 NULL, lab);
3612 labels.safe_push (t);
3613 seen_values.add (TREE_VALUE (flt_node));
3614 have_label = true;
3615 }
3616
3617 tp_node = TREE_CHAIN (tp_node);
3618 flt_node = TREE_CHAIN (flt_node);
3619 }
3620 while (tp_node);
3621 if (! have_label)
3622 {
3623 remove_edge (find_edge (src, label_to_block (lab)));
3624 redirected = true;
3625 }
3626 }
3627
3628 /* Clean up the edge flags. */
3629 FOR_EACH_EDGE (e, ei, src->succs)
3630 {
3631 if (e->flags & EDGE_FALLTHRU)
3632 {
3633 /* If there was no catch-all, use the fallthru edge. */
3634 if (default_label == NULL)
3635 default_label = gimple_block_label (e->dest);
3636 e->flags &= ~EDGE_FALLTHRU;
3637 }
3638 }
3639 gcc_assert (default_label != NULL);
3640
3641 /* Don't generate a switch if there's only a default case.
3642 This is common in the form of try { A; } catch (...) { B; }. */
3643 if (!labels.exists ())
3644 {
3645 e = single_succ_edge (src);
3646 e->flags |= EDGE_FALLTHRU;
3647 }
3648 else
3649 {
3650 fn = builtin_decl_implicit (BUILT_IN_EH_FILTER);
3651 x = gimple_build_call (fn, 1, build_int_cst (integer_type_node,
3652 region_nr));
3653 filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)));
3654 filter = make_ssa_name (filter, x);
3655 gimple_call_set_lhs (x, filter);
3656 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3657
3658 /* Turn the default label into a default case. */
3659 default_label = build_case_label (NULL, NULL, default_label);
3660 sort_case_labels (labels);
3661
3662 x = gimple_build_switch (filter, default_label, labels);
3663 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3664 }
3665 }
3666 break;
3667
3668 case ERT_ALLOWED_EXCEPTIONS:
3669 {
3670 edge b_e = BRANCH_EDGE (src);
3671 edge f_e = FALLTHRU_EDGE (src);
3672
3673 fn = builtin_decl_implicit (BUILT_IN_EH_FILTER);
3674 x = gimple_build_call (fn, 1, build_int_cst (integer_type_node,
3675 region_nr));
3676 filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)));
3677 filter = make_ssa_name (filter, x);
3678 gimple_call_set_lhs (x, filter);
3679 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3680
3681 r->u.allowed.label = NULL;
3682 x = gimple_build_cond (EQ_EXPR, filter,
3683 build_int_cst (TREE_TYPE (filter),
3684 r->u.allowed.filter),
3685 NULL_TREE, NULL_TREE);
3686 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3687
3688 b_e->flags = b_e->flags | EDGE_TRUE_VALUE;
3689 f_e->flags = (f_e->flags & ~EDGE_FALLTHRU) | EDGE_FALSE_VALUE;
3690 }
3691 break;
3692
3693 default:
3694 gcc_unreachable ();
3695 }
3696
3697 /* Replace the EH_DISPATCH with the SWITCH or COND generated above. */
3698 gsi_remove (&gsi, true);
3699 return redirected;
3700 }
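/* Illustrative sketch (label names invented): an EH_DISPATCH for a
   try region with two typed catches lowers to

     filter = __builtin_eh_filter (<region-nr>);
     switch (filter) { case 1: goto L1; case 2: goto L2;
		       default: goto L_DEFAULT; }

   while an ERT_ALLOWED_EXCEPTIONS dispatch becomes a single equality
   test of the filter value.  */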
3701
3702 namespace {
3703
3704 const pass_data pass_data_lower_eh_dispatch =
3705 {
3706 GIMPLE_PASS, /* type */
3707 "ehdisp", /* name */
3708 OPTGROUP_NONE, /* optinfo_flags */
3709 TV_TREE_EH, /* tv_id */
3710 PROP_gimple_lcf, /* properties_required */
3711 0, /* properties_provided */
3712 0, /* properties_destroyed */
3713 0, /* todo_flags_start */
3714 0, /* todo_flags_finish */
3715 };
3716
3717 class pass_lower_eh_dispatch : public gimple_opt_pass
3718 {
3719 public:
3720 pass_lower_eh_dispatch (gcc::context *ctxt)
3721 : gimple_opt_pass (pass_data_lower_eh_dispatch, ctxt)
3722 {}
3723
3724 /* opt_pass methods: */
3725 virtual bool gate (function *fun) { return fun->eh->region_tree != NULL; }
3726 virtual unsigned int execute (function *);
3727
3728 }; // class pass_lower_eh_dispatch
3729
3730 unsigned
3731 pass_lower_eh_dispatch::execute (function *fun)
3732 {
3733 basic_block bb;
3734 int flags = 0;
3735 bool redirected = false;
3736
3737 assign_filter_values ();
3738
3739 FOR_EACH_BB_FN (bb, fun)
3740 {
3741 gimple last = last_stmt (bb);
3742 if (last == NULL)
3743 continue;
3744 if (gimple_code (last) == GIMPLE_EH_DISPATCH)
3745 {
3746 redirected |= lower_eh_dispatch (bb,
3747 as_a <geh_dispatch *> (last));
3748 flags |= TODO_update_ssa_only_virtuals;
3749 }
3750 else if (gimple_code (last) == GIMPLE_RESX)
3751 {
3752 if (stmt_can_throw_external (last))
3753 optimize_clobbers (bb);
3754 else
3755 flags |= sink_clobbers (bb);
3756 }
3757 }
3758
3759 if (redirected)
3760 delete_unreachable_blocks ();
3761 return flags;
3762 }
3763
3764 } // anon namespace
3765
3766 gimple_opt_pass *
3767 make_pass_lower_eh_dispatch (gcc::context *ctxt)
3768 {
3769 return new pass_lower_eh_dispatch (ctxt);
3770 }
3771 \f
3772 /* Walk statements, see what regions and, optionally, landing pads
3773 are really referenced.
3774
3775 Returns in R_REACHABLEP an sbitmap with bits set for reachable regions,
3776 and in LP_REACHABLEP an sbitmap with bits set for reachable landing pads.
3777
3778 Passing NULL for LP_REACHABLE is valid, in this case only reachable
3779 regions are marked.
3780
3781 The caller is responsible for freeing the returned sbitmaps. */
3782
3783 static void
3784 mark_reachable_handlers (sbitmap *r_reachablep, sbitmap *lp_reachablep)
3785 {
3786 sbitmap r_reachable, lp_reachable;
3787 basic_block bb;
3788 bool mark_landing_pads = (lp_reachablep != NULL);
3789 gcc_checking_assert (r_reachablep != NULL);
3790
3791 r_reachable = sbitmap_alloc (cfun->eh->region_array->length ());
3792 bitmap_clear (r_reachable);
3793 *r_reachablep = r_reachable;
3794
3795 if (mark_landing_pads)
3796 {
3797 lp_reachable = sbitmap_alloc (cfun->eh->lp_array->length ());
3798 bitmap_clear (lp_reachable);
3799 *lp_reachablep = lp_reachable;
3800 }
3801 else
3802 lp_reachable = NULL;
3803
3804 FOR_EACH_BB_FN (bb, cfun)
3805 {
3806 gimple_stmt_iterator gsi;
3807
3808 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3809 {
3810 gimple stmt = gsi_stmt (gsi);
3811
3812 if (mark_landing_pads)
3813 {
3814 int lp_nr = lookup_stmt_eh_lp (stmt);
3815
3816 /* Negative LP numbers are MUST_NOT_THROW regions which
3817 are not considered BB enders. */
3818 if (lp_nr < 0)
3819 bitmap_set_bit (r_reachable, -lp_nr);
3820
3821 /* Positive LP numbers are real landing pads, and BB enders. */
3822 else if (lp_nr > 0)
3823 {
3824 gcc_assert (gsi_one_before_end_p (gsi));
3825 eh_region region = get_eh_region_from_lp_number (lp_nr);
3826 bitmap_set_bit (r_reachable, region->index);
3827 bitmap_set_bit (lp_reachable, lp_nr);
3828 }
3829 }
3830
3831 /* Avoid removing regions referenced from RESX/EH_DISPATCH. */
3832 switch (gimple_code (stmt))
3833 {
3834 case GIMPLE_RESX:
3835 bitmap_set_bit (r_reachable,
3836 gimple_resx_region (as_a <gresx *> (stmt)));
3837 break;
3838 case GIMPLE_EH_DISPATCH:
3839 bitmap_set_bit (r_reachable,
3840 gimple_eh_dispatch_region (
3841 as_a <geh_dispatch *> (stmt)));
3842 break;
3843 case GIMPLE_CALL:
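/* Both arguments of __builtin_eh_copy_values are EH region
   numbers, so keep both regions live.  */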
3844 if (gimple_call_builtin_p (stmt, BUILT_IN_EH_COPY_VALUES))
3845 for (int i = 0; i < 2; ++i)
3846 {
3847 tree rt = gimple_call_arg (stmt, i);
3848 HOST_WIDE_INT ri = tree_to_shwi (rt);
3849
3850 gcc_assert (ri == (int)ri);
3851 bitmap_set_bit (r_reachable, ri);
3852 }
3853 break;
3854 default:
3855 break;
3856 }
3857 }
3858 }
3859 }
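/* A minimal usage sketch, mirroring remove_unreachable_handlers below
   (REGION here stands for any eh_region the caller is inspecting):

     sbitmap r_reachable, lp_reachable;
     mark_reachable_handlers (&r_reachable, &lp_reachable);
     if (bitmap_bit_p (r_reachable, region->index))
       ... region is still referenced ...;
     sbitmap_free (r_reachable);
     sbitmap_free (lp_reachable);  */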
3860
3861 /* Remove unreachable handlers and unreachable landing pads. */
3862
3863 static void
3864 remove_unreachable_handlers (void)
3865 {
3866 sbitmap r_reachable, lp_reachable;
3867 eh_region region;
3868 eh_landing_pad lp;
3869 unsigned i;
3870
3871 mark_reachable_handlers (&r_reachable, &lp_reachable);
3872
3873 if (dump_file)
3874 {
3875 fprintf (dump_file, "Before removal of unreachable regions:\n");
3876 dump_eh_tree (dump_file, cfun);
3877 fprintf (dump_file, "Reachable regions: ");
3878 dump_bitmap_file (dump_file, r_reachable);
3879 fprintf (dump_file, "Reachable landing pads: ");
3880 dump_bitmap_file (dump_file, lp_reachable);
3881 }
3882
3883 if (dump_file)
3884 {
3885 FOR_EACH_VEC_SAFE_ELT (cfun->eh->region_array, i, region)
3886 if (region && !bitmap_bit_p (r_reachable, region->index))
3887 fprintf (dump_file,
3888 "Removing unreachable region %d\n",
3889 region->index);
3890 }
3891
3892 remove_unreachable_eh_regions (r_reachable);
3893
3894 FOR_EACH_VEC_SAFE_ELT (cfun->eh->lp_array, i, lp)
3895 if (lp && !bitmap_bit_p (lp_reachable, lp->index))
3896 {
3897 if (dump_file)
3898 fprintf (dump_file,
3899 "Removing unreachable landing pad %d\n",
3900 lp->index);
3901 remove_eh_landing_pad (lp);
3902 }
3903
3904 if (dump_file)
3905 {
3906 fprintf (dump_file, "\n\nAfter removal of unreachable regions:\n");
3907 dump_eh_tree (dump_file, cfun);
3908 fprintf (dump_file, "\n\n");
3909 }
3910
3911 sbitmap_free (r_reachable);
3912 sbitmap_free (lp_reachable);
3913
3914 #ifdef ENABLE_CHECKING
3915 verify_eh_tree (cfun);
3916 #endif
3917 }
3918
3919 /* Remove unreachable handlers if any landing pads have been removed after
3920 the last ehcleanup pass (due to gimple_purge_dead_eh_edges). */
3921
3922 void
3923 maybe_remove_unreachable_handlers (void)
3924 {
3925 eh_landing_pad lp;
3926 unsigned i;
3927
3928 if (cfun->eh == NULL)
3929 return;
3930
3931 FOR_EACH_VEC_SAFE_ELT (cfun->eh->lp_array, i, lp)
3932 if (lp && lp->post_landing_pad)
3933 {
3934 if (label_to_block (lp->post_landing_pad) == NULL)
3935 {
3936 remove_unreachable_handlers ();
3937 return;
3938 }
3939 }
3940 }
3941
3942 /* Remove regions that do not have landing pads. This assumes
3943 that remove_unreachable_handlers has already been run, and
3944 that we've just manipulated the landing pads since then.
3945
3946 Preserve regions with landing pads and regions that prevent
3947 exceptions from propagating further, even if these regions
3948 are not reachable. */
3949
3950 static void
3951 remove_unreachable_handlers_no_lp (void)
3952 {
3953 eh_region region;
3954 sbitmap r_reachable;
3955 unsigned i;
3956
3957 mark_reachable_handlers (&r_reachable, /*lp_reachablep=*/NULL);
3958
3959 FOR_EACH_VEC_SAFE_ELT (cfun->eh->region_array, i, region)
3960 {
3961 if (! region)
3962 continue;
3963
3964 if (region->landing_pads != NULL
3965 || region->type == ERT_MUST_NOT_THROW)
3966 bitmap_set_bit (r_reachable, region->index);
3967
3968 if (dump_file
3969 && !bitmap_bit_p (r_reachable, region->index))
3970 fprintf (dump_file,
3971 "Removing unreachable region %d\n",
3972 region->index);
3973 }
3974
3975 remove_unreachable_eh_regions (r_reachable);
3976
3977 sbitmap_free (r_reachable);
3978 }
3979
3980 /* Undo critical edge splitting on an EH landing pad. Earlier, we
3981 optimistically split all sorts of edges, including EH edges. The
3982 optimization passes in between may not have needed them; if not,
3983 we should undo the split.
3984
3985 Recognize this case by having one EH edge incoming to the BB and
3986 one normal edge outgoing; BB should be empty apart from the
3987 post_landing_pad label.
3988
3989 Note that this is slightly different from the empty handler case
3990 handled by cleanup_empty_eh, in that the actual handler may yet
3991 have actual code but the landing pad has been separated from the
3992 handler. As such, cleanup_empty_eh relies on this transformation
3993 having been done first. */
3994
3995 static bool
3996 unsplit_eh (eh_landing_pad lp)
3997 {
3998 basic_block bb = label_to_block (lp->post_landing_pad);
3999 gimple_stmt_iterator gsi;
4000 edge e_in, e_out;
4001
4002 /* Quickly check the edge counts on BB for singularity. */
4003 if (!single_pred_p (bb) || !single_succ_p (bb))
4004 return false;
4005 e_in = single_pred_edge (bb);
4006 e_out = single_succ_edge (bb);
4007
4008 /* Input edge must be EH and output edge must be normal. */
4009 if ((e_in->flags & EDGE_EH) == 0 || (e_out->flags & EDGE_EH) != 0)
4010 return false;
4011
4012 /* The block must be empty except for the labels and debug insns. */
4013 gsi = gsi_after_labels (bb);
4014 if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
4015 gsi_next_nondebug (&gsi);
4016 if (!gsi_end_p (gsi))
4017 return false;
4018
4019 /* The destination block must not already have a landing pad
4020 for a different region. */
4021 for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
4022 {
4023 glabel *label_stmt = dyn_cast <glabel *> (gsi_stmt (gsi));
4024 tree lab;
4025 int lp_nr;
4026
4027 if (!label_stmt)
4028 break;
4029 lab = gimple_label_label (label_stmt);
4030 lp_nr = EH_LANDING_PAD_NR (lab);
4031 if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region)
4032 return false;
4033 }
4034
4035 /* The new destination block must not already be a destination of
4036 the source block, lest we merge fallthru and EH edges and get
4037 all sorts of confusion. */
4038 if (find_edge (e_in->src, e_out->dest))
4039 return false;
4040
4041 /* ??? We can get degenerate phis due to cfg cleanups. I would have
4042 thought this should have been cleaned up by a phicprop pass, but
4043 that doesn't appear to handle virtuals. Propagate by hand. */
4044 if (!gimple_seq_empty_p (phi_nodes (bb)))
4045 {
4046 for (gphi_iterator gpi = gsi_start_phis (bb); !gsi_end_p (gpi); )
4047 {
4048 gimple use_stmt;
4049 gphi *phi = gpi.phi ();
4050 tree lhs = gimple_phi_result (phi);
4051 tree rhs = gimple_phi_arg_def (phi, 0);
4052 use_operand_p use_p;
4053 imm_use_iterator iter;
4054
4055 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
4056 {
4057 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
4058 SET_USE (use_p, rhs);
4059 }
4060
4061 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
4062 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1;
4063
4064 remove_phi_node (&gpi, true);
4065 }
4066 }
4067
4068 if (dump_file && (dump_flags & TDF_DETAILS))
4069 fprintf (dump_file, "Unsplit EH landing pad %d to block %i.\n",
4070 lp->index, e_out->dest->index);
4071
4072 /* Redirect the edge. Since redirect_eh_edge_1 expects to be moving
4073 a successor edge, humor it. But do the real CFG change with the
4074 predecessor of E_OUT in order to preserve the ordering of arguments
4075 to the PHI nodes in E_OUT->DEST. */
4076 redirect_eh_edge_1 (e_in, e_out->dest, false);
4077 redirect_edge_pred (e_out, e_in->src);
4078 e_out->flags = e_in->flags;
4079 e_out->probability = e_in->probability;
4080 e_out->count = e_in->count;
4081 remove_edge (e_in);
4082
4083 return true;
4084 }
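/* Schematically, unsplit_eh turns

     throwing BB --EH--> [post_landing_pad label only] --fallthru--> DEST

   back into

     throwing BB --EH--> DEST

   leaving the now edgeless landing-pad block to be removed as
   unreachable later.  */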
4085
4086 /* Examine each landing pad block and see if it matches unsplit_eh. */
4087
4088 static bool
4089 unsplit_all_eh (void)
4090 {
4091 bool changed = false;
4092 eh_landing_pad lp;
4093 int i;
4094
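/* Index 0 of lp_array is unused; landing pads are numbered from 1.  */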
4095 for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
4096 if (lp)
4097 changed |= unsplit_eh (lp);
4098
4099 return changed;
4100 }
4101
4102 /* A subroutine of cleanup_empty_eh. Redirect all EH edges incoming
4103 to OLD_BB to NEW_BB; return true on success, false on failure.
4104
4105 OLD_BB_OUT is the edge into NEW_BB from OLD_BB, so if we miss any
4106 PHI variables from OLD_BB we can pick them up from OLD_BB_OUT.
4107 Virtual PHIs may be deleted and marked for renaming. */
4108
4109 static bool
4110 cleanup_empty_eh_merge_phis (basic_block new_bb, basic_block old_bb,
4111 edge old_bb_out, bool change_region)
4112 {
4113 gphi_iterator ngsi, ogsi;
4114 edge_iterator ei;
4115 edge e;
4116 bitmap ophi_handled;
4117
4118 /* The destination block must not be a regular successor for any
4119 of the preds of the landing pad. Thus, avoid turning
4120 <..>
4121 | \ EH
4122 | <..>
4123 | /
4124 <..>
4125 into
4126 <..>
4127 | | EH
4128 <..>
4129 which CFG verification would choke on. See PR45172 and PR51089. */
4130 FOR_EACH_EDGE (e, ei, old_bb->preds)
4131 if (find_edge (e->src, new_bb))
4132 return false;
4133
4134 FOR_EACH_EDGE (e, ei, old_bb->preds)
4135 redirect_edge_var_map_clear (e);
4136
4137 ophi_handled = BITMAP_ALLOC (NULL);
4138
4139 /* First, iterate through the PHIs on NEW_BB and set up the edge_var_map
4140 for the edges we're going to move. */
4141 for (ngsi = gsi_start_phis (new_bb); !gsi_end_p (ngsi); gsi_next (&ngsi))
4142 {
4143 gphi *ophi, *nphi = ngsi.phi ();
4144 tree nresult, nop;
4145
4146 nresult = gimple_phi_result (nphi);
4147 nop = gimple_phi_arg_def (nphi, old_bb_out->dest_idx);
4148
4149 /* Find the corresponding PHI in OLD_BB so we can forward-propagate
4150 the source ssa_name. */
4151 ophi = NULL;
4152 for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi))
4153 {
4154 ophi = ogsi.phi ();
4155 if (gimple_phi_result (ophi) == nop)
4156 break;
4157 ophi = NULL;
4158 }
4159
4160 /* If we did find the corresponding PHI, copy those inputs. */
4161 if (ophi)
4162 {
4163 /* If NOP is used somewhere else beyond phis in new_bb, give up. */
4164 if (!has_single_use (nop))
4165 {
4166 imm_use_iterator imm_iter;
4167 use_operand_p use_p;
4168
4169 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, nop)
4170 {
4171 if (!gimple_debug_bind_p (USE_STMT (use_p))
4172 && (gimple_code (USE_STMT (use_p)) != GIMPLE_PHI
4173 || gimple_bb (USE_STMT (use_p)) != new_bb))
4174 goto fail;
4175 }
4176 }
4177 bitmap_set_bit (ophi_handled, SSA_NAME_VERSION (nop));
4178 FOR_EACH_EDGE (e, ei, old_bb->preds)
4179 {
4180 location_t oloc;
4181 tree oop;
4182
4183 if ((e->flags & EDGE_EH) == 0)
4184 continue;
4185 oop = gimple_phi_arg_def (ophi, e->dest_idx);
4186 oloc = gimple_phi_arg_location (ophi, e->dest_idx);
4187 redirect_edge_var_map_add (e, nresult, oop, oloc);
4188 }
4189 }
4190 /* If we didn't find the PHI, then whether it's a real variable or
4191 a VOP, we know from the fact that OLD_BB is tree_empty_eh_handler_p
4192 that the variable is unchanged from input to the block and we can
4193 simply re-use the input to NEW_BB from the OLD_BB_OUT edge. */
4194 else
4195 {
4196 location_t nloc
4197 = gimple_phi_arg_location (nphi, old_bb_out->dest_idx);
4198 FOR_EACH_EDGE (e, ei, old_bb->preds)
4199 redirect_edge_var_map_add (e, nresult, nop, nloc);
4200 }
4201 }
4202
4203 /* Second, verify that all PHIs from OLD_BB have been handled. If not,
4204 we don't know what values from the other edges into NEW_BB to use. */
4205 for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi))
4206 {
4207 gphi *ophi = ogsi.phi ();
4208 tree oresult = gimple_phi_result (ophi);
4209 if (!bitmap_bit_p (ophi_handled, SSA_NAME_VERSION (oresult)))
4210 goto fail;
4211 }
4212
4213 /* Finally, move the edges and update the PHIs. */
4214 for (ei = ei_start (old_bb->preds); (e = ei_safe_edge (ei)); )
4215 if (e->flags & EDGE_EH)
4216 {
4217 /* ??? CFG manipulation routines do not try to update loop
4218 form on edge redirection. Do so manually here for now. */
4219 /* If we redirect a loop entry or latch edge, that will either create
4220 a multiple-entry loop or rotate the loop. If the loops merge,
4221 we may have created a loop with multiple latches.
4222 None of this is easily fixed, so cancel the affected loop
4223 and mark the other loop as possibly having multiple latches. */
4224 if (e->dest == e->dest->loop_father->header)
4225 {
4226 mark_loop_for_removal (e->dest->loop_father);
4227 new_bb->loop_father->latch = NULL;
4228 loops_state_set (LOOPS_MAY_HAVE_MULTIPLE_LATCHES);
4229 }
4230 redirect_eh_edge_1 (e, new_bb, change_region);
4231 redirect_edge_succ (e, new_bb);
4232 flush_pending_stmts (e);
4233 }
4234 else
4235 ei_next (&ei);
4236
4237 BITMAP_FREE (ophi_handled);
4238 return true;
4239
4240 fail:
4241 FOR_EACH_EDGE (e, ei, old_bb->preds)
4242 redirect_edge_var_map_clear (e);
4243 BITMAP_FREE (ophi_handled);
4244 return false;
4245 }
4246
4247 /* A subroutine of cleanup_empty_eh. Move a landing pad LP from its
4248 old region to NEW_REGION at BB. */
4249
4250 static void
4251 cleanup_empty_eh_move_lp (basic_block bb, edge e_out,
4252 eh_landing_pad lp, eh_region new_region)
4253 {
4254 gimple_stmt_iterator gsi;
4255 eh_landing_pad *pp;
4256
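/* Unlink LP from its old region's singly-linked list of landing pads,
   walking with a pointer-to-pointer so the list head needs no special
   case.  */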
4257 for (pp = &lp->region->landing_pads; *pp != lp; pp = &(*pp)->next_lp)
4258 continue;
4259 *pp = lp->next_lp;
4260
4261 lp->region = new_region;
4262 lp->next_lp = new_region->landing_pads;
4263 new_region->landing_pads = lp;
4264
4265 /* Delete the RESX that was matched within the empty handler block. */
4266 gsi = gsi_last_bb (bb);
4267 unlink_stmt_vdef (gsi_stmt (gsi));
4268 gsi_remove (&gsi, true);
4269
4270 /* Clean up E_OUT for the fallthru. */
4271 e_out->flags = (e_out->flags & ~EDGE_EH) | EDGE_FALLTHRU;
4272 e_out->probability = REG_BR_PROB_BASE;
4273 }
4274
4275 /* A subroutine of cleanup_empty_eh. Handle more complex cases of
4276 unsplitting than unsplit_eh was prepared to handle, e.g. when
4277 multiple incoming edges and phis are involved. */
4278
4279 static bool
4280 cleanup_empty_eh_unsplit (basic_block bb, edge e_out, eh_landing_pad lp)
4281 {
4282 gimple_stmt_iterator gsi;
4283 tree lab;
4284
4285 /* We really ought not to have totally lost everything following
4286 a landing pad label. Given that BB is empty, there had better
4287 be a successor. */
4288 gcc_assert (e_out != NULL);
4289
4290 /* The destination block must not already have a landing pad
4291 for a different region. */
4292 lab = NULL;
4293 for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
4294 {
4295 glabel *stmt = dyn_cast <glabel *> (gsi_stmt (gsi));
4296 int lp_nr;
4297
4298 if (!stmt)
4299 break;
4300 lab = gimple_label_label (stmt);
4301 lp_nr = EH_LANDING_PAD_NR (lab);
4302 if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region)
4303 return false;
4304 }
4305
4306 /* Attempt to move the PHIs into the successor block. */
4307 if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, false))
4308 {
4309 if (dump_file && (dump_flags & TDF_DETAILS))
4310 fprintf (dump_file,
4311 "Unsplit EH landing pad %d to block %i "
4312 "(via cleanup_empty_eh).\n",
4313 lp->index, e_out->dest->index);
4314 return true;
4315 }
4316
4317 return false;
4318 }
4319
4320 /* Return true if edge E_FIRST is part of an empty infinite loop
4321 or leads to such a loop through a series of single successor
4322 empty bbs. */
4323
4324 static bool
4325 infinite_empty_loop_p (edge e_first)
4326 {
4327 bool inf_loop = false;
4328 edge e;
4329
4330 if (e_first->dest == e_first->src)
4331 return true;
4332
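/* Follow the chain of single-successor empty blocks, marking visited
   blocks through their aux field; reaching an already-marked block
   means the chain closed into a cycle.  */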
4333 e_first->src->aux = (void *) 1;
4334 for (e = e_first; single_succ_p (e->dest); e = single_succ_edge (e->dest))
4335 {
4336 gimple_stmt_iterator gsi;
4337 if (e->dest->aux)
4338 {
4339 inf_loop = true;
4340 break;
4341 }
4342 e->dest->aux = (void *) 1;
4343 gsi = gsi_after_labels (e->dest);
4344 if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
4345 gsi_next_nondebug (&gsi);
4346 if (!gsi_end_p (gsi))
4347 break;
4348 }
4349 e_first->src->aux = NULL;
4350 for (e = e_first; e->dest->aux; e = single_succ_edge (e->dest))
4351 e->dest->aux = NULL;
4352
4353 return inf_loop;
4354 }
4355
4356 /* Examine the block associated with LP to determine if it's an empty
4357 handler for its EH region. If so, attempt to redirect EH edges to
4358 an outer region. Return true if the CFG was updated in any way. This
4359 is similar to jump forwarding, just across EH edges. */
4360
4361 static bool
4362 cleanup_empty_eh (eh_landing_pad lp)
4363 {
4364 basic_block bb = label_to_block (lp->post_landing_pad);
4365 gimple_stmt_iterator gsi;
4366 gimple resx;
4367 eh_region new_region;
4368 edge_iterator ei;
4369 edge e, e_out;
4370 bool has_non_eh_pred;
4371 bool ret = false;
4372 int new_lp_nr;
4373
4374 /* There can be zero or one edges out of BB. This is the quickest test. */
4375 switch (EDGE_COUNT (bb->succs))
4376 {
4377 case 0:
4378 e_out = NULL;
4379 break;
4380 case 1:
4381 e_out = single_succ_edge (bb);
4382 break;
4383 default:
4384 return false;
4385 }
4386
4387 resx = last_stmt (bb);
4388 if (resx && is_gimple_resx (resx))
4389 {
4390 if (stmt_can_throw_external (resx))
4391 optimize_clobbers (bb);
4392 else if (sink_clobbers (bb))
4393 ret = true;
4394 }
4395
4396 gsi = gsi_after_labels (bb);
4397
4398 /* Make sure to skip debug statements. */
4399 if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
4400 gsi_next_nondebug (&gsi);
4401
4402 /* If the block is totally empty, look for more unsplitting cases. */
4403 if (gsi_end_p (gsi))
4404 {
4405 /* Bail out for the degenerate case of an infinite loop. Also bail
4406 out if BB has no successors and is totally empty, which can happen
4407 e.g. because of an incorrect noreturn attribute. */
4408 if (e_out == NULL
4409 || infinite_empty_loop_p (e_out))
4410 return ret;
4411
4412 return ret | cleanup_empty_eh_unsplit (bb, e_out, lp);
4413 }
4414
4415 /* The block should consist only of a single RESX statement, modulo a
4416 preceding call to __builtin_stack_restore if there is no outgoing
4417 edge, since the call can be eliminated in this case. */
4418 resx = gsi_stmt (gsi);
4419 if (!e_out && gimple_call_builtin_p (resx, BUILT_IN_STACK_RESTORE))
4420 {
4421 gsi_next (&gsi);
4422 resx = gsi_stmt (gsi);
4423 }
4424 if (!is_gimple_resx (resx))
4425 return ret;
4426 gcc_assert (gsi_one_before_end_p (gsi));
4427
4428 /* Determine if there are non-EH edges, or resx edges into the handler. */
4429 has_non_eh_pred = false;
4430 FOR_EACH_EDGE (e, ei, bb->preds)
4431 if (!(e->flags & EDGE_EH))
4432 has_non_eh_pred = true;
4433
4434 /* Find the handler outer to the empty handler by looking at
4435 where the RESX instruction was vectored. */
4436 new_lp_nr = lookup_stmt_eh_lp (resx);
4437 new_region = get_eh_region_from_lp_number (new_lp_nr);
4438
4439 /* If there's no destination region within the current function,
4440 redirection is trivial via removing the throwing statements from
4441 the EH region, removing the EH edges, and allowing the block
4442 to go unreachable. */
4443 if (new_region == NULL)
4444 {
4445 gcc_assert (e_out == NULL);
4446 for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
4447 if (e->flags & EDGE_EH)
4448 {
4449 gimple stmt = last_stmt (e->src);
4450 remove_stmt_from_eh_lp (stmt);
4451 remove_edge (e);
4452 }
4453 else
4454 ei_next (&ei);
4455 goto succeed;
4456 }
4457
4458 /* If the destination region is a MUST_NOT_THROW, allow the runtime
4459 to handle the abort and allow the blocks to go unreachable. */
4460 if (new_region->type == ERT_MUST_NOT_THROW)
4461 {
4462 for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
4463 if (e->flags & EDGE_EH)
4464 {
4465 gimple stmt = last_stmt (e->src);
4466 remove_stmt_from_eh_lp (stmt);
4467 add_stmt_to_eh_lp (stmt, new_lp_nr);
4468 remove_edge (e);
4469 }
4470 else
4471 ei_next (&ei);
4472 goto succeed;
4473 }
4474
4475 /* Try to redirect the EH edges and merge the PHIs into the destination
4476 landing pad block. If the merge succeeds, we'll already have redirected
4477 all the EH edges. The handler itself will go unreachable if there were
4478 no normal edges. */
4479 if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, true))
4480 goto succeed;
4481
4482 /* Finally, if all input edges are EH edges, then we can (potentially)
4483 reduce the number of transfers from the runtime by moving the landing
4484 pad from the original region to the new region. This is a win when
4485 we remove the last CLEANUP region along a particular exception
4486 propagation path. Since nothing changes except for the region with
4487 which the landing pad is associated, the PHI nodes do not need to be
4488 adjusted at all. */
4489 if (!has_non_eh_pred)
4490 {
4491 cleanup_empty_eh_move_lp (bb, e_out, lp, new_region);
4492 if (dump_file && (dump_flags & TDF_DETAILS))
4493 fprintf (dump_file, "Empty EH handler %i moved to EH region %i.\n",
4494 lp->index, new_region->index);
4495
4496 /* ??? The CFG didn't change, but we may have rendered the
4497 old EH region unreachable. Trigger a cleanup there. */
4498 return true;
4499 }
4500
4501 return ret;
4502
4503 succeed:
4504 if (dump_file && (dump_flags & TDF_DETAILS))
4505 fprintf (dump_file, "Empty EH handler %i removed.\n", lp->index);
4506 remove_eh_landing_pad (lp);
4507 return true;
4508 }
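/* As an illustration (a hypothetical source fragment, not a testcase
   from the tree): in

     struct S { ~S () { } };
     void f () { S s; foo (); }

   the cleanup region for S's trivial destructor ends up as a landing
   pad that does nothing but RESX to the outer region; cleanup_empty_eh
   redirects foo's EH edge past it, or, when all predecessors are EH
   edges, simply moves the landing pad to the outer region.  */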
4509
4510 /* Walk over all landing pads. Examine each post_landing_pad block
4511 and see if we can eliminate it as empty. */
4512
4513 static bool
4514 cleanup_all_empty_eh (void)
4515 {
4516 bool changed = false;
4517 eh_landing_pad lp;
4518 int i;
4519
4520 for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
4521 if (lp)
4522 changed |= cleanup_empty_eh (lp);
4523
4524 return changed;
4525 }
4526
4527 /* Perform cleanups and lowering of exception handling:
4528 1) Cleanup regions with handlers doing nothing are optimized out.
4529 2) MUST_NOT_THROW regions that became dead because of 1) are optimized out.
4530 3) Info about regions containing instructions, and about regions
4531 reachable via local EH edges, is collected.
4532 4) The EH tree is pruned of regions that are no longer necessary.
4533
4534 TODO: Push MUST_NOT_THROW regions to the root of the EH tree.
4535 Unify those that have the same failure decl and locus. */
4537
4538 static unsigned int
4539 execute_cleanup_eh_1 (void)
4540 {
4541 /* Do this first: unsplit_all_eh and cleanup_all_empty_eh can die
4542 looking up unreachable landing pads. */
4543 remove_unreachable_handlers ();
4544
4545 /* Watch out for the region tree vanishing because all of it was unreachable. */
4546 if (cfun->eh->region_tree)
4547 {
4548 bool changed = false;
4549
4550 if (optimize)
4551 changed |= unsplit_all_eh ();
4552 changed |= cleanup_all_empty_eh ();
4553
4554 if (changed)
4555 {
4556 free_dominance_info (CDI_DOMINATORS);
4557 free_dominance_info (CDI_POST_DOMINATORS);
4558
4559 /* We delayed all basic block deletion, as we may have performed
4560 cleanups on EH edges while non-EH edges were still present. */
4561 delete_unreachable_blocks ();
4562
4563 /* We manipulated the landing pads. Remove any region that no
4564 longer has a landing pad. */
4565 remove_unreachable_handlers_no_lp ();
4566
4567 return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
4568 }
4569 }
4570
4571 return 0;
4572 }
4573
4574 namespace {
4575
4576 const pass_data pass_data_cleanup_eh =
4577 {
4578 GIMPLE_PASS, /* type */
4579 "ehcleanup", /* name */
4580 OPTGROUP_NONE, /* optinfo_flags */
4581 TV_TREE_EH, /* tv_id */
4582 PROP_gimple_lcf, /* properties_required */
4583 0, /* properties_provided */
4584 0, /* properties_destroyed */
4585 0, /* todo_flags_start */
4586 0, /* todo_flags_finish */
4587 };
4588
4589 class pass_cleanup_eh : public gimple_opt_pass
4590 {
4591 public:
4592 pass_cleanup_eh (gcc::context *ctxt)
4593 : gimple_opt_pass (pass_data_cleanup_eh, ctxt)
4594 {}
4595
4596 /* opt_pass methods: */
4597 opt_pass * clone () { return new pass_cleanup_eh (m_ctxt); }
4598 virtual bool gate (function *fun)
4599 {
4600 return fun->eh != NULL && fun->eh->region_tree != NULL;
4601 }
4602
4603 virtual unsigned int execute (function *);
4604
4605 }; // class pass_cleanup_eh
4606
4607 unsigned int
4608 pass_cleanup_eh::execute (function *fun)
4609 {
4610 unsigned int ret = execute_cleanup_eh_1 ();
4611
4612 /* If the function no longer needs an EH personality routine,
4613 clear it. This exposes cross-language inlining opportunities
4614 and avoids references to a never defined personality routine. */
4615 if (DECL_FUNCTION_PERSONALITY (current_function_decl)
4616 && function_needs_eh_personality (fun) != eh_personality_lang)
4617 DECL_FUNCTION_PERSONALITY (current_function_decl) = NULL_TREE;
4618
4619 return ret;
4620 }
4621
4622 } // anon namespace
4623
4624 gimple_opt_pass *
4625 make_pass_cleanup_eh (gcc::context *ctxt)
4626 {
4627 return new pass_cleanup_eh (ctxt);
4628 }
4629 \f
4630 /* Verify that BB, which contains STMT as its last statement, has
4631 precisely the edges that make_eh_edges would create. */
4632
4633 DEBUG_FUNCTION bool
4634 verify_eh_edges (gimple stmt)
4635 {
4636 basic_block bb = gimple_bb (stmt);
4637 eh_landing_pad lp = NULL;
4638 int lp_nr;
4639 edge_iterator ei;
4640 edge e, eh_edge;
4641
4642 lp_nr = lookup_stmt_eh_lp (stmt);
4643 if (lp_nr > 0)
4644 lp = get_eh_landing_pad_from_number (lp_nr);
4645
4646 eh_edge = NULL;
4647 FOR_EACH_EDGE (e, ei, bb->succs)
4648 {
4649 if (e->flags & EDGE_EH)
4650 {
4651 if (eh_edge)
4652 {
4653 error ("BB %i has multiple EH edges", bb->index);
4654 return true;
4655 }
4656 else
4657 eh_edge = e;
4658 }
4659 }
4660
4661 if (lp == NULL)
4662 {
4663 if (eh_edge)
4664 {
4665 error ("BB %i cannot throw but has an EH edge", bb->index);
4666 return true;
4667 }
4668 return false;
4669 }
4670
4671 if (!stmt_could_throw_p (stmt))
4672 {
4673 error ("BB %i last statement has an incorrectly set landing pad", bb->index);
4674 return true;
4675 }
4676
4677 if (eh_edge == NULL)
4678 {
4679 error ("BB %i is missing an EH edge", bb->index);
4680 return true;
4681 }
4682
4683 if (eh_edge->dest != label_to_block (lp->post_landing_pad))
4684 {
4685 error ("Incorrect EH edge %i->%i", bb->index, eh_edge->dest->index);
4686 return true;
4687 }
4688
4689 return false;
4690 }
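/* Being a DEBUG_FUNCTION, this is also meant to be callable by hand
   from a debugger, e.g. (a sketch):

     (gdb) call verify_eh_edges (last_stmt (bb))

   which returns true when a problem was diagnosed.  */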
4691
4692 /* Similarly, but handle GIMPLE_EH_DISPATCH specifically. */
4693
4694 DEBUG_FUNCTION bool
4695 verify_eh_dispatch_edge (geh_dispatch *stmt)
4696 {
4697 eh_region r;
4698 eh_catch c;
4699 basic_block src, dst;
4700 bool want_fallthru = true;
4701 edge_iterator ei;
4702 edge e, fall_edge;
4703
4704 r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
4705 src = gimple_bb (stmt);
4706
4707 FOR_EACH_EDGE (e, ei, src->succs)
4708 gcc_assert (e->aux == NULL);
4709
4710 switch (r->type)
4711 {
4712 case ERT_TRY:
4713 for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
4714 {
4715 dst = label_to_block (c->label);
4716 e = find_edge (src, dst);
4717 if (e == NULL)
4718 {
4719 error ("BB %i is missing an edge", src->index);
4720 return true;
4721 }
4722 e->aux = (void *)e;
4723
4724 /* A catch-all handler doesn't have a fallthru. */
4725 if (c->type_list == NULL)
4726 {
4727 want_fallthru = false;
4728 break;
4729 }
4730 }
4731 break;
4732
4733 case ERT_ALLOWED_EXCEPTIONS:
4734 dst = label_to_block (r->u.allowed.label);
4735 e = find_edge (src, dst);
4736 if (e == NULL)
4737 {
4738 error ("BB %i is missing an edge", src->index);
4739 return true;
4740 }
4741 e->aux = (void *)e;
4742 break;
4743
4744 default:
4745 gcc_unreachable ();
4746 }
4747
4748 fall_edge = NULL;
4749 FOR_EACH_EDGE (e, ei, src->succs)
4750 {
4751 if (e->flags & EDGE_FALLTHRU)
4752 {
4753 if (fall_edge != NULL)
4754 {
4755 error ("BB %i has too many fallthru edges", src->index);
4756 return true;
4757 }
4758 fall_edge = e;
4759 }
4760 else if (e->aux)
4761 e->aux = NULL;
4762 else
4763 {
4764 error ("BB %i has incorrect edge", src->index);
4765 return true;
4766 }
4767 }
4768 if ((fall_edge != NULL) ^ want_fallthru)
4769 {
4770 error ("BB %i has incorrect fallthru edge", src->index);
4771 return true;
4772 }
4773
4774 return false;
4775 }
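/* For example (a sketch): given

     try { ... }
     catch (A) { ... }
     catch (...) { ... }

   the GIMPLE_EH_DISPATCH block must have exactly one edge to each
   catch label and, because the trailing catch-all absorbs every
   exception, no fallthru edge.  */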