1 /* Exception handling semantics and decomposition for trees.
2 Copyright (C) 2003-2014 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
9 any later version.
10
11 GCC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "hash-table.h"
24 #include "tm.h"
25 #include "tree.h"
26 #include "expr.h"
27 #include "calls.h"
28 #include "flags.h"
29 #include "function.h"
30 #include "except.h"
31 #include "pointer-set.h"
32 #include "basic-block.h"
33 #include "tree-ssa-alias.h"
34 #include "internal-fn.h"
35 #include "tree-eh.h"
36 #include "gimple-expr.h"
37 #include "is-a.h"
38 #include "gimple.h"
39 #include "gimple-iterator.h"
40 #include "gimple-ssa.h"
41 #include "cgraph.h"
42 #include "tree-cfg.h"
43 #include "tree-phinodes.h"
44 #include "ssa-iterators.h"
45 #include "stringpool.h"
46 #include "tree-ssanames.h"
47 #include "tree-into-ssa.h"
48 #include "tree-ssa.h"
49 #include "tree-inline.h"
50 #include "tree-pass.h"
51 #include "langhooks.h"
52 #include "diagnostic-core.h"
53 #include "target.h"
54 #include "cfgloop.h"
55 #include "gimple-low.h"
56
57 /* In some instances a tree and a gimple need to be stored in the same table,
58    i.e. in hash tables.  This union serves that purpose. */
59 typedef union {tree *tp; tree t; gimple g;} treemple;
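/* A minimal usage sketch, illustrative only (some_label_decl and
   some_try_stmt are hypothetical names, not part of this file):

     treemple key;
     key.t = some_label_decl;    -- store a LABEL_DECL, i.e. a tree
     key.g = some_try_stmt;      -- or store a GIMPLE_TRY, i.e. a gimple

   Either member occupies the same storage, so one hash table key type
   covers both kinds of node. */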
60
61 /* Misc functions used in this file. */
62
63 /* Remember and lookup EH landing pad data for arbitrary statements.
64 Really this means any statement that could_throw_p. We could
65 stuff this information into the stmt_ann data structure, but:
66
67 (1) We absolutely rely on this information being kept until
68 we get to rtl. Once we're done with lowering here, if we lose
69 the information there's no way to recover it!
70
71 (2) There are many more statements that *cannot* throw as
72 compared to those that can. We should be saving some amount
73 of space by only allocating memory for those that can throw. */
74
75 /* Add statement T in function IFUN to landing pad NUM. */
76
77 static void
78 add_stmt_to_eh_lp_fn (struct function *ifun, gimple t, int num)
79 {
80 struct throw_stmt_node *n;
81 void **slot;
82
83 gcc_assert (num != 0);
84
85 n = ggc_alloc<throw_stmt_node> ();
86 n->stmt = t;
87 n->lp_nr = num;
88
89 if (!get_eh_throw_stmt_table (ifun))
90 set_eh_throw_stmt_table (ifun, htab_create_ggc (31, struct_ptr_hash,
91 struct_ptr_eq,
92 ggc_free));
93
94 slot = htab_find_slot (get_eh_throw_stmt_table (ifun), n, INSERT);
95 gcc_assert (!*slot);
96 *slot = n;
97 }
98
99 /* Add statement T in the current function (cfun) to EH landing pad NUM. */
100
101 void
102 add_stmt_to_eh_lp (gimple t, int num)
103 {
104 add_stmt_to_eh_lp_fn (cfun, t, num);
105 }
106
107 /* Add statement T to the single EH landing pad in REGION. */
108
109 static void
110 record_stmt_eh_region (eh_region region, gimple t)
111 {
112 if (region == NULL)
113 return;
114 if (region->type == ERT_MUST_NOT_THROW)
115 add_stmt_to_eh_lp_fn (cfun, t, -region->index);
116 else
117 {
118 eh_landing_pad lp = region->landing_pads;
119 if (lp == NULL)
120 lp = gen_eh_landing_pad (region);
121 else
122 gcc_assert (lp->next_lp == NULL);
123 add_stmt_to_eh_lp_fn (cfun, t, lp->index);
124 }
125 }
126
127
128 /* Remove statement T in function IFUN from its EH landing pad. */
129
130 bool
131 remove_stmt_from_eh_lp_fn (struct function *ifun, gimple t)
132 {
133 struct throw_stmt_node dummy;
134 void **slot;
135
136 if (!get_eh_throw_stmt_table (ifun))
137 return false;
138
139 dummy.stmt = t;
140 slot = htab_find_slot (get_eh_throw_stmt_table (ifun), &dummy,
141 NO_INSERT);
142 if (slot)
143 {
144 htab_clear_slot (get_eh_throw_stmt_table (ifun), slot);
145 return true;
146 }
147 else
148 return false;
149 }
150
151
152 /* Remove statement T in the current function (cfun) from its
153 EH landing pad. */
154
155 bool
156 remove_stmt_from_eh_lp (gimple t)
157 {
158 return remove_stmt_from_eh_lp_fn (cfun, t);
159 }
160
161 /* Determine if statement T is inside an EH region in function IFUN.
162 Positive numbers indicate a landing pad index; negative numbers
163 indicate a MUST_NOT_THROW region index; zero indicates that the
164 statement is not recorded in the region table. */
165
166 int
167 lookup_stmt_eh_lp_fn (struct function *ifun, gimple t)
168 {
169 struct throw_stmt_node *p, n;
170
171 if (ifun->eh->throw_stmt_table == NULL)
172 return 0;
173
174 n.stmt = t;
175 p = (struct throw_stmt_node *) htab_find (ifun->eh->throw_stmt_table, &n);
176 return p ? p->lp_nr : 0;
177 }
178
179 /* Likewise, but always use the current function. */
180
181 int
182 lookup_stmt_eh_lp (gimple t)
183 {
184 /* We can get called from initialized data when -fnon-call-exceptions
185 is on; prevent crash. */
186 if (!cfun)
187 return 0;
188 return lookup_stmt_eh_lp_fn (cfun, t);
189 }
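/* A sketch of how the result is typically consumed; illustrative only,
   handle_landing_pad and handle_must_not_throw are hypothetical:

     int lp_nr = lookup_stmt_eh_lp (stmt);
     if (lp_nr > 0)
       handle_landing_pad (lp_nr);       -- stmt may throw to this LP
     else if (lp_nr < 0)
       handle_must_not_throw (-lp_nr);   -- MUST_NOT_THROW region index
     else
       ;                                 -- stmt not in the region table
   */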
190
191 /* First pass of EH node decomposition. Build up a tree of GIMPLE_TRY_FINALLY
192 nodes and LABEL_DECL nodes. We will use this during the second phase to
193 determine if a goto leaves the body of a TRY_FINALLY_EXPR node. */
194
195 struct finally_tree_node
196 {
197   /* When storing a GIMPLE_TRY, we have to record a gimple.  However,
198      when deciding whether a GOTO to a certain LABEL_DECL (which is a
199      tree) leaves the TRY block, it's necessary to record a tree in
200      this field.  Thus a treemple is used. */
201 treemple child;
202 gimple parent;
203 };
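/* For instance, given the hypothetical fragment

     try { goto out; } finally { cleanup (); }
   out:;

   the label OUT is not recorded under any GIMPLE_TRY_FINALLY, so the
   upward walk in outside_finally_tree (below) never reaches the try
   statement and correctly reports that the goto escapes the try body. */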
204
205 /* Hashtable helpers. */
206
207 struct finally_tree_hasher : typed_free_remove <finally_tree_node>
208 {
209 typedef finally_tree_node value_type;
210 typedef finally_tree_node compare_type;
211 static inline hashval_t hash (const value_type *);
212 static inline bool equal (const value_type *, const compare_type *);
213 };
214
215 inline hashval_t
216 finally_tree_hasher::hash (const value_type *v)
217 {
218 return (intptr_t)v->child.t >> 4;
219 }
220
221 inline bool
222 finally_tree_hasher::equal (const value_type *v, const compare_type *c)
223 {
224 return v->child.t == c->child.t;
225 }
226
227 /* Note that this table is *not* marked GTY. It is short-lived. */
228 static hash_table <finally_tree_hasher> finally_tree;
229
230 static void
231 record_in_finally_tree (treemple child, gimple parent)
232 {
233 struct finally_tree_node *n;
234 finally_tree_node **slot;
235
236 n = XNEW (struct finally_tree_node);
237 n->child = child;
238 n->parent = parent;
239
240 slot = finally_tree.find_slot (n, INSERT);
241 gcc_assert (!*slot);
242 *slot = n;
243 }
244
245 static void
246 collect_finally_tree (gimple stmt, gimple region);
247
248 /* Go through the gimple sequence. Works with collect_finally_tree to
249 record all GIMPLE_LABEL and GIMPLE_TRY statements. */
250
251 static void
252 collect_finally_tree_1 (gimple_seq seq, gimple region)
253 {
254 gimple_stmt_iterator gsi;
255
256 for (gsi = gsi_start (seq); !gsi_end_p (gsi); gsi_next (&gsi))
257 collect_finally_tree (gsi_stmt (gsi), region);
258 }
259
260 static void
261 collect_finally_tree (gimple stmt, gimple region)
262 {
263 treemple temp;
264
265 switch (gimple_code (stmt))
266 {
267 case GIMPLE_LABEL:
268 temp.t = gimple_label_label (stmt);
269 record_in_finally_tree (temp, region);
270 break;
271
272 case GIMPLE_TRY:
273 if (gimple_try_kind (stmt) == GIMPLE_TRY_FINALLY)
274 {
275 temp.g = stmt;
276 record_in_finally_tree (temp, region);
277 collect_finally_tree_1 (gimple_try_eval (stmt), stmt);
278 collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
279 }
280 else if (gimple_try_kind (stmt) == GIMPLE_TRY_CATCH)
281 {
282 collect_finally_tree_1 (gimple_try_eval (stmt), region);
283 collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
284 }
285 break;
286
287 case GIMPLE_CATCH:
288 collect_finally_tree_1 (gimple_catch_handler (stmt), region);
289 break;
290
291 case GIMPLE_EH_FILTER:
292 collect_finally_tree_1 (gimple_eh_filter_failure (stmt), region);
293 break;
294
295 case GIMPLE_EH_ELSE:
296 collect_finally_tree_1 (gimple_eh_else_n_body (stmt), region);
297 collect_finally_tree_1 (gimple_eh_else_e_body (stmt), region);
298 break;
299
300 default:
301 /* A type, a decl, or some kind of statement that we're not
302 interested in. Don't walk them. */
303 break;
304 }
305 }
306
307
308 /* Use the finally tree to determine if a jump from START to TARGET
309 would leave the try_finally node that START lives in. */
310
311 static bool
312 outside_finally_tree (treemple start, gimple target)
313 {
314 struct finally_tree_node n, *p;
315
316 do
317 {
318 n.child = start;
319 p = finally_tree.find (&n);
320 if (!p)
321 return true;
322 start.g = p->parent;
323 }
324 while (start.g != target);
325
326 return false;
327 }
328
329 /* Second pass of EH node decomposition. Actually transform the GIMPLE_TRY
330 nodes into a set of gotos, magic labels, and eh regions.
331    The eh region creation is straightforward, but frobbing all the gotos
332 and such into shape isn't. */
333
334 /* The sequence into which we record all EH stuff. This will be
335 placed at the end of the function when we're all done. */
336 static gimple_seq eh_seq;
337
338 /* Record whether an EH region contains something that can throw,
339 indexed by EH region number. */
340 static bitmap eh_region_may_contain_throw_map;
341
342 /* The GOTO_QUEUE is an array of GIMPLE_GOTO and GIMPLE_RETURN
343 statements that are seen to escape this GIMPLE_TRY_FINALLY node.
344 The idea is to record a gimple statement for everything except for
345 the conditionals, which get their labels recorded. Since labels are
346 of type 'tree', we need this node to store both gimple and tree
347 objects. REPL_STMT is the sequence used to replace the goto/return
348 statement. CONT_STMT is used to store the statement that allows
349 the return/goto to jump to the original destination. */
350
351 struct goto_queue_node
352 {
353 treemple stmt;
354 location_t location;
355 gimple_seq repl_stmt;
356 gimple cont_stmt;
357 int index;
358 /* This is used when index >= 0 to indicate that stmt is a label (as
359 opposed to a goto stmt). */
360 int is_label;
361 };
362
363 /* State of the world while lowering. */
364
365 struct leh_state
366 {
367 /* What's "current" while constructing the eh region tree. These
368 correspond to variables of the same name in cfun->eh, which we
369 don't have easy access to. */
370 eh_region cur_region;
371
372 /* What's "current" for the purposes of __builtin_eh_pointer. For
373 a CATCH, this is the associated TRY. For an EH_FILTER, this is
374 the associated ALLOWED_EXCEPTIONS, etc. */
375 eh_region ehp_region;
376
377 /* Processing of TRY_FINALLY requires a bit more state. This is
378 split out into a separate structure so that we don't have to
379 copy so much when processing other nodes. */
380 struct leh_tf_state *tf;
381 };
382
383 struct leh_tf_state
384 {
385 /* Pointer to the GIMPLE_TRY_FINALLY node under discussion. The
386 try_finally_expr is the original GIMPLE_TRY_FINALLY. We need to retain
387 this so that outside_finally_tree can reliably reference the tree used
388 in the collect_finally_tree data structures. */
389 gimple try_finally_expr;
390 gimple top_p;
391
392   /* While lowering, a top_p is usually expanded into multiple statements;
393      the following field stores them. */
394 gimple_seq top_p_seq;
395
396 /* The state outside this try_finally node. */
397 struct leh_state *outer;
398
399 /* The exception region created for it. */
400 eh_region region;
401
402 /* The goto queue. */
403 struct goto_queue_node *goto_queue;
404 size_t goto_queue_size;
405 size_t goto_queue_active;
406
407 /* Pointer map to help in searching goto_queue when it is large. */
408 struct pointer_map_t *goto_queue_map;
409
410 /* The set of unique labels seen as entries in the goto queue. */
411 vec<tree> dest_array;
412
413 /* A label to be added at the end of the completed transformed
414 sequence. It will be set if may_fallthru was true *at one time*,
415 though subsequent transformations may have cleared that flag. */
416 tree fallthru_label;
417
418 /* True if it is possible to fall out the bottom of the try block.
419 Cleared if the fallthru is converted to a goto. */
420 bool may_fallthru;
421
422 /* True if any entry in goto_queue is a GIMPLE_RETURN. */
423 bool may_return;
424
425 /* True if the finally block can receive an exception edge.
426 Cleared if the exception case is handled by code duplication. */
427 bool may_throw;
428 };
429
430 static gimple_seq lower_eh_must_not_throw (struct leh_state *, gimple);
431
432 /* Search for STMT in the goto queue. Return the replacement,
433 or null if the statement isn't in the queue. */
434
435 #define LARGE_GOTO_QUEUE 20
436
437 static void lower_eh_constructs_1 (struct leh_state *state, gimple_seq *seq);
438
439 static gimple_seq
440 find_goto_replacement (struct leh_tf_state *tf, treemple stmt)
441 {
442 unsigned int i;
443 void **slot;
444
445 if (tf->goto_queue_active < LARGE_GOTO_QUEUE)
446 {
447 for (i = 0; i < tf->goto_queue_active; i++)
448 	if (tf->goto_queue[i].stmt.g == stmt.g)
449 return tf->goto_queue[i].repl_stmt;
450 return NULL;
451 }
452
453 /* If we have a large number of entries in the goto_queue, create a
454 pointer map and use that for searching. */
455
456 if (!tf->goto_queue_map)
457 {
458 tf->goto_queue_map = pointer_map_create ();
459 for (i = 0; i < tf->goto_queue_active; i++)
460 {
461 slot = pointer_map_insert (tf->goto_queue_map,
462 tf->goto_queue[i].stmt.g);
463 gcc_assert (*slot == NULL);
464 *slot = &tf->goto_queue[i];
465 }
466 }
467
468 slot = pointer_map_contains (tf->goto_queue_map, stmt.g);
469 if (slot != NULL)
470 return (((struct goto_queue_node *) *slot)->repl_stmt);
471
472 return NULL;
473 }
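/* Note that the pointer map above is built lazily: queues with fewer
   than LARGE_GOTO_QUEUE entries (the common case) are served by the
   linear scan alone and never pay for the map allocation. */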
474
475 /* A subroutine of replace_goto_queue_1. Handles the sub-clauses of a
476 lowered GIMPLE_COND. If, by chance, the replacement is a simple goto,
477 then we can just splat it in, otherwise we add the new stmts immediately
478 after the GIMPLE_COND and redirect. */
479
480 static void
481 replace_goto_queue_cond_clause (tree *tp, struct leh_tf_state *tf,
482 gimple_stmt_iterator *gsi)
483 {
484 tree label;
485 gimple_seq new_seq;
486 treemple temp;
487 location_t loc = gimple_location (gsi_stmt (*gsi));
488
489 temp.tp = tp;
490 new_seq = find_goto_replacement (tf, temp);
491 if (!new_seq)
492 return;
493
494 if (gimple_seq_singleton_p (new_seq)
495 && gimple_code (gimple_seq_first_stmt (new_seq)) == GIMPLE_GOTO)
496 {
497 *tp = gimple_goto_dest (gimple_seq_first_stmt (new_seq));
498 return;
499 }
500
501 label = create_artificial_label (loc);
502   /* Set the new label for the GIMPLE_COND.  */
503 *tp = label;
504
505 gsi_insert_after (gsi, gimple_build_label (label), GSI_CONTINUE_LINKING);
506 gsi_insert_seq_after (gsi, gimple_seq_copy (new_seq), GSI_CONTINUE_LINKING);
507 }
508
509 /* The real work of replace_goto_queue.  Returns with GSI updated to
510    point to the next statement. */
511
512 static void replace_goto_queue_stmt_list (gimple_seq *, struct leh_tf_state *);
513
514 static void
515 replace_goto_queue_1 (gimple stmt, struct leh_tf_state *tf,
516 gimple_stmt_iterator *gsi)
517 {
518 gimple_seq seq;
519 treemple temp;
520 temp.g = NULL;
521
522 switch (gimple_code (stmt))
523 {
524 case GIMPLE_GOTO:
525 case GIMPLE_RETURN:
526 temp.g = stmt;
527 seq = find_goto_replacement (tf, temp);
528 if (seq)
529 {
530 gsi_insert_seq_before (gsi, gimple_seq_copy (seq), GSI_SAME_STMT);
531 gsi_remove (gsi, false);
532 return;
533 }
534 break;
535
536 case GIMPLE_COND:
537 replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 2), tf, gsi);
538 replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 3), tf, gsi);
539 break;
540
541 case GIMPLE_TRY:
542 replace_goto_queue_stmt_list (gimple_try_eval_ptr (stmt), tf);
543 replace_goto_queue_stmt_list (gimple_try_cleanup_ptr (stmt), tf);
544 break;
545 case GIMPLE_CATCH:
546 replace_goto_queue_stmt_list (gimple_catch_handler_ptr (stmt), tf);
547 break;
548 case GIMPLE_EH_FILTER:
549 replace_goto_queue_stmt_list (gimple_eh_filter_failure_ptr (stmt), tf);
550 break;
551 case GIMPLE_EH_ELSE:
552 replace_goto_queue_stmt_list (gimple_eh_else_n_body_ptr (stmt), tf);
553 replace_goto_queue_stmt_list (gimple_eh_else_e_body_ptr (stmt), tf);
554 break;
555
556 default:
557 /* These won't have gotos in them. */
558 break;
559 }
560
561 gsi_next (gsi);
562 }
563
564 /* A subroutine of replace_goto_queue. Handles GIMPLE_SEQ. */
565
566 static void
567 replace_goto_queue_stmt_list (gimple_seq *seq, struct leh_tf_state *tf)
568 {
569 gimple_stmt_iterator gsi = gsi_start (*seq);
570
571 while (!gsi_end_p (gsi))
572 replace_goto_queue_1 (gsi_stmt (gsi), tf, &gsi);
573 }
574
575 /* Replace all goto queue members. */
576
577 static void
578 replace_goto_queue (struct leh_tf_state *tf)
579 {
580 if (tf->goto_queue_active == 0)
581 return;
582 replace_goto_queue_stmt_list (&tf->top_p_seq, tf);
583 replace_goto_queue_stmt_list (&eh_seq, tf);
584 }
585
586 /* Add a new record to the goto queue contained in TF. NEW_STMT is the
587 data to be added, IS_LABEL indicates whether NEW_STMT is a label or
588 a gimple return. */
589
590 static void
591 record_in_goto_queue (struct leh_tf_state *tf,
592 treemple new_stmt,
593 int index,
594 bool is_label,
595 location_t location)
596 {
597 size_t active, size;
598 struct goto_queue_node *q;
599
600 gcc_assert (!tf->goto_queue_map);
601
602 active = tf->goto_queue_active;
603 size = tf->goto_queue_size;
604 if (active >= size)
605 {
606 size = (size ? size * 2 : 32);
607 tf->goto_queue_size = size;
608 tf->goto_queue
609 = XRESIZEVEC (struct goto_queue_node, tf->goto_queue, size);
610 }
611
612 q = &tf->goto_queue[active];
613 tf->goto_queue_active = active + 1;
614
615 memset (q, 0, sizeof (*q));
616 q->stmt = new_stmt;
617 q->index = index;
618 q->location = location;
619 q->is_label = is_label;
620 }
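/* The queue grows geometrically (capacities 32, 64, 128, ...), so a
   run of N insertions performs O(N) amortized copying work. */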
621
622 /* Record label LABEL in the goto queue contained in TF.
623    TF is not null. */
624
625 static void
626 record_in_goto_queue_label (struct leh_tf_state *tf, treemple stmt, tree label,
627 location_t location)
628 {
629 int index;
630 treemple temp, new_stmt;
631
632 if (!label)
633 return;
634
635 /* Computed and non-local gotos do not get processed. Given
636 their nature we can neither tell whether we've escaped the
637 finally block nor redirect them if we knew. */
638 if (TREE_CODE (label) != LABEL_DECL)
639 return;
640
641 /* No need to record gotos that don't leave the try block. */
642 temp.t = label;
643 if (!outside_finally_tree (temp, tf->try_finally_expr))
644 return;
645
646 if (! tf->dest_array.exists ())
647 {
648 tf->dest_array.create (10);
649 tf->dest_array.quick_push (label);
650 index = 0;
651 }
652 else
653 {
654 int n = tf->dest_array.length ();
655 for (index = 0; index < n; ++index)
656 if (tf->dest_array[index] == label)
657 break;
658 if (index == n)
659 tf->dest_array.safe_push (label);
660 }
661
662 /* In the case of a GOTO we want to record the destination label,
663    since with a GIMPLE_COND we have easy access to the then/else
664 labels. */
665 new_stmt = stmt;
666 record_in_goto_queue (tf, new_stmt, index, true, location);
667 }
668
669 /* For any GIMPLE_GOTO or GIMPLE_RETURN, decide whether it leaves a try_finally
670 node, and if so record that fact in the goto queue associated with that
671 try_finally node. */
672
673 static void
674 maybe_record_in_goto_queue (struct leh_state *state, gimple stmt)
675 {
676 struct leh_tf_state *tf = state->tf;
677 treemple new_stmt;
678
679 if (!tf)
680 return;
681
682 switch (gimple_code (stmt))
683 {
684 case GIMPLE_COND:
685 new_stmt.tp = gimple_op_ptr (stmt, 2);
686 record_in_goto_queue_label (tf, new_stmt, gimple_cond_true_label (stmt),
687 EXPR_LOCATION (*new_stmt.tp));
688 new_stmt.tp = gimple_op_ptr (stmt, 3);
689 record_in_goto_queue_label (tf, new_stmt, gimple_cond_false_label (stmt),
690 EXPR_LOCATION (*new_stmt.tp));
691 break;
692 case GIMPLE_GOTO:
693 new_stmt.g = stmt;
694 record_in_goto_queue_label (tf, new_stmt, gimple_goto_dest (stmt),
695 gimple_location (stmt));
696 break;
697
698 case GIMPLE_RETURN:
699 tf->may_return = true;
700 new_stmt.g = stmt;
701 record_in_goto_queue (tf, new_stmt, -1, false, gimple_location (stmt));
702 break;
703
704 default:
705 gcc_unreachable ();
706 }
707 }
708
709
710 #ifdef ENABLE_CHECKING
711 /* We do not process GIMPLE_SWITCHes for now. As long as the original source
712    was in fact structured, and we've not yet done jump threading, none
713 of the labels will leave outer GIMPLE_TRY_FINALLY nodes. Verify this. */
714
715 static void
716 verify_norecord_switch_expr (struct leh_state *state, gimple switch_expr)
717 {
718 struct leh_tf_state *tf = state->tf;
719 size_t i, n;
720
721 if (!tf)
722 return;
723
724 n = gimple_switch_num_labels (switch_expr);
725
726 for (i = 0; i < n; ++i)
727 {
728 treemple temp;
729 tree lab = CASE_LABEL (gimple_switch_label (switch_expr, i));
730 temp.t = lab;
731 gcc_assert (!outside_finally_tree (temp, tf->try_finally_expr));
732 }
733 }
734 #else
735 #define verify_norecord_switch_expr(state, switch_expr)
736 #endif
737
738 /* Redirect a RETURN_EXPR pointed to by Q to FINLAB. If MOD is
739 non-null, insert it before the new branch. */
740
741 static void
742 do_return_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod)
743 {
744 gimple x;
745
746 /* In the case of a return, the queue node must be a gimple statement. */
747 gcc_assert (!q->is_label);
748
749 /* Note that the return value may have already been computed, e.g.,
750
751 int x;
752 int foo (void)
753 {
754 x = 0;
755 try {
756 return x;
757 } finally {
758 x++;
759 }
760 }
761
762 should return 0, not 1. We don't have to do anything to make
763    this happen because the return value has been placed in the
764 RESULT_DECL already. */
765
766 q->cont_stmt = q->stmt.g;
767
768 if (mod)
769 gimple_seq_add_seq (&q->repl_stmt, mod);
770
771 x = gimple_build_goto (finlab);
772 gimple_set_location (x, q->location);
773 gimple_seq_add_stmt (&q->repl_stmt, x);
774 }
775
776 /* Similar, but easier, for GIMPLE_GOTO. */
777
778 static void
779 do_goto_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod,
780 struct leh_tf_state *tf)
781 {
782 gimple x;
783
784 gcc_assert (q->is_label);
785
786 q->cont_stmt = gimple_build_goto (tf->dest_array[q->index]);
787
788 if (mod)
789 gimple_seq_add_seq (&q->repl_stmt, mod);
790
791 x = gimple_build_goto (finlab);
792 gimple_set_location (x, q->location);
793 gimple_seq_add_stmt (&q->repl_stmt, x);
794 }
795
796 /* Emit a standard landing pad sequence into SEQ for REGION. */
797
798 static void
799 emit_post_landing_pad (gimple_seq *seq, eh_region region)
800 {
801 eh_landing_pad lp = region->landing_pads;
802 gimple x;
803
804 if (lp == NULL)
805 lp = gen_eh_landing_pad (region);
806
807 lp->post_landing_pad = create_artificial_label (UNKNOWN_LOCATION);
808 EH_LANDING_PAD_NR (lp->post_landing_pad) = lp->index;
809
810 x = gimple_build_label (lp->post_landing_pad);
811 gimple_seq_add_stmt (seq, x);
812 }
813
814 /* Emit a RESX statement into SEQ for REGION. */
815
816 static void
817 emit_resx (gimple_seq *seq, eh_region region)
818 {
819 gimple x = gimple_build_resx (region->index);
820 gimple_seq_add_stmt (seq, x);
821 if (region->outer)
822 record_stmt_eh_region (region->outer, x);
823 }
824
825 /* Emit an EH_DISPATCH statement into SEQ for REGION. */
826
827 static void
828 emit_eh_dispatch (gimple_seq *seq, eh_region region)
829 {
830 gimple x = gimple_build_eh_dispatch (region->index);
831 gimple_seq_add_stmt (seq, x);
832 }
833
834 /* Note that the current EH region may contain a throw, or a
835 call to a function which itself may contain a throw. */
836
837 static void
838 note_eh_region_may_contain_throw (eh_region region)
839 {
840 while (bitmap_set_bit (eh_region_may_contain_throw_map, region->index))
841 {
842 if (region->type == ERT_MUST_NOT_THROW)
843 break;
844 region = region->outer;
845 if (region == NULL)
846 break;
847 }
848 }
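/* The loop above stops as soon as bitmap_set_bit reports the bit is
   already set, at a MUST_NOT_THROW region, or at the outermost region,
   so no region is rescanned once it has been marked. */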
849
850 /* Check if REGION has been marked as containing a throw. If REGION is
851 NULL, this predicate is false. */
852
853 static inline bool
854 eh_region_may_contain_throw (eh_region r)
855 {
856 return r && bitmap_bit_p (eh_region_may_contain_throw_map, r->index);
857 }
858
859 /* We want to transform
860 try { body; } catch { stuff; }
861 to
862      normal_sequence:
863 body;
864 over:
865      eh_sequence:
866 landing_pad:
867 stuff;
868 goto over;
869
870 TP is a GIMPLE_TRY node. REGION is the region whose post_landing_pad
871 should be placed before the second operand, or NULL. OVER is
872 an existing label that should be put at the exit, or NULL. */
873
874 static gimple_seq
875 frob_into_branch_around (gimple tp, eh_region region, tree over)
876 {
877 gimple x;
878 gimple_seq cleanup, result;
879 location_t loc = gimple_location (tp);
880
881 cleanup = gimple_try_cleanup (tp);
882 result = gimple_try_eval (tp);
883
884 if (region)
885 emit_post_landing_pad (&eh_seq, region);
886
887 if (gimple_seq_may_fallthru (cleanup))
888 {
889 if (!over)
890 over = create_artificial_label (loc);
891 x = gimple_build_goto (over);
892 gimple_set_location (x, loc);
893 gimple_seq_add_stmt (&cleanup, x);
894 }
895 gimple_seq_add_seq (&eh_seq, cleanup);
896
897 if (over)
898 {
899 x = gimple_build_label (over);
900 gimple_seq_add_stmt (&result, x);
901 }
902 return result;
903 }
904
905 /* A subroutine of lower_try_finally.  Duplicate the sequence SEQ.
906 Make sure to record all new labels found. */
907
908 static gimple_seq
909 lower_try_finally_dup_block (gimple_seq seq, struct leh_state *outer_state,
910 location_t loc)
911 {
912 gimple region = NULL;
913 gimple_seq new_seq;
914 gimple_stmt_iterator gsi;
915
916 new_seq = copy_gimple_seq_and_replace_locals (seq);
917
918 for (gsi = gsi_start (new_seq); !gsi_end_p (gsi); gsi_next (&gsi))
919 {
920 gimple stmt = gsi_stmt (gsi);
921 if (LOCATION_LOCUS (gimple_location (stmt)) == UNKNOWN_LOCATION)
922 {
923 tree block = gimple_block (stmt);
924 gimple_set_location (stmt, loc);
925 gimple_set_block (stmt, block);
926 }
927 }
928
929 if (outer_state->tf)
930 region = outer_state->tf->try_finally_expr;
931 collect_finally_tree_1 (new_seq, region);
932
933 return new_seq;
934 }
935
936 /* A subroutine of lower_try_finally. Create a fallthru label for
937 the given try_finally state. The only tricky bit here is that
938 we have to make sure to record the label in our outer context. */
939
940 static tree
941 lower_try_finally_fallthru_label (struct leh_tf_state *tf)
942 {
943 tree label = tf->fallthru_label;
944 treemple temp;
945
946 if (!label)
947 {
948 label = create_artificial_label (gimple_location (tf->try_finally_expr));
949 tf->fallthru_label = label;
950 if (tf->outer->tf)
951 {
952 temp.t = label;
953 record_in_finally_tree (temp, tf->outer->tf->try_finally_expr);
954 }
955 }
956 return label;
957 }
958
959 /* A subroutine of lower_try_finally.  If FINALLY consists of a
960 GIMPLE_EH_ELSE node, return it. */
961
962 static inline gimple
963 get_eh_else (gimple_seq finally)
964 {
965 gimple x = gimple_seq_first_stmt (finally);
966 if (gimple_code (x) == GIMPLE_EH_ELSE)
967 {
968 gcc_assert (gimple_seq_singleton_p (finally));
969 return x;
970 }
971 return NULL;
972 }
973
974 /* A subroutine of lower_try_finally. If the eh_protect_cleanup_actions
975 langhook returns non-null, then the language requires that the exception
976 path out of a try_finally be treated specially. To wit: the code within
977 the finally block may not itself throw an exception. We have two choices
978 here. First we can duplicate the finally block and wrap it in a
979 must_not_throw region. Second, we can generate code like
980
981 try {
982 finally_block;
983 } catch {
984 if (fintmp == eh_edge)
985 protect_cleanup_actions;
986 }
987
988 where "fintmp" is the temporary used in the switch statement generation
989 alternative considered below. For the nonce, we always choose the first
990 option.
991
992 THIS_STATE may be null if this is a try-cleanup, not a try-finally. */
993
994 static void
995 honor_protect_cleanup_actions (struct leh_state *outer_state,
996 struct leh_state *this_state,
997 struct leh_tf_state *tf)
998 {
999 tree protect_cleanup_actions;
1000 gimple_stmt_iterator gsi;
1001 bool finally_may_fallthru;
1002 gimple_seq finally;
1003 gimple x, eh_else;
1004
1005 /* First check for nothing to do. */
1006 if (lang_hooks.eh_protect_cleanup_actions == NULL)
1007 return;
1008 protect_cleanup_actions = lang_hooks.eh_protect_cleanup_actions ();
1009 if (protect_cleanup_actions == NULL)
1010 return;
1011
1012 finally = gimple_try_cleanup (tf->top_p);
1013 eh_else = get_eh_else (finally);
1014
1015 /* Duplicate the FINALLY block. Only need to do this for try-finally,
1016 and not for cleanups. If we've got an EH_ELSE, extract it now. */
1017 if (eh_else)
1018 {
1019 finally = gimple_eh_else_e_body (eh_else);
1020 gimple_try_set_cleanup (tf->top_p, gimple_eh_else_n_body (eh_else));
1021 }
1022 else if (this_state)
1023 finally = lower_try_finally_dup_block (finally, outer_state,
1024 gimple_location (tf->try_finally_expr));
1025 finally_may_fallthru = gimple_seq_may_fallthru (finally);
1026
1027 /* If this cleanup consists of a TRY_CATCH_EXPR with TRY_CATCH_IS_CLEANUP
1028 set, the handler of the TRY_CATCH_EXPR is another cleanup which ought
1029 to be in an enclosing scope, but needs to be implemented at this level
1030 to avoid a nesting violation (see wrap_temporary_cleanups in
1031 cp/decl.c). Since it's logically at an outer level, we should call
1032 terminate before we get to it, so strip it away before adding the
1033 MUST_NOT_THROW filter. */
1034 gsi = gsi_start (finally);
1035 x = gsi_stmt (gsi);
1036 if (gimple_code (x) == GIMPLE_TRY
1037 && gimple_try_kind (x) == GIMPLE_TRY_CATCH
1038 && gimple_try_catch_is_cleanup (x))
1039 {
1040 gsi_insert_seq_before (&gsi, gimple_try_eval (x), GSI_SAME_STMT);
1041 gsi_remove (&gsi, false);
1042 }
1043
1044 /* Wrap the block with protect_cleanup_actions as the action. */
1045 x = gimple_build_eh_must_not_throw (protect_cleanup_actions);
1046 x = gimple_build_try (finally, gimple_seq_alloc_with_stmt (x),
1047 GIMPLE_TRY_CATCH);
1048 finally = lower_eh_must_not_throw (outer_state, x);
1049
1050 /* Drop all of this into the exception sequence. */
1051 emit_post_landing_pad (&eh_seq, tf->region);
1052 gimple_seq_add_seq (&eh_seq, finally);
1053 if (finally_may_fallthru)
1054 emit_resx (&eh_seq, tf->region);
1055
1056 /* Having now been handled, EH isn't to be considered with
1057 the rest of the outgoing edges. */
1058 tf->may_throw = false;
1059 }
1060
1061 /* A subroutine of lower_try_finally. We have determined that there is
1062 no fallthru edge out of the finally block. This means that there is
1063 no outgoing edge corresponding to any incoming edge. Restructure the
1064 try_finally node for this special case. */
1065
1066 static void
1067 lower_try_finally_nofallthru (struct leh_state *state,
1068 struct leh_tf_state *tf)
1069 {
1070 tree lab;
1071 gimple x, eh_else;
1072 gimple_seq finally;
1073 struct goto_queue_node *q, *qe;
1074
1075 lab = create_artificial_label (gimple_location (tf->try_finally_expr));
1076
1077 /* We expect that tf->top_p is a GIMPLE_TRY. */
1078 finally = gimple_try_cleanup (tf->top_p);
1079 tf->top_p_seq = gimple_try_eval (tf->top_p);
1080
1081 x = gimple_build_label (lab);
1082 gimple_seq_add_stmt (&tf->top_p_seq, x);
1083
1084 q = tf->goto_queue;
1085 qe = q + tf->goto_queue_active;
1086 for (; q < qe; ++q)
1087 if (q->index < 0)
1088 do_return_redirection (q, lab, NULL);
1089 else
1090 do_goto_redirection (q, lab, NULL, tf);
1091
1092 replace_goto_queue (tf);
1093
1094 /* Emit the finally block into the stream. Lower EH_ELSE at this time. */
1095 eh_else = get_eh_else (finally);
1096 if (eh_else)
1097 {
1098 finally = gimple_eh_else_n_body (eh_else);
1099 lower_eh_constructs_1 (state, &finally);
1100 gimple_seq_add_seq (&tf->top_p_seq, finally);
1101
1102 if (tf->may_throw)
1103 {
1104 finally = gimple_eh_else_e_body (eh_else);
1105 lower_eh_constructs_1 (state, &finally);
1106
1107 emit_post_landing_pad (&eh_seq, tf->region);
1108 gimple_seq_add_seq (&eh_seq, finally);
1109 }
1110 }
1111 else
1112 {
1113 lower_eh_constructs_1 (state, &finally);
1114 gimple_seq_add_seq (&tf->top_p_seq, finally);
1115
1116 if (tf->may_throw)
1117 {
1118 emit_post_landing_pad (&eh_seq, tf->region);
1119
1120 x = gimple_build_goto (lab);
1121 gimple_set_location (x, gimple_location (tf->try_finally_expr));
1122 gimple_seq_add_stmt (&eh_seq, x);
1123 }
1124 }
1125 }
1126
1127 /* A subroutine of lower_try_finally. We have determined that there is
1128 exactly one destination of the finally block. Restructure the
1129 try_finally node for this special case. */
1130
1131 static void
1132 lower_try_finally_onedest (struct leh_state *state, struct leh_tf_state *tf)
1133 {
1134 struct goto_queue_node *q, *qe;
1135 gimple x;
1136 gimple_seq finally;
1137 gimple_stmt_iterator gsi;
1138 tree finally_label;
1139 location_t loc = gimple_location (tf->try_finally_expr);
1140
1141 finally = gimple_try_cleanup (tf->top_p);
1142 tf->top_p_seq = gimple_try_eval (tf->top_p);
1143
1144   /* Since there's only one destination, and the destination edge can be
1145      either EH or non-EH, all of our incoming edges must be of the same
1146      type.  Therefore we can lower EH_ELSE immediately. */
1147 x = get_eh_else (finally);
1148 if (x)
1149 {
1150 if (tf->may_throw)
1151 finally = gimple_eh_else_e_body (x);
1152 else
1153 finally = gimple_eh_else_n_body (x);
1154 }
1155
1156 lower_eh_constructs_1 (state, &finally);
1157
1158 for (gsi = gsi_start (finally); !gsi_end_p (gsi); gsi_next (&gsi))
1159 {
1160 gimple stmt = gsi_stmt (gsi);
1161 if (LOCATION_LOCUS (gimple_location (stmt)) == UNKNOWN_LOCATION)
1162 {
1163 tree block = gimple_block (stmt);
1164 gimple_set_location (stmt, gimple_location (tf->try_finally_expr));
1165 gimple_set_block (stmt, block);
1166 }
1167 }
1168
1169 if (tf->may_throw)
1170 {
1171 /* Only reachable via the exception edge. Add the given label to
1172 the head of the FINALLY block. Append a RESX at the end. */
1173 emit_post_landing_pad (&eh_seq, tf->region);
1174 gimple_seq_add_seq (&eh_seq, finally);
1175 emit_resx (&eh_seq, tf->region);
1176 return;
1177 }
1178
1179 if (tf->may_fallthru)
1180 {
1181 /* Only reachable via the fallthru edge. Do nothing but let
1182 the two blocks run together; we'll fall out the bottom. */
1183 gimple_seq_add_seq (&tf->top_p_seq, finally);
1184 return;
1185 }
1186
1187 finally_label = create_artificial_label (loc);
1188 x = gimple_build_label (finally_label);
1189 gimple_seq_add_stmt (&tf->top_p_seq, x);
1190
1191 gimple_seq_add_seq (&tf->top_p_seq, finally);
1192
1193 q = tf->goto_queue;
1194 qe = q + tf->goto_queue_active;
1195
1196 if (tf->may_return)
1197 {
1198 /* Reachable by return expressions only. Redirect them. */
1199 for (; q < qe; ++q)
1200 do_return_redirection (q, finally_label, NULL);
1201 replace_goto_queue (tf);
1202 }
1203 else
1204 {
1205 /* Reachable by goto expressions only. Redirect them. */
1206 for (; q < qe; ++q)
1207 do_goto_redirection (q, finally_label, NULL, tf);
1208 replace_goto_queue (tf);
1209
1210 if (tf->dest_array[0] == tf->fallthru_label)
1211 {
1212 /* Reachable by goto to fallthru label only. Redirect it
1213 to the new label (already created, sadly), and do not
1214 emit the final branch out, or the fallthru label. */
1215 tf->fallthru_label = NULL;
1216 return;
1217 }
1218 }
1219
1220 /* Place the original return/goto to the original destination
1221 immediately after the finally block. */
1222 x = tf->goto_queue[0].cont_stmt;
1223 gimple_seq_add_stmt (&tf->top_p_seq, x);
1224 maybe_record_in_goto_queue (state, x);
1225 }
1226
1227 /* A subroutine of lower_try_finally. There are multiple edges incoming
1228 and outgoing from the finally block. Implement this by duplicating the
1229 finally block for every destination. */
1230
1231 static void
1232 lower_try_finally_copy (struct leh_state *state, struct leh_tf_state *tf)
1233 {
1234 gimple_seq finally;
1235 gimple_seq new_stmt;
1236 gimple_seq seq;
1237 gimple x, eh_else;
1238 tree tmp;
1239 location_t tf_loc = gimple_location (tf->try_finally_expr);
1240
1241 finally = gimple_try_cleanup (tf->top_p);
1242
1243 /* Notice EH_ELSE, and simplify some of the remaining code
1244 by considering FINALLY to be the normal return path only. */
1245 eh_else = get_eh_else (finally);
1246 if (eh_else)
1247 finally = gimple_eh_else_n_body (eh_else);
1248
1249 tf->top_p_seq = gimple_try_eval (tf->top_p);
1250 new_stmt = NULL;
1251
1252 if (tf->may_fallthru)
1253 {
1254 seq = lower_try_finally_dup_block (finally, state, tf_loc);
1255 lower_eh_constructs_1 (state, &seq);
1256 gimple_seq_add_seq (&new_stmt, seq);
1257
1258 tmp = lower_try_finally_fallthru_label (tf);
1259 x = gimple_build_goto (tmp);
1260 gimple_set_location (x, tf_loc);
1261 gimple_seq_add_stmt (&new_stmt, x);
1262 }
1263
1264 if (tf->may_throw)
1265 {
1266 /* We don't need to copy the EH path of EH_ELSE,
1267 since it is only emitted once. */
1268 if (eh_else)
1269 seq = gimple_eh_else_e_body (eh_else);
1270 else
1271 seq = lower_try_finally_dup_block (finally, state, tf_loc);
1272 lower_eh_constructs_1 (state, &seq);
1273
1274 emit_post_landing_pad (&eh_seq, tf->region);
1275 gimple_seq_add_seq (&eh_seq, seq);
1276 emit_resx (&eh_seq, tf->region);
1277 }
1278
1279 if (tf->goto_queue)
1280 {
1281 struct goto_queue_node *q, *qe;
1282 int return_index, index;
1283 struct labels_s
1284 {
1285 struct goto_queue_node *q;
1286 tree label;
1287 } *labels;
1288
1289 return_index = tf->dest_array.length ();
1290 labels = XCNEWVEC (struct labels_s, return_index + 1);
1291
1292 q = tf->goto_queue;
1293 qe = q + tf->goto_queue_active;
1294 for (; q < qe; q++)
1295 {
1296 index = q->index < 0 ? return_index : q->index;
1297
1298 if (!labels[index].q)
1299 labels[index].q = q;
1300 }
1301
1302 for (index = 0; index < return_index + 1; index++)
1303 {
1304 tree lab;
1305
1306 q = labels[index].q;
1307 if (! q)
1308 continue;
1309
1310 lab = labels[index].label
1311 = create_artificial_label (tf_loc);
1312
1313 if (index == return_index)
1314 do_return_redirection (q, lab, NULL);
1315 else
1316 do_goto_redirection (q, lab, NULL, tf);
1317
1318 x = gimple_build_label (lab);
1319 gimple_seq_add_stmt (&new_stmt, x);
1320
1321 seq = lower_try_finally_dup_block (finally, state, q->location);
1322 lower_eh_constructs_1 (state, &seq);
1323 gimple_seq_add_seq (&new_stmt, seq);
1324
1325 gimple_seq_add_stmt (&new_stmt, q->cont_stmt);
1326 maybe_record_in_goto_queue (state, q->cont_stmt);
1327 }
1328
1329 for (q = tf->goto_queue; q < qe; q++)
1330 {
1331 tree lab;
1332
1333 index = q->index < 0 ? return_index : q->index;
1334
1335 if (labels[index].q == q)
1336 continue;
1337
1338 lab = labels[index].label;
1339
1340 if (index == return_index)
1341 do_return_redirection (q, lab, NULL);
1342 else
1343 do_goto_redirection (q, lab, NULL, tf);
1344 }
1345
1346 replace_goto_queue (tf);
1347 free (labels);
1348 }
1349
1350   /* The new stmts must be linked in only after running replace_goto_queue,
1351      so the same goto stmts are not processed twice. */
1352 gimple_seq_add_seq (&tf->top_p_seq, new_stmt);
1353 }
1354
1355 /* A subroutine of lower_try_finally. There are multiple edges incoming
1356 and outgoing from the finally block. Implement this by instrumenting
1357 each incoming edge and creating a switch statement at the end of the
1358 finally block that branches to the appropriate destination. */
1359
1360 static void
1361 lower_try_finally_switch (struct leh_state *state, struct leh_tf_state *tf)
1362 {
1363 struct goto_queue_node *q, *qe;
1364 tree finally_tmp, finally_label;
1365 int return_index, eh_index, fallthru_index;
1366 int nlabels, ndests, j, last_case_index;
1367 tree last_case;
1368 vec<tree> case_label_vec;
1369 gimple_seq switch_body = NULL;
1370 gimple x, eh_else;
1371 tree tmp;
1372 gimple switch_stmt;
1373 gimple_seq finally;
1374 struct pointer_map_t *cont_map = NULL;
1375 /* The location of the TRY_FINALLY stmt. */
1376 location_t tf_loc = gimple_location (tf->try_finally_expr);
1377 /* The location of the finally block. */
1378 location_t finally_loc;
1379
1380 finally = gimple_try_cleanup (tf->top_p);
1381 eh_else = get_eh_else (finally);
1382
1383 /* Mash the TRY block to the head of the chain. */
1384 tf->top_p_seq = gimple_try_eval (tf->top_p);
1385
1386 /* The location of the finally is either the last stmt in the finally
1387 block or the location of the TRY_FINALLY itself. */
1388 x = gimple_seq_last_stmt (finally);
1389 finally_loc = x ? gimple_location (x) : tf_loc;
1390
1391 /* Prepare for switch statement generation. */
1392 nlabels = tf->dest_array.length ();
1393 return_index = nlabels;
1394 eh_index = return_index + tf->may_return;
1395 fallthru_index = eh_index + (tf->may_throw && !eh_else);
1396 ndests = fallthru_index + tf->may_fallthru;
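  /* For example (illustrative numbers): with two recorded goto
     destinations plus a return, a throw (and no EH_ELSE) and a
     fallthru, we get nlabels = 2, return_index = 2, eh_index = 3,
     fallthru_index = 4 and ndests = 5. */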
1397
1398 finally_tmp = create_tmp_var (integer_type_node, "finally_tmp");
1399 finally_label = create_artificial_label (finally_loc);
1400
1401 /* We use vec::quick_push on case_label_vec throughout this function,
1402      since we know the size in advance and allocate precisely as much
1403 space as needed. */
1404 case_label_vec.create (ndests);
1405 last_case = NULL;
1406 last_case_index = 0;
1407
1408 /* Begin inserting code for getting to the finally block. Things
1409      are done in this order to correspond to the sequence in which the
1410      code is laid out. */
1411
1412 if (tf->may_fallthru)
1413 {
1414 x = gimple_build_assign (finally_tmp,
1415 build_int_cst (integer_type_node,
1416 fallthru_index));
1417 gimple_seq_add_stmt (&tf->top_p_seq, x);
1418
1419 tmp = build_int_cst (integer_type_node, fallthru_index);
1420 last_case = build_case_label (tmp, NULL,
1421 create_artificial_label (tf_loc));
1422 case_label_vec.quick_push (last_case);
1423 last_case_index++;
1424
1425 x = gimple_build_label (CASE_LABEL (last_case));
1426 gimple_seq_add_stmt (&switch_body, x);
1427
1428 tmp = lower_try_finally_fallthru_label (tf);
1429 x = gimple_build_goto (tmp);
1430 gimple_set_location (x, tf_loc);
1431 gimple_seq_add_stmt (&switch_body, x);
1432 }
1433
1434 /* For EH_ELSE, emit the exception path (plus resx) now, then
1435 subsequently we only need consider the normal path. */
1436 if (eh_else)
1437 {
1438 if (tf->may_throw)
1439 {
1440 finally = gimple_eh_else_e_body (eh_else);
1441 lower_eh_constructs_1 (state, &finally);
1442
1443 emit_post_landing_pad (&eh_seq, tf->region);
1444 gimple_seq_add_seq (&eh_seq, finally);
1445 emit_resx (&eh_seq, tf->region);
1446 }
1447
1448 finally = gimple_eh_else_n_body (eh_else);
1449 }
1450 else if (tf->may_throw)
1451 {
1452 emit_post_landing_pad (&eh_seq, tf->region);
1453
1454 x = gimple_build_assign (finally_tmp,
1455 build_int_cst (integer_type_node, eh_index));
1456 gimple_seq_add_stmt (&eh_seq, x);
1457
1458 x = gimple_build_goto (finally_label);
1459 gimple_set_location (x, tf_loc);
1460 gimple_seq_add_stmt (&eh_seq, x);
1461
1462 tmp = build_int_cst (integer_type_node, eh_index);
1463 last_case = build_case_label (tmp, NULL,
1464 create_artificial_label (tf_loc));
1465 case_label_vec.quick_push (last_case);
1466 last_case_index++;
1467
1468 x = gimple_build_label (CASE_LABEL (last_case));
1469 gimple_seq_add_stmt (&eh_seq, x);
1470 emit_resx (&eh_seq, tf->region);
1471 }
1472
1473 x = gimple_build_label (finally_label);
1474 gimple_seq_add_stmt (&tf->top_p_seq, x);
1475
1476 lower_eh_constructs_1 (state, &finally);
1477 gimple_seq_add_seq (&tf->top_p_seq, finally);
1478
1479 /* Redirect each incoming goto edge. */
1480 q = tf->goto_queue;
1481 qe = q + tf->goto_queue_active;
1482 j = last_case_index + tf->may_return;
1483 /* Prepare the assignments to finally_tmp that are executed upon the
1484 entrance through a particular edge. */
1485 for (; q < qe; ++q)
1486 {
1487 gimple_seq mod = NULL;
1488 int switch_id;
1489 unsigned int case_index;
1490
1491 if (q->index < 0)
1492 {
1493 x = gimple_build_assign (finally_tmp,
1494 build_int_cst (integer_type_node,
1495 return_index));
1496 gimple_seq_add_stmt (&mod, x);
1497 do_return_redirection (q, finally_label, mod);
1498 switch_id = return_index;
1499 }
1500 else
1501 {
1502 x = gimple_build_assign (finally_tmp,
1503 build_int_cst (integer_type_node, q->index));
1504 gimple_seq_add_stmt (&mod, x);
1505 do_goto_redirection (q, finally_label, mod, tf);
1506 switch_id = q->index;
1507 }
1508
1509 case_index = j + q->index;
1510 if (case_label_vec.length () <= case_index || !case_label_vec[case_index])
1511 {
1512 tree case_lab;
1513 void **slot;
1514 tmp = build_int_cst (integer_type_node, switch_id);
1515 case_lab = build_case_label (tmp, NULL,
1516 create_artificial_label (tf_loc));
1517 /* We store the cont_stmt in the pointer map, so that we can recover
1518 it in the loop below. */
1519 if (!cont_map)
1520 cont_map = pointer_map_create ();
1521 slot = pointer_map_insert (cont_map, case_lab);
1522 *slot = q->cont_stmt;
1523 case_label_vec.quick_push (case_lab);
1524 }
1525 }
1526 for (j = last_case_index; j < last_case_index + nlabels; j++)
1527 {
1528 gimple cont_stmt;
1529 void **slot;
1530
1531 last_case = case_label_vec[j];
1532
1533 gcc_assert (last_case);
1534 gcc_assert (cont_map);
1535
1536 slot = pointer_map_contains (cont_map, last_case);
1537 gcc_assert (slot);
1538 cont_stmt = *(gimple *) slot;
1539
1540 x = gimple_build_label (CASE_LABEL (last_case));
1541 gimple_seq_add_stmt (&switch_body, x);
1542 gimple_seq_add_stmt (&switch_body, cont_stmt);
1543 maybe_record_in_goto_queue (state, cont_stmt);
1544 }
1545 if (cont_map)
1546 pointer_map_destroy (cont_map);
1547
1548 replace_goto_queue (tf);
1549
1550 /* Make sure that the last case is the default label, as one is required.
1551 Then sort the labels, which is also required in GIMPLE. */
1552 CASE_LOW (last_case) = NULL;
1553 tree tem = case_label_vec.pop ();
1554 gcc_assert (tem == last_case);
1555 sort_case_labels (case_label_vec);
1556
1557 /* Build the switch statement, setting last_case to be the default
1558 label. */
1559 switch_stmt = gimple_build_switch (finally_tmp, last_case,
1560 case_label_vec);
1561 gimple_set_location (switch_stmt, finally_loc);
1562
1563   /* SWITCH_STMT must be linked in only after running replace_goto_queue,
1564      so the same goto stmts are not processed twice. */
1565 gimple_seq_add_stmt (&tf->top_p_seq, switch_stmt);
1566 gimple_seq_add_seq (&tf->top_p_seq, switch_body);
1567 }
1568
1569 /* Decide whether or not we are going to duplicate the finally block.
1570 There are several considerations.
1571
1572 First, if this is Java, then the finally block contains code
1573 written by the user. It has line numbers associated with it,
1574 so duplicating the block means it's difficult to set a breakpoint.
1575 Since controlling code generation via -g is verboten, we simply
1576 never duplicate code without optimization.
1577
1578 Second, we'd like to prevent egregious code growth. One way to
1579 do this is to estimate the size of the finally block, multiply
1580 that by the number of copies we'd need to make, and compare against
1581 the estimate of the size of the switch machinery we'd have to add. */
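/* A worked example with made-up numbers: a finally block estimated at
   12 insns with 3 destinations costs (12 + 1) * 3 = 39 insns when
   copied, versus 10 + 2 * 3 = 16 for the switch machinery, so when
   optimizing for size the switch form is chosen. */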
1582
1583 static bool
1584 decide_copy_try_finally (int ndests, bool may_throw, gimple_seq finally)
1585 {
1586 int f_estimate, sw_estimate;
1587 gimple eh_else;
1588
1589 /* If there's an EH_ELSE involved, the exception path is separate
1590 and really doesn't come into play for this computation. */
1591 eh_else = get_eh_else (finally);
1592 if (eh_else)
1593 {
1594 ndests -= may_throw;
1595 finally = gimple_eh_else_n_body (eh_else);
1596 }
1597
1598 if (!optimize)
1599 {
1600 gimple_stmt_iterator gsi;
1601
1602 if (ndests == 1)
1603 return true;
1604
1605 for (gsi = gsi_start (finally); !gsi_end_p (gsi); gsi_next (&gsi))
1606 {
1607 gimple stmt = gsi_stmt (gsi);
1608 if (!is_gimple_debug (stmt) && !gimple_clobber_p (stmt))
1609 return false;
1610 }
1611 return true;
1612 }
1613
1614 /* Finally estimate N times, plus N gotos. */
1615 f_estimate = count_insns_seq (finally, &eni_size_weights);
1616 f_estimate = (f_estimate + 1) * ndests;
1617
1618 /* Switch statement (cost 10), N variable assignments, N gotos. */
1619 sw_estimate = 10 + 2 * ndests;
1620
1621 /* Optimize for size clearly wants our best guess. */
1622 if (optimize_function_for_size_p (cfun))
1623 return f_estimate < sw_estimate;
1624
1625 /* ??? These numbers are completely made up so far. */
1626 if (optimize > 1)
1627 return f_estimate < 100 || f_estimate < sw_estimate * 2;
1628 else
1629 return f_estimate < 40 || f_estimate * 2 < sw_estimate * 3;
1630 }
1631
1632 /* REG is the enclosing region for a possible cleanup region, or the region
1633 itself. Returns TRUE if such a region would be unreachable.
1634
1635 Cleanup regions within a must-not-throw region aren't actually reachable
1636 even if there are throwing stmts within them, because the personality
1637 routine will call terminate before unwinding. */
1638
1639 static bool
1640 cleanup_is_dead_in (eh_region reg)
1641 {
1642 while (reg && reg->type == ERT_CLEANUP)
1643 reg = reg->outer;
1644 return (reg && reg->type == ERT_MUST_NOT_THROW);
1645 }
1646
1647 /* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY_FINALLY node
1648 to a sequence of labels and blocks, plus the exception region trees
1649 that record all the magic. This is complicated by the need to
1650 arrange for the FINALLY block to be executed on all exits. */
1651
1652 static gimple_seq
1653 lower_try_finally (struct leh_state *state, gimple tp)
1654 {
1655 struct leh_tf_state this_tf;
1656 struct leh_state this_state;
1657 int ndests;
1658 gimple_seq old_eh_seq;
1659
1660 /* Process the try block. */
1661
1662 memset (&this_tf, 0, sizeof (this_tf));
1663 this_tf.try_finally_expr = tp;
1664 this_tf.top_p = tp;
1665 this_tf.outer = state;
1666 if (using_eh_for_cleanups_p () && !cleanup_is_dead_in (state->cur_region))
1667 {
1668 this_tf.region = gen_eh_region_cleanup (state->cur_region);
1669 this_state.cur_region = this_tf.region;
1670 }
1671 else
1672 {
1673 this_tf.region = NULL;
1674 this_state.cur_region = state->cur_region;
1675 }
1676
1677 this_state.ehp_region = state->ehp_region;
1678 this_state.tf = &this_tf;
1679
1680 old_eh_seq = eh_seq;
1681 eh_seq = NULL;
1682
1683 lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1684
1685   /* Determine whether control can fall out the bottom of the try block. */
1686 this_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp));
1687
1688 /* Determine if any exceptions are possible within the try block. */
1689 if (this_tf.region)
1690 this_tf.may_throw = eh_region_may_contain_throw (this_tf.region);
1691 if (this_tf.may_throw)
1692 honor_protect_cleanup_actions (state, &this_state, &this_tf);
1693
1694 /* Determine how many edges (still) reach the finally block. Or rather,
1695 how many destinations are reached by the finally block. Use this to
1696 determine how we process the finally block itself. */
1697
1698 ndests = this_tf.dest_array.length ();
1699 ndests += this_tf.may_fallthru;
1700 ndests += this_tf.may_return;
1701 ndests += this_tf.may_throw;
1702
1703 /* If the FINALLY block is not reachable, dike it out. */
1704 if (ndests == 0)
1705 {
1706 gimple_seq_add_seq (&this_tf.top_p_seq, gimple_try_eval (tp));
1707 gimple_try_set_cleanup (tp, NULL);
1708 }
1709 /* If the finally block doesn't fall through, then any destination
1710 we might try to impose there isn't reached either. There may be
1711 some minor amount of cleanup and redirection still needed. */
1712 else if (!gimple_seq_may_fallthru (gimple_try_cleanup (tp)))
1713 lower_try_finally_nofallthru (state, &this_tf);
1714
1715 /* We can easily special-case redirection to a single destination. */
1716 else if (ndests == 1)
1717 lower_try_finally_onedest (state, &this_tf);
1718 else if (decide_copy_try_finally (ndests, this_tf.may_throw,
1719 gimple_try_cleanup (tp)))
1720 lower_try_finally_copy (state, &this_tf);
1721 else
1722 lower_try_finally_switch (state, &this_tf);
1723
1724 /* If someone requested we add a label at the end of the transformed
1725 block, do so. */
1726 if (this_tf.fallthru_label)
1727 {
1728 /* This must be reached only if ndests == 0. */
1729 gimple x = gimple_build_label (this_tf.fallthru_label);
1730 gimple_seq_add_stmt (&this_tf.top_p_seq, x);
1731 }
1732
1733 this_tf.dest_array.release ();
1734 free (this_tf.goto_queue);
1735 if (this_tf.goto_queue_map)
1736 pointer_map_destroy (this_tf.goto_queue_map);
1737
1738 /* If there was an old (aka outer) eh_seq, append the current eh_seq.
1739 If there was no old eh_seq, then the append is trivially already done. */
1740 if (old_eh_seq)
1741 {
1742 if (eh_seq == NULL)
1743 eh_seq = old_eh_seq;
1744 else
1745 {
1746 gimple_seq new_eh_seq = eh_seq;
1747 eh_seq = old_eh_seq;
1748 gimple_seq_add_seq (&eh_seq, new_eh_seq);
1749 }
1750 }
1751
1752 return this_tf.top_p_seq;
1753 }
1754
1755 /* A subroutine of lower_eh_constructs_1. Lower a GIMPLE_TRY_CATCH with a
1756 list of GIMPLE_CATCH to a sequence of labels and blocks, plus the
1757    exception region trees that record all the magic. */
1758
1759 static gimple_seq
1760 lower_catch (struct leh_state *state, gimple tp)
1761 {
1762 eh_region try_region = NULL;
1763 struct leh_state this_state = *state;
1764 gimple_stmt_iterator gsi;
1765 tree out_label;
1766 gimple_seq new_seq, cleanup;
1767 gimple x;
1768 location_t try_catch_loc = gimple_location (tp);
1769
1770 if (flag_exceptions)
1771 {
1772 try_region = gen_eh_region_try (state->cur_region);
1773 this_state.cur_region = try_region;
1774 }
1775
1776 lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1777
1778 if (!eh_region_may_contain_throw (try_region))
1779 return gimple_try_eval (tp);
1780
1781 new_seq = NULL;
1782 emit_eh_dispatch (&new_seq, try_region);
1783 emit_resx (&new_seq, try_region);
1784
1785 this_state.cur_region = state->cur_region;
1786 this_state.ehp_region = try_region;
1787
1788 out_label = NULL;
1789 cleanup = gimple_try_cleanup (tp);
1790 for (gsi = gsi_start (cleanup);
1791 !gsi_end_p (gsi);
1792 gsi_next (&gsi))
1793 {
1794 eh_catch c;
1795 gimple gcatch;
1796 gimple_seq handler;
1797
1798 gcatch = gsi_stmt (gsi);
1799 c = gen_eh_region_catch (try_region, gimple_catch_types (gcatch));
1800
1801 handler = gimple_catch_handler (gcatch);
1802 lower_eh_constructs_1 (&this_state, &handler);
1803
1804 c->label = create_artificial_label (UNKNOWN_LOCATION);
1805 x = gimple_build_label (c->label);
1806 gimple_seq_add_stmt (&new_seq, x);
1807
1808 gimple_seq_add_seq (&new_seq, handler);
1809
1810 if (gimple_seq_may_fallthru (new_seq))
1811 {
1812 if (!out_label)
1813 out_label = create_artificial_label (try_catch_loc);
1814
1815 x = gimple_build_goto (out_label);
1816 gimple_seq_add_stmt (&new_seq, x);
1817 }
1818 if (!c->type_list)
1819 break;
1820 }
1821
1822 gimple_try_set_cleanup (tp, new_seq);
1823
1824 return frob_into_branch_around (tp, try_region, out_label);
1825 }
1826
1827 /* A subroutine of lower_eh_constructs_1. Lower a GIMPLE_TRY with a
1828 GIMPLE_EH_FILTER to a sequence of labels and blocks, plus the exception
1829 region trees that record all the magic. */
1830
1831 static gimple_seq
1832 lower_eh_filter (struct leh_state *state, gimple tp)
1833 {
1834 struct leh_state this_state = *state;
1835 eh_region this_region = NULL;
1836 gimple inner, x;
1837 gimple_seq new_seq;
1838
1839 inner = gimple_seq_first_stmt (gimple_try_cleanup (tp));
1840
1841 if (flag_exceptions)
1842 {
1843 this_region = gen_eh_region_allowed (state->cur_region,
1844 gimple_eh_filter_types (inner));
1845 this_state.cur_region = this_region;
1846 }
1847
1848 lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1849
1850 if (!eh_region_may_contain_throw (this_region))
1851 return gimple_try_eval (tp);
1852
1853 new_seq = NULL;
1854 this_state.cur_region = state->cur_region;
1855 this_state.ehp_region = this_region;
1856
1857 emit_eh_dispatch (&new_seq, this_region);
1858 emit_resx (&new_seq, this_region);
1859
1860 this_region->u.allowed.label = create_artificial_label (UNKNOWN_LOCATION);
1861 x = gimple_build_label (this_region->u.allowed.label);
1862 gimple_seq_add_stmt (&new_seq, x);
1863
1864 lower_eh_constructs_1 (&this_state, gimple_eh_filter_failure_ptr (inner));
1865 gimple_seq_add_seq (&new_seq, gimple_eh_filter_failure (inner));
1866
1867 gimple_try_set_cleanup (tp, new_seq);
1868
1869 return frob_into_branch_around (tp, this_region, NULL);
1870 }
1871
1872 /* A subroutine of lower_eh_constructs_1. Lower a GIMPLE_TRY with
1873 a GIMPLE_EH_MUST_NOT_THROW to a sequence of labels and blocks,
1874 plus the exception region trees that record all the magic. */
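/* For example, the C++ frontend wraps code that must not throw (for
   instance, a destructor running during unwinding) as

     try { body (); } eh_must_not_throw { <terminate-like decl> }

   No new code is emitted here; we only record the failure decl and
   location in an ERT_MUST_NOT_THROW region so that a later rethrow
   into it can be turned into the failure call. */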
1875
1876 static gimple_seq
1877 lower_eh_must_not_throw (struct leh_state *state, gimple tp)
1878 {
1879 struct leh_state this_state = *state;
1880
1881 if (flag_exceptions)
1882 {
1883 gimple inner = gimple_seq_first_stmt (gimple_try_cleanup (tp));
1884 eh_region this_region;
1885
1886 this_region = gen_eh_region_must_not_throw (state->cur_region);
1887 this_region->u.must_not_throw.failure_decl
1888 = gimple_eh_must_not_throw_fndecl (inner);
1889 this_region->u.must_not_throw.failure_loc
1890 = LOCATION_LOCUS (gimple_location (tp));
1891
1892 /* In order to get mangling applied to this decl, we must mark it
1893 used now. Otherwise, pass_ipa_free_lang_data won't think it
1894 needs to happen. */
1895 TREE_USED (this_region->u.must_not_throw.failure_decl) = 1;
1896
1897 this_state.cur_region = this_region;
1898 }
1899
1900 lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1901
1902 return gimple_try_eval (tp);
1903 }
1904
1905 /* Implement a cleanup expression. This is similar to try-finally,
1906 except that we only execute the cleanup block for exception edges. */
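/* For instance, given

     try { body (); } cleanup { dtor (); }

   the normal path simply falls through BODY without running DTOR;
   DTOR runs only when an exception propagates out of BODY, after
   which the exception is re-raised. Compare try-finally, where the
   finally block runs on every exit. */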
1907
1908 static gimple_seq
1909 lower_cleanup (struct leh_state *state, gimple tp)
1910 {
1911 struct leh_state this_state = *state;
1912 eh_region this_region = NULL;
1913 struct leh_tf_state fake_tf;
1914 gimple_seq result;
1915 bool cleanup_dead = cleanup_is_dead_in (state->cur_region);
1916
1917 if (flag_exceptions && !cleanup_dead)
1918 {
1919 this_region = gen_eh_region_cleanup (state->cur_region);
1920 this_state.cur_region = this_region;
1921 }
1922
1923 lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1924
1925 if (cleanup_dead || !eh_region_may_contain_throw (this_region))
1926 return gimple_try_eval (tp);
1927
1928 /* Build enough of a try-finally state so that we can reuse
1929 honor_protect_cleanup_actions. */
1930 memset (&fake_tf, 0, sizeof (fake_tf));
1931 fake_tf.top_p = fake_tf.try_finally_expr = tp;
1932 fake_tf.outer = state;
1933 fake_tf.region = this_region;
1934 fake_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp));
1935 fake_tf.may_throw = true;
1936
1937 honor_protect_cleanup_actions (state, NULL, &fake_tf);
1938
1939 if (fake_tf.may_throw)
1940 {
1941 /* In this case honor_protect_cleanup_actions had nothing to do,
1942 and we should process this normally. */
1943 lower_eh_constructs_1 (state, gimple_try_cleanup_ptr (tp));
1944 result = frob_into_branch_around (tp, this_region,
1945 fake_tf.fallthru_label);
1946 }
1947 else
1948 {
1949 /* In this case honor_protect_cleanup_actions did nearly all of
1950 the work. All we have left is to append the fallthru_label. */
1951
1952 result = gimple_try_eval (tp);
1953 if (fake_tf.fallthru_label)
1954 {
1955 gimple x = gimple_build_label (fake_tf.fallthru_label);
1956 gimple_seq_add_stmt (&result, x);
1957 }
1958 }
1959 return result;
1960 }
1961
1962 /* Main loop for lowering eh constructs. Also moves gsi to the next
1963 statement. */
1964
1965 static void
1966 lower_eh_constructs_2 (struct leh_state *state, gimple_stmt_iterator *gsi)
1967 {
1968 gimple_seq replace;
1969 gimple x;
1970 gimple stmt = gsi_stmt (*gsi);
1971
1972 switch (gimple_code (stmt))
1973 {
1974 case GIMPLE_CALL:
1975 {
1976 tree fndecl = gimple_call_fndecl (stmt);
1977 tree rhs, lhs;
1978
1979 if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
1980 switch (DECL_FUNCTION_CODE (fndecl))
1981 {
1982 case BUILT_IN_EH_POINTER:
1983 /* The front end may have generated a call to
1984 __builtin_eh_pointer (0) within a catch region. Replace
1985 this zero argument with the current catch region number. */
1986 if (state->ehp_region)
1987 {
1988 tree nr = build_int_cst (integer_type_node,
1989 state->ehp_region->index);
1990 gimple_call_set_arg (stmt, 0, nr);
1991 }
1992 else
1993 {
1994 /* The user has done something silly. Remove it. */
1995 rhs = null_pointer_node;
1996 goto do_replace;
1997 }
1998 break;
1999
2000 case BUILT_IN_EH_FILTER:
2001 /* ??? This should never appear, but since it's a builtin it
2002 is accessible to abuse by users. Just remove it and
2003 replace the use with the arbitrary value zero. */
2004 rhs = build_int_cst (TREE_TYPE (TREE_TYPE (fndecl)), 0);
2005 do_replace:
2006 lhs = gimple_call_lhs (stmt);
2007 x = gimple_build_assign (lhs, rhs);
2008 gsi_insert_before (gsi, x, GSI_SAME_STMT);
2009 /* FALLTHRU */
2010
2011 case BUILT_IN_EH_COPY_VALUES:
2012 /* Likewise this should not appear. Remove it. */
2013 gsi_remove (gsi, true);
2014 return;
2015
2016 default:
2017 break;
2018 }
2019 }
2020 /* FALLTHRU */
2021
2022 case GIMPLE_ASSIGN:
2023 /* If the stmt can throw, use a new temporary for the assignment
2024 to a LHS. This makes sure the old value of the LHS is
2025 available on the EH edge. Only do so for statements that
2026 potentially fall through (e.g. no noreturn calls), otherwise
2027 this new assignment might create fake fallthru regions. */
2028 if (stmt_could_throw_p (stmt)
2029 && gimple_has_lhs (stmt)
2030 && gimple_stmt_may_fallthru (stmt)
2031 && !tree_could_throw_p (gimple_get_lhs (stmt))
2032 && is_gimple_reg_type (TREE_TYPE (gimple_get_lhs (stmt))))
2033 {
2034 tree lhs = gimple_get_lhs (stmt);
2035 tree tmp = create_tmp_var (TREE_TYPE (lhs), NULL);
2036 gimple s = gimple_build_assign (lhs, tmp);
2037 gimple_set_location (s, gimple_location (stmt));
2038 gimple_set_block (s, gimple_block (stmt));
2039 gimple_set_lhs (stmt, tmp);
2040 if (TREE_CODE (TREE_TYPE (tmp)) == COMPLEX_TYPE
2041 || TREE_CODE (TREE_TYPE (tmp)) == VECTOR_TYPE)
2042 DECL_GIMPLE_REG_P (tmp) = 1;
2043 gsi_insert_after (gsi, s, GSI_SAME_STMT);
2044 }
2045 /* Look for things that can throw exceptions, and record them. */
2046 if (state->cur_region && stmt_could_throw_p (stmt))
2047 {
2048 record_stmt_eh_region (state->cur_region, stmt);
2049 note_eh_region_may_contain_throw (state->cur_region);
2050 }
2051 break;
2052
2053 case GIMPLE_COND:
2054 case GIMPLE_GOTO:
2055 case GIMPLE_RETURN:
2056 maybe_record_in_goto_queue (state, stmt);
2057 break;
2058
2059 case GIMPLE_SWITCH:
2060 verify_norecord_switch_expr (state, stmt);
2061 break;
2062
2063 case GIMPLE_TRY:
2064 if (gimple_try_kind (stmt) == GIMPLE_TRY_FINALLY)
2065 replace = lower_try_finally (state, stmt);
2066 else
2067 {
2068 x = gimple_seq_first_stmt (gimple_try_cleanup (stmt));
2069 if (!x)
2070 {
2071 replace = gimple_try_eval (stmt);
2072 lower_eh_constructs_1 (state, &replace);
2073 }
2074 else
2075 switch (gimple_code (x))
2076 {
2077 case GIMPLE_CATCH:
2078 replace = lower_catch (state, stmt);
2079 break;
2080 case GIMPLE_EH_FILTER:
2081 replace = lower_eh_filter (state, stmt);
2082 break;
2083 case GIMPLE_EH_MUST_NOT_THROW:
2084 replace = lower_eh_must_not_throw (state, stmt);
2085 break;
2086 case GIMPLE_EH_ELSE:
2087 /* This code is only valid with GIMPLE_TRY_FINALLY. */
2088 gcc_unreachable ();
2089 default:
2090 replace = lower_cleanup (state, stmt);
2091 break;
2092 }
2093 }
2094
2095 /* Remove the old stmt and insert the transformed sequence
2096 instead. */
2097 gsi_insert_seq_before (gsi, replace, GSI_SAME_STMT);
2098 gsi_remove (gsi, true);
2099
2100 /* Return since we don't want gsi_next (). */
2101 return;
2102
2103 case GIMPLE_EH_ELSE:
2104 /* We should be eliminating this in lower_try_finally et al. */
2105 gcc_unreachable ();
2106
2107 default:
2108 /* A type, a decl, or some kind of statement that we're not
2109 interested in. Don't walk them. */
2110 break;
2111 }
2112
2113 gsi_next (gsi);
2114 }
2115
2116 /* A helper to unwrap a gimple_seq and feed stmts to lower_eh_constructs_2. */
2117
2118 static void
2119 lower_eh_constructs_1 (struct leh_state *state, gimple_seq *pseq)
2120 {
2121 gimple_stmt_iterator gsi;
2122 for (gsi = gsi_start (*pseq); !gsi_end_p (gsi);)
2123 lower_eh_constructs_2 (state, &gsi);
2124 }
2125
2126 namespace {
2127
2128 const pass_data pass_data_lower_eh =
2129 {
2130 GIMPLE_PASS, /* type */
2131 "eh", /* name */
2132 OPTGROUP_NONE, /* optinfo_flags */
2133 true, /* has_execute */
2134 TV_TREE_EH, /* tv_id */
2135 PROP_gimple_lcf, /* properties_required */
2136 PROP_gimple_leh, /* properties_provided */
2137 0, /* properties_destroyed */
2138 0, /* todo_flags_start */
2139 0, /* todo_flags_finish */
2140 };
2141
2142 class pass_lower_eh : public gimple_opt_pass
2143 {
2144 public:
2145 pass_lower_eh (gcc::context *ctxt)
2146 : gimple_opt_pass (pass_data_lower_eh, ctxt)
2147 {}
2148
2149 /* opt_pass methods: */
2150 virtual unsigned int execute (function *);
2151
2152 }; // class pass_lower_eh
2153
2154 unsigned int
2155 pass_lower_eh::execute (function *fun)
2156 {
2157 struct leh_state null_state;
2158 gimple_seq bodyp;
2159
2160 bodyp = gimple_body (current_function_decl);
2161 if (bodyp == NULL)
2162 return 0;
2163
2164 finally_tree.create (31);
2165 eh_region_may_contain_throw_map = BITMAP_ALLOC (NULL);
2166 memset (&null_state, 0, sizeof (null_state));
2167
2168 collect_finally_tree_1 (bodyp, NULL);
2169 lower_eh_constructs_1 (&null_state, &bodyp);
2170 gimple_set_body (current_function_decl, bodyp);
2171
2172 /* We assume there's a return statement, or something, at the end of
2173 the function, and thus plopping the EH sequence afterward won't
2174 change anything. */
2175 gcc_assert (!gimple_seq_may_fallthru (bodyp));
2176 gimple_seq_add_seq (&bodyp, eh_seq);
2177
2178 /* We assume that since BODYP already existed, adding EH_SEQ to it
2179 didn't change its value, and we don't have to re-set the function. */
2180 gcc_assert (bodyp == gimple_body (current_function_decl));
2181
2182 finally_tree.dispose ();
2183 BITMAP_FREE (eh_region_may_contain_throw_map);
2184 eh_seq = NULL;
2185
2186 /* If this function needs a language-specific EH personality routine
2187 and the frontend didn't already set one, do so now. */
2188 if (function_needs_eh_personality (fun) == eh_personality_lang
2189 && !DECL_FUNCTION_PERSONALITY (current_function_decl))
2190 DECL_FUNCTION_PERSONALITY (current_function_decl)
2191 = lang_hooks.eh_personality ();
2192
2193 return 0;
2194 }
2195
2196 } // anon namespace
2197
2198 gimple_opt_pass *
2199 make_pass_lower_eh (gcc::context *ctxt)
2200 {
2201 return new pass_lower_eh (ctxt);
2202 }
2203 \f
2204 /* Create the multiple edges from an EH_DISPATCH statement to all of
2205 the possible handlers for its EH region. Return true if there's
2206 no fallthru edge; false if there is. */
2207
2208 bool
2209 make_eh_dispatch_edges (gimple stmt)
2210 {
2211 eh_region r;
2212 eh_catch c;
2213 basic_block src, dst;
2214
2215 r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
2216 src = gimple_bb (stmt);
2217
2218 switch (r->type)
2219 {
2220 case ERT_TRY:
2221 for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
2222 {
2223 dst = label_to_block (c->label);
2224 make_edge (src, dst, 0);
2225
2226 /* A catch-all handler doesn't have a fallthru. */
2227 if (c->type_list == NULL)
2228 return false;
2229 }
2230 break;
2231
2232 case ERT_ALLOWED_EXCEPTIONS:
2233 dst = label_to_block (r->u.allowed.label);
2234 make_edge (src, dst, 0);
2235 break;
2236
2237 default:
2238 gcc_unreachable ();
2239 }
2240
2241 return true;
2242 }
2243
2244 /* Create the single EH edge from STMT to its nearest landing pad,
2245 if there is such a landing pad within the current function. */
2246
2247 void
2248 make_eh_edges (gimple stmt)
2249 {
2250 basic_block src, dst;
2251 eh_landing_pad lp;
2252 int lp_nr;
2253
2254 lp_nr = lookup_stmt_eh_lp (stmt);
2255 if (lp_nr <= 0)
2256 return;
2257
2258 lp = get_eh_landing_pad_from_number (lp_nr);
2259 gcc_assert (lp != NULL);
2260
2261 src = gimple_bb (stmt);
2262 dst = label_to_block (lp->post_landing_pad);
2263 make_edge (src, dst, EDGE_EH);
2264 }
2265
2266 /* Do the work in redirecting EDGE_IN to NEW_BB within the EH region tree;
2267 do not actually perform the final edge redirection.
2268
2269 CHANGE_REGION is true when we're being called from cleanup_empty_eh and
2270 we intend to change the destination EH region as well; this means
2271 EH_LANDING_PAD_NR must already be set on the destination block label.
2272 If false, we're being called from generic cfg manipulation code and we
2273 should preserve our place within the region tree. */
2274
2275 static void
2276 redirect_eh_edge_1 (edge edge_in, basic_block new_bb, bool change_region)
2277 {
2278 eh_landing_pad old_lp, new_lp;
2279 basic_block old_bb;
2280 gimple throw_stmt;
2281 int old_lp_nr, new_lp_nr;
2282 tree old_label, new_label;
2283 edge_iterator ei;
2284 edge e;
2285
2286 old_bb = edge_in->dest;
2287 old_label = gimple_block_label (old_bb);
2288 old_lp_nr = EH_LANDING_PAD_NR (old_label);
2289 gcc_assert (old_lp_nr > 0);
2290 old_lp = get_eh_landing_pad_from_number (old_lp_nr);
2291
2292 throw_stmt = last_stmt (edge_in->src);
2293 gcc_assert (lookup_stmt_eh_lp (throw_stmt) == old_lp_nr);
2294
2295 new_label = gimple_block_label (new_bb);
2296
2297 /* Look for an existing region that might be using NEW_BB already. */
2298 new_lp_nr = EH_LANDING_PAD_NR (new_label);
2299 if (new_lp_nr)
2300 {
2301 new_lp = get_eh_landing_pad_from_number (new_lp_nr);
2302 gcc_assert (new_lp);
2303
2304 /* Unless CHANGE_REGION is true, the new and old landing pads
2305 had better be associated with the same EH region. */
2306 gcc_assert (change_region || new_lp->region == old_lp->region);
2307 }
2308 else
2309 {
2310 new_lp = NULL;
2311 gcc_assert (!change_region);
2312 }
2313
2314 /* Notice when we redirect the last EH edge away from OLD_BB. */
2315 FOR_EACH_EDGE (e, ei, old_bb->preds)
2316 if (e != edge_in && (e->flags & EDGE_EH))
2317 break;
2318
2319 if (new_lp)
2320 {
2321 /* NEW_LP already exists. If there are still edges into OLD_LP,
2322 there's nothing to do with the EH tree. If there are no more
2323 edges into OLD_LP, then we want to remove OLD_LP as it is unused.
2324 If CHANGE_REGION is true, then our caller is expecting to remove
2325 the landing pad. */
2326 if (e == NULL && !change_region)
2327 remove_eh_landing_pad (old_lp);
2328 }
2329 else
2330 {
2331 /* No correct landing pad exists. If there are no more edges
2332 into OLD_LP, then we can simply re-use the existing landing pad.
2333 Otherwise, we have to create a new landing pad. */
2334 if (e == NULL)
2335 {
2336 EH_LANDING_PAD_NR (old_lp->post_landing_pad) = 0;
2337 new_lp = old_lp;
2338 }
2339 else
2340 new_lp = gen_eh_landing_pad (old_lp->region);
2341 new_lp->post_landing_pad = new_label;
2342 EH_LANDING_PAD_NR (new_label) = new_lp->index;
2343 }
2344
2345 /* Maybe move the throwing statement to the new region. */
2346 if (old_lp != new_lp)
2347 {
2348 remove_stmt_from_eh_lp (throw_stmt);
2349 add_stmt_to_eh_lp (throw_stmt, new_lp->index);
2350 }
2351 }
2352
2353 /* Redirect EH edge EDGE_IN to NEW_BB. */
2354
2355 edge
2356 redirect_eh_edge (edge edge_in, basic_block new_bb)
2357 {
2358 redirect_eh_edge_1 (edge_in, new_bb, false);
2359 return ssa_redirect_edge (edge_in, new_bb);
2360 }
2361
2362 /* This is a subroutine of gimple_redirect_edge_and_branch. Update the
2363 labels for redirecting a non-fallthru EH_DISPATCH edge E to NEW_BB.
2364 The actual edge update will happen in the caller. */
2365
2366 void
2367 redirect_eh_dispatch_edge (gimple stmt, edge e, basic_block new_bb)
2368 {
2369 tree new_lab = gimple_block_label (new_bb);
2370 bool any_changed = false;
2371 basic_block old_bb;
2372 eh_region r;
2373 eh_catch c;
2374
2375 r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
2376 switch (r->type)
2377 {
2378 case ERT_TRY:
2379 for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
2380 {
2381 old_bb = label_to_block (c->label);
2382 if (old_bb == e->dest)
2383 {
2384 c->label = new_lab;
2385 any_changed = true;
2386 }
2387 }
2388 break;
2389
2390 case ERT_ALLOWED_EXCEPTIONS:
2391 old_bb = label_to_block (r->u.allowed.label);
2392 gcc_assert (old_bb == e->dest);
2393 r->u.allowed.label = new_lab;
2394 any_changed = true;
2395 break;
2396
2397 default:
2398 gcc_unreachable ();
2399 }
2400
2401 gcc_assert (any_changed);
2402 }
2403 \f
2404 /* Helper function for operation_could_trap_p and stmt_could_throw_p. */
2405
2406 bool
2407 operation_could_trap_helper_p (enum tree_code op,
2408 bool fp_operation,
2409 bool honor_trapv,
2410 bool honor_nans,
2411 bool honor_snans,
2412 tree divisor,
2413 bool *handled)
2414 {
2415 *handled = true;
2416 switch (op)
2417 {
2418 case TRUNC_DIV_EXPR:
2419 case CEIL_DIV_EXPR:
2420 case FLOOR_DIV_EXPR:
2421 case ROUND_DIV_EXPR:
2422 case EXACT_DIV_EXPR:
2423 case CEIL_MOD_EXPR:
2424 case FLOOR_MOD_EXPR:
2425 case ROUND_MOD_EXPR:
2426 case TRUNC_MOD_EXPR:
2427 case RDIV_EXPR:
2428 if (honor_snans || honor_trapv)
2429 return true;
2430 if (fp_operation)
2431 return flag_trapping_math;
2432 if (!TREE_CONSTANT (divisor) || integer_zerop (divisor))
2433 return true;
2434 return false;
2435
2436 case LT_EXPR:
2437 case LE_EXPR:
2438 case GT_EXPR:
2439 case GE_EXPR:
2440 case LTGT_EXPR:
2441 /* Some floating point comparisons may trap. */
2442 return honor_nans;
2443
2444 case EQ_EXPR:
2445 case NE_EXPR:
2446 case UNORDERED_EXPR:
2447 case ORDERED_EXPR:
2448 case UNLT_EXPR:
2449 case UNLE_EXPR:
2450 case UNGT_EXPR:
2451 case UNGE_EXPR:
2452 case UNEQ_EXPR:
2453 return honor_snans;
2454
2455 case CONVERT_EXPR:
2456 case FIX_TRUNC_EXPR:
2457 /* Conversion of floating point might trap. */
2458 return honor_nans;
2459
2460 case NEGATE_EXPR:
2461 case ABS_EXPR:
2462 case CONJ_EXPR:
2463 /* These operations don't trap with floating point. */
2464 if (honor_trapv)
2465 return true;
2466 return false;
2467
2468 case PLUS_EXPR:
2469 case MINUS_EXPR:
2470 case MULT_EXPR:
2471 /* Any floating arithmetic may trap. */
2472 if (fp_operation && flag_trapping_math)
2473 return true;
2474 if (honor_trapv)
2475 return true;
2476 return false;
2477
2478 case COMPLEX_EXPR:
2479 case CONSTRUCTOR:
2480 /* Constructing an object cannot trap. */
2481 return false;
2482
2483 default:
2484 /* Any floating arithmetic may trap. */
2485 if (fp_operation && flag_trapping_math)
2486 return true;
2487
2488 *handled = false;
2489 return false;
2490 }
2491 }
2492
2493 /* Return true if operation OP may trap. FP_OPERATION is true if OP is
2494 applied to floating-point values. HONOR_TRAPV is true if OP is applied
2495 to integer type operands that may trap. If OP is a division operator,
2496 DIVISOR contains the value of the divisor. */
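/* A few illustrative cases, assuming default flags (a sketch with
   hypothetical operands, not an exhaustive contract):

     operation_could_trap_p (TRUNC_DIV_EXPR, false, false, y)
       -> true unless Y is a nonzero constant;
     operation_could_trap_p (PLUS_EXPR, true, false, NULL_TREE)
       -> true only when flag_trapping_math is set;
     operation_could_trap_p (PLUS_EXPR, false, false, NULL_TREE)
       -> false; plain integer addition never traps here. */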
2497
2498 bool
2499 operation_could_trap_p (enum tree_code op, bool fp_operation, bool honor_trapv,
2500 tree divisor)
2501 {
2502 bool honor_nans = (fp_operation && flag_trapping_math
2503 && !flag_finite_math_only);
2504 bool honor_snans = fp_operation && flag_signaling_nans != 0;
2505 bool handled;
2506
2507 if (TREE_CODE_CLASS (op) != tcc_comparison
2508 && TREE_CODE_CLASS (op) != tcc_unary
2509 && TREE_CODE_CLASS (op) != tcc_binary)
2510 return false;
2511
2512 return operation_could_trap_helper_p (op, fp_operation, honor_trapv,
2513 honor_nans, honor_snans, divisor,
2514 &handled);
2515 }
2516
2517
2518 /* Returns true if it is possible to prove that the index of
2519 an array access REF (an ARRAY_REF expression) falls into the
2520 array bounds. */
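/* E.g. for "int a[10]; ... a[3]" the constant index 3 lies within
   [0, 9], so we return true; a non-constant index, or bounds that are
   not INTEGER_CSTs, make us answer false conservatively. */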
2521
2522 static bool
2523 in_array_bounds_p (tree ref)
2524 {
2525 tree idx = TREE_OPERAND (ref, 1);
2526 tree min, max;
2527
2528 if (TREE_CODE (idx) != INTEGER_CST)
2529 return false;
2530
2531 min = array_ref_low_bound (ref);
2532 max = array_ref_up_bound (ref);
2533 if (!min
2534 || !max
2535 || TREE_CODE (min) != INTEGER_CST
2536 || TREE_CODE (max) != INTEGER_CST)
2537 return false;
2538
2539 if (tree_int_cst_lt (idx, min)
2540 || tree_int_cst_lt (max, idx))
2541 return false;
2542
2543 return true;
2544 }
2545
2546 /* Returns true if it is possible to prove that the range of
2547 an array access REF (an ARRAY_RANGE_REF expression) falls
2548 into the array bounds. */
2549
2550 static bool
2551 range_in_array_bounds_p (tree ref)
2552 {
2553 tree domain_type = TYPE_DOMAIN (TREE_TYPE (ref));
2554 tree range_min, range_max, min, max;
2555
2556 range_min = TYPE_MIN_VALUE (domain_type);
2557 range_max = TYPE_MAX_VALUE (domain_type);
2558 if (!range_min
2559 || !range_max
2560 || TREE_CODE (range_min) != INTEGER_CST
2561 || TREE_CODE (range_max) != INTEGER_CST)
2562 return false;
2563
2564 min = array_ref_low_bound (ref);
2565 max = array_ref_up_bound (ref);
2566 if (!min
2567 || !max
2568 || TREE_CODE (min) != INTEGER_CST
2569 || TREE_CODE (max) != INTEGER_CST)
2570 return false;
2571
2572 if (tree_int_cst_lt (range_min, min)
2573 || tree_int_cst_lt (max, range_max))
2574 return false;
2575
2576 return true;
2577 }
2578
2579 /* Return true if EXPR can trap, as in dereferencing an invalid pointer
2580 location or floating point arithmetic. Cf. the rtl version, may_trap_p.
2581 This routine expects only GIMPLE lhs or rhs input. */
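/* Some illustrative answers (a sketch; the precise rules follow):

     *p                  true, unless TREE_THIS_NOTRAP is set;
     a[3], a: int[10]    false, provably in bounds;
     x / y               true, for a non-constant or zero divisor;
     weak_fn ()          true, a weak function may be unresolved. */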
2582
2583 bool
2584 tree_could_trap_p (tree expr)
2585 {
2586 enum tree_code code;
2587 bool fp_operation = false;
2588 bool honor_trapv = false;
2589 tree t, base, div = NULL_TREE;
2590
2591 if (!expr)
2592 return false;
2593
2594 code = TREE_CODE (expr);
2595 t = TREE_TYPE (expr);
2596
2597 if (t)
2598 {
2599 if (COMPARISON_CLASS_P (expr))
2600 fp_operation = FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 0)));
2601 else
2602 fp_operation = FLOAT_TYPE_P (t);
2603 honor_trapv = INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t);
2604 }
2605
2606 if (TREE_CODE_CLASS (code) == tcc_binary)
2607 div = TREE_OPERAND (expr, 1);
2608 if (operation_could_trap_p (code, fp_operation, honor_trapv, div))
2609 return true;
2610
2611 restart:
2612 switch (code)
2613 {
2614 case COMPONENT_REF:
2615 case REALPART_EXPR:
2616 case IMAGPART_EXPR:
2617 case BIT_FIELD_REF:
2618 case VIEW_CONVERT_EXPR:
2619 case WITH_SIZE_EXPR:
2620 expr = TREE_OPERAND (expr, 0);
2621 code = TREE_CODE (expr);
2622 goto restart;
2623
2624 case ARRAY_RANGE_REF:
2625 base = TREE_OPERAND (expr, 0);
2626 if (tree_could_trap_p (base))
2627 return true;
2628 if (TREE_THIS_NOTRAP (expr))
2629 return false;
2630 return !range_in_array_bounds_p (expr);
2631
2632 case ARRAY_REF:
2633 base = TREE_OPERAND (expr, 0);
2634 if (tree_could_trap_p (base))
2635 return true;
2636 if (TREE_THIS_NOTRAP (expr))
2637 return false;
2638 return !in_array_bounds_p (expr);
2639
2640 case TARGET_MEM_REF:
2641 case MEM_REF:
2642 if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR
2643 && tree_could_trap_p (TREE_OPERAND (TREE_OPERAND (expr, 0), 0)))
2644 return true;
2645 if (TREE_THIS_NOTRAP (expr))
2646 return false;
2647 /* We cannot prove that the access is in-bounds when we have
2648 variable-index TARGET_MEM_REFs. */
2649 if (code == TARGET_MEM_REF
2650 && (TMR_INDEX (expr) || TMR_INDEX2 (expr)))
2651 return true;
2652 if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR)
2653 {
2654 tree base = TREE_OPERAND (TREE_OPERAND (expr, 0), 0);
2655 offset_int off = mem_ref_offset (expr);
2656 if (wi::neg_p (off, SIGNED))
2657 return true;
2658 if (TREE_CODE (base) == STRING_CST)
2659 return wi::leu_p (TREE_STRING_LENGTH (base), off);
2660 else if (DECL_SIZE_UNIT (base) == NULL_TREE
2661 || TREE_CODE (DECL_SIZE_UNIT (base)) != INTEGER_CST
2662 || wi::leu_p (wi::to_offset (DECL_SIZE_UNIT (base)), off))
2663 return true;
2664 /* Now we are sure the first byte of the access is inside
2665 the object. */
2666 return false;
2667 }
2668 return true;
2669
2670 case INDIRECT_REF:
2671 return !TREE_THIS_NOTRAP (expr);
2672
2673 case ASM_EXPR:
2674 return TREE_THIS_VOLATILE (expr);
2675
2676 case CALL_EXPR:
2677 t = get_callee_fndecl (expr);
2678 /* Assume that calls to weak functions may trap. */
2679 if (!t || !DECL_P (t))
2680 return true;
2681 if (DECL_WEAK (t))
2682 return tree_could_trap_p (t);
2683 return false;
2684
2685 case FUNCTION_DECL:
2686 /* Assume that accesses to weak functions may trap, unless we know
2687 they are certainly defined in the current TU or in some other
2688 LTO partition. */
2689 if (DECL_WEAK (expr) && !DECL_COMDAT (expr))
2690 {
2691 struct cgraph_node *node;
2692 if (!DECL_EXTERNAL (expr))
2693 return false;
2694 node = cgraph_function_node (cgraph_get_node (expr), NULL);
2695 if (node && node->in_other_partition)
2696 return false;
2697 return true;
2698 }
2699 return false;
2700
2701 case VAR_DECL:
2702 /* Assume that accesses to weak vars may trap, unless we know
2703 they are certainly defined in the current TU or in some other
2704 LTO partition. */
2705 if (DECL_WEAK (expr) && !DECL_COMDAT (expr))
2706 {
2707 varpool_node *node;
2708 if (!DECL_EXTERNAL (expr))
2709 return false;
2710 node = varpool_variable_node (varpool_get_node (expr), NULL);
2711 if (node && node->in_other_partition)
2712 return false;
2713 return true;
2714 }
2715 return false;
2716
2717 default:
2718 return false;
2719 }
2720 }
2721
2722
2723 /* Helper for stmt_could_throw_p. Return true if STMT (assumed to be
2724 an assignment or a conditional) may throw. */
2725
2726 static bool
2727 stmt_could_throw_1_p (gimple stmt)
2728 {
2729 enum tree_code code = gimple_expr_code (stmt);
2730 bool honor_nans = false;
2731 bool honor_snans = false;
2732 bool fp_operation = false;
2733 bool honor_trapv = false;
2734 tree t;
2735 size_t i;
2736 bool handled, ret;
2737
2738 if (TREE_CODE_CLASS (code) == tcc_comparison
2739 || TREE_CODE_CLASS (code) == tcc_unary
2740 || TREE_CODE_CLASS (code) == tcc_binary)
2741 {
2742 if (is_gimple_assign (stmt)
2743 && TREE_CODE_CLASS (code) == tcc_comparison)
2744 t = TREE_TYPE (gimple_assign_rhs1 (stmt));
2745 else if (gimple_code (stmt) == GIMPLE_COND)
2746 t = TREE_TYPE (gimple_cond_lhs (stmt));
2747 else
2748 t = gimple_expr_type (stmt);
2749 fp_operation = FLOAT_TYPE_P (t);
2750 if (fp_operation)
2751 {
2752 honor_nans = flag_trapping_math && !flag_finite_math_only;
2753 honor_snans = flag_signaling_nans != 0;
2754 }
2755 else if (INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t))
2756 honor_trapv = true;
2757 }
2758
2759 /* Check if the main expression may trap. */
2760 t = is_gimple_assign (stmt) ? gimple_assign_rhs2 (stmt) : NULL;
2761 ret = operation_could_trap_helper_p (code, fp_operation, honor_trapv,
2762 honor_nans, honor_snans, t,
2763 &handled);
2764 if (handled)
2765 return ret;
2766
2767 /* If the expression does not trap, see if any of the individual operands may
2768 trap. */
2769 for (i = 0; i < gimple_num_ops (stmt); i++)
2770 if (tree_could_trap_p (gimple_op (stmt, i)))
2771 return true;
2772
2773 return false;
2774 }
2775
2776
2777 /* Return true if statement STMT could throw an exception. */
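/* In rough terms: with exceptions disabled nothing throws; a RESX
   always throws; a call throws unless it is marked nothrow; and
   assignments and conditionals can throw only under
   -fnon-call-exceptions, when one of their operands may trap. A
   volatile asm likewise throws only under that flag. */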
2778
2779 bool
2780 stmt_could_throw_p (gimple stmt)
2781 {
2782 if (!flag_exceptions)
2783 return false;
2784
2785 /* The only statements that can throw an exception are assignments,
2786 conditionals, calls, resx, and asms. */
2787 switch (gimple_code (stmt))
2788 {
2789 case GIMPLE_RESX:
2790 return true;
2791
2792 case GIMPLE_CALL:
2793 return !gimple_call_nothrow_p (stmt);
2794
2795 case GIMPLE_ASSIGN:
2796 case GIMPLE_COND:
2797 if (!cfun->can_throw_non_call_exceptions)
2798 return false;
2799 return stmt_could_throw_1_p (stmt);
2800
2801 case GIMPLE_ASM:
2802 if (!cfun->can_throw_non_call_exceptions)
2803 return false;
2804 return gimple_asm_volatile_p (stmt);
2805
2806 default:
2807 return false;
2808 }
2809 }
2810
2811
2812 /* Return true if expression T could throw an exception. */
2813
2814 bool
2815 tree_could_throw_p (tree t)
2816 {
2817 if (!flag_exceptions)
2818 return false;
2819 if (TREE_CODE (t) == MODIFY_EXPR)
2820 {
2821 if (cfun->can_throw_non_call_exceptions
2822 && tree_could_trap_p (TREE_OPERAND (t, 0)))
2823 return true;
2824 t = TREE_OPERAND (t, 1);
2825 }
2826
2827 if (TREE_CODE (t) == WITH_SIZE_EXPR)
2828 t = TREE_OPERAND (t, 0);
2829 if (TREE_CODE (t) == CALL_EXPR)
2830 return (call_expr_flags (t) & ECF_NOTHROW) == 0;
2831 if (cfun->can_throw_non_call_exceptions)
2832 return tree_could_trap_p (t);
2833 return false;
2834 }
2835
2836 /* Return true if STMT can throw an exception that is not caught within
2837 the current function (CFUN). */
2838
2839 bool
2840 stmt_can_throw_external (gimple stmt)
2841 {
2842 int lp_nr;
2843
2844 if (!stmt_could_throw_p (stmt))
2845 return false;
2846
2847 lp_nr = lookup_stmt_eh_lp (stmt);
2848 return lp_nr == 0;
2849 }
2850
2851 /* Return true if STMT can throw an exception that is caught within
2852 the current function (CFUN). */
2853
2854 bool
2855 stmt_can_throw_internal (gimple stmt)
2856 {
2857 int lp_nr;
2858
2859 if (!stmt_could_throw_p (stmt))
2860 return false;
2861
2862 lp_nr = lookup_stmt_eh_lp (stmt);
2863 return lp_nr > 0;
2864 }
2865
2866 /* Given a statement STMT in IFUN, if STMT can no longer throw, then
2867 remove any entry it might have from the EH table. Return true if
2868 any change was made. */
2869
2870 bool
2871 maybe_clean_eh_stmt_fn (struct function *ifun, gimple stmt)
2872 {
2873 if (stmt_could_throw_p (stmt))
2874 return false;
2875 return remove_stmt_from_eh_lp_fn (ifun, stmt);
2876 }
2877
2878 /* Likewise, but always use the current function. */
2879
2880 bool
2881 maybe_clean_eh_stmt (gimple stmt)
2882 {
2883 return maybe_clean_eh_stmt_fn (cfun, stmt);
2884 }
2885
2886 /* Given a statement OLD_STMT and a new statement NEW_STMT that has replaced
2887 OLD_STMT in the function, remove OLD_STMT from the EH table and put NEW_STMT
2888 in the table if it should be in there. Return TRUE if a replacement was
2889 done that may require an EH edge purge. */
2890
2891 bool
2892 maybe_clean_or_replace_eh_stmt (gimple old_stmt, gimple new_stmt)
2893 {
2894 int lp_nr = lookup_stmt_eh_lp (old_stmt);
2895
2896 if (lp_nr != 0)
2897 {
2898 bool new_stmt_could_throw = stmt_could_throw_p (new_stmt);
2899
2900 if (new_stmt == old_stmt && new_stmt_could_throw)
2901 return false;
2902
2903 remove_stmt_from_eh_lp (old_stmt);
2904 if (new_stmt_could_throw)
2905 {
2906 add_stmt_to_eh_lp (new_stmt, lp_nr);
2907 return false;
2908 }
2909 else
2910 return true;
2911 }
2912
2913 return false;
2914 }
2915
2916 /* Given a statement OLD_STMT in OLD_FUN and a duplicate statement NEW_STMT
2917 in NEW_FUN, copy the EH table data from OLD_STMT to NEW_STMT. The MAP
2918 operand is the return value of duplicate_eh_regions. */
2919
2920 bool
2921 maybe_duplicate_eh_stmt_fn (struct function *new_fun, gimple new_stmt,
2922 struct function *old_fun, gimple old_stmt,
2923 struct pointer_map_t *map, int default_lp_nr)
2924 {
2925 int old_lp_nr, new_lp_nr;
2926 void **slot;
2927
2928 if (!stmt_could_throw_p (new_stmt))
2929 return false;
2930
2931 old_lp_nr = lookup_stmt_eh_lp_fn (old_fun, old_stmt);
2932 if (old_lp_nr == 0)
2933 {
2934 if (default_lp_nr == 0)
2935 return false;
2936 new_lp_nr = default_lp_nr;
2937 }
2938 else if (old_lp_nr > 0)
2939 {
2940 eh_landing_pad old_lp, new_lp;
2941
2942 old_lp = (*old_fun->eh->lp_array)[old_lp_nr];
2943 slot = pointer_map_contains (map, old_lp);
2944 new_lp = (eh_landing_pad) *slot;
2945 new_lp_nr = new_lp->index;
2946 }
2947 else
2948 {
2949 eh_region old_r, new_r;
2950
2951 old_r = (*old_fun->eh->region_array)[-old_lp_nr];
2952 slot = pointer_map_contains (map, old_r);
2953 new_r = (eh_region) *slot;
2954 new_lp_nr = -new_r->index;
2955 }
2956
2957 add_stmt_to_eh_lp_fn (new_fun, new_stmt, new_lp_nr);
2958 return true;
2959 }
2960
2961 /* Similar, but both OLD_STMT and NEW_STMT are within the current function,
2962 and thus no remapping is required. */
2963
2964 bool
2965 maybe_duplicate_eh_stmt (gimple new_stmt, gimple old_stmt)
2966 {
2967 int lp_nr;
2968
2969 if (!stmt_could_throw_p (new_stmt))
2970 return false;
2971
2972 lp_nr = lookup_stmt_eh_lp (old_stmt);
2973 if (lp_nr == 0)
2974 return false;
2975
2976 add_stmt_to_eh_lp (new_stmt, lp_nr);
2977 return true;
2978 }
2979 \f
2980 /* Returns TRUE if oneh and twoh are exception handlers (gimple_try_cleanup of
2981 GIMPLE_TRY) that are similar enough to be considered the same. Currently
2982 this only handles handlers consisting of a single call, as that's the
2983 important case for C++: a destructor call for a particular object showing
2984 up in multiple handlers. */
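/* For example, two handlers each consisting of the single call
   "Obj::~Obj (&o);" compare equal, while handlers whose calls have an
   LHS, a static chain, a different callee, or different arguments do
   not. */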
2985
2986 static bool
2987 same_handler_p (gimple_seq oneh, gimple_seq twoh)
2988 {
2989 gimple_stmt_iterator gsi;
2990 gimple ones, twos;
2991 unsigned int ai;
2992
2993 gsi = gsi_start (oneh);
2994 if (!gsi_one_before_end_p (gsi))
2995 return false;
2996 ones = gsi_stmt (gsi);
2997
2998 gsi = gsi_start (twoh);
2999 if (!gsi_one_before_end_p (gsi))
3000 return false;
3001 twos = gsi_stmt (gsi);
3002
3003 if (!is_gimple_call (ones)
3004 || !is_gimple_call (twos)
3005 || gimple_call_lhs (ones)
3006 || gimple_call_lhs (twos)
3007 || gimple_call_chain (ones)
3008 || gimple_call_chain (twos)
3009 || !gimple_call_same_target_p (ones, twos)
3010 || gimple_call_num_args (ones) != gimple_call_num_args (twos))
3011 return false;
3012
3013 for (ai = 0; ai < gimple_call_num_args (ones); ++ai)
3014 if (!operand_equal_p (gimple_call_arg (ones, ai),
3015 gimple_call_arg (twos, ai), 0))
3016 return false;
3017
3018 return true;
3019 }
3020
3021 /* Optimize
3022 try { A() } finally { try { ~B() } catch { ~A() } }
3023 try { ... } finally { ~A() }
3024 into
3025 try { A() } catch { ~B() }
3026 try { ~B() ... } finally { ~A() }
3027
3028 This occurs frequently in C++, where A is a local variable and B is a
3029 temporary used in the initializer for A. */
3030
3031 static void
3032 optimize_double_finally (gimple one, gimple two)
3033 {
3034 gimple oneh;
3035 gimple_stmt_iterator gsi;
3036 gimple_seq cleanup;
3037
3038 cleanup = gimple_try_cleanup (one);
3039 gsi = gsi_start (cleanup);
3040 if (!gsi_one_before_end_p (gsi))
3041 return;
3042
3043 oneh = gsi_stmt (gsi);
3044 if (gimple_code (oneh) != GIMPLE_TRY
3045 || gimple_try_kind (oneh) != GIMPLE_TRY_CATCH)
3046 return;
3047
3048 if (same_handler_p (gimple_try_cleanup (oneh), gimple_try_cleanup (two)))
3049 {
3050 gimple_seq seq = gimple_try_eval (oneh);
3051
3052 gimple_try_set_cleanup (one, seq);
3053 gimple_try_set_kind (one, GIMPLE_TRY_CATCH);
3054 seq = copy_gimple_seq_and_replace_locals (seq);
3055 gimple_seq_add_seq (&seq, gimple_try_eval (two));
3056 gimple_try_set_eval (two, seq);
3057 }
3058 }
3059
3060 /* Perform EH refactoring optimizations that are simpler to do when code
3061 flow has been lowered but EH structures haven't. */
3062
3063 static void
3064 refactor_eh_r (gimple_seq seq)
3065 {
3066 gimple_stmt_iterator gsi;
3067 gimple one, two;
3068
3069 one = NULL;
3070 two = NULL;
3071 gsi = gsi_start (seq);
3072 while (1)
3073 {
3074 one = two;
3075 if (gsi_end_p (gsi))
3076 two = NULL;
3077 else
3078 two = gsi_stmt (gsi);
3079 if (one
3080 && two
3081 && gimple_code (one) == GIMPLE_TRY
3082 && gimple_code (two) == GIMPLE_TRY
3083 && gimple_try_kind (one) == GIMPLE_TRY_FINALLY
3084 && gimple_try_kind (two) == GIMPLE_TRY_FINALLY)
3085 optimize_double_finally (one, two);
3086 if (one)
3087 switch (gimple_code (one))
3088 {
3089 case GIMPLE_TRY:
3090 refactor_eh_r (gimple_try_eval (one));
3091 refactor_eh_r (gimple_try_cleanup (one));
3092 break;
3093 case GIMPLE_CATCH:
3094 refactor_eh_r (gimple_catch_handler (one));
3095 break;
3096 case GIMPLE_EH_FILTER:
3097 refactor_eh_r (gimple_eh_filter_failure (one));
3098 break;
3099 case GIMPLE_EH_ELSE:
3100 refactor_eh_r (gimple_eh_else_n_body (one));
3101 refactor_eh_r (gimple_eh_else_e_body (one));
3102 break;
3103 default:
3104 break;
3105 }
3106 if (two)
3107 gsi_next (&gsi);
3108 else
3109 break;
3110 }
3111 }
3112
3113 namespace {
3114
3115 const pass_data pass_data_refactor_eh =
3116 {
3117 GIMPLE_PASS, /* type */
3118 "ehopt", /* name */
3119 OPTGROUP_NONE, /* optinfo_flags */
3120 true, /* has_execute */
3121 TV_TREE_EH, /* tv_id */
3122 PROP_gimple_lcf, /* properties_required */
3123 0, /* properties_provided */
3124 0, /* properties_destroyed */
3125 0, /* todo_flags_start */
3126 0, /* todo_flags_finish */
3127 };
3128
3129 class pass_refactor_eh : public gimple_opt_pass
3130 {
3131 public:
3132 pass_refactor_eh (gcc::context *ctxt)
3133 : gimple_opt_pass (pass_data_refactor_eh, ctxt)
3134 {}
3135
3136 /* opt_pass methods: */
3137 virtual bool gate (function *) { return flag_exceptions != 0; }
3138 virtual unsigned int execute (function *)
3139 {
3140 refactor_eh_r (gimple_body (current_function_decl));
3141 return 0;
3142 }
3143
3144 }; // class pass_refactor_eh
3145
3146 } // anon namespace
3147
3148 gimple_opt_pass *
3149 make_pass_refactor_eh (gcc::context *ctxt)
3150 {
3151 return new pass_refactor_eh (ctxt);
3152 }
3153 \f
3154 /* At the end of gimple optimization, we can lower RESX. */
3155
3156 static bool
3157 lower_resx (basic_block bb, gimple stmt, struct pointer_map_t *mnt_map)
3158 {
3159 int lp_nr;
3160 eh_region src_r, dst_r;
3161 gimple_stmt_iterator gsi;
3162 gimple x;
3163 tree fn, src_nr;
3164 bool ret = false;
3165
3166 lp_nr = lookup_stmt_eh_lp (stmt);
3167 if (lp_nr != 0)
3168 dst_r = get_eh_region_from_lp_number (lp_nr);
3169 else
3170 dst_r = NULL;
3171
3172 src_r = get_eh_region_from_number (gimple_resx_region (stmt));
3173 gsi = gsi_last_bb (bb);
3174
3175 if (src_r == NULL)
3176 {
3177 /* We can wind up with no source region when pass_cleanup_eh shows
3178 that there are no entries into an eh region and deletes it, but
3179 then the block that contains the resx isn't removed. This can
3180 happen without optimization when the switch statement created by
3181 lower_try_finally_switch isn't simplified to remove the eh case.
3182
3183 Resolve this by expanding the resx node to an abort. */
3184
3185 fn = builtin_decl_implicit (BUILT_IN_TRAP);
3186 x = gimple_build_call (fn, 0);
3187 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3188
3189 while (EDGE_COUNT (bb->succs) > 0)
3190 remove_edge (EDGE_SUCC (bb, 0));
3191 }
3192 else if (dst_r)
3193 {
3194 /* When we have a destination region, we resolve this by copying
3195 the exception pointer and filter values into place, and changing
3196 the edge to immediately after the landing pad. */
3197 edge e;
3198
3199 if (lp_nr < 0)
3200 {
3201 basic_block new_bb;
3202 void **slot;
3203 tree lab;
3204
3205 /* We are resuming into a MUST_NOT_THROW region. Expand a call to
3206 the failure decl into a new block, if needed. */
3207 gcc_assert (dst_r->type == ERT_MUST_NOT_THROW);
3208
3209 slot = pointer_map_contains (mnt_map, dst_r);
3210 if (slot == NULL)
3211 {
3212 gimple_stmt_iterator gsi2;
3213
3214 new_bb = create_empty_bb (bb);
3215 if (current_loops)
3216 add_bb_to_loop (new_bb, bb->loop_father);
3217 lab = gimple_block_label (new_bb);
3218 gsi2 = gsi_start_bb (new_bb);
3219
3220 fn = dst_r->u.must_not_throw.failure_decl;
3221 x = gimple_build_call (fn, 0);
3222 gimple_set_location (x, dst_r->u.must_not_throw.failure_loc);
3223 gsi_insert_after (&gsi2, x, GSI_CONTINUE_LINKING);
3224
3225 slot = pointer_map_insert (mnt_map, dst_r);
3226 *slot = lab;
3227 }
3228 else
3229 {
3230 lab = (tree) *slot;
3231 new_bb = label_to_block (lab);
3232 }
3233
3234 gcc_assert (EDGE_COUNT (bb->succs) == 0);
3235 e = make_edge (bb, new_bb, EDGE_FALLTHRU);
3236 e->count = bb->count;
3237 e->probability = REG_BR_PROB_BASE;
3238 }
3239 else
3240 {
3241 edge_iterator ei;
3242 tree dst_nr = build_int_cst (integer_type_node, dst_r->index);
3243
3244 fn = builtin_decl_implicit (BUILT_IN_EH_COPY_VALUES);
3245 src_nr = build_int_cst (integer_type_node, src_r->index);
3246 x = gimple_build_call (fn, 2, dst_nr, src_nr);
3247 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3248
3249 /* Update the flags for the outgoing edge. */
3250 e = single_succ_edge (bb);
3251 gcc_assert (e->flags & EDGE_EH);
3252 e->flags = (e->flags & ~EDGE_EH) | EDGE_FALLTHRU;
3253
3254 /* If there are no more EH users of the landing pad, delete it. */
3255 FOR_EACH_EDGE (e, ei, e->dest->preds)
3256 if (e->flags & EDGE_EH)
3257 break;
3258 if (e == NULL)
3259 {
3260 eh_landing_pad lp = get_eh_landing_pad_from_number (lp_nr);
3261 remove_eh_landing_pad (lp);
3262 }
3263 }
3264
3265 ret = true;
3266 }
3267 else
3268 {
3269 tree var;
3270
3271 /* When we don't have a destination region, this exception escapes
3272 up the call chain. We resolve this by generating a call to the
3273 _Unwind_Resume library function. */
3274
3275 /* The ARM EABI redefines _Unwind_Resume as __cxa_end_cleanup
3276 with no arguments for C++ and Java. Check for that. */
3277 if (src_r->use_cxa_end_cleanup)
3278 {
3279 fn = builtin_decl_implicit (BUILT_IN_CXA_END_CLEANUP);
3280 x = gimple_build_call (fn, 0);
3281 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3282 }
3283 else
3284 {
3285 fn = builtin_decl_implicit (BUILT_IN_EH_POINTER);
3286 src_nr = build_int_cst (integer_type_node, src_r->index);
3287 x = gimple_build_call (fn, 1, src_nr);
3288 var = create_tmp_var (ptr_type_node, NULL);
3289 var = make_ssa_name (var, x);
3290 gimple_call_set_lhs (x, var);
3291 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3292
3293 fn = builtin_decl_implicit (BUILT_IN_UNWIND_RESUME);
3294 x = gimple_build_call (fn, 1, var);
3295 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3296 }
3297
3298 gcc_assert (EDGE_COUNT (bb->succs) == 0);
3299 }
3300
3301 gsi_remove (&gsi, true);
3302
3303 return ret;
3304 }
3305
3306 namespace {
3307
3308 const pass_data pass_data_lower_resx =
3309 {
3310 GIMPLE_PASS, /* type */
3311 "resx", /* name */
3312 OPTGROUP_NONE, /* optinfo_flags */
3313 true, /* has_execute */
3314 TV_TREE_EH, /* tv_id */
3315 PROP_gimple_lcf, /* properties_required */
3316 0, /* properties_provided */
3317 0, /* properties_destroyed */
3318 0, /* todo_flags_start */
3319 0, /* todo_flags_finish */
3320 };
3321
3322 class pass_lower_resx : public gimple_opt_pass
3323 {
3324 public:
3325 pass_lower_resx (gcc::context *ctxt)
3326 : gimple_opt_pass (pass_data_lower_resx, ctxt)
3327 {}
3328
3329 /* opt_pass methods: */
3330 virtual bool gate (function *) { return flag_exceptions != 0; }
3331 virtual unsigned int execute (function *);
3332
3333 }; // class pass_lower_resx
3334
3335 unsigned
3336 pass_lower_resx::execute (function *fun)
3337 {
3338 basic_block bb;
3339 struct pointer_map_t *mnt_map;
3340 bool dominance_invalidated = false;
3341 bool any_rewritten = false;
3342
3343 mnt_map = pointer_map_create ();
3344
3345 FOR_EACH_BB_FN (bb, fun)
3346 {
3347 gimple last = last_stmt (bb);
3348 if (last && is_gimple_resx (last))
3349 {
3350 dominance_invalidated |= lower_resx (bb, last, mnt_map);
3351 any_rewritten = true;
3352 }
3353 }
3354
3355 pointer_map_destroy (mnt_map);
3356
3357 if (dominance_invalidated)
3358 {
3359 free_dominance_info (CDI_DOMINATORS);
3360 free_dominance_info (CDI_POST_DOMINATORS);
3361 }
3362
3363 return any_rewritten ? TODO_update_ssa_only_virtuals : 0;
3364 }
3365
3366 } // anon namespace
3367
3368 gimple_opt_pass *
3369 make_pass_lower_resx (gcc::context *ctxt)
3370 {
3371 return new pass_lower_resx (ctxt);
3372 }
3373
3374 /* Try to optimize var = {v} {CLOBBER} stmts that are followed only
3375 by an externally-throwing RESX. */
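/* E.g. in a block like

     <L>: x = {v} {CLOBBER}; y = {v} {CLOBBER}; resx <r>;

   where the RESX escapes the function, the clobbers have no possible
   consumer and are simply deleted. */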
3376
3377 static void
3378 optimize_clobbers (basic_block bb)
3379 {
3380 gimple_stmt_iterator gsi = gsi_last_bb (bb);
3381 bool any_clobbers = false;
3382 bool seen_stack_restore = false;
3383 edge_iterator ei;
3384 edge e;
3385
3386 /* Only optimize anything if the bb contains at least one clobber,
3387 ends with resx (checked by caller), optionally contains some
3388 debug stmts or labels, or at most one __builtin_stack_restore
3389 call, and has an incoming EH edge. */
3390 for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3391 {
3392 gimple stmt = gsi_stmt (gsi);
3393 if (is_gimple_debug (stmt))
3394 continue;
3395 if (gimple_clobber_p (stmt))
3396 {
3397 any_clobbers = true;
3398 continue;
3399 }
3400 if (!seen_stack_restore
3401 && gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
3402 {
3403 seen_stack_restore = true;
3404 continue;
3405 }
3406 if (gimple_code (stmt) == GIMPLE_LABEL)
3407 break;
3408 return;
3409 }
3410 if (!any_clobbers)
3411 return;
3412 FOR_EACH_EDGE (e, ei, bb->preds)
3413 if (e->flags & EDGE_EH)
3414 break;
3415 if (e == NULL)
3416 return;
3417 gsi = gsi_last_bb (bb);
3418 for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3419 {
3420 gimple stmt = gsi_stmt (gsi);
3421 if (!gimple_clobber_p (stmt))
3422 continue;
3423 unlink_stmt_vdef (stmt);
3424 gsi_remove (&gsi, true);
3425 release_defs (stmt);
3426 }
3427 }
3428
3429 /* Try to sink var = {v} {CLOBBER} stmts that are followed only by an
3430 internally-throwing RESX into the successor BB. */
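/* E.g. a clobber sitting just before an internally-throwing RESX is
   moved to the start of the single EH successor block, with virtual
   operands rewired across any virtual PHI node found there. */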
3431
3432 static int
3433 sink_clobbers (basic_block bb)
3434 {
3435 edge e;
3436 edge_iterator ei;
3437 gimple_stmt_iterator gsi, dgsi;
3438 basic_block succbb;
3439 bool any_clobbers = false;
3440 unsigned todo = 0;
3441
3442 /* Only optimize if BB has a single EH successor and
3443 all predecessor edges are EH too. */
3444 if (!single_succ_p (bb)
3445 || (single_succ_edge (bb)->flags & EDGE_EH) == 0)
3446 return 0;
3447
3448 FOR_EACH_EDGE (e, ei, bb->preds)
3449 {
3450 if ((e->flags & EDGE_EH) == 0)
3451 return 0;
3452 }
3453
3454 /* And BB contains only CLOBBER stmts before the final
3455 RESX. */
3456 gsi = gsi_last_bb (bb);
3457 for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3458 {
3459 gimple stmt = gsi_stmt (gsi);
3460 if (is_gimple_debug (stmt))
3461 continue;
3462 if (gimple_code (stmt) == GIMPLE_LABEL)
3463 break;
3464 if (!gimple_clobber_p (stmt))
3465 return 0;
3466 any_clobbers = true;
3467 }
3468 if (!any_clobbers)
3469 return 0;
3470
3471 edge succe = single_succ_edge (bb);
3472 succbb = succe->dest;
3473
3474 /* See if there is a virtual PHI node to take an updated virtual
3475 operand from. */
3476 gimple vphi = NULL;
3477 tree vuse = NULL_TREE;
3478 for (gsi = gsi_start_phis (succbb); !gsi_end_p (gsi); gsi_next (&gsi))
3479 {
3480 tree res = gimple_phi_result (gsi_stmt (gsi));
3481 if (virtual_operand_p (res))
3482 {
3483 vphi = gsi_stmt (gsi);
3484 vuse = res;
3485 break;
3486 }
3487 }
3488
3489 dgsi = gsi_after_labels (succbb);
3490 gsi = gsi_last_bb (bb);
3491 for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3492 {
3493 gimple stmt = gsi_stmt (gsi);
3494 tree lhs;
3495 if (is_gimple_debug (stmt))
3496 continue;
3497 if (gimple_code (stmt) == GIMPLE_LABEL)
3498 break;
3499 lhs = gimple_assign_lhs (stmt);
3500 /* Unfortunately we don't have dominance info updated at this
3501 point, so checking if
3502 dominated_by_p (CDI_DOMINATORS, succbb,
3503 gimple_bb (SSA_NAME_DEF_STMT (TREE_OPERAND (lhs, 0))))
3504 would be too costly. Thus, avoid sinking any clobbers that
3505 refer to non-(D) SSA_NAMEs. */
3506 if (TREE_CODE (lhs) == MEM_REF
3507 && TREE_CODE (TREE_OPERAND (lhs, 0)) == SSA_NAME
3508 && !SSA_NAME_IS_DEFAULT_DEF (TREE_OPERAND (lhs, 0)))
3509 {
3510 unlink_stmt_vdef (stmt);
3511 gsi_remove (&gsi, true);
3512 release_defs (stmt);
3513 continue;
3514 }
3515
3516 /* As we do not change stmt order when sinking across a
3517 forwarder edge we can keep virtual operands in place. */
3518 gsi_remove (&gsi, false);
3519 gsi_insert_before (&dgsi, stmt, GSI_NEW_STMT);
3520
3521 /* But adjust virtual operands if we sunk across a PHI node. */
3522 if (vuse)
3523 {
3524 gimple use_stmt;
3525 imm_use_iterator iter;
3526 use_operand_p use_p;
3527 FOR_EACH_IMM_USE_STMT (use_stmt, iter, vuse)
3528 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
3529 SET_USE (use_p, gimple_vdef (stmt));
3530 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (vuse))
3531 {
3532 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_vdef (stmt)) = 1;
3533 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (vuse) = 0;
3534 }
3535 /* Adjust the incoming virtual operand. */
3536 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (vphi, succe), gimple_vuse (stmt));
3537 SET_USE (gimple_vuse_op (stmt), vuse);
3538 }
3539 /* If the successor has multiple predecessors but no virtual PHI
3540 node, arrange for virtual operands to be renamed. */
3541 else if (gimple_vuse_op (stmt) != NULL_USE_OPERAND_P
3542 && !single_pred_p (succbb))
3543 {
3544 /* In this case there will be no use of the VDEF of this stmt.
3545 ??? Unless this is a secondary opportunity and we have not
3546 removed unreachable blocks yet, so we cannot assert this.
3547 Which also means we will end up renaming too many times. */
3548 SET_USE (gimple_vuse_op (stmt), gimple_vop (cfun));
3549 mark_virtual_operands_for_renaming (cfun);
3550 todo |= TODO_update_ssa_only_virtuals;
3551 }
3552 }
3553
3554 return todo;
3555 }
3556
3557 /* At the end of inlining, we can lower EH_DISPATCH. Return true when
3558 we have found some duplicate labels and removed some edges. */
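/* As a sketch, a dispatch for a try region becomes

     filter = __builtin_eh_filter (<region_nr>);
     switch (filter) { case <f1>: goto <L1>; ... default: goto <Ldef>; }

   while a dispatch for an allowed-exceptions region becomes a
   conditional comparing FILTER against the region's filter value. */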
3559
3560 static bool
3561 lower_eh_dispatch (basic_block src, gimple stmt)
3562 {
3563 gimple_stmt_iterator gsi;
3564 int region_nr;
3565 eh_region r;
3566 tree filter, fn;
3567 gimple x;
3568 bool redirected = false;
3569
3570 region_nr = gimple_eh_dispatch_region (stmt);
3571 r = get_eh_region_from_number (region_nr);
3572
3573 gsi = gsi_last_bb (src);
3574
3575 switch (r->type)
3576 {
3577 case ERT_TRY:
3578 {
3579 auto_vec<tree> labels;
3580 tree default_label = NULL;
3581 eh_catch c;
3582 edge_iterator ei;
3583 edge e;
3584 struct pointer_set_t *seen_values = pointer_set_create ();
3585
3586 /* Collect the labels for a switch. Clear each handler's label
3587 field because we'll no longer have anything keeping these labels
3588 in existence and the optimizer will be free to merge these
3589 blocks at will. */
3590 for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
3591 {
3592 tree tp_node, flt_node, lab = c->label;
3593 bool have_label = false;
3594
3595 c->label = NULL;
3596 tp_node = c->type_list;
3597 flt_node = c->filter_list;
3598
3599 if (tp_node == NULL)
3600 {
3601 default_label = lab;
3602 break;
3603 }
3604 do
3605 {
3606 /* Filter out duplicate labels that arise when this handler
3607 is shadowed by an earlier one. When no labels are
3608 attached to the handler anymore, we remove
3609 the corresponding edge and then we delete unreachable
3610 blocks at the end of this pass. */
3611 if (! pointer_set_contains (seen_values, TREE_VALUE (flt_node)))
3612 {
3613 tree t = build_case_label (TREE_VALUE (flt_node),
3614 NULL, lab);
3615 labels.safe_push (t);
3616 pointer_set_insert (seen_values, TREE_VALUE (flt_node));
3617 have_label = true;
3618 }
3619
3620 tp_node = TREE_CHAIN (tp_node);
3621 flt_node = TREE_CHAIN (flt_node);
3622 }
3623 while (tp_node);
3624 if (! have_label)
3625 {
3626 remove_edge (find_edge (src, label_to_block (lab)));
3627 redirected = true;
3628 }
3629 }
3630
3631 /* Clean up the edge flags. */
3632 FOR_EACH_EDGE (e, ei, src->succs)
3633 {
3634 if (e->flags & EDGE_FALLTHRU)
3635 {
3636 /* If there was no catch-all, use the fallthru edge. */
3637 if (default_label == NULL)
3638 default_label = gimple_block_label (e->dest);
3639 e->flags &= ~EDGE_FALLTHRU;
3640 }
3641 }
3642 gcc_assert (default_label != NULL);
3643
3644 /* Don't generate a switch if there's only a default case.
3645 This is common in the form of try { A; } catch (...) { B; }. */
3646 if (!labels.exists ())
3647 {
3648 e = single_succ_edge (src);
3649 e->flags |= EDGE_FALLTHRU;
3650 }
3651 else
3652 {
3653 fn = builtin_decl_implicit (BUILT_IN_EH_FILTER);
3654 x = gimple_build_call (fn, 1, build_int_cst (integer_type_node,
3655 region_nr));
3656 filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)), NULL);
3657 filter = make_ssa_name (filter, x);
3658 gimple_call_set_lhs (x, filter);
3659 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3660
3661 /* Turn the default label into a default case. */
3662 default_label = build_case_label (NULL, NULL, default_label);
3663 sort_case_labels (labels);
3664
3665 x = gimple_build_switch (filter, default_label, labels);
3666 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3667 }
3668 pointer_set_destroy (seen_values);
3669 }
3670 break;
3671
3672 case ERT_ALLOWED_EXCEPTIONS:
3673 {
3674 edge b_e = BRANCH_EDGE (src);
3675 edge f_e = FALLTHRU_EDGE (src);
3676
3677 fn = builtin_decl_implicit (BUILT_IN_EH_FILTER);
3678 x = gimple_build_call (fn, 1, build_int_cst (integer_type_node,
3679 region_nr));
3680 filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)), NULL);
3681 filter = make_ssa_name (filter, x);
3682 gimple_call_set_lhs (x, filter);
3683 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3684
3685 r->u.allowed.label = NULL;
3686 x = gimple_build_cond (EQ_EXPR, filter,
3687 build_int_cst (TREE_TYPE (filter),
3688 r->u.allowed.filter),
3689 NULL_TREE, NULL_TREE);
3690 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3691
3692 b_e->flags = b_e->flags | EDGE_TRUE_VALUE;
3693 f_e->flags = (f_e->flags & ~EDGE_FALLTHRU) | EDGE_FALSE_VALUE;
3694 }
3695 break;
3696
3697 default:
3698 gcc_unreachable ();
3699 }
3700
3701 /* Replace the EH_DISPATCH with the SWITCH or COND generated above. */
3702 gsi_remove (&gsi, true);
3703 return redirected;
3704 }
3705
3706 namespace {
3707
3708 const pass_data pass_data_lower_eh_dispatch =
3709 {
3710 GIMPLE_PASS, /* type */
3711 "ehdisp", /* name */
3712 OPTGROUP_NONE, /* optinfo_flags */
3713 true, /* has_execute */
3714 TV_TREE_EH, /* tv_id */
3715 PROP_gimple_lcf, /* properties_required */
3716 0, /* properties_provided */
3717 0, /* properties_destroyed */
3718 0, /* todo_flags_start */
3719 0, /* todo_flags_finish */
3720 };
3721
3722 class pass_lower_eh_dispatch : public gimple_opt_pass
3723 {
3724 public:
3725 pass_lower_eh_dispatch (gcc::context *ctxt)
3726 : gimple_opt_pass (pass_data_lower_eh_dispatch, ctxt)
3727 {}
3728
3729 /* opt_pass methods: */
3730 virtual bool gate (function *fun) { return fun->eh->region_tree != NULL; }
3731 virtual unsigned int execute (function *);
3732
3733 }; // class pass_lower_eh_dispatch
3734
3735 unsigned
3736 pass_lower_eh_dispatch::execute (function *fun)
3737 {
3738 basic_block bb;
3739 int flags = 0;
3740 bool redirected = false;
3741
3742 assign_filter_values ();
3743
3744 FOR_EACH_BB_FN (bb, fun)
3745 {
3746 gimple last = last_stmt (bb);
3747 if (last == NULL)
3748 continue;
3749 if (gimple_code (last) == GIMPLE_EH_DISPATCH)
3750 {
3751 redirected |= lower_eh_dispatch (bb, last);
3752 flags |= TODO_update_ssa_only_virtuals;
3753 }
3754 else if (gimple_code (last) == GIMPLE_RESX)
3755 {
3756 if (stmt_can_throw_external (last))
3757 optimize_clobbers (bb);
3758 else
3759 flags |= sink_clobbers (bb);
3760 }
3761 }
3762
3763 if (redirected)
3764 delete_unreachable_blocks ();
3765 return flags;
3766 }
3767
3768 } // anon namespace
3769
3770 gimple_opt_pass *
3771 make_pass_lower_eh_dispatch (gcc::context *ctxt)
3772 {
3773 return new pass_lower_eh_dispatch (ctxt);
3774 }
3775 \f
3776 /* Walk statements, see what regions and, optionally, landing pads
3777 are really referenced.
3778
3779 Returns in R_REACHABLEP an sbitmap with bits set for reachable regions,
3780 and in LP_REACHABLEP an sbitmap with bits set for reachable landing pads.
3781
3782 Passing NULL for LP_REACHABLEP is valid; in this case only reachable
3783 regions are marked.
3784
3785 The caller is responsible for freeing the returned sbitmaps. */
3786
3787 static void
3788 mark_reachable_handlers (sbitmap *r_reachablep, sbitmap *lp_reachablep)
3789 {
3790 sbitmap r_reachable, lp_reachable;
3791 basic_block bb;
3792 bool mark_landing_pads = (lp_reachablep != NULL);
3793 gcc_checking_assert (r_reachablep != NULL);
3794
3795 r_reachable = sbitmap_alloc (cfun->eh->region_array->length ());
3796 bitmap_clear (r_reachable);
3797 *r_reachablep = r_reachable;
3798
3799 if (mark_landing_pads)
3800 {
3801 lp_reachable = sbitmap_alloc (cfun->eh->lp_array->length ());
3802 bitmap_clear (lp_reachable);
3803 *lp_reachablep = lp_reachable;
3804 }
3805 else
3806 lp_reachable = NULL;
3807
3808 FOR_EACH_BB_FN (bb, cfun)
3809 {
3810 gimple_stmt_iterator gsi;
3811
3812 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3813 {
3814 gimple stmt = gsi_stmt (gsi);
3815
3816 if (mark_landing_pads)
3817 {
3818 int lp_nr = lookup_stmt_eh_lp (stmt);
3819
3820 /* Negative LP numbers are MUST_NOT_THROW regions which
3821 are not considered BB enders. */
3822 if (lp_nr < 0)
3823 bitmap_set_bit (r_reachable, -lp_nr);
3824
3825 /* Positive LP numbers are real landing pads, and BB enders. */
3826 else if (lp_nr > 0)
3827 {
3828 gcc_assert (gsi_one_before_end_p (gsi));
3829 eh_region region = get_eh_region_from_lp_number (lp_nr);
3830 bitmap_set_bit (r_reachable, region->index);
3831 bitmap_set_bit (lp_reachable, lp_nr);
3832 }
3833 }
3834
3835 /* Avoid removing regions referenced from RESX/EH_DISPATCH. */
3836 switch (gimple_code (stmt))
3837 {
3838 case GIMPLE_RESX:
3839 bitmap_set_bit (r_reachable, gimple_resx_region (stmt));
3840 break;
3841 case GIMPLE_EH_DISPATCH:
3842 bitmap_set_bit (r_reachable, gimple_eh_dispatch_region (stmt));
3843 break;
3844 default:
3845 break;
3846 }
3847 }
3848 }
3849 }
3850
3851 /* Remove unreachable handlers and unreachable landing pads. */
3852
3853 static void
3854 remove_unreachable_handlers (void)
3855 {
3856 sbitmap r_reachable, lp_reachable;
3857 eh_region region;
3858 eh_landing_pad lp;
3859 unsigned i;
3860
3861 mark_reachable_handlers (&r_reachable, &lp_reachable);
3862
3863 if (dump_file)
3864 {
3865 fprintf (dump_file, "Before removal of unreachable regions:\n");
3866 dump_eh_tree (dump_file, cfun);
3867 fprintf (dump_file, "Reachable regions: ");
3868 dump_bitmap_file (dump_file, r_reachable);
3869 fprintf (dump_file, "Reachable landing pads: ");
3870 dump_bitmap_file (dump_file, lp_reachable);
3871 }
3872
3873 if (dump_file)
3874 {
3875 FOR_EACH_VEC_SAFE_ELT (cfun->eh->region_array, i, region)
3876 if (region && !bitmap_bit_p (r_reachable, region->index))
3877 fprintf (dump_file,
3878 "Removing unreachable region %d\n",
3879 region->index);
3880 }
3881
3882 remove_unreachable_eh_regions (r_reachable);
3883
3884 FOR_EACH_VEC_SAFE_ELT (cfun->eh->lp_array, i, lp)
3885 if (lp && !bitmap_bit_p (lp_reachable, lp->index))
3886 {
3887 if (dump_file)
3888 fprintf (dump_file,
3889 "Removing unreachable landing pad %d\n",
3890 lp->index);
3891 remove_eh_landing_pad (lp);
3892 }
3893
3894 if (dump_file)
3895 {
3896 fprintf (dump_file, "\n\nAfter removal of unreachable regions:\n");
3897 dump_eh_tree (dump_file, cfun);
3898 fprintf (dump_file, "\n\n");
3899 }
3900
3901 sbitmap_free (r_reachable);
3902 sbitmap_free (lp_reachable);
3903
3904 #ifdef ENABLE_CHECKING
3905 verify_eh_tree (cfun);
3906 #endif
3907 }
3908
3909 /* Remove unreachable handlers if any landing pads have been removed after
3910 the last ehcleanup pass (due to gimple_purge_dead_eh_edges). */
3911
3912 void
3913 maybe_remove_unreachable_handlers (void)
3914 {
3915 eh_landing_pad lp;
3916 unsigned i;
3917
3918 if (cfun->eh == NULL)
3919 return;
3920
3921 FOR_EACH_VEC_SAFE_ELT (cfun->eh->lp_array, i, lp)
3922 if (lp && lp->post_landing_pad)
3923 {
3924 if (label_to_block (lp->post_landing_pad) == NULL)
3925 {
3926 remove_unreachable_handlers ();
3927 return;
3928 }
3929 }
3930 }
3931
3932 /* Remove regions that do not have landing pads. This assumes
3933 that remove_unreachable_handlers has already been run, and
3934 that we've just manipulated the landing pads since then.
3935
3936 Preserve regions with landing pads and regions that prevent
3937 exceptions from propagating further, even if these regions
3938 are not reachable. */
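/* As a hedged illustration (hypothetical C++, not from the testsuite)
   of why MUST_NOT_THROW regions matter even without landing pads:

     struct S { ~S (); };
     void f ();
     void g () { S s; f (); }

   If an exception from f () unwinds through the cleanup for S, the
   destructor call is wrapped in a MUST_NOT_THROW region so that a
   throw escaping ~S () calls terminate () instead of propagating;
   removing that region would lose this guarantee.  */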
3939
3940 static void
3941 remove_unreachable_handlers_no_lp (void)
3942 {
3943 eh_region region;
3944 sbitmap r_reachable;
3945 unsigned i;
3946
3947 mark_reachable_handlers (&r_reachable, /*lp_reachablep=*/NULL);
3948
3949 FOR_EACH_VEC_SAFE_ELT (cfun->eh->region_array, i, region)
3950 {
3951 if (! region)
3952 continue;
3953
3954 if (region->landing_pads != NULL
3955 || region->type == ERT_MUST_NOT_THROW)
3956 bitmap_set_bit (r_reachable, region->index);
3957
3958 if (dump_file
3959 && !bitmap_bit_p (r_reachable, region->index))
3960 fprintf (dump_file,
3961 "Removing unreachable region %d\n",
3962 region->index);
3963 }
3964
3965 remove_unreachable_eh_regions (r_reachable);
3966
3967 sbitmap_free (r_reachable);
3968 }
3969
3970 /* Undo critical edge splitting on an EH landing pad. Earlier, we
3971 optimistically split all sorts of edges, including EH edges. The
3972 optimization passes in between may not have needed them; if not,
3973 we should undo the split.
3974
3975 Recognize this case by having one EH edge incoming to the BB and
3976 one normal edge outgoing; BB should be empty apart from the
3977 post_landing_pad label.
3978
3979 Note that this is slightly different from the empty handler case
3980 handled by cleanup_empty_eh, in that the handler itself may still
3981 contain real code, but the landing pad has been separated from it.
3982 As such, cleanup_empty_eh relies on this transformation having been
3983 done first. */
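/* Schematically (a hypothetical CFG fragment):

     <throwing stmt>
           | EH edge
     BB:   post_landing_pad label      ;; otherwise empty
           | normal edge
     <actual handler code>

   The EH edge is redirected directly to the handler block, after which
   the split block BB becomes unreachable.  */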
3984
3985 static bool
3986 unsplit_eh (eh_landing_pad lp)
3987 {
3988 basic_block bb = label_to_block (lp->post_landing_pad);
3989 gimple_stmt_iterator gsi;
3990 edge e_in, e_out;
3991
3992 /* Quickly check the edge counts on BB for singularity. */
3993 if (!single_pred_p (bb) || !single_succ_p (bb))
3994 return false;
3995 e_in = single_pred_edge (bb);
3996 e_out = single_succ_edge (bb);
3997
3998 /* Input edge must be EH and output edge must be normal. */
3999 if ((e_in->flags & EDGE_EH) == 0 || (e_out->flags & EDGE_EH) != 0)
4000 return false;
4001
4002 /* The block must be empty except for the labels and debug insns. */
4003 gsi = gsi_after_labels (bb);
4004 if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
4005 gsi_next_nondebug (&gsi);
4006 if (!gsi_end_p (gsi))
4007 return false;
4008
4009 /* The destination block must not already have a landing pad
4010 for a different region. */
4011 for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
4012 {
4013 gimple stmt = gsi_stmt (gsi);
4014 tree lab;
4015 int lp_nr;
4016
4017 if (gimple_code (stmt) != GIMPLE_LABEL)
4018 break;
4019 lab = gimple_label_label (stmt);
4020 lp_nr = EH_LANDING_PAD_NR (lab);
4021 if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region)
4022 return false;
4023 }
4024
4025 /* The new destination block must not already be a destination of
4026 the source block, lest we merge fallthru and EH edges and cause
4027 all sorts of confusion. */
4028 if (find_edge (e_in->src, e_out->dest))
4029 return false;
4030
4031 /* ??? We can get degenerate phis due to cfg cleanups. I would have
4032 thought this should have been cleaned up by a phicprop pass, but
4033 that doesn't appear to handle virtuals. Propagate by hand. */
4034 if (!gimple_seq_empty_p (phi_nodes (bb)))
4035 {
4036 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); )
4037 {
4038 gimple use_stmt, phi = gsi_stmt (gsi);
4039 tree lhs = gimple_phi_result (phi);
4040 tree rhs = gimple_phi_arg_def (phi, 0);
4041 use_operand_p use_p;
4042 imm_use_iterator iter;
4043
4044 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
4045 {
4046 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
4047 SET_USE (use_p, rhs);
4048 }
4049
4050 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
4051 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1;
4052
4053 remove_phi_node (&gsi, true);
4054 }
4055 }
4056
4057 if (dump_file && (dump_flags & TDF_DETAILS))
4058 fprintf (dump_file, "Unsplit EH landing pad %d to block %i.\n",
4059 lp->index, e_out->dest->index);
4060
4061 /* Redirect the edge. Since redirect_eh_edge_1 expects to be moving
4062 a successor edge, humor it. But do the real CFG change with the
4063 predecessor of E_OUT in order to preserve the ordering of arguments
4064 to the PHI nodes in E_OUT->DEST. */
4065 redirect_eh_edge_1 (e_in, e_out->dest, false);
4066 redirect_edge_pred (e_out, e_in->src);
4067 e_out->flags = e_in->flags;
4068 e_out->probability = e_in->probability;
4069 e_out->count = e_in->count;
4070 remove_edge (e_in);
4071
4072 return true;
4073 }
4074
4075 /* Examine each landing pad block and see if it matches unsplit_eh. */
4076
4077 static bool
4078 unsplit_all_eh (void)
4079 {
4080 bool changed = false;
4081 eh_landing_pad lp;
4082 int i;
4083
4084 for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
4085 if (lp)
4086 changed |= unsplit_eh (lp);
4087
4088 return changed;
4089 }
4090
4091 /* A subroutine of cleanup_empty_eh. Redirect all EH edges incoming
4092 to OLD_BB to NEW_BB; return true on success, false on failure.
4093
4094 OLD_BB_OUT is the edge into NEW_BB from OLD_BB, so if we miss any
4095 PHI variables from OLD_BB we can pick them up from OLD_BB_OUT.
4096 Virtual PHIs may be deleted and marked for renaming. */
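/* Schematically, with hypothetical SSA names:

     OLD_BB:  x_1 = PHI <x_0 (EH pred E)>
     NEW_BB:  y_2 = PHI <..., x_1 (OLD_BB_OUT)>

   After E is redirected to NEW_BB, it supplies x_0 directly as the
   argument of y_2; the mapping is recorded with
   redirect_edge_var_map_add and applied by flush_pending_stmts.  */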
4097
4098 static bool
4099 cleanup_empty_eh_merge_phis (basic_block new_bb, basic_block old_bb,
4100 edge old_bb_out, bool change_region)
4101 {
4102 gimple_stmt_iterator ngsi, ogsi;
4103 edge_iterator ei;
4104 edge e;
4105 bitmap ophi_handled;
4106
4107 /* The destination block must not be a regular successor for any
4108 of the preds of the landing pad. Thus, avoid turning
4109 <..>
4110 | \ EH
4111 | <..>
4112 | /
4113 <..>
4114 into
4115 <..>
4116 | | EH
4117 <..>
4118 which CFG verification would choke on. See PR45172 and PR51089. */
4119 FOR_EACH_EDGE (e, ei, old_bb->preds)
4120 if (find_edge (e->src, new_bb))
4121 return false;
4122
4123 FOR_EACH_EDGE (e, ei, old_bb->preds)
4124 redirect_edge_var_map_clear (e);
4125
4126 ophi_handled = BITMAP_ALLOC (NULL);
4127
4128 /* First, iterate through the PHIs on NEW_BB and set up the edge_var_map
4129 for the edges we're going to move. */
4130 for (ngsi = gsi_start_phis (new_bb); !gsi_end_p (ngsi); gsi_next (&ngsi))
4131 {
4132 gimple ophi, nphi = gsi_stmt (ngsi);
4133 tree nresult, nop;
4134
4135 nresult = gimple_phi_result (nphi);
4136 nop = gimple_phi_arg_def (nphi, old_bb_out->dest_idx);
4137
4138 /* Find the corresponding PHI in OLD_BB so we can forward-propagate
4139 the source ssa_name. */
4140 ophi = NULL;
4141 for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi))
4142 {
4143 ophi = gsi_stmt (ogsi);
4144 if (gimple_phi_result (ophi) == nop)
4145 break;
4146 ophi = NULL;
4147 }
4148
4149 /* If we did find the corresponding PHI, copy those inputs. */
4150 if (ophi)
4151 {
4152 /* If NOP is used somewhere else beyond phis in new_bb, give up. */
4153 if (!has_single_use (nop))
4154 {
4155 imm_use_iterator imm_iter;
4156 use_operand_p use_p;
4157
4158 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, nop)
4159 {
4160 if (!gimple_debug_bind_p (USE_STMT (use_p))
4161 && (gimple_code (USE_STMT (use_p)) != GIMPLE_PHI
4162 || gimple_bb (USE_STMT (use_p)) != new_bb))
4163 goto fail;
4164 }
4165 }
4166 bitmap_set_bit (ophi_handled, SSA_NAME_VERSION (nop));
4167 FOR_EACH_EDGE (e, ei, old_bb->preds)
4168 {
4169 location_t oloc;
4170 tree oop;
4171
4172 if ((e->flags & EDGE_EH) == 0)
4173 continue;
4174 oop = gimple_phi_arg_def (ophi, e->dest_idx);
4175 oloc = gimple_phi_arg_location (ophi, e->dest_idx);
4176 redirect_edge_var_map_add (e, nresult, oop, oloc);
4177 }
4178 }
4179 /* If we didn't find the PHI, then whether it's a real variable or
4180 a VOP, we know from the fact that OLD_BB is tree_empty_eh_handler_p
4181 that the variable is unchanged from input to the block, and we can
4182 simply re-use the input to NEW_BB from the OLD_BB_OUT edge. */
4183 else
4184 {
4185 location_t nloc
4186 = gimple_phi_arg_location (nphi, old_bb_out->dest_idx);
4187 FOR_EACH_EDGE (e, ei, old_bb->preds)
4188 redirect_edge_var_map_add (e, nresult, nop, nloc);
4189 }
4190 }
4191
4192 /* Second, verify that all PHIs from OLD_BB have been handled. If not,
4193 we don't know what values from the other edges into NEW_BB to use. */
4194 for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi))
4195 {
4196 gimple ophi = gsi_stmt (ogsi);
4197 tree oresult = gimple_phi_result (ophi);
4198 if (!bitmap_bit_p (ophi_handled, SSA_NAME_VERSION (oresult)))
4199 goto fail;
4200 }
4201
4202 /* Finally, move the edges and update the PHIs. */
4203 for (ei = ei_start (old_bb->preds); (e = ei_safe_edge (ei)); )
4204 if (e->flags & EDGE_EH)
4205 {
4206 /* ??? CFG manipulation routines do not try to update loop
4207 form on edge redirection. Do so manually here for now. */
4208 /* Redirecting a loop entry or latch edge will either create a
4209 multiple-entry loop or rotate the loop. If the loops merge,
4210 we may have created a loop with multiple latches.
4211 None of this is easily fixed, so cancel the affected loop
4212 and mark the other loop as possibly having multiple latches. */
4213 if (current_loops
4214 && e->dest == e->dest->loop_father->header)
4215 {
4216 e->dest->loop_father->header = NULL;
4217 e->dest->loop_father->latch = NULL;
4218 new_bb->loop_father->latch = NULL;
4219 loops_state_set (LOOPS_NEED_FIXUP|LOOPS_MAY_HAVE_MULTIPLE_LATCHES);
4220 }
4221 redirect_eh_edge_1 (e, new_bb, change_region);
4222 redirect_edge_succ (e, new_bb);
4223 flush_pending_stmts (e);
4224 }
4225 else
4226 ei_next (&ei);
4227
4228 BITMAP_FREE (ophi_handled);
4229 return true;
4230
4231 fail:
4232 FOR_EACH_EDGE (e, ei, old_bb->preds)
4233 redirect_edge_var_map_clear (e);
4234 BITMAP_FREE (ophi_handled);
4235 return false;
4236 }
4237
4238 /* A subroutine of cleanup_empty_eh. Move a landing pad LP from its
4239 old region to NEW_REGION at BB, and turn E_OUT into a fallthru edge. */
4240
4241 static void
4242 cleanup_empty_eh_move_lp (basic_block bb, edge e_out,
4243 eh_landing_pad lp, eh_region new_region)
4244 {
4245 gimple_stmt_iterator gsi;
4246 eh_landing_pad *pp;
4247
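  /* Unlink LP from the singly-linked list of landing pads of its old
     region, walking with a pointer-to-pointer.  */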
4248 for (pp = &lp->region->landing_pads; *pp != lp; pp = &(*pp)->next_lp)
4249 continue;
4250 *pp = lp->next_lp;
4251
4252 lp->region = new_region;
4253 lp->next_lp = new_region->landing_pads;
4254 new_region->landing_pads = lp;
4255
4256 /* Delete the RESX that was matched within the empty handler block. */
4257 gsi = gsi_last_bb (bb);
4258 unlink_stmt_vdef (gsi_stmt (gsi));
4259 gsi_remove (&gsi, true);
4260
4261 /* Clean up E_OUT for the fallthru. */
4262 e_out->flags = (e_out->flags & ~EDGE_EH) | EDGE_FALLTHRU;
4263 e_out->probability = REG_BR_PROB_BASE;
4264 }
4265
4266 /* A subroutine of cleanup_empty_eh. Handle more complex cases of
4267 unsplitting than unsplit_eh was prepared to handle, e.g. when
4268 multiple incoming edges and phis are involved. */
4269
4270 static bool
4271 cleanup_empty_eh_unsplit (basic_block bb, edge e_out, eh_landing_pad lp)
4272 {
4273 gimple_stmt_iterator gsi;
4274 tree lab;
4275
4276 /* We really ought not have totally lost everything following
4277 a landing pad label. Given that BB is empty, there had better
4278 be a successor. */
4279 gcc_assert (e_out != NULL);
4280
4281 /* The destination block must not already have a landing pad
4282 for a different region. */
4283 lab = NULL;
4284 for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
4285 {
4286 gimple stmt = gsi_stmt (gsi);
4287 int lp_nr;
4288
4289 if (gimple_code (stmt) != GIMPLE_LABEL)
4290 break;
4291 lab = gimple_label_label (stmt);
4292 lp_nr = EH_LANDING_PAD_NR (lab);
4293 if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region)
4294 return false;
4295 }
4296
4297 /* Attempt to move the PHIs into the successor block. */
4298 if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, false))
4299 {
4300 if (dump_file && (dump_flags & TDF_DETAILS))
4301 fprintf (dump_file,
4302 "Unsplit EH landing pad %d to block %i "
4303 "(via cleanup_empty_eh).\n",
4304 lp->index, e_out->dest->index);
4305 return true;
4306 }
4307
4308 return false;
4309 }
4310
4311 /* Return true if edge E_FIRST is part of an empty infinite loop
4312 or leads to such a loop through a series of single successor
4313 empty bbs. */
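/* E.g., given a hypothetical cycle of empty single-successor blocks

     bb3 -> bb4 -> bb5 -> bb3

   this returns true when starting from any of the three edges.  */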
4314
4315 static bool
4316 infinite_empty_loop_p (edge e_first)
4317 {
4318 bool inf_loop = false;
4319 edge e;
4320
4321 if (e_first->dest == e_first->src)
4322 return true;
4323
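  /* Use bb->aux as a visited marker while following the chain of empty
     single-successor blocks; all marks are cleared again below.  */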
4324 e_first->src->aux = (void *) 1;
4325 for (e = e_first; single_succ_p (e->dest); e = single_succ_edge (e->dest))
4326 {
4327 gimple_stmt_iterator gsi;
4328 if (e->dest->aux)
4329 {
4330 inf_loop = true;
4331 break;
4332 }
4333 e->dest->aux = (void *) 1;
4334 gsi = gsi_after_labels (e->dest);
4335 if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
4336 gsi_next_nondebug (&gsi);
4337 if (!gsi_end_p (gsi))
4338 break;
4339 }
4340 e_first->src->aux = NULL;
4341 for (e = e_first; e->dest->aux; e = single_succ_edge (e->dest))
4342 e->dest->aux = NULL;
4343
4344 return inf_loop;
4345 }
4346
4347 /* Examine the block associated with LP to determine if it's an empty
4348 handler for its EH region. If so, attempt to redirect EH edges to
4349 an outer region. Return true if the CFG was updated in any way. This
4350 is similar to jump forwarding, just across EH edges. */
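/* As an illustration (hypothetical C++; the names are invented):

     struct S { ~S () { } };
     void f ();
     void g () { S s; f (); }

   Once the empty destructor has been inlined away, the handler of the
   cleanup region for S consists of nothing but the post-landing-pad
   label and a RESX, which is exactly the shape handled here.  */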
4351
4352 static bool
4353 cleanup_empty_eh (eh_landing_pad lp)
4354 {
4355 basic_block bb = label_to_block (lp->post_landing_pad);
4356 gimple_stmt_iterator gsi;
4357 gimple resx;
4358 eh_region new_region;
4359 edge_iterator ei;
4360 edge e, e_out;
4361 bool has_non_eh_pred;
4362 bool ret = false;
4363 int new_lp_nr;
4364
4365 /* There can be zero or one edges out of BB. This is the quickest test. */
4366 switch (EDGE_COUNT (bb->succs))
4367 {
4368 case 0:
4369 e_out = NULL;
4370 break;
4371 case 1:
4372 e_out = single_succ_edge (bb);
4373 break;
4374 default:
4375 return false;
4376 }
4377
4378 resx = last_stmt (bb);
4379 if (resx && is_gimple_resx (resx))
4380 {
4381 if (stmt_can_throw_external (resx))
4382 optimize_clobbers (bb);
4383 else if (sink_clobbers (bb))
4384 ret = true;
4385 }
4386
4387 gsi = gsi_after_labels (bb);
4388
4389 /* Make sure to skip debug statements. */
4390 if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
4391 gsi_next_nondebug (&gsi);
4392
4393 /* If the block is totally empty, look for more unsplitting cases. */
4394 if (gsi_end_p (gsi))
4395 {
4396 /* For the degenerate case of an infinite loop, bail out.
4397 Also bail out if BB has no successors and is totally empty, which
4398 can happen e.g. because of an incorrect noreturn attribute. */
4399 if (e_out == NULL
4400 || infinite_empty_loop_p (e_out))
4401 return ret;
4402
4403 return ret | cleanup_empty_eh_unsplit (bb, e_out, lp);
4404 }
4405
4406 /* The block should consist only of a single RESX statement, modulo a
4407 preceding call to __builtin_stack_restore if there is no outgoing
4408 edge, since the call can be eliminated in this case. */
4409 resx = gsi_stmt (gsi);
4410 if (!e_out && gimple_call_builtin_p (resx, BUILT_IN_STACK_RESTORE))
4411 {
4412 gsi_next (&gsi);
4413 resx = gsi_stmt (gsi);
4414 }
4415 if (!is_gimple_resx (resx))
4416 return ret;
4417 gcc_assert (gsi_one_before_end_p (gsi));
4418
4419 /* Determine if there are non-EH edges, or resx edges into the handler. */
4420 has_non_eh_pred = false;
4421 FOR_EACH_EDGE (e, ei, bb->preds)
4422 if (!(e->flags & EDGE_EH))
4423 has_non_eh_pred = true;
4424
4425 /* Find the handler that's outer of the empty handler by looking at
4426 where the RESX instruction was vectored. */
4427 new_lp_nr = lookup_stmt_eh_lp (resx);
4428 new_region = get_eh_region_from_lp_number (new_lp_nr);
4429
4430 /* If there's no destination region within the current function,
4431 redirection is trivial via removing the throwing statements from
4432 the EH region, removing the EH edges, and allowing the block
4433 to go unreachable. */
4434 if (new_region == NULL)
4435 {
4436 gcc_assert (e_out == NULL);
4437 for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
4438 if (e->flags & EDGE_EH)
4439 {
4440 gimple stmt = last_stmt (e->src);
4441 remove_stmt_from_eh_lp (stmt);
4442 remove_edge (e);
4443 }
4444 else
4445 ei_next (&ei);
4446 goto succeed;
4447 }
4448
4449 /* If the destination region is a MUST_NOT_THROW, allow the runtime
4450 to handle the abort and let the blocks go unreachable. */
4451 if (new_region->type == ERT_MUST_NOT_THROW)
4452 {
4453 for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
4454 if (e->flags & EDGE_EH)
4455 {
4456 gimple stmt = last_stmt (e->src);
4457 remove_stmt_from_eh_lp (stmt);
4458 add_stmt_to_eh_lp (stmt, new_lp_nr);
4459 remove_edge (e);
4460 }
4461 else
4462 ei_next (&ei);
4463 goto succeed;
4464 }
4465
4466 /* Try to redirect the EH edges and merge the PHIs into the destination
4467 landing pad block. If the merge succeeds, we'll already have redirected
4468 all the EH edges. The handler itself will go unreachable if there were
4469 no normal edges. */
4470 if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, true))
4471 goto succeed;
4472
4473 /* Finally, if all input edges are EH edges, then we can (potentially)
4474 reduce the number of transfers from the runtime by moving the landing
4475 pad from the original region to the new region. This is a win when
4476 we remove the last CLEANUP region along a particular exception
4477 propagation path. Since nothing changes except for the region with
4478 which the landing pad is associated, the PHI nodes do not need to be
4479 adjusted at all. */
4480 if (!has_non_eh_pred)
4481 {
4482 cleanup_empty_eh_move_lp (bb, e_out, lp, new_region);
4483 if (dump_file && (dump_flags & TDF_DETAILS))
4484 fprintf (dump_file, "Empty EH handler %i moved to EH region %i.\n",
4485 lp->index, new_region->index);
4486
4487 /* ??? The CFG didn't change, but we may have rendered the
4488 old EH region unreachable. Trigger a cleanup there. */
4489 return true;
4490 }
4491
4492 return ret;
4493
4494 succeed:
4495 if (dump_file && (dump_flags & TDF_DETAILS))
4496 fprintf (dump_file, "Empty EH handler %i removed.\n", lp->index);
4497 remove_eh_landing_pad (lp);
4498 return true;
4499 }
4500
4501 /* Do a post-order traversal of the EH region tree. Examine each
4502 post_landing_pad block and see if we can eliminate it as empty. */
4503
4504 static bool
4505 cleanup_all_empty_eh (void)
4506 {
4507 bool changed = false;
4508 eh_landing_pad lp;
4509 int i;
4510
4511 for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
4512 if (lp)
4513 changed |= cleanup_empty_eh (lp);
4514
4515 return changed;
4516 }
4517
4518 /* Perform cleanups and lowering of exception handling:
4519 1) Cleanup regions whose handlers do nothing are optimized out.
4520 2) MUST_NOT_THROW regions that became dead because of 1) are optimized out.
4521 3) Info about regions that contain instructions, and regions
4522 reachable via local EH edges, is collected.
4523 4) The EH tree is pruned of regions that are no longer necessary.
4524
4525 TODO: Push MUST_NOT_THROW regions to the root of the EH tree.
4526 Unify those that have the same failure decl and locus.
4527 */
4528
4529 static unsigned int
4530 execute_cleanup_eh_1 (void)
4531 {
4532 /* Do this first: unsplit_all_eh and cleanup_all_empty_eh can die
4533 looking up unreachable landing pads. */
4534 remove_unreachable_handlers ();
4535
4536 /* Watch out for the region tree vanishing because everything in it
was unreachable. */
4537 if (cfun->eh->region_tree)
4538 {
4539 bool changed = false;
4540
4541 if (optimize)
4542 changed |= unsplit_all_eh ();
4543 changed |= cleanup_all_empty_eh ();
4544
4545 if (changed)
4546 {
4547 free_dominance_info (CDI_DOMINATORS);
4548 free_dominance_info (CDI_POST_DOMINATORS);
4549
4550 /* We delayed all basic block deletion, as we may have performed
4551 cleanups on EH edges while non-EH edges were still present. */
4552 delete_unreachable_blocks ();
4553
4554 /* We manipulated the landing pads. Remove any region that no
4555 longer has a landing pad. */
4556 remove_unreachable_handlers_no_lp ();
4557
4558 return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
4559 }
4560 }
4561
4562 return 0;
4563 }
4564
4565 namespace {
4566
4567 const pass_data pass_data_cleanup_eh =
4568 {
4569 GIMPLE_PASS, /* type */
4570 "ehcleanup", /* name */
4571 OPTGROUP_NONE, /* optinfo_flags */
4572 true, /* has_execute */
4573 TV_TREE_EH, /* tv_id */
4574 PROP_gimple_lcf, /* properties_required */
4575 0, /* properties_provided */
4576 0, /* properties_destroyed */
4577 0, /* todo_flags_start */
4578 0, /* todo_flags_finish */
4579 };
4580
4581 class pass_cleanup_eh : public gimple_opt_pass
4582 {
4583 public:
4584 pass_cleanup_eh (gcc::context *ctxt)
4585 : gimple_opt_pass (pass_data_cleanup_eh, ctxt)
4586 {}
4587
4588 /* opt_pass methods: */
4589 opt_pass * clone () { return new pass_cleanup_eh (m_ctxt); }
4590 virtual bool gate (function *fun)
4591 {
4592 return fun->eh != NULL && fun->eh->region_tree != NULL;
4593 }
4594
4595 virtual unsigned int execute (function *);
4596
4597 }; // class pass_cleanup_eh
4598
4599 unsigned int
4600 pass_cleanup_eh::execute (function *fun)
4601 {
4602 int ret = execute_cleanup_eh_1 ();
4603
4604 /* If the function no longer needs an EH personality routine,
4605 clear it. This exposes cross-language inlining opportunities
4606 and avoids references to a never-defined personality routine. */
4607 if (DECL_FUNCTION_PERSONALITY (current_function_decl)
4608 && function_needs_eh_personality (fun) != eh_personality_lang)
4609 DECL_FUNCTION_PERSONALITY (current_function_decl) = NULL_TREE;
4610
4611 return ret;
4612 }
4613
4614 } // anon namespace
4615
4616 gimple_opt_pass *
4617 make_pass_cleanup_eh (gcc::context *ctxt)
4618 {
4619 return new pass_cleanup_eh (ctxt);
4620 }
4621 \f
4622 /* Verify that the BB containing STMT as its last statement has
4623 precisely the edge that make_eh_edges would create. */
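/* Being a DEBUG_FUNCTION, this is also handy to invoke by hand from
   the debugger (an assumed gdb session):

     (gdb) call verify_eh_edges (last_stmt (bb))

   It prints an error and returns true if the edges are wrong.  */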
4624
4625 DEBUG_FUNCTION bool
4626 verify_eh_edges (gimple stmt)
4627 {
4628 basic_block bb = gimple_bb (stmt);
4629 eh_landing_pad lp = NULL;
4630 int lp_nr;
4631 edge_iterator ei;
4632 edge e, eh_edge;
4633
4634 lp_nr = lookup_stmt_eh_lp (stmt);
4635 if (lp_nr > 0)
4636 lp = get_eh_landing_pad_from_number (lp_nr);
4637
4638 eh_edge = NULL;
4639 FOR_EACH_EDGE (e, ei, bb->succs)
4640 {
4641 if (e->flags & EDGE_EH)
4642 {
4643 if (eh_edge)
4644 {
4645 error ("BB %i has multiple EH edges", bb->index);
4646 return true;
4647 }
4648 else
4649 eh_edge = e;
4650 }
4651 }
4652
4653 if (lp == NULL)
4654 {
4655 if (eh_edge)
4656 {
4657 error ("BB %i can not throw but has an EH edge", bb->index);
4658 return true;
4659 }
4660 return false;
4661 }
4662
4663 if (!stmt_could_throw_p (stmt))
4664 {
4665 error ("BB %i last statement has incorrectly set lp", bb->index);
4666 return true;
4667 }
4668
4669 if (eh_edge == NULL)
4670 {
4671 error ("BB %i is missing an EH edge", bb->index);
4672 return true;
4673 }
4674
4675 if (eh_edge->dest != label_to_block (lp->post_landing_pad))
4676 {
4677 error ("Incorrect EH edge %i->%i", bb->index, eh_edge->dest->index);
4678 return true;
4679 }
4680
4681 return false;
4682 }
4683
4684 /* Similarly, but handle GIMPLE_EH_DISPATCH specifically. */
4685
4686 DEBUG_FUNCTION bool
4687 verify_eh_dispatch_edge (gimple stmt)
4688 {
4689 eh_region r;
4690 eh_catch c;
4691 basic_block src, dst;
4692 bool want_fallthru = true;
4693 edge_iterator ei;
4694 edge e, fall_edge;
4695
4696 r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
4697 src = gimple_bb (stmt);
4698
4699 FOR_EACH_EDGE (e, ei, src->succs)
4700 gcc_assert (e->aux == NULL);
4701
4702 switch (r->type)
4703 {
4704 case ERT_TRY:
4705 for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
4706 {
4707 dst = label_to_block (c->label);
4708 e = find_edge (src, dst);
4709 if (e == NULL)
4710 {
4711 error ("BB %i is missing an edge", src->index);
4712 return true;
4713 }
4714 e->aux = (void *)e;
4715
4716 /* A catch-all handler doesn't have a fallthru. */
4717 if (c->type_list == NULL)
4718 {
4719 want_fallthru = false;
4720 break;
4721 }
4722 }
4723 break;
4724
4725 case ERT_ALLOWED_EXCEPTIONS:
4726 dst = label_to_block (r->u.allowed.label);
4727 e = find_edge (src, dst);
4728 if (e == NULL)
4729 {
4730 error ("BB %i is missing an edge", src->index);
4731 return true;
4732 }
4733 e->aux = (void *)e;
4734 break;
4735
4736 default:
4737 gcc_unreachable ();
4738 }
4739
4740 fall_edge = NULL;
4741 FOR_EACH_EDGE (e, ei, src->succs)
4742 {
4743 if (e->flags & EDGE_FALLTHRU)
4744 {
4745 if (fall_edge != NULL)
4746 {
4747 error ("BB %i too many fallthru edges", src->index);
4748 return true;
4749 }
4750 fall_edge = e;
4751 }
4752 else if (e->aux)
4753 e->aux = NULL;
4754 else
4755 {
4756 error ("BB %i has incorrect edge", src->index);
4757 return true;
4758 }
4759 }
4760 if ((fall_edge != NULL) ^ want_fallthru)
4761 {
4762 error ("BB %i has incorrect fallthru edge", src->index);
4763 return true;
4764 }
4765
4766 return false;
4767 }