1 /* Exception handling semantics and decomposition for trees.
2 Copyright (C) 2003-2015 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
9 any later version.
10
11 GCC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "tm.h"
24 #include "alias.h"
25 #include "symtab.h"
26 #include "tree.h"
27 #include "fold-const.h"
28 #include "hard-reg-set.h"
29 #include "function.h"
30 #include "rtl.h"
31 #include "flags.h"
32 #include "insn-config.h"
33 #include "expmed.h"
34 #include "dojump.h"
35 #include "explow.h"
36 #include "calls.h"
37 #include "emit-rtl.h"
38 #include "varasm.h"
39 #include "stmt.h"
40 #include "expr.h"
41 #include "except.h"
42 #include "predict.h"
43 #include "dominance.h"
44 #include "cfg.h"
45 #include "cfganal.h"
46 #include "cfgcleanup.h"
47 #include "basic-block.h"
48 #include "tree-ssa-alias.h"
49 #include "internal-fn.h"
50 #include "tree-eh.h"
51 #include "gimple-expr.h"
52 #include "gimple.h"
53 #include "gimple-iterator.h"
54 #include "gimple-ssa.h"
55 #include "plugin-api.h"
56 #include "ipa-ref.h"
57 #include "cgraph.h"
58 #include "tree-cfg.h"
59 #include "tree-phinodes.h"
60 #include "ssa-iterators.h"
61 #include "stringpool.h"
62 #include "tree-ssanames.h"
63 #include "tree-into-ssa.h"
64 #include "tree-ssa.h"
65 #include "tree-inline.h"
66 #include "tree-pass.h"
67 #include "langhooks.h"
68 #include "diagnostic-core.h"
69 #include "target.h"
70 #include "cfgloop.h"
71 #include "gimple-low.h"
72
73 /* In some instances a tree and a gimple need to be stored in the same table,
74 e.g. in hash tables.  This union makes that possible. */
75 typedef union {tree *tp; tree t; gimple g;} treemple;
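
/* Illustrative note (editorial, not part of the original sources):
   callers simply assign whichever member matches the object at hand,
   for example

     treemple key;
     key.t = some_label_decl;   - key on a LABEL_DECL (a tree)
     key.g = some_try_stmt;     - key on a GIMPLE_TRY statement

   where some_label_decl/some_try_stmt are hypothetical names.  The
   finally_tree_hasher below hashes and compares only the raw pointer
   value, so both kinds of key can share one table.  */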
76
77 /* Misc functions used in this file. */
78
79 /* Remember and lookup EH landing pad data for arbitrary statements.
80 Really this means any statement that could_throw_p. We could
81 stuff this information into the stmt_ann data structure, but:
82
83 (1) We absolutely rely on this information being kept until
84 we get to rtl. Once we're done with lowering here, if we lose
85 the information there's no way to recover it!
86
87 (2) There are many more statements that *cannot* throw as
88 compared to those that can. We should be saving some amount
89 of space by only allocating memory for those that can throw. */
90
91 /* Add statement T in function IFUN to landing pad NUM. */
92
93 static void
94 add_stmt_to_eh_lp_fn (struct function *ifun, gimple t, int num)
95 {
96 gcc_assert (num != 0);
97
98 if (!get_eh_throw_stmt_table (ifun))
99 set_eh_throw_stmt_table (ifun, hash_map<gimple, int>::create_ggc (31));
100
101 gcc_assert (!get_eh_throw_stmt_table (ifun)->put (t, num));
102 }
103
104 /* Add statement T in the current function (cfun) to EH landing pad NUM. */
105
106 void
107 add_stmt_to_eh_lp (gimple t, int num)
108 {
109 add_stmt_to_eh_lp_fn (cfun, t, num);
110 }
111
112 /* Add statement T to the single EH landing pad in REGION. */
113
114 static void
115 record_stmt_eh_region (eh_region region, gimple t)
116 {
117 if (region == NULL)
118 return;
119 if (region->type == ERT_MUST_NOT_THROW)
120 add_stmt_to_eh_lp_fn (cfun, t, -region->index);
121 else
122 {
123 eh_landing_pad lp = region->landing_pads;
124 if (lp == NULL)
125 lp = gen_eh_landing_pad (region);
126 else
127 gcc_assert (lp->next_lp == NULL);
128 add_stmt_to_eh_lp_fn (cfun, t, lp->index);
129 }
130 }
131
132
133 /* Remove statement T in function IFUN from its EH landing pad. */
134
135 bool
136 remove_stmt_from_eh_lp_fn (struct function *ifun, gimple t)
137 {
138 if (!get_eh_throw_stmt_table (ifun))
139 return false;
140
141 if (!get_eh_throw_stmt_table (ifun)->get (t))
142 return false;
143
144 get_eh_throw_stmt_table (ifun)->remove (t);
145 return true;
146 }
147
148
149 /* Remove statement T in the current function (cfun) from its
150 EH landing pad. */
151
152 bool
153 remove_stmt_from_eh_lp (gimple t)
154 {
155 return remove_stmt_from_eh_lp_fn (cfun, t);
156 }
157
158 /* Determine if statement T is inside an EH region in function IFUN.
159 Positive numbers indicate a landing pad index; negative numbers
160 indicate a MUST_NOT_THROW region index; zero indicates that the
161 statement is not recorded in the region table. */
162
163 int
164 lookup_stmt_eh_lp_fn (struct function *ifun, gimple t)
165 {
166 if (ifun->eh->throw_stmt_table == NULL)
167 return 0;
168
169 int *lp_nr = ifun->eh->throw_stmt_table->get (t);
170 return lp_nr ? *lp_nr : 0;
171 }
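
/* Editorial example (assumed usage, not from the original file): a
   caller typically distinguishes the three documented cases like so:

     int lp_nr = lookup_stmt_eh_lp (stmt);
     if (lp_nr > 0)
       ... statement may throw, landing pad index is lp_nr ...
     else if (lp_nr < 0)
       ... statement sits in MUST_NOT_THROW region -lp_nr ...
     else
       ... statement is not in the table and cannot throw ...  */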
172
173 /* Likewise, but always use the current function. */
174
175 int
176 lookup_stmt_eh_lp (gimple t)
177 {
178 /* We can get called from initialized data when -fnon-call-exceptions
179 is on; prevent crash. */
180 if (!cfun)
181 return 0;
182 return lookup_stmt_eh_lp_fn (cfun, t);
183 }
184
185 /* First pass of EH node decomposition. Build up a tree of GIMPLE_TRY_FINALLY
186 nodes and LABEL_DECL nodes. We will use this during the second phase to
187 determine if a goto leaves the body of a TRY_FINALLY_EXPR node. */
188
189 struct finally_tree_node
190 {
191 /* When storing a GIMPLE_TRY, we have to record a gimple. However
192 when deciding whether a GOTO to a certain LABEL_DECL (which is a
193 tree) leaves the TRY block, it's necessary to record a tree in
194 this field. Thus a treemple is used. */
195 treemple child;
196 gtry *parent;
197 };
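
/* Editorial illustration (not from the original sources): given nested
   try/finally constructs such as

     try {                     (outer GIMPLE_TRY T1)
       try {                   (inner GIMPLE_TRY T2)
         L1: ...
         goto L2;
       } finally { ... }
     } finally { ... }
     L2: ...

   collect_finally_tree records the edges L1 -> T2 and T2 -> T1, so that
   outside_finally_tree can later walk the chain of parents and see that
   the goto to L2 leaves both TRY_FINALLY nodes, whereas a goto to L1
   issued from inside T2 does not leave either of them.  */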
198
199 /* Hashtable helpers. */
200
201 struct finally_tree_hasher : typed_free_remove <finally_tree_node>
202 {
203 typedef finally_tree_node *value_type;
204 typedef finally_tree_node *compare_type;
205 static inline hashval_t hash (const finally_tree_node *);
206 static inline bool equal (const finally_tree_node *,
207 const finally_tree_node *);
208 };
209
210 inline hashval_t
211 finally_tree_hasher::hash (const finally_tree_node *v)
212 {
213 return (intptr_t)v->child.t >> 4;
214 }
215
216 inline bool
217 finally_tree_hasher::equal (const finally_tree_node *v,
218 const finally_tree_node *c)
219 {
220 return v->child.t == c->child.t;
221 }
222
223 /* Note that this table is *not* marked GTY. It is short-lived. */
224 static hash_table<finally_tree_hasher> *finally_tree;
225
226 static void
227 record_in_finally_tree (treemple child, gtry *parent)
228 {
229 struct finally_tree_node *n;
230 finally_tree_node **slot;
231
232 n = XNEW (struct finally_tree_node);
233 n->child = child;
234 n->parent = parent;
235
236 slot = finally_tree->find_slot (n, INSERT);
237 gcc_assert (!*slot);
238 *slot = n;
239 }
240
241 static void
242 collect_finally_tree (gimple stmt, gtry *region);
243
244 /* Go through the gimple sequence. Works with collect_finally_tree to
245 record all GIMPLE_LABEL and GIMPLE_TRY statements. */
246
247 static void
248 collect_finally_tree_1 (gimple_seq seq, gtry *region)
249 {
250 gimple_stmt_iterator gsi;
251
252 for (gsi = gsi_start (seq); !gsi_end_p (gsi); gsi_next (&gsi))
253 collect_finally_tree (gsi_stmt (gsi), region);
254 }
255
256 static void
257 collect_finally_tree (gimple stmt, gtry *region)
258 {
259 treemple temp;
260
261 switch (gimple_code (stmt))
262 {
263 case GIMPLE_LABEL:
264 temp.t = gimple_label_label (as_a <glabel *> (stmt));
265 record_in_finally_tree (temp, region);
266 break;
267
268 case GIMPLE_TRY:
269 if (gimple_try_kind (stmt) == GIMPLE_TRY_FINALLY)
270 {
271 temp.g = stmt;
272 record_in_finally_tree (temp, region);
273 collect_finally_tree_1 (gimple_try_eval (stmt),
274 as_a <gtry *> (stmt));
275 collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
276 }
277 else if (gimple_try_kind (stmt) == GIMPLE_TRY_CATCH)
278 {
279 collect_finally_tree_1 (gimple_try_eval (stmt), region);
280 collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
281 }
282 break;
283
284 case GIMPLE_CATCH:
285 collect_finally_tree_1 (gimple_catch_handler (
286 as_a <gcatch *> (stmt)),
287 region);
288 break;
289
290 case GIMPLE_EH_FILTER:
291 collect_finally_tree_1 (gimple_eh_filter_failure (stmt), region);
292 break;
293
294 case GIMPLE_EH_ELSE:
295 {
296 geh_else *eh_else_stmt = as_a <geh_else *> (stmt);
297 collect_finally_tree_1 (gimple_eh_else_n_body (eh_else_stmt), region);
298 collect_finally_tree_1 (gimple_eh_else_e_body (eh_else_stmt), region);
299 }
300 break;
301
302 default:
303 /* A type, a decl, or some kind of statement that we're not
304 interested in. Don't walk them. */
305 break;
306 }
307 }
308
309
310 /* Use the finally tree to determine if a jump from START to TARGET
311 would leave the try_finally node that START lives in. */
312
313 static bool
314 outside_finally_tree (treemple start, gimple target)
315 {
316 struct finally_tree_node n, *p;
317
318 do
319 {
320 n.child = start;
321 p = finally_tree->find (&n);
322 if (!p)
323 return true;
324 start.g = p->parent;
325 }
326 while (start.g != target);
327
328 return false;
329 }
330
331 /* Second pass of EH node decomposition. Actually transform the GIMPLE_TRY
332 nodes into a set of gotos, magic labels, and eh regions.
333 The eh region creation is straightforward, but frobbing all the gotos
334 and such into shape isn't. */
335
336 /* The sequence into which we record all EH stuff. This will be
337 placed at the end of the function when we're all done. */
338 static gimple_seq eh_seq;
339
340 /* Record whether an EH region contains something that can throw,
341 indexed by EH region number. */
342 static bitmap eh_region_may_contain_throw_map;
343
344 /* The GOTO_QUEUE is an array of GIMPLE_GOTO and GIMPLE_RETURN
345 statements that are seen to escape this GIMPLE_TRY_FINALLY node.
346 The idea is to record a gimple statement for everything except for
347 the conditionals, which get their labels recorded. Since labels are
348 of type 'tree', we need this node to store both gimple and tree
349 objects. REPL_STMT is the sequence used to replace the goto/return
350 statement. CONT_STMT is used to store the statement that allows
351 the return/goto to jump to the original destination. */
352
353 struct goto_queue_node
354 {
355 treemple stmt;
356 location_t location;
357 gimple_seq repl_stmt;
358 gimple cont_stmt;
359 int index;
360 /* This is used when index >= 0 to indicate that stmt is a label (as
361 opposed to a goto stmt). */
362 int is_label;
363 };
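
/* Editorial example (assumed, for illustration only): for something like

     try {
       if (cond) goto out;
       return;
     } finally {
       cleanup ();
     }
     out:;

   the queue gets one node for the goto (INDEX is the position of its
   destination label in DEST_ARRAY) and one for the return (INDEX is -1).
   REPL_STMT is later filled with a branch to the finally code, possibly
   preceded by an assignment to a dispatch temporary, and CONT_STMT holds
   the original goto/return so it can be re-emitted after the finally
   code has run.  */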
364
365 /* State of the world while lowering. */
366
367 struct leh_state
368 {
369 /* What's "current" while constructing the eh region tree. These
370 correspond to variables of the same name in cfun->eh, which we
371 don't have easy access to. */
372 eh_region cur_region;
373
374 /* What's "current" for the purposes of __builtin_eh_pointer. For
375 a CATCH, this is the associated TRY. For an EH_FILTER, this is
376 the associated ALLOWED_EXCEPTIONS, etc. */
377 eh_region ehp_region;
378
379 /* Processing of TRY_FINALLY requires a bit more state. This is
380 split out into a separate structure so that we don't have to
381 copy so much when processing other nodes. */
382 struct leh_tf_state *tf;
383 };
384
385 struct leh_tf_state
386 {
387 /* Pointer to the GIMPLE_TRY_FINALLY node under discussion. The
388 try_finally_expr is the original GIMPLE_TRY_FINALLY. We need to retain
389 this so that outside_finally_tree can reliably reference the tree used
390 in the collect_finally_tree data structures. */
391 gtry *try_finally_expr;
392 gtry *top_p;
393
394 /* While lowering, a top_p is usually expanded into multiple statements,
395 so we need the following field to store them. */
396 gimple_seq top_p_seq;
397
398 /* The state outside this try_finally node. */
399 struct leh_state *outer;
400
401 /* The exception region created for it. */
402 eh_region region;
403
404 /* The goto queue. */
405 struct goto_queue_node *goto_queue;
406 size_t goto_queue_size;
407 size_t goto_queue_active;
408
409 /* Pointer map to help in searching goto_queue when it is large. */
410 hash_map<gimple, goto_queue_node *> *goto_queue_map;
411
412 /* The set of unique labels seen as entries in the goto queue. */
413 vec<tree> dest_array;
414
415 /* A label to be added at the end of the completed transformed
416 sequence. It will be set if may_fallthru was true *at one time*,
417 though subsequent transformations may have cleared that flag. */
418 tree fallthru_label;
419
420 /* True if it is possible to fall out the bottom of the try block.
421 Cleared if the fallthru is converted to a goto. */
422 bool may_fallthru;
423
424 /* True if any entry in goto_queue is a GIMPLE_RETURN. */
425 bool may_return;
426
427 /* True if the finally block can receive an exception edge.
428 Cleared if the exception case is handled by code duplication. */
429 bool may_throw;
430 };
431
432 static gimple_seq lower_eh_must_not_throw (struct leh_state *, gtry *);
433
434 /* Search for STMT in the goto queue. Return the replacement,
435 or null if the statement isn't in the queue. */
436
437 #define LARGE_GOTO_QUEUE 20
438
439 static void lower_eh_constructs_1 (struct leh_state *state, gimple_seq *seq);
440
441 static gimple_seq
442 find_goto_replacement (struct leh_tf_state *tf, treemple stmt)
443 {
444 unsigned int i;
445
446 if (tf->goto_queue_active < LARGE_GOTO_QUEUE)
447 {
448 for (i = 0; i < tf->goto_queue_active; i++)
449 if ( tf->goto_queue[i].stmt.g == stmt.g)
450 return tf->goto_queue[i].repl_stmt;
451 return NULL;
452 }
453
454 /* If we have a large number of entries in the goto_queue, create a
455 pointer map and use that for searching. */
456
457 if (!tf->goto_queue_map)
458 {
459 tf->goto_queue_map = new hash_map<gimple, goto_queue_node *>;
460 for (i = 0; i < tf->goto_queue_active; i++)
461 {
462 bool existed = tf->goto_queue_map->put (tf->goto_queue[i].stmt.g,
463 &tf->goto_queue[i]);
464 gcc_assert (!existed);
465 }
466 }
467
468 goto_queue_node **slot = tf->goto_queue_map->get (stmt.g);
469 if (slot != NULL)
470 return ((*slot)->repl_stmt);
471
472 return NULL;
473 }
474
475 /* A subroutine of replace_goto_queue_1. Handles the sub-clauses of a
476 lowered GIMPLE_COND. If, by chance, the replacement is a simple goto,
477 then we can just splat it in, otherwise we add the new stmts immediately
478 after the GIMPLE_COND and redirect. */
479
480 static void
481 replace_goto_queue_cond_clause (tree *tp, struct leh_tf_state *tf,
482 gimple_stmt_iterator *gsi)
483 {
484 tree label;
485 gimple_seq new_seq;
486 treemple temp;
487 location_t loc = gimple_location (gsi_stmt (*gsi));
488
489 temp.tp = tp;
490 new_seq = find_goto_replacement (tf, temp);
491 if (!new_seq)
492 return;
493
494 if (gimple_seq_singleton_p (new_seq)
495 && gimple_code (gimple_seq_first_stmt (new_seq)) == GIMPLE_GOTO)
496 {
497 *tp = gimple_goto_dest (gimple_seq_first_stmt (new_seq));
498 return;
499 }
500
501 label = create_artificial_label (loc);
502 /* Set the new label for the GIMPLE_COND.  */
503 *tp = label;
504
505 gsi_insert_after (gsi, gimple_build_label (label), GSI_CONTINUE_LINKING);
506 gsi_insert_seq_after (gsi, gimple_seq_copy (new_seq), GSI_CONTINUE_LINKING);
507 }
508
509 /* The real work of replace_goto_queue. Returns with TSI updated to
510 point to the next statement. */
511
512 static void replace_goto_queue_stmt_list (gimple_seq *, struct leh_tf_state *);
513
514 static void
515 replace_goto_queue_1 (gimple stmt, struct leh_tf_state *tf,
516 gimple_stmt_iterator *gsi)
517 {
518 gimple_seq seq;
519 treemple temp;
520 temp.g = NULL;
521
522 switch (gimple_code (stmt))
523 {
524 case GIMPLE_GOTO:
525 case GIMPLE_RETURN:
526 temp.g = stmt;
527 seq = find_goto_replacement (tf, temp);
528 if (seq)
529 {
530 gsi_insert_seq_before (gsi, gimple_seq_copy (seq), GSI_SAME_STMT);
531 gsi_remove (gsi, false);
532 return;
533 }
534 break;
535
536 case GIMPLE_COND:
537 replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 2), tf, gsi);
538 replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 3), tf, gsi);
539 break;
540
541 case GIMPLE_TRY:
542 replace_goto_queue_stmt_list (gimple_try_eval_ptr (stmt), tf);
543 replace_goto_queue_stmt_list (gimple_try_cleanup_ptr (stmt), tf);
544 break;
545 case GIMPLE_CATCH:
546 replace_goto_queue_stmt_list (gimple_catch_handler_ptr (
547 as_a <gcatch *> (stmt)),
548 tf);
549 break;
550 case GIMPLE_EH_FILTER:
551 replace_goto_queue_stmt_list (gimple_eh_filter_failure_ptr (stmt), tf);
552 break;
553 case GIMPLE_EH_ELSE:
554 {
555 geh_else *eh_else_stmt = as_a <geh_else *> (stmt);
556 replace_goto_queue_stmt_list (gimple_eh_else_n_body_ptr (eh_else_stmt),
557 tf);
558 replace_goto_queue_stmt_list (gimple_eh_else_e_body_ptr (eh_else_stmt),
559 tf);
560 }
561 break;
562
563 default:
564 /* These won't have gotos in them. */
565 break;
566 }
567
568 gsi_next (gsi);
569 }
570
571 /* A subroutine of replace_goto_queue. Handles GIMPLE_SEQ. */
572
573 static void
574 replace_goto_queue_stmt_list (gimple_seq *seq, struct leh_tf_state *tf)
575 {
576 gimple_stmt_iterator gsi = gsi_start (*seq);
577
578 while (!gsi_end_p (gsi))
579 replace_goto_queue_1 (gsi_stmt (gsi), tf, &gsi);
580 }
581
582 /* Replace all goto queue members. */
583
584 static void
585 replace_goto_queue (struct leh_tf_state *tf)
586 {
587 if (tf->goto_queue_active == 0)
588 return;
589 replace_goto_queue_stmt_list (&tf->top_p_seq, tf);
590 replace_goto_queue_stmt_list (&eh_seq, tf);
591 }
592
593 /* Add a new record to the goto queue contained in TF. NEW_STMT is the
594 data to be added, IS_LABEL indicates whether NEW_STMT is a label or
595 a gimple return. */
596
597 static void
598 record_in_goto_queue (struct leh_tf_state *tf,
599 treemple new_stmt,
600 int index,
601 bool is_label,
602 location_t location)
603 {
604 size_t active, size;
605 struct goto_queue_node *q;
606
607 gcc_assert (!tf->goto_queue_map);
608
609 active = tf->goto_queue_active;
610 size = tf->goto_queue_size;
611 if (active >= size)
612 {
613 size = (size ? size * 2 : 32);
614 tf->goto_queue_size = size;
615 tf->goto_queue
616 = XRESIZEVEC (struct goto_queue_node, tf->goto_queue, size);
617 }
618
619 q = &tf->goto_queue[active];
620 tf->goto_queue_active = active + 1;
621
622 memset (q, 0, sizeof (*q));
623 q->stmt = new_stmt;
624 q->index = index;
625 q->location = location;
626 q->is_label = is_label;
627 }
628
629 /* Record label LABEL in the goto queue contained in TF.
630 TF must not be null. */
631
632 static void
633 record_in_goto_queue_label (struct leh_tf_state *tf, treemple stmt, tree label,
634 location_t location)
635 {
636 int index;
637 treemple temp, new_stmt;
638
639 if (!label)
640 return;
641
642 /* Computed and non-local gotos do not get processed. Given
643 their nature we can neither tell whether we've escaped the
644 finally block nor redirect them even if we knew we had. */
645 if (TREE_CODE (label) != LABEL_DECL)
646 return;
647
648 /* No need to record gotos that don't leave the try block. */
649 temp.t = label;
650 if (!outside_finally_tree (temp, tf->try_finally_expr))
651 return;
652
653 if (! tf->dest_array.exists ())
654 {
655 tf->dest_array.create (10);
656 tf->dest_array.quick_push (label);
657 index = 0;
658 }
659 else
660 {
661 int n = tf->dest_array.length ();
662 for (index = 0; index < n; ++index)
663 if (tf->dest_array[index] == label)
664 break;
665 if (index == n)
666 tf->dest_array.safe_push (label);
667 }
668
669 /* In the case of a GOTO we want to record the destination label,
670 since with a GIMPLE_COND we have an easy access to the then/else
671 labels. */
672 new_stmt = stmt;
673 record_in_goto_queue (tf, new_stmt, index, true, location);
674 }
675
676 /* For any GIMPLE_GOTO or GIMPLE_RETURN, decide whether it leaves a try_finally
677 node, and if so record that fact in the goto queue associated with that
678 try_finally node. */
679
680 static void
681 maybe_record_in_goto_queue (struct leh_state *state, gimple stmt)
682 {
683 struct leh_tf_state *tf = state->tf;
684 treemple new_stmt;
685
686 if (!tf)
687 return;
688
689 switch (gimple_code (stmt))
690 {
691 case GIMPLE_COND:
692 {
693 gcond *cond_stmt = as_a <gcond *> (stmt);
694 new_stmt.tp = gimple_op_ptr (cond_stmt, 2);
695 record_in_goto_queue_label (tf, new_stmt,
696 gimple_cond_true_label (cond_stmt),
697 EXPR_LOCATION (*new_stmt.tp));
698 new_stmt.tp = gimple_op_ptr (cond_stmt, 3);
699 record_in_goto_queue_label (tf, new_stmt,
700 gimple_cond_false_label (cond_stmt),
701 EXPR_LOCATION (*new_stmt.tp));
702 }
703 break;
704 case GIMPLE_GOTO:
705 new_stmt.g = stmt;
706 record_in_goto_queue_label (tf, new_stmt, gimple_goto_dest (stmt),
707 gimple_location (stmt));
708 break;
709
710 case GIMPLE_RETURN:
711 tf->may_return = true;
712 new_stmt.g = stmt;
713 record_in_goto_queue (tf, new_stmt, -1, false, gimple_location (stmt));
714 break;
715
716 default:
717 gcc_unreachable ();
718 }
719 }
720
721
722 #ifdef ENABLE_CHECKING
723 /* We do not process GIMPLE_SWITCHes for now. As long as the original source
724 was in fact structured, and we've not yet done jump threading, then none
725 of the labels will leave outer GIMPLE_TRY_FINALLY nodes. Verify this. */
726
727 static void
728 verify_norecord_switch_expr (struct leh_state *state,
729 gswitch *switch_expr)
730 {
731 struct leh_tf_state *tf = state->tf;
732 size_t i, n;
733
734 if (!tf)
735 return;
736
737 n = gimple_switch_num_labels (switch_expr);
738
739 for (i = 0; i < n; ++i)
740 {
741 treemple temp;
742 tree lab = CASE_LABEL (gimple_switch_label (switch_expr, i));
743 temp.t = lab;
744 gcc_assert (!outside_finally_tree (temp, tf->try_finally_expr));
745 }
746 }
747 #else
748 #define verify_norecord_switch_expr(state, switch_expr)
749 #endif
750
751 /* Redirect a RETURN_EXPR pointed to by Q to FINLAB. If MOD is
752 non-null, insert it before the new branch. */
753
754 static void
755 do_return_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod)
756 {
757 gimple x;
758
759 /* In the case of a return, the queue node must be a gimple statement. */
760 gcc_assert (!q->is_label);
761
762 /* Note that the return value may have already been computed, e.g.,
763
764 int x;
765 int foo (void)
766 {
767 x = 0;
768 try {
769 return x;
770 } finally {
771 x++;
772 }
773 }
774
775 should return 0, not 1. We don't have to do anything to make
776 this happen because the return value has been placed in the
777 RESULT_DECL already. */
778
779 q->cont_stmt = q->stmt.g;
780
781 if (mod)
782 gimple_seq_add_seq (&q->repl_stmt, mod);
783
784 x = gimple_build_goto (finlab);
785 gimple_set_location (x, q->location);
786 gimple_seq_add_stmt (&q->repl_stmt, x);
787 }
788
789 /* Similar, but easier, for GIMPLE_GOTO. */
790
791 static void
792 do_goto_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod,
793 struct leh_tf_state *tf)
794 {
795 ggoto *x;
796
797 gcc_assert (q->is_label);
798
799 q->cont_stmt = gimple_build_goto (tf->dest_array[q->index]);
800
801 if (mod)
802 gimple_seq_add_seq (&q->repl_stmt, mod);
803
804 x = gimple_build_goto (finlab);
805 gimple_set_location (x, q->location);
806 gimple_seq_add_stmt (&q->repl_stmt, x);
807 }
808
809 /* Emit a standard landing pad sequence into SEQ for REGION. */
810
811 static void
812 emit_post_landing_pad (gimple_seq *seq, eh_region region)
813 {
814 eh_landing_pad lp = region->landing_pads;
815 glabel *x;
816
817 if (lp == NULL)
818 lp = gen_eh_landing_pad (region);
819
820 lp->post_landing_pad = create_artificial_label (UNKNOWN_LOCATION);
821 EH_LANDING_PAD_NR (lp->post_landing_pad) = lp->index;
822
823 x = gimple_build_label (lp->post_landing_pad);
824 gimple_seq_add_stmt (seq, x);
825 }
826
827 /* Emit a RESX statement into SEQ for REGION. */
828
829 static void
830 emit_resx (gimple_seq *seq, eh_region region)
831 {
832 gresx *x = gimple_build_resx (region->index);
833 gimple_seq_add_stmt (seq, x);
834 if (region->outer)
835 record_stmt_eh_region (region->outer, x);
836 }
837
838 /* Emit an EH_DISPATCH statement into SEQ for REGION. */
839
840 static void
841 emit_eh_dispatch (gimple_seq *seq, eh_region region)
842 {
843 geh_dispatch *x = gimple_build_eh_dispatch (region->index);
844 gimple_seq_add_stmt (seq, x);
845 }
846
847 /* Note that the current EH region may contain a throw, or a
848 call to a function which itself may contain a throw. */
849
850 static void
851 note_eh_region_may_contain_throw (eh_region region)
852 {
853 while (bitmap_set_bit (eh_region_may_contain_throw_map, region->index))
854 {
855 if (region->type == ERT_MUST_NOT_THROW)
856 break;
857 region = region->outer;
858 if (region == NULL)
859 break;
860 }
861 }
862
863 /* Check if REGION has been marked as containing a throw. If REGION is
864 NULL, this predicate is false. */
865
866 static inline bool
867 eh_region_may_contain_throw (eh_region r)
868 {
869 return r && bitmap_bit_p (eh_region_may_contain_throw_map, r->index);
870 }
871
872 /* We want to transform
873 try { body; } catch { stuff; }
874 to
875 normal_sequence:
876 body;
877 over:
878 eh_sequence:
879 landing_pad:
880 stuff;
881 goto over;
882
883 TP is a GIMPLE_TRY node. REGION is the region whose post_landing_pad
884 should be placed before the second operand, or NULL. OVER is
885 an existing label that should be put at the exit, or NULL. */
886
887 static gimple_seq
888 frob_into_branch_around (gtry *tp, eh_region region, tree over)
889 {
890 gimple x;
891 gimple_seq cleanup, result;
892 location_t loc = gimple_location (tp);
893
894 cleanup = gimple_try_cleanup (tp);
895 result = gimple_try_eval (tp);
896
897 if (region)
898 emit_post_landing_pad (&eh_seq, region);
899
900 if (gimple_seq_may_fallthru (cleanup))
901 {
902 if (!over)
903 over = create_artificial_label (loc);
904 x = gimple_build_goto (over);
905 gimple_set_location (x, loc);
906 gimple_seq_add_stmt (&cleanup, x);
907 }
908 gimple_seq_add_seq (&eh_seq, cleanup);
909
910 if (over)
911 {
912 x = gimple_build_label (over);
913 gimple_seq_add_stmt (&result, x);
914 }
915 return result;
916 }
917
918 /* A subroutine of lower_try_finally. Duplicate the gimple sequence SEQ.
919 Make sure to record all new labels found. */
920
921 static gimple_seq
922 lower_try_finally_dup_block (gimple_seq seq, struct leh_state *outer_state,
923 location_t loc)
924 {
925 gtry *region = NULL;
926 gimple_seq new_seq;
927 gimple_stmt_iterator gsi;
928
929 new_seq = copy_gimple_seq_and_replace_locals (seq);
930
931 for (gsi = gsi_start (new_seq); !gsi_end_p (gsi); gsi_next (&gsi))
932 {
933 gimple stmt = gsi_stmt (gsi);
934 if (LOCATION_LOCUS (gimple_location (stmt)) == UNKNOWN_LOCATION)
935 {
936 tree block = gimple_block (stmt);
937 gimple_set_location (stmt, loc);
938 gimple_set_block (stmt, block);
939 }
940 }
941
942 if (outer_state->tf)
943 region = outer_state->tf->try_finally_expr;
944 collect_finally_tree_1 (new_seq, region);
945
946 return new_seq;
947 }
948
949 /* A subroutine of lower_try_finally. Create a fallthru label for
950 the given try_finally state. The only tricky bit here is that
951 we have to make sure to record the label in our outer context. */
952
953 static tree
954 lower_try_finally_fallthru_label (struct leh_tf_state *tf)
955 {
956 tree label = tf->fallthru_label;
957 treemple temp;
958
959 if (!label)
960 {
961 label = create_artificial_label (gimple_location (tf->try_finally_expr));
962 tf->fallthru_label = label;
963 if (tf->outer->tf)
964 {
965 temp.t = label;
966 record_in_finally_tree (temp, tf->outer->tf->try_finally_expr);
967 }
968 }
969 return label;
970 }
971
972 /* A subroutine of lower_try_finally. If FINALLY consists of a
973 GIMPLE_EH_ELSE node, return it. */
974
975 static inline geh_else *
976 get_eh_else (gimple_seq finally)
977 {
978 gimple x = gimple_seq_first_stmt (finally);
979 if (gimple_code (x) == GIMPLE_EH_ELSE)
980 {
981 gcc_assert (gimple_seq_singleton_p (finally));
982 return as_a <geh_else *> (x);
983 }
984 return NULL;
985 }
986
987 /* A subroutine of lower_try_finally. If the eh_protect_cleanup_actions
988 langhook returns non-null, then the language requires that the exception
989 path out of a try_finally be treated specially. To wit: the code within
990 the finally block may not itself throw an exception. We have two choices
991 here. First we can duplicate the finally block and wrap it in a
992 must_not_throw region. Second, we can generate code like
993
994 try {
995 finally_block;
996 } catch {
997 if (fintmp == eh_edge)
998 protect_cleanup_actions;
999 }
1000
1001 where "fintmp" is the temporary used in the switch statement generation
1002 alternative considered below. For the nonce, we always choose the first
1003 option.
1004
1005 THIS_STATE may be null if this is a try-cleanup, not a try-finally. */
1006
1007 static void
1008 honor_protect_cleanup_actions (struct leh_state *outer_state,
1009 struct leh_state *this_state,
1010 struct leh_tf_state *tf)
1011 {
1012 tree protect_cleanup_actions;
1013 gimple_stmt_iterator gsi;
1014 bool finally_may_fallthru;
1015 gimple_seq finally;
1016 gimple x;
1017 geh_mnt *eh_mnt;
1018 gtry *try_stmt;
1019 geh_else *eh_else;
1020
1021 /* First check for nothing to do. */
1022 if (lang_hooks.eh_protect_cleanup_actions == NULL)
1023 return;
1024 protect_cleanup_actions = lang_hooks.eh_protect_cleanup_actions ();
1025 if (protect_cleanup_actions == NULL)
1026 return;
1027
1028 finally = gimple_try_cleanup (tf->top_p);
1029 eh_else = get_eh_else (finally);
1030
1031 /* Duplicate the FINALLY block. Only need to do this for try-finally,
1032 and not for cleanups. If we've got an EH_ELSE, extract it now. */
1033 if (eh_else)
1034 {
1035 finally = gimple_eh_else_e_body (eh_else);
1036 gimple_try_set_cleanup (tf->top_p, gimple_eh_else_n_body (eh_else));
1037 }
1038 else if (this_state)
1039 finally = lower_try_finally_dup_block (finally, outer_state,
1040 gimple_location (tf->try_finally_expr));
1041 finally_may_fallthru = gimple_seq_may_fallthru (finally);
1042
1043 /* If this cleanup consists of a TRY_CATCH_EXPR with TRY_CATCH_IS_CLEANUP
1044 set, the handler of the TRY_CATCH_EXPR is another cleanup which ought
1045 to be in an enclosing scope, but needs to be implemented at this level
1046 to avoid a nesting violation (see wrap_temporary_cleanups in
1047 cp/decl.c). Since it's logically at an outer level, we should call
1048 terminate before we get to it, so strip it away before adding the
1049 MUST_NOT_THROW filter. */
1050 gsi = gsi_start (finally);
1051 x = gsi_stmt (gsi);
1052 if (gimple_code (x) == GIMPLE_TRY
1053 && gimple_try_kind (x) == GIMPLE_TRY_CATCH
1054 && gimple_try_catch_is_cleanup (x))
1055 {
1056 gsi_insert_seq_before (&gsi, gimple_try_eval (x), GSI_SAME_STMT);
1057 gsi_remove (&gsi, false);
1058 }
1059
1060 /* Wrap the block with protect_cleanup_actions as the action. */
1061 eh_mnt = gimple_build_eh_must_not_throw (protect_cleanup_actions);
1062 try_stmt = gimple_build_try (finally, gimple_seq_alloc_with_stmt (eh_mnt),
1063 GIMPLE_TRY_CATCH);
1064 finally = lower_eh_must_not_throw (outer_state, try_stmt);
1065
1066 /* Drop all of this into the exception sequence. */
1067 emit_post_landing_pad (&eh_seq, tf->region);
1068 gimple_seq_add_seq (&eh_seq, finally);
1069 if (finally_may_fallthru)
1070 emit_resx (&eh_seq, tf->region);
1071
1072 /* Having now been handled, EH isn't to be considered with
1073 the rest of the outgoing edges. */
1074 tf->may_throw = false;
1075 }
1076
1077 /* A subroutine of lower_try_finally. We have determined that there is
1078 no fallthru edge out of the finally block. This means that there is
1079 no outgoing edge corresponding to any incoming edge. Restructure the
1080 try_finally node for this special case. */
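
/* Editorial illustration (assumed): this is the shape taken by e.g.

     try { body; } finally { cleanup (); abort (); }

   where the finally code never falls off its end.  All incoming edges
   (fallthru, gotos, returns, and possibly the EH edge) only need to
   reach the single copy of the finally code, so a fresh label is placed
   in front of it and everything is redirected there; nothing comes out
   the bottom.  */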
1081
1082 static void
1083 lower_try_finally_nofallthru (struct leh_state *state,
1084 struct leh_tf_state *tf)
1085 {
1086 tree lab;
1087 gimple x;
1088 geh_else *eh_else;
1089 gimple_seq finally;
1090 struct goto_queue_node *q, *qe;
1091
1092 lab = create_artificial_label (gimple_location (tf->try_finally_expr));
1093
1094 /* We expect that tf->top_p is a GIMPLE_TRY. */
1095 finally = gimple_try_cleanup (tf->top_p);
1096 tf->top_p_seq = gimple_try_eval (tf->top_p);
1097
1098 x = gimple_build_label (lab);
1099 gimple_seq_add_stmt (&tf->top_p_seq, x);
1100
1101 q = tf->goto_queue;
1102 qe = q + tf->goto_queue_active;
1103 for (; q < qe; ++q)
1104 if (q->index < 0)
1105 do_return_redirection (q, lab, NULL);
1106 else
1107 do_goto_redirection (q, lab, NULL, tf);
1108
1109 replace_goto_queue (tf);
1110
1111 /* Emit the finally block into the stream. Lower EH_ELSE at this time. */
1112 eh_else = get_eh_else (finally);
1113 if (eh_else)
1114 {
1115 finally = gimple_eh_else_n_body (eh_else);
1116 lower_eh_constructs_1 (state, &finally);
1117 gimple_seq_add_seq (&tf->top_p_seq, finally);
1118
1119 if (tf->may_throw)
1120 {
1121 finally = gimple_eh_else_e_body (eh_else);
1122 lower_eh_constructs_1 (state, &finally);
1123
1124 emit_post_landing_pad (&eh_seq, tf->region);
1125 gimple_seq_add_seq (&eh_seq, finally);
1126 }
1127 }
1128 else
1129 {
1130 lower_eh_constructs_1 (state, &finally);
1131 gimple_seq_add_seq (&tf->top_p_seq, finally);
1132
1133 if (tf->may_throw)
1134 {
1135 emit_post_landing_pad (&eh_seq, tf->region);
1136
1137 x = gimple_build_goto (lab);
1138 gimple_set_location (x, gimple_location (tf->try_finally_expr));
1139 gimple_seq_add_stmt (&eh_seq, x);
1140 }
1141 }
1142 }
1143
1144 /* A subroutine of lower_try_finally. We have determined that there is
1145 exactly one destination of the finally block. Restructure the
1146 try_finally node for this special case. */
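
/* Editorial illustration (assumed): a typical instance is

     try { ...; return x; } finally { cleanup (); }

   where the return is the only way out.  The finally code is emitted
   exactly once, inline after the try body (or on the EH path if that is
   the one destination), followed, when needed, by the original
   return/goto.  */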
1147
1148 static void
1149 lower_try_finally_onedest (struct leh_state *state, struct leh_tf_state *tf)
1150 {
1151 struct goto_queue_node *q, *qe;
1152 geh_else *eh_else;
1153 glabel *label_stmt;
1154 gimple x;
1155 gimple_seq finally;
1156 gimple_stmt_iterator gsi;
1157 tree finally_label;
1158 location_t loc = gimple_location (tf->try_finally_expr);
1159
1160 finally = gimple_try_cleanup (tf->top_p);
1161 tf->top_p_seq = gimple_try_eval (tf->top_p);
1162
1163 /* Since there's only one destination, and the destination edge can be
1164 either EH or non-EH, all of our incoming edges are necessarily of the
1165 same type. Therefore we can lower EH_ELSE immediately. */
1166 eh_else = get_eh_else (finally);
1167 if (eh_else)
1168 {
1169 if (tf->may_throw)
1170 finally = gimple_eh_else_e_body (eh_else);
1171 else
1172 finally = gimple_eh_else_n_body (eh_else);
1173 }
1174
1175 lower_eh_constructs_1 (state, &finally);
1176
1177 for (gsi = gsi_start (finally); !gsi_end_p (gsi); gsi_next (&gsi))
1178 {
1179 gimple stmt = gsi_stmt (gsi);
1180 if (LOCATION_LOCUS (gimple_location (stmt)) == UNKNOWN_LOCATION)
1181 {
1182 tree block = gimple_block (stmt);
1183 gimple_set_location (stmt, gimple_location (tf->try_finally_expr));
1184 gimple_set_block (stmt, block);
1185 }
1186 }
1187
1188 if (tf->may_throw)
1189 {
1190 /* Only reachable via the exception edge. Add the given label to
1191 the head of the FINALLY block. Append a RESX at the end. */
1192 emit_post_landing_pad (&eh_seq, tf->region);
1193 gimple_seq_add_seq (&eh_seq, finally);
1194 emit_resx (&eh_seq, tf->region);
1195 return;
1196 }
1197
1198 if (tf->may_fallthru)
1199 {
1200 /* Only reachable via the fallthru edge. Do nothing but let
1201 the two blocks run together; we'll fall out the bottom. */
1202 gimple_seq_add_seq (&tf->top_p_seq, finally);
1203 return;
1204 }
1205
1206 finally_label = create_artificial_label (loc);
1207 label_stmt = gimple_build_label (finally_label);
1208 gimple_seq_add_stmt (&tf->top_p_seq, label_stmt);
1209
1210 gimple_seq_add_seq (&tf->top_p_seq, finally);
1211
1212 q = tf->goto_queue;
1213 qe = q + tf->goto_queue_active;
1214
1215 if (tf->may_return)
1216 {
1217 /* Reachable by return expressions only. Redirect them. */
1218 for (; q < qe; ++q)
1219 do_return_redirection (q, finally_label, NULL);
1220 replace_goto_queue (tf);
1221 }
1222 else
1223 {
1224 /* Reachable by goto expressions only. Redirect them. */
1225 for (; q < qe; ++q)
1226 do_goto_redirection (q, finally_label, NULL, tf);
1227 replace_goto_queue (tf);
1228
1229 if (tf->dest_array[0] == tf->fallthru_label)
1230 {
1231 /* Reachable by goto to fallthru label only. Redirect it
1232 to the new label (already created, sadly), and do not
1233 emit the final branch out, or the fallthru label. */
1234 tf->fallthru_label = NULL;
1235 return;
1236 }
1237 }
1238
1239 /* Place the original return/goto to the original destination
1240 immediately after the finally block. */
1241 x = tf->goto_queue[0].cont_stmt;
1242 gimple_seq_add_stmt (&tf->top_p_seq, x);
1243 maybe_record_in_goto_queue (state, x);
1244 }
1245
1246 /* A subroutine of lower_try_finally. There are multiple edges incoming
1247 and outgoing from the finally block. Implement this by duplicating the
1248 finally block for every destination. */
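
/* Editorial illustration (assumed): with both a fallthru and a return
   leaving the try block,

     try { if (c) return; ...; } finally { cleanup (); }

   becomes, roughly,

     if (c) goto L_ret;
     ...
     cleanup (); goto L_over;      <- copy for the fallthru edge
     L_ret: cleanup (); return;    <- copy for the return edge
     L_over:

   plus, if the region may throw, one more copy of the finally code on
   the EH path ending in a RESX.  */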
1249
1250 static void
1251 lower_try_finally_copy (struct leh_state *state, struct leh_tf_state *tf)
1252 {
1253 gimple_seq finally;
1254 gimple_seq new_stmt;
1255 gimple_seq seq;
1256 gimple x;
1257 geh_else *eh_else;
1258 tree tmp;
1259 location_t tf_loc = gimple_location (tf->try_finally_expr);
1260
1261 finally = gimple_try_cleanup (tf->top_p);
1262
1263 /* Notice EH_ELSE, and simplify some of the remaining code
1264 by considering FINALLY to be the normal return path only. */
1265 eh_else = get_eh_else (finally);
1266 if (eh_else)
1267 finally = gimple_eh_else_n_body (eh_else);
1268
1269 tf->top_p_seq = gimple_try_eval (tf->top_p);
1270 new_stmt = NULL;
1271
1272 if (tf->may_fallthru)
1273 {
1274 seq = lower_try_finally_dup_block (finally, state, tf_loc);
1275 lower_eh_constructs_1 (state, &seq);
1276 gimple_seq_add_seq (&new_stmt, seq);
1277
1278 tmp = lower_try_finally_fallthru_label (tf);
1279 x = gimple_build_goto (tmp);
1280 gimple_set_location (x, tf_loc);
1281 gimple_seq_add_stmt (&new_stmt, x);
1282 }
1283
1284 if (tf->may_throw)
1285 {
1286 /* We don't need to copy the EH path of EH_ELSE,
1287 since it is only emitted once. */
1288 if (eh_else)
1289 seq = gimple_eh_else_e_body (eh_else);
1290 else
1291 seq = lower_try_finally_dup_block (finally, state, tf_loc);
1292 lower_eh_constructs_1 (state, &seq);
1293
1294 emit_post_landing_pad (&eh_seq, tf->region);
1295 gimple_seq_add_seq (&eh_seq, seq);
1296 emit_resx (&eh_seq, tf->region);
1297 }
1298
1299 if (tf->goto_queue)
1300 {
1301 struct goto_queue_node *q, *qe;
1302 int return_index, index;
1303 struct labels_s
1304 {
1305 struct goto_queue_node *q;
1306 tree label;
1307 } *labels;
1308
1309 return_index = tf->dest_array.length ();
1310 labels = XCNEWVEC (struct labels_s, return_index + 1);
1311
1312 q = tf->goto_queue;
1313 qe = q + tf->goto_queue_active;
1314 for (; q < qe; q++)
1315 {
1316 index = q->index < 0 ? return_index : q->index;
1317
1318 if (!labels[index].q)
1319 labels[index].q = q;
1320 }
1321
1322 for (index = 0; index < return_index + 1; index++)
1323 {
1324 tree lab;
1325
1326 q = labels[index].q;
1327 if (! q)
1328 continue;
1329
1330 lab = labels[index].label
1331 = create_artificial_label (tf_loc);
1332
1333 if (index == return_index)
1334 do_return_redirection (q, lab, NULL);
1335 else
1336 do_goto_redirection (q, lab, NULL, tf);
1337
1338 x = gimple_build_label (lab);
1339 gimple_seq_add_stmt (&new_stmt, x);
1340
1341 seq = lower_try_finally_dup_block (finally, state, q->location);
1342 lower_eh_constructs_1 (state, &seq);
1343 gimple_seq_add_seq (&new_stmt, seq);
1344
1345 gimple_seq_add_stmt (&new_stmt, q->cont_stmt);
1346 maybe_record_in_goto_queue (state, q->cont_stmt);
1347 }
1348
1349 for (q = tf->goto_queue; q < qe; q++)
1350 {
1351 tree lab;
1352
1353 index = q->index < 0 ? return_index : q->index;
1354
1355 if (labels[index].q == q)
1356 continue;
1357
1358 lab = labels[index].label;
1359
1360 if (index == return_index)
1361 do_return_redirection (q, lab, NULL);
1362 else
1363 do_goto_redirection (q, lab, NULL, tf);
1364 }
1365
1366 replace_goto_queue (tf);
1367 free (labels);
1368 }
1369
1370 /* Need to link new stmts after running replace_goto_queue due
1371 to not wanting to process the same goto stmts twice. */
1372 gimple_seq_add_seq (&tf->top_p_seq, new_stmt);
1373 }
1374
1375 /* A subroutine of lower_try_finally. There are multiple edges incoming
1376 and outgoing from the finally block. Implement this by instrumenting
1377 each incoming edge and creating a switch statement at the end of the
1378 finally block that branches to the appropriate destination. */
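
/* Editorial illustration (assumed): instead of duplicating the finally
   code, each incoming edge records its destination in a temporary and
   branches to a single copy, roughly

     if (c) { finally_tmp = <return_index>; goto L_fin; }
     ...
     finally_tmp = <fallthru_index>;
     L_fin: cleanup ();
     switch (finally_tmp)
       {
       case <fallthru_index>: goto L_over;
       case <return_index>:   return;        (the original return)
       }
     L_over:

   with the EH path, when present, getting its own index and ending in a
   RESX.  */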
1379
1380 static void
1381 lower_try_finally_switch (struct leh_state *state, struct leh_tf_state *tf)
1382 {
1383 struct goto_queue_node *q, *qe;
1384 tree finally_tmp, finally_label;
1385 int return_index, eh_index, fallthru_index;
1386 int nlabels, ndests, j, last_case_index;
1387 tree last_case;
1388 vec<tree> case_label_vec;
1389 gimple_seq switch_body = NULL;
1390 gimple x;
1391 geh_else *eh_else;
1392 tree tmp;
1393 gimple switch_stmt;
1394 gimple_seq finally;
1395 hash_map<tree, gimple> *cont_map = NULL;
1396 /* The location of the TRY_FINALLY stmt. */
1397 location_t tf_loc = gimple_location (tf->try_finally_expr);
1398 /* The location of the finally block. */
1399 location_t finally_loc;
1400
1401 finally = gimple_try_cleanup (tf->top_p);
1402 eh_else = get_eh_else (finally);
1403
1404 /* Mash the TRY block to the head of the chain. */
1405 tf->top_p_seq = gimple_try_eval (tf->top_p);
1406
1407 /* The location of the finally is either the last stmt in the finally
1408 block or the location of the TRY_FINALLY itself. */
1409 x = gimple_seq_last_stmt (finally);
1410 finally_loc = x ? gimple_location (x) : tf_loc;
1411
1412 /* Prepare for switch statement generation. */
1413 nlabels = tf->dest_array.length ();
1414 return_index = nlabels;
1415 eh_index = return_index + tf->may_return;
1416 fallthru_index = eh_index + (tf->may_throw && !eh_else);
1417 ndests = fallthru_index + tf->may_fallthru;
1418
1419 finally_tmp = create_tmp_var (integer_type_node, "finally_tmp");
1420 finally_label = create_artificial_label (finally_loc);
1421
1422 /* We use vec::quick_push on case_label_vec throughout this function,
1423 since we know the size in advance and allocate precisely as much
1424 space as needed. */
1425 case_label_vec.create (ndests);
1426 last_case = NULL;
1427 last_case_index = 0;
1428
1429 /* Begin inserting code for getting to the finally block. Things
1430 are done in this order to match the order in which the code is
1431 laid out. */
1432
1433 if (tf->may_fallthru)
1434 {
1435 x = gimple_build_assign (finally_tmp,
1436 build_int_cst (integer_type_node,
1437 fallthru_index));
1438 gimple_seq_add_stmt (&tf->top_p_seq, x);
1439
1440 tmp = build_int_cst (integer_type_node, fallthru_index);
1441 last_case = build_case_label (tmp, NULL,
1442 create_artificial_label (tf_loc));
1443 case_label_vec.quick_push (last_case);
1444 last_case_index++;
1445
1446 x = gimple_build_label (CASE_LABEL (last_case));
1447 gimple_seq_add_stmt (&switch_body, x);
1448
1449 tmp = lower_try_finally_fallthru_label (tf);
1450 x = gimple_build_goto (tmp);
1451 gimple_set_location (x, tf_loc);
1452 gimple_seq_add_stmt (&switch_body, x);
1453 }
1454
1455 /* For EH_ELSE, emit the exception path (plus resx) now, then
1456 subsequently we only need consider the normal path. */
1457 if (eh_else)
1458 {
1459 if (tf->may_throw)
1460 {
1461 finally = gimple_eh_else_e_body (eh_else);
1462 lower_eh_constructs_1 (state, &finally);
1463
1464 emit_post_landing_pad (&eh_seq, tf->region);
1465 gimple_seq_add_seq (&eh_seq, finally);
1466 emit_resx (&eh_seq, tf->region);
1467 }
1468
1469 finally = gimple_eh_else_n_body (eh_else);
1470 }
1471 else if (tf->may_throw)
1472 {
1473 emit_post_landing_pad (&eh_seq, tf->region);
1474
1475 x = gimple_build_assign (finally_tmp,
1476 build_int_cst (integer_type_node, eh_index));
1477 gimple_seq_add_stmt (&eh_seq, x);
1478
1479 x = gimple_build_goto (finally_label);
1480 gimple_set_location (x, tf_loc);
1481 gimple_seq_add_stmt (&eh_seq, x);
1482
1483 tmp = build_int_cst (integer_type_node, eh_index);
1484 last_case = build_case_label (tmp, NULL,
1485 create_artificial_label (tf_loc));
1486 case_label_vec.quick_push (last_case);
1487 last_case_index++;
1488
1489 x = gimple_build_label (CASE_LABEL (last_case));
1490 gimple_seq_add_stmt (&eh_seq, x);
1491 emit_resx (&eh_seq, tf->region);
1492 }
1493
1494 x = gimple_build_label (finally_label);
1495 gimple_seq_add_stmt (&tf->top_p_seq, x);
1496
1497 lower_eh_constructs_1 (state, &finally);
1498 gimple_seq_add_seq (&tf->top_p_seq, finally);
1499
1500 /* Redirect each incoming goto edge. */
1501 q = tf->goto_queue;
1502 qe = q + tf->goto_queue_active;
1503 j = last_case_index + tf->may_return;
1504 /* Prepare the assignments to finally_tmp that are executed upon the
1505 entrance through a particular edge. */
1506 for (; q < qe; ++q)
1507 {
1508 gimple_seq mod = NULL;
1509 int switch_id;
1510 unsigned int case_index;
1511
1512 if (q->index < 0)
1513 {
1514 x = gimple_build_assign (finally_tmp,
1515 build_int_cst (integer_type_node,
1516 return_index));
1517 gimple_seq_add_stmt (&mod, x);
1518 do_return_redirection (q, finally_label, mod);
1519 switch_id = return_index;
1520 }
1521 else
1522 {
1523 x = gimple_build_assign (finally_tmp,
1524 build_int_cst (integer_type_node, q->index));
1525 gimple_seq_add_stmt (&mod, x);
1526 do_goto_redirection (q, finally_label, mod, tf);
1527 switch_id = q->index;
1528 }
1529
1530 case_index = j + q->index;
1531 if (case_label_vec.length () <= case_index || !case_label_vec[case_index])
1532 {
1533 tree case_lab;
1534 tmp = build_int_cst (integer_type_node, switch_id);
1535 case_lab = build_case_label (tmp, NULL,
1536 create_artificial_label (tf_loc));
1537 /* We store the cont_stmt in the pointer map, so that we can recover
1538 it in the loop below. */
1539 if (!cont_map)
1540 cont_map = new hash_map<tree, gimple>;
1541 cont_map->put (case_lab, q->cont_stmt);
1542 case_label_vec.quick_push (case_lab);
1543 }
1544 }
1545 for (j = last_case_index; j < last_case_index + nlabels; j++)
1546 {
1547 gimple cont_stmt;
1548
1549 last_case = case_label_vec[j];
1550
1551 gcc_assert (last_case);
1552 gcc_assert (cont_map);
1553
1554 cont_stmt = *cont_map->get (last_case);
1555
1556 x = gimple_build_label (CASE_LABEL (last_case));
1557 gimple_seq_add_stmt (&switch_body, x);
1558 gimple_seq_add_stmt (&switch_body, cont_stmt);
1559 maybe_record_in_goto_queue (state, cont_stmt);
1560 }
1561 if (cont_map)
1562 delete cont_map;
1563
1564 replace_goto_queue (tf);
1565
1566 /* Make sure that the last case is the default label, as one is required.
1567 Then sort the labels, which is also required in GIMPLE. */
1568 CASE_LOW (last_case) = NULL;
1569 tree tem = case_label_vec.pop ();
1570 gcc_assert (tem == last_case);
1571 sort_case_labels (case_label_vec);
1572
1573 /* Build the switch statement, setting last_case to be the default
1574 label. */
1575 switch_stmt = gimple_build_switch (finally_tmp, last_case,
1576 case_label_vec);
1577 gimple_set_location (switch_stmt, finally_loc);
1578
1579 /* Need to link SWITCH_STMT after running replace_goto_queue
1580 due to not wanting to process the same goto stmts twice. */
1581 gimple_seq_add_stmt (&tf->top_p_seq, switch_stmt);
1582 gimple_seq_add_seq (&tf->top_p_seq, switch_body);
1583 }
1584
1585 /* Decide whether or not we are going to duplicate the finally block.
1586 There are several considerations.
1587
1588 First, if this is Java, then the finally block contains code
1589 written by the user. It has line numbers associated with it,
1590 so duplicating the block means it's difficult to set a breakpoint.
1591 Since controlling code generation via -g is verboten, we simply
1592 never duplicate code without optimization.
1593
1594 Second, we'd like to prevent egregious code growth. One way to
1595 do this is to estimate the size of the finally block, multiply
1596 that by the number of copies we'd need to make, and compare against
1597 the estimate of the size of the switch machinery we'd have to add. */
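
/* Editorial worked example (numbers assumed): if the finally block is
   estimated at 8 "size" insns and there are 3 destinations, then
   f_estimate = (8 + 1) * 3 = 27 and sw_estimate = 10 + 2 * 3 = 16.
   When optimizing for size, 27 < 16 is false, so the switch form is
   used; at -O2 the 27 < 100 test still allows copying.  */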
1598
1599 static bool
1600 decide_copy_try_finally (int ndests, bool may_throw, gimple_seq finally)
1601 {
1602 int f_estimate, sw_estimate;
1603 geh_else *eh_else;
1604
1605 /* If there's an EH_ELSE involved, the exception path is separate
1606 and really doesn't come into play for this computation. */
1607 eh_else = get_eh_else (finally);
1608 if (eh_else)
1609 {
1610 ndests -= may_throw;
1611 finally = gimple_eh_else_n_body (eh_else);
1612 }
1613
1614 if (!optimize)
1615 {
1616 gimple_stmt_iterator gsi;
1617
1618 if (ndests == 1)
1619 return true;
1620
1621 for (gsi = gsi_start (finally); !gsi_end_p (gsi); gsi_next (&gsi))
1622 {
1623 gimple stmt = gsi_stmt (gsi);
1624 if (!is_gimple_debug (stmt) && !gimple_clobber_p (stmt))
1625 return false;
1626 }
1627 return true;
1628 }
1629
1630 /* Finally estimate N times, plus N gotos. */
1631 f_estimate = count_insns_seq (finally, &eni_size_weights);
1632 f_estimate = (f_estimate + 1) * ndests;
1633
1634 /* Switch statement (cost 10), N variable assignments, N gotos. */
1635 sw_estimate = 10 + 2 * ndests;
1636
1637 /* Optimize for size clearly wants our best guess. */
1638 if (optimize_function_for_size_p (cfun))
1639 return f_estimate < sw_estimate;
1640
1641 /* ??? These numbers are completely made up so far. */
1642 if (optimize > 1)
1643 return f_estimate < 100 || f_estimate < sw_estimate * 2;
1644 else
1645 return f_estimate < 40 || f_estimate * 2 < sw_estimate * 3;
1646 }
1647
1648 /* REG is the enclosing region for a possible cleanup region, or the region
1649 itself. Returns TRUE if such a region would be unreachable.
1650
1651 Cleanup regions within a must-not-throw region aren't actually reachable
1652 even if there are throwing stmts within them, because the personality
1653 routine will call terminate before unwinding. */
1654
1655 static bool
1656 cleanup_is_dead_in (eh_region reg)
1657 {
1658 while (reg && reg->type == ERT_CLEANUP)
1659 reg = reg->outer;
1660 return (reg && reg->type == ERT_MUST_NOT_THROW);
1661 }
1662
1663 /* A subroutine of lower_eh_constructs_1. Lower a GIMPLE_TRY_FINALLY node
1664 to a sequence of labels and blocks, plus the exception region trees
1665 that record all the magic. This is complicated by the need to
1666 arrange for the FINALLY block to be executed on all exits. */
1667
1668 static gimple_seq
1669 lower_try_finally (struct leh_state *state, gtry *tp)
1670 {
1671 struct leh_tf_state this_tf;
1672 struct leh_state this_state;
1673 int ndests;
1674 gimple_seq old_eh_seq;
1675
1676 /* Process the try block. */
1677
1678 memset (&this_tf, 0, sizeof (this_tf));
1679 this_tf.try_finally_expr = tp;
1680 this_tf.top_p = tp;
1681 this_tf.outer = state;
1682 if (using_eh_for_cleanups_p () && !cleanup_is_dead_in (state->cur_region))
1683 {
1684 this_tf.region = gen_eh_region_cleanup (state->cur_region);
1685 this_state.cur_region = this_tf.region;
1686 }
1687 else
1688 {
1689 this_tf.region = NULL;
1690 this_state.cur_region = state->cur_region;
1691 }
1692
1693 this_state.ehp_region = state->ehp_region;
1694 this_state.tf = &this_tf;
1695
1696 old_eh_seq = eh_seq;
1697 eh_seq = NULL;
1698
1699 lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1700
1701 /* Determine if the try block can be exited through the bottom. */
1702 this_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp));
1703
1704 /* Determine if any exceptions are possible within the try block. */
1705 if (this_tf.region)
1706 this_tf.may_throw = eh_region_may_contain_throw (this_tf.region);
1707 if (this_tf.may_throw)
1708 honor_protect_cleanup_actions (state, &this_state, &this_tf);
1709
1710 /* Determine how many edges (still) reach the finally block. Or rather,
1711 how many destinations are reached by the finally block. Use this to
1712 determine how we process the finally block itself. */
1713
1714 ndests = this_tf.dest_array.length ();
1715 ndests += this_tf.may_fallthru;
1716 ndests += this_tf.may_return;
1717 ndests += this_tf.may_throw;
1718
1719 /* If the FINALLY block is not reachable, dike it out. */
1720 if (ndests == 0)
1721 {
1722 gimple_seq_add_seq (&this_tf.top_p_seq, gimple_try_eval (tp));
1723 gimple_try_set_cleanup (tp, NULL);
1724 }
1725 /* If the finally block doesn't fall through, then any destination
1726 we might try to impose there isn't reached either. There may be
1727 some minor amount of cleanup and redirection still needed. */
1728 else if (!gimple_seq_may_fallthru (gimple_try_cleanup (tp)))
1729 lower_try_finally_nofallthru (state, &this_tf);
1730
1731 /* We can easily special-case redirection to a single destination. */
1732 else if (ndests == 1)
1733 lower_try_finally_onedest (state, &this_tf);
1734 else if (decide_copy_try_finally (ndests, this_tf.may_throw,
1735 gimple_try_cleanup (tp)))
1736 lower_try_finally_copy (state, &this_tf);
1737 else
1738 lower_try_finally_switch (state, &this_tf);
1739
1740 /* If someone requested we add a label at the end of the transformed
1741 block, do so. */
1742 if (this_tf.fallthru_label)
1743 {
1744 /* This must be reached only if ndests == 0. */
1745 gimple x = gimple_build_label (this_tf.fallthru_label);
1746 gimple_seq_add_stmt (&this_tf.top_p_seq, x);
1747 }
1748
1749 this_tf.dest_array.release ();
1750 free (this_tf.goto_queue);
1751 if (this_tf.goto_queue_map)
1752 delete this_tf.goto_queue_map;
1753
1754 /* If there was an old (aka outer) eh_seq, append the current eh_seq.
1755 If there was no old eh_seq, then the append is trivially already done. */
1756 if (old_eh_seq)
1757 {
1758 if (eh_seq == NULL)
1759 eh_seq = old_eh_seq;
1760 else
1761 {
1762 gimple_seq new_eh_seq = eh_seq;
1763 eh_seq = old_eh_seq;
1764 gimple_seq_add_seq (&eh_seq, new_eh_seq);
1765 }
1766 }
1767
1768 return this_tf.top_p_seq;
1769 }
1770
1771 /* A subroutine of lower_eh_constructs_1. Lower a GIMPLE_TRY_CATCH with a
1772 list of GIMPLE_CATCH to a sequence of labels and blocks, plus the
1773 exception region trees that record all the magic. */
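
/* Editorial illustration (assumed): a try/catch such as

     try { body; } catch (T1) { h1; } catch (...) { h2; }

   is lowered so that the cleanup sequence of the GIMPLE_TRY becomes

     <eh_dispatch region>
     <resx>
     LBL1: h1; goto over;
     LBL2: h2; goto over;

   which frob_into_branch_around then drops into eh_seq behind the
   region's post landing pad, with "over" emitted after the try body.  */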
1774
1775 static gimple_seq
1776 lower_catch (struct leh_state *state, gtry *tp)
1777 {
1778 eh_region try_region = NULL;
1779 struct leh_state this_state = *state;
1780 gimple_stmt_iterator gsi;
1781 tree out_label;
1782 gimple_seq new_seq, cleanup;
1783 gimple x;
1784 location_t try_catch_loc = gimple_location (tp);
1785
1786 if (flag_exceptions)
1787 {
1788 try_region = gen_eh_region_try (state->cur_region);
1789 this_state.cur_region = try_region;
1790 }
1791
1792 lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1793
1794 if (!eh_region_may_contain_throw (try_region))
1795 return gimple_try_eval (tp);
1796
1797 new_seq = NULL;
1798 emit_eh_dispatch (&new_seq, try_region);
1799 emit_resx (&new_seq, try_region);
1800
1801 this_state.cur_region = state->cur_region;
1802 this_state.ehp_region = try_region;
1803
1804 /* Add eh_seq from lowering EH in the cleanup sequence after the cleanup
1805 itself, so that e.g. for coverage purposes the nested cleanups don't
1806 appear before the cleanup body. See PR64634 for details. */
1807 gimple_seq old_eh_seq = eh_seq;
1808 eh_seq = NULL;
1809
1810 out_label = NULL;
1811 cleanup = gimple_try_cleanup (tp);
1812 for (gsi = gsi_start (cleanup);
1813 !gsi_end_p (gsi);
1814 gsi_next (&gsi))
1815 {
1816 eh_catch c;
1817 gcatch *catch_stmt;
1818 gimple_seq handler;
1819
1820 catch_stmt = as_a <gcatch *> (gsi_stmt (gsi));
1821 c = gen_eh_region_catch (try_region, gimple_catch_types (catch_stmt));
1822
1823 handler = gimple_catch_handler (catch_stmt);
1824 lower_eh_constructs_1 (&this_state, &handler);
1825
1826 c->label = create_artificial_label (UNKNOWN_LOCATION);
1827 x = gimple_build_label (c->label);
1828 gimple_seq_add_stmt (&new_seq, x);
1829
1830 gimple_seq_add_seq (&new_seq, handler);
1831
1832 if (gimple_seq_may_fallthru (new_seq))
1833 {
1834 if (!out_label)
1835 out_label = create_artificial_label (try_catch_loc);
1836
1837 x = gimple_build_goto (out_label);
1838 gimple_seq_add_stmt (&new_seq, x);
1839 }
1840 if (!c->type_list)
1841 break;
1842 }
1843
1844 gimple_try_set_cleanup (tp, new_seq);
1845
1846 gimple_seq new_eh_seq = eh_seq;
1847 eh_seq = old_eh_seq;
1848 gimple_seq ret_seq = frob_into_branch_around (tp, try_region, out_label);
1849 gimple_seq_add_seq (&eh_seq, new_eh_seq);
1850 return ret_seq;
1851 }
1852
1853 /* A subroutine of lower_eh_constructs_1. Lower a GIMPLE_TRY with a
1854 GIMPLE_EH_FILTER to a sequence of labels and blocks, plus the exception
1855 region trees that record all the magic. */
1856
1857 static gimple_seq
1858 lower_eh_filter (struct leh_state *state, gtry *tp)
1859 {
1860 struct leh_state this_state = *state;
1861 eh_region this_region = NULL;
1862 gimple inner, x;
1863 gimple_seq new_seq;
1864
1865 inner = gimple_seq_first_stmt (gimple_try_cleanup (tp));
1866
1867 if (flag_exceptions)
1868 {
1869 this_region = gen_eh_region_allowed (state->cur_region,
1870 gimple_eh_filter_types (inner));
1871 this_state.cur_region = this_region;
1872 }
1873
1874 lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1875
1876 if (!eh_region_may_contain_throw (this_region))
1877 return gimple_try_eval (tp);
1878
1879 new_seq = NULL;
1880 this_state.cur_region = state->cur_region;
1881 this_state.ehp_region = this_region;
1882
1883 emit_eh_dispatch (&new_seq, this_region);
1884 emit_resx (&new_seq, this_region);
1885
1886 this_region->u.allowed.label = create_artificial_label (UNKNOWN_LOCATION);
1887 x = gimple_build_label (this_region->u.allowed.label);
1888 gimple_seq_add_stmt (&new_seq, x);
1889
1890 lower_eh_constructs_1 (&this_state, gimple_eh_filter_failure_ptr (inner));
1891 gimple_seq_add_seq (&new_seq, gimple_eh_filter_failure (inner));
1892
1893 gimple_try_set_cleanup (tp, new_seq);
1894
1895 return frob_into_branch_around (tp, this_region, NULL);
1896 }
1897
1898 /* A subroutine of lower_eh_constructs_1. Lower a GIMPLE_TRY with
1899    a GIMPLE_EH_MUST_NOT_THROW to a sequence of labels and blocks,
1900 plus the exception region trees that record all the magic. */
1901
1902 static gimple_seq
1903 lower_eh_must_not_throw (struct leh_state *state, gtry *tp)
1904 {
1905 struct leh_state this_state = *state;
1906
1907 if (flag_exceptions)
1908 {
1909 gimple inner = gimple_seq_first_stmt (gimple_try_cleanup (tp));
1910 eh_region this_region;
1911
1912 this_region = gen_eh_region_must_not_throw (state->cur_region);
1913 this_region->u.must_not_throw.failure_decl
1914 = gimple_eh_must_not_throw_fndecl (
1915 as_a <geh_mnt *> (inner));
1916 this_region->u.must_not_throw.failure_loc
1917 = LOCATION_LOCUS (gimple_location (tp));
1918
1919 /* In order to get mangling applied to this decl, we must mark it
1920 used now. Otherwise, pass_ipa_free_lang_data won't think it
1921 needs to happen. */
1922 TREE_USED (this_region->u.must_not_throw.failure_decl) = 1;
1923
1924 this_state.cur_region = this_region;
1925 }
1926
1927 lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1928
1929 return gimple_try_eval (tp);
1930 }
1931
1932 /* Implement a cleanup expression. This is similar to try-finally,
1933 except that we only execute the cleanup block for exception edges. */
1934
1935 static gimple_seq
1936 lower_cleanup (struct leh_state *state, gtry *tp)
1937 {
1938 struct leh_state this_state = *state;
1939 eh_region this_region = NULL;
1940 struct leh_tf_state fake_tf;
1941 gimple_seq result;
1942 bool cleanup_dead = cleanup_is_dead_in (state->cur_region);
1943
1944 if (flag_exceptions && !cleanup_dead)
1945 {
1946 this_region = gen_eh_region_cleanup (state->cur_region);
1947 this_state.cur_region = this_region;
1948 }
1949
1950 lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1951
1952 if (cleanup_dead || !eh_region_may_contain_throw (this_region))
1953 return gimple_try_eval (tp);
1954
1955 /* Build enough of a try-finally state so that we can reuse
1956 honor_protect_cleanup_actions. */
1957 memset (&fake_tf, 0, sizeof (fake_tf));
1958 fake_tf.top_p = fake_tf.try_finally_expr = tp;
1959 fake_tf.outer = state;
1960 fake_tf.region = this_region;
1961 fake_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp));
1962 fake_tf.may_throw = true;
1963
1964 honor_protect_cleanup_actions (state, NULL, &fake_tf);
1965
1966 if (fake_tf.may_throw)
1967 {
1968 /* In this case honor_protect_cleanup_actions had nothing to do,
1969 and we should process this normally. */
1970 lower_eh_constructs_1 (state, gimple_try_cleanup_ptr (tp));
1971 result = frob_into_branch_around (tp, this_region,
1972 fake_tf.fallthru_label);
1973 }
1974 else
1975 {
1976 /* In this case honor_protect_cleanup_actions did nearly all of
1977 the work. All we have left is to append the fallthru_label. */
1978
1979 result = gimple_try_eval (tp);
1980 if (fake_tf.fallthru_label)
1981 {
1982 gimple x = gimple_build_label (fake_tf.fallthru_label);
1983 gimple_seq_add_stmt (&result, x);
1984 }
1985 }
1986 return result;
1987 }
1988
1989 /* Main loop for lowering eh constructs. Also moves gsi to the next
1990 statement. */
1991
1992 static void
1993 lower_eh_constructs_2 (struct leh_state *state, gimple_stmt_iterator *gsi)
1994 {
1995 gimple_seq replace;
1996 gimple x;
1997 gimple stmt = gsi_stmt (*gsi);
1998
1999 switch (gimple_code (stmt))
2000 {
2001 case GIMPLE_CALL:
2002 {
2003 tree fndecl = gimple_call_fndecl (stmt);
2004 tree rhs, lhs;
2005
2006 if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
2007 switch (DECL_FUNCTION_CODE (fndecl))
2008 {
2009 case BUILT_IN_EH_POINTER:
2010 /* The front end may have generated a call to
2011 __builtin_eh_pointer (0) within a catch region. Replace
2012 this zero argument with the current catch region number. */
2013 if (state->ehp_region)
2014 {
2015 tree nr = build_int_cst (integer_type_node,
2016 state->ehp_region->index);
2017 gimple_call_set_arg (stmt, 0, nr);
2018 }
2019 else
2020 {
2021 	      /* The user has done something silly.  Remove it.  */
2022 rhs = null_pointer_node;
2023 goto do_replace;
2024 }
2025 break;
2026
2027 case BUILT_IN_EH_FILTER:
2028 /* ??? This should never appear, but since it's a builtin it
2029 is accessible to abuse by users. Just remove it and
2030 replace the use with the arbitrary value zero. */
2031 rhs = build_int_cst (TREE_TYPE (TREE_TYPE (fndecl)), 0);
2032 do_replace:
2033 lhs = gimple_call_lhs (stmt);
2034 x = gimple_build_assign (lhs, rhs);
2035 gsi_insert_before (gsi, x, GSI_SAME_STMT);
2036 /* FALLTHRU */
2037
2038 case BUILT_IN_EH_COPY_VALUES:
2039 /* Likewise this should not appear. Remove it. */
2040 gsi_remove (gsi, true);
2041 return;
2042
2043 default:
2044 break;
2045 }
2046 }
2047 /* FALLTHRU */
2048
2049 case GIMPLE_ASSIGN:
2050 /* If the stmt can throw use a new temporary for the assignment
2051 to a LHS. This makes sure the old value of the LHS is
2052 available on the EH edge. Only do so for statements that
2053 potentially fall through (no noreturn calls e.g.), otherwise
2054 this new assignment might create fake fallthru regions. */
2055 if (stmt_could_throw_p (stmt)
2056 && gimple_has_lhs (stmt)
2057 && gimple_stmt_may_fallthru (stmt)
2058 && !tree_could_throw_p (gimple_get_lhs (stmt))
2059 && is_gimple_reg_type (TREE_TYPE (gimple_get_lhs (stmt))))
2060 {
2061 tree lhs = gimple_get_lhs (stmt);
2062 tree tmp = create_tmp_var (TREE_TYPE (lhs));
2063 gimple s = gimple_build_assign (lhs, tmp);
2064 gimple_set_location (s, gimple_location (stmt));
2065 gimple_set_block (s, gimple_block (stmt));
2066 gimple_set_lhs (stmt, tmp);
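	  /* Complex and vector temporaries must be explicitly marked as
	     GIMPLE registers so the newly introduced copy can still be
	     rewritten into SSA form.  */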
2067 if (TREE_CODE (TREE_TYPE (tmp)) == COMPLEX_TYPE
2068 || TREE_CODE (TREE_TYPE (tmp)) == VECTOR_TYPE)
2069 DECL_GIMPLE_REG_P (tmp) = 1;
2070 gsi_insert_after (gsi, s, GSI_SAME_STMT);
2071 }
2072 /* Look for things that can throw exceptions, and record them. */
2073 if (state->cur_region && stmt_could_throw_p (stmt))
2074 {
2075 record_stmt_eh_region (state->cur_region, stmt);
2076 note_eh_region_may_contain_throw (state->cur_region);
2077 }
2078 break;
2079
2080 case GIMPLE_COND:
2081 case GIMPLE_GOTO:
2082 case GIMPLE_RETURN:
2083 maybe_record_in_goto_queue (state, stmt);
2084 break;
2085
2086 case GIMPLE_SWITCH:
2087 verify_norecord_switch_expr (state, as_a <gswitch *> (stmt));
2088 break;
2089
2090 case GIMPLE_TRY:
2091 {
2092 gtry *try_stmt = as_a <gtry *> (stmt);
2093 if (gimple_try_kind (try_stmt) == GIMPLE_TRY_FINALLY)
2094 replace = lower_try_finally (state, try_stmt);
2095 else
2096 {
2097 x = gimple_seq_first_stmt (gimple_try_cleanup (try_stmt));
2098 if (!x)
2099 {
2100 replace = gimple_try_eval (try_stmt);
2101 lower_eh_constructs_1 (state, &replace);
2102 }
2103 else
2104 switch (gimple_code (x))
2105 {
2106 case GIMPLE_CATCH:
2107 replace = lower_catch (state, try_stmt);
2108 break;
2109 case GIMPLE_EH_FILTER:
2110 replace = lower_eh_filter (state, try_stmt);
2111 break;
2112 case GIMPLE_EH_MUST_NOT_THROW:
2113 replace = lower_eh_must_not_throw (state, try_stmt);
2114 break;
2115 case GIMPLE_EH_ELSE:
2116 /* This code is only valid with GIMPLE_TRY_FINALLY. */
2117 gcc_unreachable ();
2118 default:
2119 replace = lower_cleanup (state, try_stmt);
2120 break;
2121 }
2122 }
2123 }
2124
2125 /* Remove the old stmt and insert the transformed sequence
2126 instead. */
2127 gsi_insert_seq_before (gsi, replace, GSI_SAME_STMT);
2128 gsi_remove (gsi, true);
2129
2130 /* Return since we don't want gsi_next () */
2131 return;
2132
2133 case GIMPLE_EH_ELSE:
2134 /* We should be eliminating this in lower_try_finally et al. */
2135 gcc_unreachable ();
2136
2137 default:
2138 /* A type, a decl, or some kind of statement that we're not
2139 interested in. Don't walk them. */
2140 break;
2141 }
2142
2143 gsi_next (gsi);
2144 }
2145
2146 /* A helper to unwrap a gimple_seq and feed stmts to lower_eh_constructs_2. */
2147
2148 static void
2149 lower_eh_constructs_1 (struct leh_state *state, gimple_seq *pseq)
2150 {
2151 gimple_stmt_iterator gsi;
2152 for (gsi = gsi_start (*pseq); !gsi_end_p (gsi);)
2153 lower_eh_constructs_2 (state, &gsi);
2154 }
2155
2156 namespace {
2157
2158 const pass_data pass_data_lower_eh =
2159 {
2160 GIMPLE_PASS, /* type */
2161 "eh", /* name */
2162 OPTGROUP_NONE, /* optinfo_flags */
2163 TV_TREE_EH, /* tv_id */
2164 PROP_gimple_lcf, /* properties_required */
2165 PROP_gimple_leh, /* properties_provided */
2166 0, /* properties_destroyed */
2167 0, /* todo_flags_start */
2168 0, /* todo_flags_finish */
2169 };
2170
2171 class pass_lower_eh : public gimple_opt_pass
2172 {
2173 public:
2174 pass_lower_eh (gcc::context *ctxt)
2175 : gimple_opt_pass (pass_data_lower_eh, ctxt)
2176 {}
2177
2178 /* opt_pass methods: */
2179 virtual unsigned int execute (function *);
2180
2181 }; // class pass_lower_eh
2182
2183 unsigned int
2184 pass_lower_eh::execute (function *fun)
2185 {
2186 struct leh_state null_state;
2187 gimple_seq bodyp;
2188
2189 bodyp = gimple_body (current_function_decl);
2190 if (bodyp == NULL)
2191 return 0;
2192
2193 finally_tree = new hash_table<finally_tree_hasher> (31);
2194 eh_region_may_contain_throw_map = BITMAP_ALLOC (NULL);
2195 memset (&null_state, 0, sizeof (null_state));
2196
2197 collect_finally_tree_1 (bodyp, NULL);
2198 lower_eh_constructs_1 (&null_state, &bodyp);
2199 gimple_set_body (current_function_decl, bodyp);
2200
2201 /* We assume there's a return statement, or something, at the end of
2202      the function, and thus plopping the EH sequence afterward won't
2203 change anything. */
2204 gcc_assert (!gimple_seq_may_fallthru (bodyp));
2205 gimple_seq_add_seq (&bodyp, eh_seq);
2206
2207 /* We assume that since BODYP already existed, adding EH_SEQ to it
2208 didn't change its value, and we don't have to re-set the function. */
2209 gcc_assert (bodyp == gimple_body (current_function_decl));
2210
2211 delete finally_tree;
2212 finally_tree = NULL;
2213 BITMAP_FREE (eh_region_may_contain_throw_map);
2214 eh_seq = NULL;
2215
2216 /* If this function needs a language specific EH personality routine
2217 and the frontend didn't already set one do so now. */
2218 if (function_needs_eh_personality (fun) == eh_personality_lang
2219 && !DECL_FUNCTION_PERSONALITY (current_function_decl))
2220 DECL_FUNCTION_PERSONALITY (current_function_decl)
2221 = lang_hooks.eh_personality ();
2222
2223 return 0;
2224 }
2225
2226 } // anon namespace
2227
2228 gimple_opt_pass *
2229 make_pass_lower_eh (gcc::context *ctxt)
2230 {
2231 return new pass_lower_eh (ctxt);
2232 }
2233 \f
2234 /* Create the multiple edges from an EH_DISPATCH statement to all of
2235 the possible handlers for its EH region. Return true if there's
2236 no fallthru edge; false if there is. */
2237
2238 bool
2239 make_eh_dispatch_edges (geh_dispatch *stmt)
2240 {
2241 eh_region r;
2242 eh_catch c;
2243 basic_block src, dst;
2244
2245 r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
2246 src = gimple_bb (stmt);
2247
2248 switch (r->type)
2249 {
2250 case ERT_TRY:
2251 for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
2252 {
2253 dst = label_to_block (c->label);
2254 make_edge (src, dst, 0);
2255
2256 /* A catch-all handler doesn't have a fallthru. */
2257 if (c->type_list == NULL)
2258 return false;
2259 }
2260 break;
2261
2262 case ERT_ALLOWED_EXCEPTIONS:
2263 dst = label_to_block (r->u.allowed.label);
2264 make_edge (src, dst, 0);
2265 break;
2266
2267 default:
2268 gcc_unreachable ();
2269 }
2270
2271 return true;
2272 }
2273
2274 /* Create the single EH edge from STMT to its nearest landing pad,
2275 if there is such a landing pad within the current function. */
2276
2277 void
2278 make_eh_edges (gimple stmt)
2279 {
2280 basic_block src, dst;
2281 eh_landing_pad lp;
2282 int lp_nr;
2283
2284 lp_nr = lookup_stmt_eh_lp (stmt);
2285 if (lp_nr <= 0)
2286 return;
2287
2288 lp = get_eh_landing_pad_from_number (lp_nr);
2289 gcc_assert (lp != NULL);
2290
2291 src = gimple_bb (stmt);
2292 dst = label_to_block (lp->post_landing_pad);
2293 make_edge (src, dst, EDGE_EH);
2294 }
2295
2296 /* Do the work in redirecting EDGE_IN to NEW_BB within the EH region tree;
2297 do not actually perform the final edge redirection.
2298
2299 CHANGE_REGION is true when we're being called from cleanup_empty_eh and
2300 we intend to change the destination EH region as well; this means
2301 EH_LANDING_PAD_NR must already be set on the destination block label.
2302 If false, we're being called from generic cfg manipulation code and we
2303 should preserve our place within the region tree. */
2304
2305 static void
2306 redirect_eh_edge_1 (edge edge_in, basic_block new_bb, bool change_region)
2307 {
2308 eh_landing_pad old_lp, new_lp;
2309 basic_block old_bb;
2310 gimple throw_stmt;
2311 int old_lp_nr, new_lp_nr;
2312 tree old_label, new_label;
2313 edge_iterator ei;
2314 edge e;
2315
2316 old_bb = edge_in->dest;
2317 old_label = gimple_block_label (old_bb);
2318 old_lp_nr = EH_LANDING_PAD_NR (old_label);
2319 gcc_assert (old_lp_nr > 0);
2320 old_lp = get_eh_landing_pad_from_number (old_lp_nr);
2321
2322 throw_stmt = last_stmt (edge_in->src);
2323 gcc_assert (lookup_stmt_eh_lp (throw_stmt) == old_lp_nr);
2324
2325 new_label = gimple_block_label (new_bb);
2326
2327 /* Look for an existing region that might be using NEW_BB already. */
2328 new_lp_nr = EH_LANDING_PAD_NR (new_label);
2329 if (new_lp_nr)
2330 {
2331 new_lp = get_eh_landing_pad_from_number (new_lp_nr);
2332 gcc_assert (new_lp);
2333
2334 /* Unless CHANGE_REGION is true, the new and old landing pad
2335 had better be associated with the same EH region. */
2336 gcc_assert (change_region || new_lp->region == old_lp->region);
2337 }
2338 else
2339 {
2340 new_lp = NULL;
2341 gcc_assert (!change_region);
2342 }
2343
2344 /* Notice when we redirect the last EH edge away from OLD_BB. */
2345 FOR_EACH_EDGE (e, ei, old_bb->preds)
2346 if (e != edge_in && (e->flags & EDGE_EH))
2347 break;
2348
2349 if (new_lp)
2350 {
2351 /* NEW_LP already exists. If there are still edges into OLD_LP,
2352 there's nothing to do with the EH tree. If there are no more
2353 edges into OLD_LP, then we want to remove OLD_LP as it is unused.
2354 If CHANGE_REGION is true, then our caller is expecting to remove
2355 the landing pad. */
2356 if (e == NULL && !change_region)
2357 remove_eh_landing_pad (old_lp);
2358 }
2359 else
2360 {
2361 /* No correct landing pad exists. If there are no more edges
2362 into OLD_LP, then we can simply re-use the existing landing pad.
2363 Otherwise, we have to create a new landing pad. */
2364 if (e == NULL)
2365 {
2366 EH_LANDING_PAD_NR (old_lp->post_landing_pad) = 0;
2367 new_lp = old_lp;
2368 }
2369 else
2370 new_lp = gen_eh_landing_pad (old_lp->region);
2371 new_lp->post_landing_pad = new_label;
2372 EH_LANDING_PAD_NR (new_label) = new_lp->index;
2373 }
2374
2375 /* Maybe move the throwing statement to the new region. */
2376 if (old_lp != new_lp)
2377 {
2378 remove_stmt_from_eh_lp (throw_stmt);
2379 add_stmt_to_eh_lp (throw_stmt, new_lp->index);
2380 }
2381 }
2382
2383 /* Redirect EH edge EDGE_IN to NEW_BB.  */
2384
2385 edge
2386 redirect_eh_edge (edge edge_in, basic_block new_bb)
2387 {
2388 redirect_eh_edge_1 (edge_in, new_bb, false);
2389 return ssa_redirect_edge (edge_in, new_bb);
2390 }
2391
2392 /* This is a subroutine of gimple_redirect_edge_and_branch. Update the
2393 labels for redirecting a non-fallthru EH_DISPATCH edge E to NEW_BB.
2394 The actual edge update will happen in the caller. */
2395
2396 void
2397 redirect_eh_dispatch_edge (geh_dispatch *stmt, edge e, basic_block new_bb)
2398 {
2399 tree new_lab = gimple_block_label (new_bb);
2400 bool any_changed = false;
2401 basic_block old_bb;
2402 eh_region r;
2403 eh_catch c;
2404
2405 r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
2406 switch (r->type)
2407 {
2408 case ERT_TRY:
2409 for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
2410 {
2411 old_bb = label_to_block (c->label);
2412 if (old_bb == e->dest)
2413 {
2414 c->label = new_lab;
2415 any_changed = true;
2416 }
2417 }
2418 break;
2419
2420 case ERT_ALLOWED_EXCEPTIONS:
2421 old_bb = label_to_block (r->u.allowed.label);
2422 gcc_assert (old_bb == e->dest);
2423 r->u.allowed.label = new_lab;
2424 any_changed = true;
2425 break;
2426
2427 default:
2428 gcc_unreachable ();
2429 }
2430
2431 gcc_assert (any_changed);
2432 }
2433 \f
2434 /* Helper function for operation_could_trap_p and stmt_could_throw_p. */
2435
2436 bool
2437 operation_could_trap_helper_p (enum tree_code op,
2438 bool fp_operation,
2439 bool honor_trapv,
2440 bool honor_nans,
2441 bool honor_snans,
2442 tree divisor,
2443 bool *handled)
2444 {
2445 *handled = true;
2446 switch (op)
2447 {
2448 case TRUNC_DIV_EXPR:
2449 case CEIL_DIV_EXPR:
2450 case FLOOR_DIV_EXPR:
2451 case ROUND_DIV_EXPR:
2452 case EXACT_DIV_EXPR:
2453 case CEIL_MOD_EXPR:
2454 case FLOOR_MOD_EXPR:
2455 case ROUND_MOD_EXPR:
2456 case TRUNC_MOD_EXPR:
2457 case RDIV_EXPR:
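      /* Division traps on signaling NaNs and on trapping-overflow types;
	 floating-point division may trap when trapping math is enabled,
	 and integer division may trap unless the divisor is a nonzero
	 constant.  */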
2458 if (honor_snans || honor_trapv)
2459 return true;
2460 if (fp_operation)
2461 return flag_trapping_math;
2462 if (!TREE_CONSTANT (divisor) || integer_zerop (divisor))
2463 return true;
2464 return false;
2465
2466 case LT_EXPR:
2467 case LE_EXPR:
2468 case GT_EXPR:
2469 case GE_EXPR:
2470 case LTGT_EXPR:
2471 /* Some floating point comparisons may trap. */
2472 return honor_nans;
2473
2474 case EQ_EXPR:
2475 case NE_EXPR:
2476 case UNORDERED_EXPR:
2477 case ORDERED_EXPR:
2478 case UNLT_EXPR:
2479 case UNLE_EXPR:
2480 case UNGT_EXPR:
2481 case UNGE_EXPR:
2482 case UNEQ_EXPR:
2483 return honor_snans;
2484
2485 case NEGATE_EXPR:
2486 case ABS_EXPR:
2487 case CONJ_EXPR:
2488 /* These operations don't trap with floating point. */
2489 if (honor_trapv)
2490 return true;
2491 return false;
2492
2493 case PLUS_EXPR:
2494 case MINUS_EXPR:
2495 case MULT_EXPR:
2496 /* Any floating arithmetic may trap. */
2497 if (fp_operation && flag_trapping_math)
2498 return true;
2499 if (honor_trapv)
2500 return true;
2501 return false;
2502
2503 case COMPLEX_EXPR:
2504 case CONSTRUCTOR:
2505 /* Constructing an object cannot trap. */
2506 return false;
2507
2508 default:
2509 /* Any floating arithmetic may trap. */
2510 if (fp_operation && flag_trapping_math)
2511 return true;
2512
2513 *handled = false;
2514 return false;
2515 }
2516 }
2517
2518 /* Return true if operation OP may trap. FP_OPERATION is true if OP is applied
2519 on floating-point values. HONOR_TRAPV is true if OP is applied on integer
2520 type operands that may trap. If OP is a division operator, DIVISOR contains
2521 the value of the divisor. */
2522
2523 bool
2524 operation_could_trap_p (enum tree_code op, bool fp_operation, bool honor_trapv,
2525 tree divisor)
2526 {
2527 bool honor_nans = (fp_operation && flag_trapping_math
2528 && !flag_finite_math_only);
2529 bool honor_snans = fp_operation && flag_signaling_nans != 0;
2530 bool handled;
2531
2532 if (TREE_CODE_CLASS (op) != tcc_comparison
2533 && TREE_CODE_CLASS (op) != tcc_unary
2534 && TREE_CODE_CLASS (op) != tcc_binary)
2535 return false;
2536
2537 return operation_could_trap_helper_p (op, fp_operation, honor_trapv,
2538 honor_nans, honor_snans, divisor,
2539 &handled);
2540 }
2541
2542
2543 /* Returns true if it is possible to prove that the index of
2544 an array access REF (an ARRAY_REF expression) falls into the
2545 array bounds. */
2546
2547 static bool
2548 in_array_bounds_p (tree ref)
2549 {
2550 tree idx = TREE_OPERAND (ref, 1);
2551 tree min, max;
2552
2553 if (TREE_CODE (idx) != INTEGER_CST)
2554 return false;
2555
2556 min = array_ref_low_bound (ref);
2557 max = array_ref_up_bound (ref);
2558 if (!min
2559 || !max
2560 || TREE_CODE (min) != INTEGER_CST
2561 || TREE_CODE (max) != INTEGER_CST)
2562 return false;
2563
2564 if (tree_int_cst_lt (idx, min)
2565 || tree_int_cst_lt (max, idx))
2566 return false;
2567
2568 return true;
2569 }
2570
2571 /* Returns true if it is possible to prove that the range of
2572 an array access REF (an ARRAY_RANGE_REF expression) falls
2573 into the array bounds. */
2574
2575 static bool
2576 range_in_array_bounds_p (tree ref)
2577 {
2578 tree domain_type = TYPE_DOMAIN (TREE_TYPE (ref));
2579 tree range_min, range_max, min, max;
2580
2581 range_min = TYPE_MIN_VALUE (domain_type);
2582 range_max = TYPE_MAX_VALUE (domain_type);
2583 if (!range_min
2584 || !range_max
2585 || TREE_CODE (range_min) != INTEGER_CST
2586 || TREE_CODE (range_max) != INTEGER_CST)
2587 return false;
2588
2589 min = array_ref_low_bound (ref);
2590 max = array_ref_up_bound (ref);
2591 if (!min
2592 || !max
2593 || TREE_CODE (min) != INTEGER_CST
2594 || TREE_CODE (max) != INTEGER_CST)
2595 return false;
2596
2597 if (tree_int_cst_lt (range_min, min)
2598 || tree_int_cst_lt (max, range_max))
2599 return false;
2600
2601 return true;
2602 }
2603
2604 /* Return true if EXPR can trap, as in dereferencing an invalid pointer
2605    location or floating point arithmetic.  Cf. the rtl version, may_trap_p.
2606 This routine expects only GIMPLE lhs or rhs input. */
2607
2608 bool
2609 tree_could_trap_p (tree expr)
2610 {
2611 enum tree_code code;
2612 bool fp_operation = false;
2613 bool honor_trapv = false;
2614 tree t, base, div = NULL_TREE;
2615
2616 if (!expr)
2617 return false;
2618
2619 code = TREE_CODE (expr);
2620 t = TREE_TYPE (expr);
2621
2622 if (t)
2623 {
2624 if (COMPARISON_CLASS_P (expr))
2625 fp_operation = FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 0)));
2626 else
2627 fp_operation = FLOAT_TYPE_P (t);
2628 honor_trapv = INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t);
2629 }
2630
2631 if (TREE_CODE_CLASS (code) == tcc_binary)
2632 div = TREE_OPERAND (expr, 1);
2633 if (operation_could_trap_p (code, fp_operation, honor_trapv, div))
2634 return true;
2635
2636 restart:
2637 switch (code)
2638 {
2639 case COMPONENT_REF:
2640 case REALPART_EXPR:
2641 case IMAGPART_EXPR:
2642 case BIT_FIELD_REF:
2643 case VIEW_CONVERT_EXPR:
2644 case WITH_SIZE_EXPR:
2645 expr = TREE_OPERAND (expr, 0);
2646 code = TREE_CODE (expr);
2647 goto restart;
2648
2649 case ARRAY_RANGE_REF:
2650 base = TREE_OPERAND (expr, 0);
2651 if (tree_could_trap_p (base))
2652 return true;
2653 if (TREE_THIS_NOTRAP (expr))
2654 return false;
2655 return !range_in_array_bounds_p (expr);
2656
2657 case ARRAY_REF:
2658 base = TREE_OPERAND (expr, 0);
2659 if (tree_could_trap_p (base))
2660 return true;
2661 if (TREE_THIS_NOTRAP (expr))
2662 return false;
2663 return !in_array_bounds_p (expr);
2664
2665 case TARGET_MEM_REF:
2666 case MEM_REF:
2667 if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR
2668 && tree_could_trap_p (TREE_OPERAND (TREE_OPERAND (expr, 0), 0)))
2669 return true;
2670 if (TREE_THIS_NOTRAP (expr))
2671 return false;
2672 /* We cannot prove that the access is in-bounds when we have
2673 variable-index TARGET_MEM_REFs. */
2674 if (code == TARGET_MEM_REF
2675 && (TMR_INDEX (expr) || TMR_INDEX2 (expr)))
2676 return true;
2677 if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR)
2678 {
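	  /* The base object is known; check that the constant offset of
	     the access still lies within its size.  */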
2679 tree base = TREE_OPERAND (TREE_OPERAND (expr, 0), 0);
2680 offset_int off = mem_ref_offset (expr);
2681 if (wi::neg_p (off, SIGNED))
2682 return true;
2683 if (TREE_CODE (base) == STRING_CST)
2684 return wi::leu_p (TREE_STRING_LENGTH (base), off);
2685 else if (DECL_SIZE_UNIT (base) == NULL_TREE
2686 || TREE_CODE (DECL_SIZE_UNIT (base)) != INTEGER_CST
2687 || wi::leu_p (wi::to_offset (DECL_SIZE_UNIT (base)), off))
2688 return true;
2689 /* Now we are sure the first byte of the access is inside
2690 the object. */
2691 return false;
2692 }
2693 return true;
2694
2695 case INDIRECT_REF:
2696 return !TREE_THIS_NOTRAP (expr);
2697
2698 case ASM_EXPR:
2699 return TREE_THIS_VOLATILE (expr);
2700
2701 case CALL_EXPR:
2702 t = get_callee_fndecl (expr);
2703 /* Assume that calls to weak functions may trap. */
2704 if (!t || !DECL_P (t))
2705 return true;
2706 if (DECL_WEAK (t))
2707 return tree_could_trap_p (t);
2708 return false;
2709
2710 case FUNCTION_DECL:
2711 /* Assume that accesses to weak functions may trap, unless we know
2712 they are certainly defined in current TU or in some other
2713 LTO partition. */
2714 if (DECL_WEAK (expr) && !DECL_COMDAT (expr) && DECL_EXTERNAL (expr))
2715 {
2716 cgraph_node *node = cgraph_node::get (expr);
2717 if (node)
2718 node = node->function_symbol ();
2719 return !(node && node->in_other_partition);
2720 }
2721 return false;
2722
2723 case VAR_DECL:
2724 /* Assume that accesses to weak vars may trap, unless we know
2725 they are certainly defined in current TU or in some other
2726 LTO partition. */
2727 if (DECL_WEAK (expr) && !DECL_COMDAT (expr) && DECL_EXTERNAL (expr))
2728 {
2729 varpool_node *node = varpool_node::get (expr);
2730 if (node)
2731 node = node->ultimate_alias_target ();
2732 return !(node && node->in_other_partition);
2733 }
2734 return false;
2735
2736 default:
2737 return false;
2738 }
2739 }
2740
2741
2742 /* Helper for stmt_could_throw_p.  Return true if STMT (assumed to be an
2743    assignment or a conditional) may throw.  */
2744
2745 static bool
2746 stmt_could_throw_1_p (gimple stmt)
2747 {
2748 enum tree_code code = gimple_expr_code (stmt);
2749 bool honor_nans = false;
2750 bool honor_snans = false;
2751 bool fp_operation = false;
2752 bool honor_trapv = false;
2753 tree t;
2754 size_t i;
2755 bool handled, ret;
2756
2757 if (TREE_CODE_CLASS (code) == tcc_comparison
2758 || TREE_CODE_CLASS (code) == tcc_unary
2759 || TREE_CODE_CLASS (code) == tcc_binary)
2760 {
2761 if (is_gimple_assign (stmt)
2762 && TREE_CODE_CLASS (code) == tcc_comparison)
2763 t = TREE_TYPE (gimple_assign_rhs1 (stmt));
2764 else if (gimple_code (stmt) == GIMPLE_COND)
2765 t = TREE_TYPE (gimple_cond_lhs (stmt));
2766 else
2767 t = gimple_expr_type (stmt);
2768 fp_operation = FLOAT_TYPE_P (t);
2769 if (fp_operation)
2770 {
2771 honor_nans = flag_trapping_math && !flag_finite_math_only;
2772 honor_snans = flag_signaling_nans != 0;
2773 }
2774 else if (INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t))
2775 honor_trapv = true;
2776 }
2777
2778 /* Check if the main expression may trap. */
2779 t = is_gimple_assign (stmt) ? gimple_assign_rhs2 (stmt) : NULL;
2780 ret = operation_could_trap_helper_p (code, fp_operation, honor_trapv,
2781 honor_nans, honor_snans, t,
2782 &handled);
2783 if (handled)
2784 return ret;
2785
2786 /* If the expression does not trap, see if any of the individual operands may
2787 trap. */
2788 for (i = 0; i < gimple_num_ops (stmt); i++)
2789 if (tree_could_trap_p (gimple_op (stmt, i)))
2790 return true;
2791
2792 return false;
2793 }
2794
2795
2796 /* Return true if statement STMT could throw an exception. */
2797
2798 bool
2799 stmt_could_throw_p (gimple stmt)
2800 {
2801 if (!flag_exceptions)
2802 return false;
2803
2804 /* The only statements that can throw an exception are assignments,
2805 conditionals, calls, resx, and asms. */
2806 switch (gimple_code (stmt))
2807 {
2808 case GIMPLE_RESX:
2809 return true;
2810
2811 case GIMPLE_CALL:
2812 return !gimple_call_nothrow_p (as_a <gcall *> (stmt));
2813
2814 case GIMPLE_ASSIGN:
2815 case GIMPLE_COND:
2816 if (!cfun->can_throw_non_call_exceptions)
2817 return false;
2818 return stmt_could_throw_1_p (stmt);
2819
2820 case GIMPLE_ASM:
2821 if (!cfun->can_throw_non_call_exceptions)
2822 return false;
2823 return gimple_asm_volatile_p (as_a <gasm *> (stmt));
2824
2825 default:
2826 return false;
2827 }
2828 }
2829
2830
2831 /* Return true if expression T could throw an exception. */
2832
2833 bool
2834 tree_could_throw_p (tree t)
2835 {
2836 if (!flag_exceptions)
2837 return false;
2838 if (TREE_CODE (t) == MODIFY_EXPR)
2839 {
2840 if (cfun->can_throw_non_call_exceptions
2841 && tree_could_trap_p (TREE_OPERAND (t, 0)))
2842 return true;
2843 t = TREE_OPERAND (t, 1);
2844 }
2845
2846 if (TREE_CODE (t) == WITH_SIZE_EXPR)
2847 t = TREE_OPERAND (t, 0);
2848 if (TREE_CODE (t) == CALL_EXPR)
2849 return (call_expr_flags (t) & ECF_NOTHROW) == 0;
2850 if (cfun->can_throw_non_call_exceptions)
2851 return tree_could_trap_p (t);
2852 return false;
2853 }
2854
2855 /* Return true if STMT can throw an exception that is not caught within
2856 the current function (CFUN). */
2857
2858 bool
2859 stmt_can_throw_external (gimple stmt)
2860 {
2861 int lp_nr;
2862
2863 if (!stmt_could_throw_p (stmt))
2864 return false;
2865
2866 lp_nr = lookup_stmt_eh_lp (stmt);
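  /* A landing pad number of zero means the statement is not contained in
     any EH region of this function, so the exception escapes.  */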
2867 return lp_nr == 0;
2868 }
2869
2870 /* Return true if STMT can throw an exception that is caught within
2871 the current function (CFUN). */
2872
2873 bool
2874 stmt_can_throw_internal (gimple stmt)
2875 {
2876 int lp_nr;
2877
2878 if (!stmt_could_throw_p (stmt))
2879 return false;
2880
2881 lp_nr = lookup_stmt_eh_lp (stmt);
2882 return lp_nr > 0;
2883 }
2884
2885 /* Given a statement STMT in IFUN, if STMT can no longer throw, then
2886 remove any entry it might have from the EH table. Return true if
2887 any change was made. */
2888
2889 bool
2890 maybe_clean_eh_stmt_fn (struct function *ifun, gimple stmt)
2891 {
2892 if (stmt_could_throw_p (stmt))
2893 return false;
2894 return remove_stmt_from_eh_lp_fn (ifun, stmt);
2895 }
2896
2897 /* Likewise, but always use the current function. */
2898
2899 bool
2900 maybe_clean_eh_stmt (gimple stmt)
2901 {
2902 return maybe_clean_eh_stmt_fn (cfun, stmt);
2903 }
2904
2905 /* Given a statement OLD_STMT and a new statement NEW_STMT that has replaced
2906 OLD_STMT in the function, remove OLD_STMT from the EH table and put NEW_STMT
2907 in the table if it should be in there. Return TRUE if a replacement was
2908    done that may require an EH edge purge.  */
2909
2910 bool
2911 maybe_clean_or_replace_eh_stmt (gimple old_stmt, gimple new_stmt)
2912 {
2913 int lp_nr = lookup_stmt_eh_lp (old_stmt);
2914
2915 if (lp_nr != 0)
2916 {
2917 bool new_stmt_could_throw = stmt_could_throw_p (new_stmt);
2918
2919 if (new_stmt == old_stmt && new_stmt_could_throw)
2920 return false;
2921
2922 remove_stmt_from_eh_lp (old_stmt);
2923 if (new_stmt_could_throw)
2924 {
2925 add_stmt_to_eh_lp (new_stmt, lp_nr);
2926 return false;
2927 }
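      /* The new statement cannot throw, so the EH edge out of this
	 statement's block is now dead and may need to be purged.  */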
2928 else
2929 return true;
2930 }
2931
2932 return false;
2933 }
2934
2935 /* Given a statement OLD_STMT in OLD_FUN and a duplicate statement NEW_STMT
2936 in NEW_FUN, copy the EH table data from OLD_STMT to NEW_STMT. The MAP
2937 operand is the return value of duplicate_eh_regions. */
2938
2939 bool
2940 maybe_duplicate_eh_stmt_fn (struct function *new_fun, gimple new_stmt,
2941 struct function *old_fun, gimple old_stmt,
2942 hash_map<void *, void *> *map,
2943 int default_lp_nr)
2944 {
2945 int old_lp_nr, new_lp_nr;
2946
2947 if (!stmt_could_throw_p (new_stmt))
2948 return false;
2949
2950 old_lp_nr = lookup_stmt_eh_lp_fn (old_fun, old_stmt);
2951 if (old_lp_nr == 0)
2952 {
2953 if (default_lp_nr == 0)
2954 return false;
2955 new_lp_nr = default_lp_nr;
2956 }
2957 else if (old_lp_nr > 0)
2958 {
2959 eh_landing_pad old_lp, new_lp;
2960
2961 old_lp = (*old_fun->eh->lp_array)[old_lp_nr];
2962 new_lp = static_cast<eh_landing_pad> (*map->get (old_lp));
2963 new_lp_nr = new_lp->index;
2964 }
2965 else
2966 {
2967 eh_region old_r, new_r;
2968
2969 old_r = (*old_fun->eh->region_array)[-old_lp_nr];
2970 new_r = static_cast<eh_region> (*map->get (old_r));
2971 new_lp_nr = -new_r->index;
2972 }
2973
2974 add_stmt_to_eh_lp_fn (new_fun, new_stmt, new_lp_nr);
2975 return true;
2976 }
2977
2978 /* Similar, but both OLD_STMT and NEW_STMT are within the current function,
2979 and thus no remapping is required. */
2980
2981 bool
2982 maybe_duplicate_eh_stmt (gimple new_stmt, gimple old_stmt)
2983 {
2984 int lp_nr;
2985
2986 if (!stmt_could_throw_p (new_stmt))
2987 return false;
2988
2989 lp_nr = lookup_stmt_eh_lp (old_stmt);
2990 if (lp_nr == 0)
2991 return false;
2992
2993 add_stmt_to_eh_lp (new_stmt, lp_nr);
2994 return true;
2995 }
2996 \f
2997 /* Returns TRUE if oneh and twoh are exception handlers (gimple_try_cleanup of
2998 GIMPLE_TRY) that are similar enough to be considered the same. Currently
2999 this only handles handlers consisting of a single call, as that's the
3000 important case for C++: a destructor call for a particular object showing
3001 up in multiple handlers. */
3002
3003 static bool
3004 same_handler_p (gimple_seq oneh, gimple_seq twoh)
3005 {
3006 gimple_stmt_iterator gsi;
3007 gimple ones, twos;
3008 unsigned int ai;
3009
3010 gsi = gsi_start (oneh);
3011 if (!gsi_one_before_end_p (gsi))
3012 return false;
3013 ones = gsi_stmt (gsi);
3014
3015 gsi = gsi_start (twoh);
3016 if (!gsi_one_before_end_p (gsi))
3017 return false;
3018 twos = gsi_stmt (gsi);
3019
3020 if (!is_gimple_call (ones)
3021 || !is_gimple_call (twos)
3022 || gimple_call_lhs (ones)
3023 || gimple_call_lhs (twos)
3024 || gimple_call_chain (ones)
3025 || gimple_call_chain (twos)
3026 || !gimple_call_same_target_p (ones, twos)
3027 || gimple_call_num_args (ones) != gimple_call_num_args (twos))
3028 return false;
3029
3030 for (ai = 0; ai < gimple_call_num_args (ones); ++ai)
3031 if (!operand_equal_p (gimple_call_arg (ones, ai),
3032 gimple_call_arg (twos, ai), 0))
3033 return false;
3034
3035 return true;
3036 }
3037
3038 /* Optimize
3039 try { A() } finally { try { ~B() } catch { ~A() } }
3040 try { ... } finally { ~A() }
3041 into
3042 try { A() } catch { ~B() }
3043 try { ~B() ... } finally { ~A() }
3044
3045 This occurs frequently in C++, where A is a local variable and B is a
3046 temporary used in the initializer for A. */
3047
3048 static void
3049 optimize_double_finally (gtry *one, gtry *two)
3050 {
3051 gimple oneh;
3052 gimple_stmt_iterator gsi;
3053 gimple_seq cleanup;
3054
3055 cleanup = gimple_try_cleanup (one);
3056 gsi = gsi_start (cleanup);
3057 if (!gsi_one_before_end_p (gsi))
3058 return;
3059
3060 oneh = gsi_stmt (gsi);
3061 if (gimple_code (oneh) != GIMPLE_TRY
3062 || gimple_try_kind (oneh) != GIMPLE_TRY_CATCH)
3063 return;
3064
3065 if (same_handler_p (gimple_try_cleanup (oneh), gimple_try_cleanup (two)))
3066 {
3067 gimple_seq seq = gimple_try_eval (oneh);
3068
3069 gimple_try_set_cleanup (one, seq);
3070 gimple_try_set_kind (one, GIMPLE_TRY_CATCH);
3071 seq = copy_gimple_seq_and_replace_locals (seq);
3072 gimple_seq_add_seq (&seq, gimple_try_eval (two));
3073 gimple_try_set_eval (two, seq);
3074 }
3075 }
3076
3077 /* Perform EH refactoring optimizations that are simpler to do when code
3078 flow has been lowered but EH structures haven't. */
3079
3080 static void
3081 refactor_eh_r (gimple_seq seq)
3082 {
3083 gimple_stmt_iterator gsi;
3084 gimple one, two;
3085
3086 one = NULL;
3087 two = NULL;
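  /* Walk the sequence with a one-statement window: ONE is the previous
     statement and TWO the current one, so that adjacent GIMPLE_TRY_FINALLY
     pairs can be handed to optimize_double_finally.  */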
3088 gsi = gsi_start (seq);
3089 while (1)
3090 {
3091 one = two;
3092 if (gsi_end_p (gsi))
3093 two = NULL;
3094 else
3095 two = gsi_stmt (gsi);
3096 if (one && two)
3097 if (gtry *try_one = dyn_cast <gtry *> (one))
3098 if (gtry *try_two = dyn_cast <gtry *> (two))
3099 if (gimple_try_kind (try_one) == GIMPLE_TRY_FINALLY
3100 && gimple_try_kind (try_two) == GIMPLE_TRY_FINALLY)
3101 optimize_double_finally (try_one, try_two);
3102 if (one)
3103 switch (gimple_code (one))
3104 {
3105 case GIMPLE_TRY:
3106 refactor_eh_r (gimple_try_eval (one));
3107 refactor_eh_r (gimple_try_cleanup (one));
3108 break;
3109 case GIMPLE_CATCH:
3110 refactor_eh_r (gimple_catch_handler (as_a <gcatch *> (one)));
3111 break;
3112 case GIMPLE_EH_FILTER:
3113 refactor_eh_r (gimple_eh_filter_failure (one));
3114 break;
3115 case GIMPLE_EH_ELSE:
3116 {
3117 geh_else *eh_else_stmt = as_a <geh_else *> (one);
3118 refactor_eh_r (gimple_eh_else_n_body (eh_else_stmt));
3119 refactor_eh_r (gimple_eh_else_e_body (eh_else_stmt));
3120 }
3121 break;
3122 default:
3123 break;
3124 }
3125 if (two)
3126 gsi_next (&gsi);
3127 else
3128 break;
3129 }
3130 }
3131
3132 namespace {
3133
3134 const pass_data pass_data_refactor_eh =
3135 {
3136 GIMPLE_PASS, /* type */
3137 "ehopt", /* name */
3138 OPTGROUP_NONE, /* optinfo_flags */
3139 TV_TREE_EH, /* tv_id */
3140 PROP_gimple_lcf, /* properties_required */
3141 0, /* properties_provided */
3142 0, /* properties_destroyed */
3143 0, /* todo_flags_start */
3144 0, /* todo_flags_finish */
3145 };
3146
3147 class pass_refactor_eh : public gimple_opt_pass
3148 {
3149 public:
3150 pass_refactor_eh (gcc::context *ctxt)
3151 : gimple_opt_pass (pass_data_refactor_eh, ctxt)
3152 {}
3153
3154 /* opt_pass methods: */
3155 virtual bool gate (function *) { return flag_exceptions != 0; }
3156 virtual unsigned int execute (function *)
3157 {
3158 refactor_eh_r (gimple_body (current_function_decl));
3159 return 0;
3160 }
3161
3162 }; // class pass_refactor_eh
3163
3164 } // anon namespace
3165
3166 gimple_opt_pass *
3167 make_pass_refactor_eh (gcc::context *ctxt)
3168 {
3169 return new pass_refactor_eh (ctxt);
3170 }
3171 \f
3172 /* At the end of gimple optimization, we can lower RESX. */
3173
3174 static bool
3175 lower_resx (basic_block bb, gresx *stmt,
3176 hash_map<eh_region, tree> *mnt_map)
3177 {
3178 int lp_nr;
3179 eh_region src_r, dst_r;
3180 gimple_stmt_iterator gsi;
3181 gimple x;
3182 tree fn, src_nr;
3183 bool ret = false;
3184
3185 lp_nr = lookup_stmt_eh_lp (stmt);
3186 if (lp_nr != 0)
3187 dst_r = get_eh_region_from_lp_number (lp_nr);
3188 else
3189 dst_r = NULL;
3190
3191 src_r = get_eh_region_from_number (gimple_resx_region (stmt));
3192 gsi = gsi_last_bb (bb);
3193
3194 if (src_r == NULL)
3195 {
3196 /* We can wind up with no source region when pass_cleanup_eh shows
3197 that there are no entries into an eh region and deletes it, but
3198 then the block that contains the resx isn't removed. This can
3199 happen without optimization when the switch statement created by
3200 lower_try_finally_switch isn't simplified to remove the eh case.
3201
3202 Resolve this by expanding the resx node to an abort. */
3203
3204 fn = builtin_decl_implicit (BUILT_IN_TRAP);
3205 x = gimple_build_call (fn, 0);
3206 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3207
3208 while (EDGE_COUNT (bb->succs) > 0)
3209 remove_edge (EDGE_SUCC (bb, 0));
3210 }
3211 else if (dst_r)
3212 {
3213 /* When we have a destination region, we resolve this by copying
3214 the excptr and filter values into place, and changing the edge
3215 to immediately after the landing pad. */
3216 edge e;
3217
3218 if (lp_nr < 0)
3219 {
3220 basic_block new_bb;
3221 tree lab;
3222
3223 	  /* We are resuming into a MUST_NOT_THROW region.  Expand a call to
3224 the failure decl into a new block, if needed. */
3225 gcc_assert (dst_r->type == ERT_MUST_NOT_THROW);
3226
3227 tree *slot = mnt_map->get (dst_r);
3228 if (slot == NULL)
3229 {
3230 gimple_stmt_iterator gsi2;
3231
3232 new_bb = create_empty_bb (bb);
3233 add_bb_to_loop (new_bb, bb->loop_father);
3234 lab = gimple_block_label (new_bb);
3235 gsi2 = gsi_start_bb (new_bb);
3236
3237 fn = dst_r->u.must_not_throw.failure_decl;
3238 x = gimple_build_call (fn, 0);
3239 gimple_set_location (x, dst_r->u.must_not_throw.failure_loc);
3240 gsi_insert_after (&gsi2, x, GSI_CONTINUE_LINKING);
3241
3242 mnt_map->put (dst_r, lab);
3243 }
3244 else
3245 {
3246 lab = *slot;
3247 new_bb = label_to_block (lab);
3248 }
3249
3250 gcc_assert (EDGE_COUNT (bb->succs) == 0);
3251 e = make_edge (bb, new_bb, EDGE_FALLTHRU);
3252 e->count = bb->count;
3253 e->probability = REG_BR_PROB_BASE;
3254 }
3255 else
3256 {
3257 edge_iterator ei;
3258 tree dst_nr = build_int_cst (integer_type_node, dst_r->index);
3259
3260 fn = builtin_decl_implicit (BUILT_IN_EH_COPY_VALUES);
3261 src_nr = build_int_cst (integer_type_node, src_r->index);
3262 x = gimple_build_call (fn, 2, dst_nr, src_nr);
3263 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3264
3265 /* Update the flags for the outgoing edge. */
3266 e = single_succ_edge (bb);
3267 gcc_assert (e->flags & EDGE_EH);
3268 e->flags = (e->flags & ~EDGE_EH) | EDGE_FALLTHRU;
3269
3270 /* If there are no more EH users of the landing pad, delete it. */
3271 FOR_EACH_EDGE (e, ei, e->dest->preds)
3272 if (e->flags & EDGE_EH)
3273 break;
3274 if (e == NULL)
3275 {
3276 eh_landing_pad lp = get_eh_landing_pad_from_number (lp_nr);
3277 remove_eh_landing_pad (lp);
3278 }
3279 }
3280
3281 ret = true;
3282 }
3283 else
3284 {
3285 tree var;
3286
3287 /* When we don't have a destination region, this exception escapes
3288 up the call chain. We resolve this by generating a call to the
3289 _Unwind_Resume library function. */
3290
3291 /* The ARM EABI redefines _Unwind_Resume as __cxa_end_cleanup
3292 with no arguments for C++ and Java. Check for that. */
3293 if (src_r->use_cxa_end_cleanup)
3294 {
3295 fn = builtin_decl_implicit (BUILT_IN_CXA_END_CLEANUP);
3296 x = gimple_build_call (fn, 0);
3297 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3298 }
3299 else
3300 {
3301 fn = builtin_decl_implicit (BUILT_IN_EH_POINTER);
3302 src_nr = build_int_cst (integer_type_node, src_r->index);
3303 x = gimple_build_call (fn, 1, src_nr);
3304 var = create_tmp_var (ptr_type_node);
3305 var = make_ssa_name (var, x);
3306 gimple_call_set_lhs (x, var);
3307 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3308
3309 fn = builtin_decl_implicit (BUILT_IN_UNWIND_RESUME);
3310 x = gimple_build_call (fn, 1, var);
3311 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3312 }
3313
3314 gcc_assert (EDGE_COUNT (bb->succs) == 0);
3315 }
3316
3317 gsi_remove (&gsi, true);
3318
3319 return ret;
3320 }
3321
3322 namespace {
3323
3324 const pass_data pass_data_lower_resx =
3325 {
3326 GIMPLE_PASS, /* type */
3327 "resx", /* name */
3328 OPTGROUP_NONE, /* optinfo_flags */
3329 TV_TREE_EH, /* tv_id */
3330 PROP_gimple_lcf, /* properties_required */
3331 0, /* properties_provided */
3332 0, /* properties_destroyed */
3333 0, /* todo_flags_start */
3334 0, /* todo_flags_finish */
3335 };
3336
3337 class pass_lower_resx : public gimple_opt_pass
3338 {
3339 public:
3340 pass_lower_resx (gcc::context *ctxt)
3341 : gimple_opt_pass (pass_data_lower_resx, ctxt)
3342 {}
3343
3344 /* opt_pass methods: */
3345 virtual bool gate (function *) { return flag_exceptions != 0; }
3346 virtual unsigned int execute (function *);
3347
3348 }; // class pass_lower_resx
3349
3350 unsigned
3351 pass_lower_resx::execute (function *fun)
3352 {
3353 basic_block bb;
3354 bool dominance_invalidated = false;
3355 bool any_rewritten = false;
3356
3357 hash_map<eh_region, tree> mnt_map;
3358
3359 FOR_EACH_BB_FN (bb, fun)
3360 {
3361 gimple last = last_stmt (bb);
3362 if (last && is_gimple_resx (last))
3363 {
3364 dominance_invalidated |=
3365 lower_resx (bb, as_a <gresx *> (last), &mnt_map);
3366 any_rewritten = true;
3367 }
3368 }
3369
3370 if (dominance_invalidated)
3371 {
3372 free_dominance_info (CDI_DOMINATORS);
3373 free_dominance_info (CDI_POST_DOMINATORS);
3374 }
3375
3376 return any_rewritten ? TODO_update_ssa_only_virtuals : 0;
3377 }
3378
3379 } // anon namespace
3380
3381 gimple_opt_pass *
3382 make_pass_lower_resx (gcc::context *ctxt)
3383 {
3384 return new pass_lower_resx (ctxt);
3385 }
3386
3387 /* Try to optimize var = {v} {CLOBBER} stmts followed just by
3388 external throw. */
3389
3390 static void
3391 optimize_clobbers (basic_block bb)
3392 {
3393 gimple_stmt_iterator gsi = gsi_last_bb (bb);
3394 bool any_clobbers = false;
3395 bool seen_stack_restore = false;
3396 edge_iterator ei;
3397 edge e;
3398
3399 /* Only optimize anything if the bb contains at least one clobber,
3400 ends with resx (checked by caller), optionally contains some
3401      debug stmts or labels and at most one __builtin_stack_restore
3402 call, and has an incoming EH edge. */
3403 for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3404 {
3405 gimple stmt = gsi_stmt (gsi);
3406 if (is_gimple_debug (stmt))
3407 continue;
3408 if (gimple_clobber_p (stmt))
3409 {
3410 any_clobbers = true;
3411 continue;
3412 }
3413 if (!seen_stack_restore
3414 && gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
3415 {
3416 seen_stack_restore = true;
3417 continue;
3418 }
3419 if (gimple_code (stmt) == GIMPLE_LABEL)
3420 break;
3421 return;
3422 }
3423 if (!any_clobbers)
3424 return;
3425 FOR_EACH_EDGE (e, ei, bb->preds)
3426 if (e->flags & EDGE_EH)
3427 break;
3428 if (e == NULL)
3429 return;
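  /* All of the conditions above hold; walk the block again and delete
     every clobber in it.  */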
3430 gsi = gsi_last_bb (bb);
3431 for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3432 {
3433 gimple stmt = gsi_stmt (gsi);
3434 if (!gimple_clobber_p (stmt))
3435 continue;
3436 unlink_stmt_vdef (stmt);
3437 gsi_remove (&gsi, true);
3438 release_defs (stmt);
3439 }
3440 }
3441
3442 /* Try to sink var = {v} {CLOBBER} stmts followed just by
3443 internal throw to successor BB. */
3444
3445 static int
3446 sink_clobbers (basic_block bb)
3447 {
3448 edge e;
3449 edge_iterator ei;
3450 gimple_stmt_iterator gsi, dgsi;
3451 basic_block succbb;
3452 bool any_clobbers = false;
3453 unsigned todo = 0;
3454
3455 /* Only optimize if BB has a single EH successor and
3456 all predecessor edges are EH too. */
3457 if (!single_succ_p (bb)
3458 || (single_succ_edge (bb)->flags & EDGE_EH) == 0)
3459 return 0;
3460
3461 FOR_EACH_EDGE (e, ei, bb->preds)
3462 {
3463 if ((e->flags & EDGE_EH) == 0)
3464 return 0;
3465 }
3466
3467 /* And BB contains only CLOBBER stmts before the final
3468 RESX. */
3469 gsi = gsi_last_bb (bb);
3470 for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3471 {
3472 gimple stmt = gsi_stmt (gsi);
3473 if (is_gimple_debug (stmt))
3474 continue;
3475 if (gimple_code (stmt) == GIMPLE_LABEL)
3476 break;
3477 if (!gimple_clobber_p (stmt))
3478 return 0;
3479 any_clobbers = true;
3480 }
3481 if (!any_clobbers)
3482 return 0;
3483
3484 edge succe = single_succ_edge (bb);
3485 succbb = succe->dest;
3486
3487 /* See if there is a virtual PHI node to take an updated virtual
3488 operand from. */
3489 gphi *vphi = NULL;
3490 tree vuse = NULL_TREE;
3491 for (gphi_iterator gpi = gsi_start_phis (succbb);
3492 !gsi_end_p (gpi); gsi_next (&gpi))
3493 {
3494 tree res = gimple_phi_result (gpi.phi ());
3495 if (virtual_operand_p (res))
3496 {
3497 vphi = gpi.phi ();
3498 vuse = res;
3499 break;
3500 }
3501 }
3502
3503 dgsi = gsi_after_labels (succbb);
3504 gsi = gsi_last_bb (bb);
3505 for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3506 {
3507 gimple stmt = gsi_stmt (gsi);
3508 tree lhs;
3509 if (is_gimple_debug (stmt))
3510 continue;
3511 if (gimple_code (stmt) == GIMPLE_LABEL)
3512 break;
3513 lhs = gimple_assign_lhs (stmt);
3514 /* Unfortunately we don't have dominance info updated at this
3515 point, so checking if
3516 dominated_by_p (CDI_DOMINATORS, succbb,
3517 	 gimple_bb (SSA_NAME_DEF_STMT (TREE_OPERAND (lhs, 0))))
3518 would be too costly. Thus, avoid sinking any clobbers that
3519 refer to non-(D) SSA_NAMEs. */
3520 if (TREE_CODE (lhs) == MEM_REF
3521 && TREE_CODE (TREE_OPERAND (lhs, 0)) == SSA_NAME
3522 && !SSA_NAME_IS_DEFAULT_DEF (TREE_OPERAND (lhs, 0)))
3523 {
3524 unlink_stmt_vdef (stmt);
3525 gsi_remove (&gsi, true);
3526 release_defs (stmt);
3527 continue;
3528 }
3529
3530 /* As we do not change stmt order when sinking across a
3531 forwarder edge we can keep virtual operands in place. */
3532 gsi_remove (&gsi, false);
3533 gsi_insert_before (&dgsi, stmt, GSI_NEW_STMT);
3534
3535 /* But adjust virtual operands if we sunk across a PHI node. */
3536 if (vuse)
3537 {
3538 gimple use_stmt;
3539 imm_use_iterator iter;
3540 use_operand_p use_p;
3541 FOR_EACH_IMM_USE_STMT (use_stmt, iter, vuse)
3542 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
3543 SET_USE (use_p, gimple_vdef (stmt));
3544 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (vuse))
3545 {
3546 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_vdef (stmt)) = 1;
3547 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (vuse) = 0;
3548 }
3549 /* Adjust the incoming virtual operand. */
3550 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (vphi, succe), gimple_vuse (stmt));
3551 SET_USE (gimple_vuse_op (stmt), vuse);
3552 }
3553     /* If the successor has more than one predecessor but no virtual
3554 	PHI node, arrange for the virtual operands to be renamed.  */
3555 else if (gimple_vuse_op (stmt) != NULL_USE_OPERAND_P
3556 && !single_pred_p (succbb))
3557 {
3558 /* In this case there will be no use of the VDEF of this stmt.
3559 ??? Unless this is a secondary opportunity and we have not
3560 removed unreachable blocks yet, so we cannot assert this.
3561 Which also means we will end up renaming too many times. */
3562 SET_USE (gimple_vuse_op (stmt), gimple_vop (cfun));
3563 mark_virtual_operands_for_renaming (cfun);
3564 todo |= TODO_update_ssa_only_virtuals;
3565 }
3566 }
3567
3568 return todo;
3569 }
3570
3571 /* At the end of inlining, we can lower EH_DISPATCH. Return true when
3572 we have found some duplicate labels and removed some edges. */
3573
3574 static bool
3575 lower_eh_dispatch (basic_block src, geh_dispatch *stmt)
3576 {
3577 gimple_stmt_iterator gsi;
3578 int region_nr;
3579 eh_region r;
3580 tree filter, fn;
3581 gimple x;
3582 bool redirected = false;
3583
3584 region_nr = gimple_eh_dispatch_region (stmt);
3585 r = get_eh_region_from_number (region_nr);
3586
3587 gsi = gsi_last_bb (src);
3588
3589 switch (r->type)
3590 {
3591 case ERT_TRY:
3592 {
3593 auto_vec<tree> labels;
3594 tree default_label = NULL;
3595 eh_catch c;
3596 edge_iterator ei;
3597 edge e;
3598 hash_set<tree> seen_values;
3599
3600 /* Collect the labels for a switch. Zero the post_landing_pad
3601 	   field because we'll no longer have anything keeping these labels
3602 in existence and the optimizer will be free to merge these
3603 blocks at will. */
3604 for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
3605 {
3606 tree tp_node, flt_node, lab = c->label;
3607 bool have_label = false;
3608
3609 c->label = NULL;
3610 tp_node = c->type_list;
3611 flt_node = c->filter_list;
3612
3613 if (tp_node == NULL)
3614 {
3615 default_label = lab;
3616 break;
3617 }
3618 do
3619 {
3620 /* Filter out duplicate labels that arise when this handler
3621 is shadowed by an earlier one. When no labels are
3622 attached to the handler anymore, we remove
3623 the corresponding edge and then we delete unreachable
3624 blocks at the end of this pass. */
3625 if (! seen_values.contains (TREE_VALUE (flt_node)))
3626 {
3627 tree t = build_case_label (TREE_VALUE (flt_node),
3628 NULL, lab);
3629 labels.safe_push (t);
3630 seen_values.add (TREE_VALUE (flt_node));
3631 have_label = true;
3632 }
3633
3634 tp_node = TREE_CHAIN (tp_node);
3635 flt_node = TREE_CHAIN (flt_node);
3636 }
3637 while (tp_node);
3638 if (! have_label)
3639 {
3640 remove_edge (find_edge (src, label_to_block (lab)));
3641 redirected = true;
3642 }
3643 }
3644
3645 /* Clean up the edge flags. */
3646 FOR_EACH_EDGE (e, ei, src->succs)
3647 {
3648 if (e->flags & EDGE_FALLTHRU)
3649 {
3650 /* If there was no catch-all, use the fallthru edge. */
3651 if (default_label == NULL)
3652 default_label = gimple_block_label (e->dest);
3653 e->flags &= ~EDGE_FALLTHRU;
3654 }
3655 }
3656 gcc_assert (default_label != NULL);
3657
3658 /* Don't generate a switch if there's only a default case.
3659 This is common in the form of try { A; } catch (...) { B; }. */
3660 if (!labels.exists ())
3661 {
3662 e = single_succ_edge (src);
3663 e->flags |= EDGE_FALLTHRU;
3664 }
3665 else
3666 {
3667 fn = builtin_decl_implicit (BUILT_IN_EH_FILTER);
3668 x = gimple_build_call (fn, 1, build_int_cst (integer_type_node,
3669 region_nr));
3670 filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)));
3671 filter = make_ssa_name (filter, x);
3672 gimple_call_set_lhs (x, filter);
3673 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3674
3675 /* Turn the default label into a default case. */
3676 default_label = build_case_label (NULL, NULL, default_label);
3677 sort_case_labels (labels);
3678
3679 x = gimple_build_switch (filter, default_label, labels);
3680 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3681 }
3682 }
3683 break;
3684
3685 case ERT_ALLOWED_EXCEPTIONS:
3686 {
3687 edge b_e = BRANCH_EDGE (src);
3688 edge f_e = FALLTHRU_EDGE (src);
3689
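	/* An allowed-exceptions region has a single handler; compare the
	   runtime filter value against the region's filter and branch to
	   the handler when they match.  */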
3690 fn = builtin_decl_implicit (BUILT_IN_EH_FILTER);
3691 x = gimple_build_call (fn, 1, build_int_cst (integer_type_node,
3692 region_nr));
3693 filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)));
3694 filter = make_ssa_name (filter, x);
3695 gimple_call_set_lhs (x, filter);
3696 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3697
3698 r->u.allowed.label = NULL;
3699 x = gimple_build_cond (EQ_EXPR, filter,
3700 build_int_cst (TREE_TYPE (filter),
3701 r->u.allowed.filter),
3702 NULL_TREE, NULL_TREE);
3703 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3704
3705 b_e->flags = b_e->flags | EDGE_TRUE_VALUE;
3706 f_e->flags = (f_e->flags & ~EDGE_FALLTHRU) | EDGE_FALSE_VALUE;
3707 }
3708 break;
3709
3710 default:
3711 gcc_unreachable ();
3712 }
3713
3714 /* Replace the EH_DISPATCH with the SWITCH or COND generated above. */
3715 gsi_remove (&gsi, true);
3716 return redirected;
3717 }
3718
3719 namespace {
3720
3721 const pass_data pass_data_lower_eh_dispatch =
3722 {
3723 GIMPLE_PASS, /* type */
3724 "ehdisp", /* name */
3725 OPTGROUP_NONE, /* optinfo_flags */
3726 TV_TREE_EH, /* tv_id */
3727 PROP_gimple_lcf, /* properties_required */
3728 0, /* properties_provided */
3729 0, /* properties_destroyed */
3730 0, /* todo_flags_start */
3731 0, /* todo_flags_finish */
3732 };
3733
3734 class pass_lower_eh_dispatch : public gimple_opt_pass
3735 {
3736 public:
3737 pass_lower_eh_dispatch (gcc::context *ctxt)
3738 : gimple_opt_pass (pass_data_lower_eh_dispatch, ctxt)
3739 {}
3740
3741 /* opt_pass methods: */
3742 virtual bool gate (function *fun) { return fun->eh->region_tree != NULL; }
3743 virtual unsigned int execute (function *);
3744
3745 }; // class pass_lower_eh_dispatch
3746
3747 unsigned
3748 pass_lower_eh_dispatch::execute (function *fun)
3749 {
3750 basic_block bb;
3751 int flags = 0;
3752 bool redirected = false;
3753
3754 assign_filter_values ();
3755
3756 FOR_EACH_BB_FN (bb, fun)
3757 {
3758 gimple last = last_stmt (bb);
3759 if (last == NULL)
3760 continue;
3761 if (gimple_code (last) == GIMPLE_EH_DISPATCH)
3762 {
3763 redirected |= lower_eh_dispatch (bb,
3764 as_a <geh_dispatch *> (last));
3765 flags |= TODO_update_ssa_only_virtuals;
3766 }
3767 else if (gimple_code (last) == GIMPLE_RESX)
3768 {
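	  /* For a resx that escapes the function we can only delete the
	     preceding clobbers; for an internal rethrow we instead try to
	     sink them into the successor landing pad block.  */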
3769 if (stmt_can_throw_external (last))
3770 optimize_clobbers (bb);
3771 else
3772 flags |= sink_clobbers (bb);
3773 }
3774 }
3775
3776 if (redirected)
3777 delete_unreachable_blocks ();
3778 return flags;
3779 }
3780
3781 } // anon namespace
3782
3783 gimple_opt_pass *
3784 make_pass_lower_eh_dispatch (gcc::context *ctxt)
3785 {
3786 return new pass_lower_eh_dispatch (ctxt);
3787 }
3788 \f
3789 /* Walk statements, see what regions and, optionally, landing pads
3790 are really referenced.
3791
3792 Returns in R_REACHABLEP an sbitmap with bits set for reachable regions,
3793    and in LP_REACHABLEP an sbitmap with bits set for reachable landing pads.
3794
3795    Passing NULL for LP_REACHABLEP is valid; in this case only reachable
3796 regions are marked.
3797
3798 The caller is responsible for freeing the returned sbitmaps. */
3799
3800 static void
3801 mark_reachable_handlers (sbitmap *r_reachablep, sbitmap *lp_reachablep)
3802 {
3803 sbitmap r_reachable, lp_reachable;
3804 basic_block bb;
3805 bool mark_landing_pads = (lp_reachablep != NULL);
3806 gcc_checking_assert (r_reachablep != NULL);
3807
3808 r_reachable = sbitmap_alloc (cfun->eh->region_array->length ());
3809 bitmap_clear (r_reachable);
3810 *r_reachablep = r_reachable;
3811
3812 if (mark_landing_pads)
3813 {
3814 lp_reachable = sbitmap_alloc (cfun->eh->lp_array->length ());
3815 bitmap_clear (lp_reachable);
3816 *lp_reachablep = lp_reachable;
3817 }
3818 else
3819 lp_reachable = NULL;
3820
3821 FOR_EACH_BB_FN (bb, cfun)
3822 {
3823 gimple_stmt_iterator gsi;
3824
3825 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3826 {
3827 gimple stmt = gsi_stmt (gsi);
3828
3829 if (mark_landing_pads)
3830 {
3831 int lp_nr = lookup_stmt_eh_lp (stmt);
3832
3833 /* Negative LP numbers are MUST_NOT_THROW regions which
3834 are not considered BB enders. */
3835 if (lp_nr < 0)
3836 bitmap_set_bit (r_reachable, -lp_nr);
3837
3838 /* Positive LP numbers are real landing pads, and BB enders. */
3839 else if (lp_nr > 0)
3840 {
3841 gcc_assert (gsi_one_before_end_p (gsi));
3842 eh_region region = get_eh_region_from_lp_number (lp_nr);
3843 bitmap_set_bit (r_reachable, region->index);
3844 bitmap_set_bit (lp_reachable, lp_nr);
3845 }
3846 }
3847
3848 /* Avoid removing regions referenced from RESX/EH_DISPATCH. */
3849 switch (gimple_code (stmt))
3850 {
3851 case GIMPLE_RESX:
3852 bitmap_set_bit (r_reachable,
3853 gimple_resx_region (as_a <gresx *> (stmt)));
3854 break;
3855 case GIMPLE_EH_DISPATCH:
3856 bitmap_set_bit (r_reachable,
3857 gimple_eh_dispatch_region (
3858 as_a <geh_dispatch *> (stmt)));
3859 break;
3860 case GIMPLE_CALL:
3861 if (gimple_call_builtin_p (stmt, BUILT_IN_EH_COPY_VALUES))
3862 for (int i = 0; i < 2; ++i)
3863 {
3864 tree rt = gimple_call_arg (stmt, i);
3865 HOST_WIDE_INT ri = tree_to_shwi (rt);
3866
3867 gcc_assert (ri == (int)ri);
3868 bitmap_set_bit (r_reachable, ri);
3869 }
3870 break;
3871 default:
3872 break;
3873 }
3874 }
3875 }
3876 }
3877
3878 /* Remove unreachable handlers and unreachable landing pads. */
3879
3880 static void
3881 remove_unreachable_handlers (void)
3882 {
3883 sbitmap r_reachable, lp_reachable;
3884 eh_region region;
3885 eh_landing_pad lp;
3886 unsigned i;
3887
3888 mark_reachable_handlers (&r_reachable, &lp_reachable);
3889
3890 if (dump_file)
3891 {
3892 fprintf (dump_file, "Before removal of unreachable regions:\n");
3893 dump_eh_tree (dump_file, cfun);
3894 fprintf (dump_file, "Reachable regions: ");
3895 dump_bitmap_file (dump_file, r_reachable);
3896 fprintf (dump_file, "Reachable landing pads: ");
3897 dump_bitmap_file (dump_file, lp_reachable);
3898 }
3899
3900 if (dump_file)
3901 {
3902 FOR_EACH_VEC_SAFE_ELT (cfun->eh->region_array, i, region)
3903 if (region && !bitmap_bit_p (r_reachable, region->index))
3904 fprintf (dump_file,
3905 "Removing unreachable region %d\n",
3906 region->index);
3907 }
3908
3909 remove_unreachable_eh_regions (r_reachable);
3910
3911 FOR_EACH_VEC_SAFE_ELT (cfun->eh->lp_array, i, lp)
3912 if (lp && !bitmap_bit_p (lp_reachable, lp->index))
3913 {
3914 if (dump_file)
3915 fprintf (dump_file,
3916 "Removing unreachable landing pad %d\n",
3917 lp->index);
3918 remove_eh_landing_pad (lp);
3919 }
3920
3921 if (dump_file)
3922 {
3923 fprintf (dump_file, "\n\nAfter removal of unreachable regions:\n");
3924 dump_eh_tree (dump_file, cfun);
3925 fprintf (dump_file, "\n\n");
3926 }
3927
3928 sbitmap_free (r_reachable);
3929 sbitmap_free (lp_reachable);
3930
3931 #ifdef ENABLE_CHECKING
3932 verify_eh_tree (cfun);
3933 #endif
3934 }
3935
3936 /* Remove unreachable handlers if any landing pads have been removed after
3937 the last ehcleanup pass (due to gimple_purge_dead_eh_edges). */
3938
3939 void
3940 maybe_remove_unreachable_handlers (void)
3941 {
3942 eh_landing_pad lp;
3943 unsigned i;
3944
3945 if (cfun->eh == NULL)
3946 return;
3947
3948 FOR_EACH_VEC_SAFE_ELT (cfun->eh->lp_array, i, lp)
3949 if (lp && lp->post_landing_pad)
3950 {
3951 if (label_to_block (lp->post_landing_pad) == NULL)
3952 {
3953 remove_unreachable_handlers ();
3954 return;
3955 }
3956 }
3957 }
3958
3959 /* Remove regions that do not have landing pads. This assumes
3960 that remove_unreachable_handlers has already been run, and
3961 that we've just manipulated the landing pads since then.
3962
3963 Preserve regions with landing pads and regions that prevent
3964 exceptions from propagating further, even if these regions
3965 are not reachable. */
3966
3967 static void
3968 remove_unreachable_handlers_no_lp (void)
3969 {
3970 eh_region region;
3971 sbitmap r_reachable;
3972 unsigned i;
3973
3974 mark_reachable_handlers (&r_reachable, /*lp_reachablep=*/NULL);
3975
3976 FOR_EACH_VEC_SAFE_ELT (cfun->eh->region_array, i, region)
3977 {
3978 if (! region)
3979 continue;
3980
3981 if (region->landing_pads != NULL
3982 || region->type == ERT_MUST_NOT_THROW)
3983 bitmap_set_bit (r_reachable, region->index);
3984
3985 if (dump_file
3986 && !bitmap_bit_p (r_reachable, region->index))
3987 fprintf (dump_file,
3988 "Removing unreachable region %d\n",
3989 region->index);
3990 }
3991
3992 remove_unreachable_eh_regions (r_reachable);
3993
3994 sbitmap_free (r_reachable);
3995 }
3996
3997 /* Undo critical edge splitting on an EH landing pad. Earlier, we
3998 optimistically split all sorts of edges, including EH edges. The
3999 optimization passes in between may not have needed them; if not,
4000 we should undo the split.
4001
4002 Recognize this case by having one EH edge incoming to the BB and
4003 one normal edge outgoing; BB should be empty apart from the
4004 post_landing_pad label.
4005
4006 Note that this is slightly different from the empty handler case
4007 handled by cleanup_empty_eh, in that the handler itself may still
4008 contain code, but the landing pad has been separated from the
4009 handler. As such, cleanup_empty_eh relies on this transformation
4010 having been done first. */
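/* Schematically (block numbers made up), the split landing pad looks like

       <bb 3> [LP 1]:
       .Lpost_landing_pad:
       goto <bb 4>;

       <bb 4>:
       ... handler code ...

   and unsplitting redirects the incoming EH edge straight to <bb 4>,
   leaving <bb 3> to be deleted as unreachable.  */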
4011
4012 static bool
4013 unsplit_eh (eh_landing_pad lp)
4014 {
4015 basic_block bb = label_to_block (lp->post_landing_pad);
4016 gimple_stmt_iterator gsi;
4017 edge e_in, e_out;
4018
4019 /* Quickly check the edge counts on BB for singularity. */
4020 if (!single_pred_p (bb) || !single_succ_p (bb))
4021 return false;
4022 e_in = single_pred_edge (bb);
4023 e_out = single_succ_edge (bb);
4024
4025 /* Input edge must be EH and output edge must be normal. */
4026 if ((e_in->flags & EDGE_EH) == 0 || (e_out->flags & EDGE_EH) != 0)
4027 return false;
4028
4029 /* The block must be empty except for the labels and debug insns. */
4030 gsi = gsi_after_labels (bb);
4031 if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
4032 gsi_next_nondebug (&gsi);
4033 if (!gsi_end_p (gsi))
4034 return false;
4035
4036 /* The destination block must not already have a landing pad
4037 for a different region. */
4038 for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
4039 {
4040 glabel *label_stmt = dyn_cast <glabel *> (gsi_stmt (gsi));
4041 tree lab;
4042 int lp_nr;
4043
4044 if (!label_stmt)
4045 break;
4046 lab = gimple_label_label (label_stmt);
4047 lp_nr = EH_LANDING_PAD_NR (lab);
4048 if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region)
4049 return false;
4050 }
4051
4052 /* The new destination block must not already be a destination of
4053 the source block, lest we merge fallthru and eh edges and get
4054 all sorts of confused. */
4055 if (find_edge (e_in->src, e_out->dest))
4056 return false;
4057
4058 /* ??? We can get degenerate phis due to cfg cleanups. I would have
4059 thought this should have been cleaned up by a phicprop pass, but
4060 that doesn't appear to handle virtuals. Propagate by hand. */
4061 if (!gimple_seq_empty_p (phi_nodes (bb)))
4062 {
4063 for (gphi_iterator gpi = gsi_start_phis (bb); !gsi_end_p (gpi); )
4064 {
4065 gimple use_stmt;
4066 gphi *phi = gpi.phi ();
4067 tree lhs = gimple_phi_result (phi);
4068 tree rhs = gimple_phi_arg_def (phi, 0);
4069 use_operand_p use_p;
4070 imm_use_iterator iter;
4071
4072 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
4073 {
4074 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
4075 SET_USE (use_p, rhs);
4076 }
4077
4078 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
4079 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1;
4080
4081 remove_phi_node (&gpi, true);
4082 }
4083 }
4084
4085 if (dump_file && (dump_flags & TDF_DETAILS))
4086 fprintf (dump_file, "Unsplit EH landing pad %d to block %i.\n",
4087 lp->index, e_out->dest->index);
4088
4089 /* Redirect the edge. Since redirect_eh_edge_1 expects to be moving
4090 a successor edge, humor it. But do the real CFG change with the
4091 predecessor of E_OUT in order to preserve the ordering of arguments
4092 to the PHI nodes in E_OUT->DEST. */
4093 redirect_eh_edge_1 (e_in, e_out->dest, false);
4094 redirect_edge_pred (e_out, e_in->src);
4095 e_out->flags = e_in->flags;
4096 e_out->probability = e_in->probability;
4097 e_out->count = e_in->count;
4098 remove_edge (e_in);
4099
4100 return true;
4101 }
4102
4103 /* Examine each landing pad block and see if it matches unsplit_eh. */
4104
4105 static bool
4106 unsplit_all_eh (void)
4107 {
4108 bool changed = false;
4109 eh_landing_pad lp;
4110 int i;
4111
4112 for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
4113 if (lp)
4114 changed |= unsplit_eh (lp);
4115
4116 return changed;
4117 }
4118
4119 /* A subroutine of cleanup_empty_eh. Redirect all EH edges incoming
4120 to OLD_BB to NEW_BB; return true on success, false on failure.
4121
4122 OLD_BB_OUT is the edge into NEW_BB from OLD_BB, so if we miss any
4123 PHI variables from OLD_BB we can pick them up from OLD_BB_OUT.
4124 Virtual PHIs may be deleted and marked for renaming. */
4125
4126 static bool
4127 cleanup_empty_eh_merge_phis (basic_block new_bb, basic_block old_bb,
4128 edge old_bb_out, bool change_region)
4129 {
4130 gphi_iterator ngsi, ogsi;
4131 edge_iterator ei;
4132 edge e;
4133 bitmap ophi_handled;
4134
4135 /* The destination block must not be a regular successor for any
4136 of the preds of the landing pad. Thus, avoid turning
4137 <..>
4138 | \ EH
4139 | <..>
4140 | /
4141 <..>
4142 into
4143 <..>
4144 | | EH
4145 <..>
4146 which CFG verification would choke on. See PR45172 and PR51089. */
4147 FOR_EACH_EDGE (e, ei, old_bb->preds)
4148 if (find_edge (e->src, new_bb))
4149 return false;
4150
4151 FOR_EACH_EDGE (e, ei, old_bb->preds)
4152 redirect_edge_var_map_clear (e);
4153
4154 ophi_handled = BITMAP_ALLOC (NULL);
4155
4156 /* First, iterate through the PHIs on NEW_BB and set up the edge_var_map
4157 for the edges we're going to move. */
4158 for (ngsi = gsi_start_phis (new_bb); !gsi_end_p (ngsi); gsi_next (&ngsi))
4159 {
4160 gphi *ophi, *nphi = ngsi.phi ();
4161 tree nresult, nop;
4162
4163 nresult = gimple_phi_result (nphi);
4164 nop = gimple_phi_arg_def (nphi, old_bb_out->dest_idx);
4165
4166 /* Find the corresponding PHI in OLD_BB so we can forward-propagate
4167 the source ssa_name. */
4168 ophi = NULL;
4169 for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi))
4170 {
4171 ophi = ogsi.phi ();
4172 if (gimple_phi_result (ophi) == nop)
4173 break;
4174 ophi = NULL;
4175 }
4176
4177 /* If we did find the corresponding PHI, copy those inputs. */
4178 if (ophi)
4179 {
4180 /* If NOP is used somewhere else beyond phis in new_bb, give up. */
4181 if (!has_single_use (nop))
4182 {
4183 imm_use_iterator imm_iter;
4184 use_operand_p use_p;
4185
4186 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, nop)
4187 {
4188 if (!gimple_debug_bind_p (USE_STMT (use_p))
4189 && (gimple_code (USE_STMT (use_p)) != GIMPLE_PHI
4190 || gimple_bb (USE_STMT (use_p)) != new_bb))
4191 goto fail;
4192 }
4193 }
4194 bitmap_set_bit (ophi_handled, SSA_NAME_VERSION (nop));
4195 FOR_EACH_EDGE (e, ei, old_bb->preds)
4196 {
4197 location_t oloc;
4198 tree oop;
4199
4200 if ((e->flags & EDGE_EH) == 0)
4201 continue;
4202 oop = gimple_phi_arg_def (ophi, e->dest_idx);
4203 oloc = gimple_phi_arg_location (ophi, e->dest_idx);
4204 redirect_edge_var_map_add (e, nresult, oop, oloc);
4205 }
4206 }
4207 /* If we didn't find the PHI, then whether it's a real variable or a VOP, we know
4208 from the fact that OLD_BB is tree_empty_eh_handler_p that the
4209 variable is unchanged from input to the block and we can simply
4210 re-use the input to NEW_BB from the OLD_BB_OUT edge. */
4211 else
4212 {
4213 location_t nloc
4214 = gimple_phi_arg_location (nphi, old_bb_out->dest_idx);
4215 FOR_EACH_EDGE (e, ei, old_bb->preds)
4216 redirect_edge_var_map_add (e, nresult, nop, nloc);
4217 }
4218 }
4219
4220 /* Second, verify that all PHIs from OLD_BB have been handled. If not,
4221 we don't know what values from the other edges into NEW_BB to use. */
4222 for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi))
4223 {
4224 gphi *ophi = ogsi.phi ();
4225 tree oresult = gimple_phi_result (ophi);
4226 if (!bitmap_bit_p (ophi_handled, SSA_NAME_VERSION (oresult)))
4227 goto fail;
4228 }
4229
4230 /* Finally, move the edges and update the PHIs. */
4231 for (ei = ei_start (old_bb->preds); (e = ei_safe_edge (ei)); )
4232 if (e->flags & EDGE_EH)
4233 {
4234 /* ??? CFG manipulation routines do not try to update loop
4235 form on edge redirection. Do so manually here for now. */
4236 /* Redirecting a loop entry or latch edge will either create a
4237 multiple-entry loop or rotate the loop. If the loops merge,
4238 we may have created a loop with multiple latches.
4239 None of this is easily fixed, so cancel the affected loop
4240 and mark the other loop as possibly having multiple latches. */
4241 if (e->dest == e->dest->loop_father->header)
4242 {
4243 mark_loop_for_removal (e->dest->loop_father);
4244 new_bb->loop_father->latch = NULL;
4245 loops_state_set (LOOPS_MAY_HAVE_MULTIPLE_LATCHES);
4246 }
4247 redirect_eh_edge_1 (e, new_bb, change_region);
4248 redirect_edge_succ (e, new_bb);
4249 flush_pending_stmts (e);
4250 }
4251 else
4252 ei_next (&ei);
4253
4254 BITMAP_FREE (ophi_handled);
4255 return true;
4256
4257 fail:
4258 FOR_EACH_EDGE (e, ei, old_bb->preds)
4259 redirect_edge_var_map_clear (e);
4260 BITMAP_FREE (ophi_handled);
4261 return false;
4262 }
4263
4264 /* A subroutine of cleanup_empty_eh. Move a landing pad LP from its
4265 old region to NEW_REGION at BB. */
4266
4267 static void
4268 cleanup_empty_eh_move_lp (basic_block bb, edge e_out,
4269 eh_landing_pad lp, eh_region new_region)
4270 {
4271 gimple_stmt_iterator gsi;
4272 eh_landing_pad *pp;
4273
4274 for (pp = &lp->region->landing_pads; *pp != lp; pp = &(*pp)->next_lp)
4275 continue;
4276 *pp = lp->next_lp;
4277
4278 lp->region = new_region;
4279 lp->next_lp = new_region->landing_pads;
4280 new_region->landing_pads = lp;
4281
4282 /* Delete the RESX that was matched within the empty handler block. */
4283 gsi = gsi_last_bb (bb);
4284 unlink_stmt_vdef (gsi_stmt (gsi));
4285 gsi_remove (&gsi, true);
4286
4287 /* Clean up E_OUT for the fallthru. */
4288 e_out->flags = (e_out->flags & ~EDGE_EH) | EDGE_FALLTHRU;
4289 e_out->probability = REG_BR_PROB_BASE;
4290 }
4291
4292 /* A subroutine of cleanup_empty_eh. Handle more complex cases of
4293 unsplitting than unsplit_eh was prepared to handle, e.g. when
4294 multiple incoming edges and phis are involved. */
4295
4296 static bool
4297 cleanup_empty_eh_unsplit (basic_block bb, edge e_out, eh_landing_pad lp)
4298 {
4299 gimple_stmt_iterator gsi;
4300 tree lab;
4301
4302 /* We really ought not have totally lost everything following
4303 a landing pad label. Given that BB is empty, there had better
4304 be a successor. */
4305 gcc_assert (e_out != NULL);
4306
4307 /* The destination block must not already have a landing pad
4308 for a different region. */
4309 lab = NULL;
4310 for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
4311 {
4312 glabel *stmt = dyn_cast <glabel *> (gsi_stmt (gsi));
4313 int lp_nr;
4314
4315 if (!stmt)
4316 break;
4317 lab = gimple_label_label (stmt);
4318 lp_nr = EH_LANDING_PAD_NR (lab);
4319 if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region)
4320 return false;
4321 }
4322
4323 /* Attempt to move the PHIs into the successor block. */
4324 if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, false))
4325 {
4326 if (dump_file && (dump_flags & TDF_DETAILS))
4327 fprintf (dump_file,
4328 "Unsplit EH landing pad %d to block %i "
4329 "(via cleanup_empty_eh).\n",
4330 lp->index, e_out->dest->index);
4331 return true;
4332 }
4333
4334 return false;
4335 }
4336
4337 /* Return true if edge E_FIRST is part of an empty infinite loop
4338 or leads to such a loop through a series of single successor
4339 empty bbs. */
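/* For instance (schematic), an edge E_FIRST leading into

       <bb A>: goto <bb B>;
       <bb B>: goto <bb A>;

   where both blocks are empty apart from labels and debug stmts is
   recognized as such a loop.  */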
4340
4341 static bool
4342 infinite_empty_loop_p (edge e_first)
4343 {
4344 bool inf_loop = false;
4345 edge e;
4346
4347 if (e_first->dest == e_first->src)
4348 return true;
4349
4350 e_first->src->aux = (void *) 1;
4351 for (e = e_first; single_succ_p (e->dest); e = single_succ_edge (e->dest))
4352 {
4353 gimple_stmt_iterator gsi;
4354 if (e->dest->aux)
4355 {
4356 inf_loop = true;
4357 break;
4358 }
4359 e->dest->aux = (void *) 1;
4360 gsi = gsi_after_labels (e->dest);
4361 if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
4362 gsi_next_nondebug (&gsi);
4363 if (!gsi_end_p (gsi))
4364 break;
4365 }
4366 e_first->src->aux = NULL;
4367 for (e = e_first; e->dest->aux; e = single_succ_edge (e->dest))
4368 e->dest->aux = NULL;
4369
4370 return inf_loop;
4371 }
4372
4373 /* Examine the block associated with LP to determine if it's an empty
4374 handler for its EH region. If so, attempt to redirect EH edges to
4375 an outer region. Return true if the CFG was updated in any way. This
4376 is similar to jump forwarding, just across EH edges. */
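/* The canonical handler shape dealt with below is, schematically,

       <bb N> [LP M]:
       .Lpost_landing_pad:
       resx K;

   possibly with debug stmts and, when the block has no outgoing edge,
   a preceding __builtin_stack_restore call.  */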
4377
4378 static bool
4379 cleanup_empty_eh (eh_landing_pad lp)
4380 {
4381 basic_block bb = label_to_block (lp->post_landing_pad);
4382 gimple_stmt_iterator gsi;
4383 gimple resx;
4384 eh_region new_region;
4385 edge_iterator ei;
4386 edge e, e_out;
4387 bool has_non_eh_pred;
4388 bool ret = false;
4389 int new_lp_nr;
4390
4391 /* There can be zero or one edges out of BB. This is the quickest test. */
4392 switch (EDGE_COUNT (bb->succs))
4393 {
4394 case 0:
4395 e_out = NULL;
4396 break;
4397 case 1:
4398 e_out = single_succ_edge (bb);
4399 break;
4400 default:
4401 return false;
4402 }
4403
4404 resx = last_stmt (bb);
4405 if (resx && is_gimple_resx (resx))
4406 {
4407 if (stmt_can_throw_external (resx))
4408 optimize_clobbers (bb);
4409 else if (sink_clobbers (bb))
4410 ret = true;
4411 }
4412
4413 gsi = gsi_after_labels (bb);
4414
4415 /* Make sure to skip debug statements. */
4416 if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
4417 gsi_next_nondebug (&gsi);
4418
4419 /* If the block is totally empty, look for more unsplitting cases. */
4420 if (gsi_end_p (gsi))
4421 {
4422 /* Bail out for the degenerate case of an infinite loop. Also bail
4423 out if BB has no successors and is totally empty, which can happen
4424 e.g. because of an incorrect noreturn attribute. */
4425 if (e_out == NULL
4426 || infinite_empty_loop_p (e_out))
4427 return ret;
4428
4429 return ret | cleanup_empty_eh_unsplit (bb, e_out, lp);
4430 }
4431
4432 /* The block should consist only of a single RESX statement, modulo a
4433 preceding call to __builtin_stack_restore if there is no outgoing
4434 edge, since the call can be eliminated in this case. */
4435 resx = gsi_stmt (gsi);
4436 if (!e_out && gimple_call_builtin_p (resx, BUILT_IN_STACK_RESTORE))
4437 {
4438 gsi_next (&gsi);
4439 resx = gsi_stmt (gsi);
4440 }
4441 if (!is_gimple_resx (resx))
4442 return ret;
4443 gcc_assert (gsi_one_before_end_p (gsi));
4444
4445 /* Determine if there are non-EH edges, or resx edges into the handler. */
4446 has_non_eh_pred = false;
4447 FOR_EACH_EDGE (e, ei, bb->preds)
4448 if (!(e->flags & EDGE_EH))
4449 has_non_eh_pred = true;
4450
4451 /* Find the handler that's outer of the empty handler by looking at
4452 where the RESX instruction was vectored. */
4453 new_lp_nr = lookup_stmt_eh_lp (resx);
4454 new_region = get_eh_region_from_lp_number (new_lp_nr);
4455
4456 /* If there's no destination region within the current function,
4457 redirection is trivial via removing the throwing statements from
4458 the EH region, removing the EH edges, and allowing the block
4459 to go unreachable. */
4460 if (new_region == NULL)
4461 {
4462 gcc_assert (e_out == NULL);
4463 for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
4464 if (e->flags & EDGE_EH)
4465 {
4466 gimple stmt = last_stmt (e->src);
4467 remove_stmt_from_eh_lp (stmt);
4468 remove_edge (e);
4469 }
4470 else
4471 ei_next (&ei);
4472 goto succeed;
4473 }
4474
4475 /* If the destination region is a MUST_NOT_THROW, allow the runtime
4476 to handle the abort and allow the blocks to go unreachable. */
4477 if (new_region->type == ERT_MUST_NOT_THROW)
4478 {
4479 for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
4480 if (e->flags & EDGE_EH)
4481 {
4482 gimple stmt = last_stmt (e->src);
4483 remove_stmt_from_eh_lp (stmt);
4484 add_stmt_to_eh_lp (stmt, new_lp_nr);
4485 remove_edge (e);
4486 }
4487 else
4488 ei_next (&ei);
4489 goto succeed;
4490 }
4491
4492 /* Try to redirect the EH edges and merge the PHIs into the destination
4493 landing pad block. If the merge succeeds, we'll already have redirected
4494 all the EH edges. The handler itself will go unreachable if there were
4495 no normal edges. */
4496 if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, true))
4497 goto succeed;
4498
4499 /* Finally, if all input edges are EH edges, then we can (potentially)
4500 reduce the number of transfers from the runtime by moving the landing
4501 pad from the original region to the new region. This is a win when
4502 we remove the last CLEANUP region along a particular exception
4503 propagation path. Since nothing changes except for the region with
4504 which the landing pad is associated, the PHI nodes do not need to be
4505 adjusted at all. */
4506 if (!has_non_eh_pred)
4507 {
4508 cleanup_empty_eh_move_lp (bb, e_out, lp, new_region);
4509 if (dump_file && (dump_flags & TDF_DETAILS))
4510 fprintf (dump_file, "Empty EH handler %i moved to EH region %i.\n",
4511 lp->index, new_region->index);
4512
4513 /* ??? The CFG didn't change, but we may have rendered the
4514 old EH region unreachable. Trigger a cleanup there. */
4515 return true;
4516 }
4517
4518 return ret;
4519
4520 succeed:
4521 if (dump_file && (dump_flags & TDF_DETAILS))
4522 fprintf (dump_file, "Empty EH handler %i removed.\n", lp->index);
4523 remove_eh_landing_pad (lp);
4524 return true;
4525 }
4526
4527 /* Do a post-order traversal of the EH region tree. Examine each
4528 post_landing_pad block and see if we can eliminate it as empty. */
4529
4530 static bool
4531 cleanup_all_empty_eh (void)
4532 {
4533 bool changed = false;
4534 eh_landing_pad lp;
4535 int i;
4536
4537 for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
4538 if (lp)
4539 changed |= cleanup_empty_eh (lp);
4540
4541 return changed;
4542 }
4543
4544 /* Perform cleanups and lowering of exception handling
4545 1) cleanup regions with handlers doing nothing are optimized out
4546 2) MUST_NOT_THROW regions that became dead because of 1) are optimized out
4547 3) Info about regions containing instructions, and about regions
4548 reachable via local EH edges, is collected
4549 4) The EH tree is pruned of regions that are no longer necessary.
4550
4551 TODO: Push MUST_NOT_THROW regions to the root of the EH tree.
4552 Unify those that have the same failure decl and locus.
4553 */
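/* As a rough source-level illustration (C++ with exceptions enabled), in

     struct S { ~S (); };
     void g ();
     void f () { S s; g (); }

   the call to g () sits inside a cleanup region whose handler runs s's
   destructor and re-raises.  If later optimization (e.g. inlining an
   empty destructor) leaves that handler doing nothing, step 1) above
   allows the region to be discarded, which in turn can make regions that
   existed only for its sake dead and removable by the later steps.  */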
4554
4555 static unsigned int
4556 execute_cleanup_eh_1 (void)
4557 {
4558 /* Do this first: unsplit_all_eh and cleanup_all_empty_eh can die
4559 looking up unreachable landing pads. */
4560 remove_unreachable_handlers ();
4561
4562 /* Watch out for the region tree vanishing due to all regions being unreachable. */
4563 if (cfun->eh->region_tree)
4564 {
4565 bool changed = false;
4566
4567 if (optimize)
4568 changed |= unsplit_all_eh ();
4569 changed |= cleanup_all_empty_eh ();
4570
4571 if (changed)
4572 {
4573 free_dominance_info (CDI_DOMINATORS);
4574 free_dominance_info (CDI_POST_DOMINATORS);
4575
4576 /* We delayed all basic block deletion, as we may have performed
4577 cleanups on EH edges while non-EH edges were still present. */
4578 delete_unreachable_blocks ();
4579
4580 /* We manipulated the landing pads. Remove any region that no
4581 longer has a landing pad. */
4582 remove_unreachable_handlers_no_lp ();
4583
4584 return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
4585 }
4586 }
4587
4588 return 0;
4589 }
4590
4591 namespace {
4592
4593 const pass_data pass_data_cleanup_eh =
4594 {
4595 GIMPLE_PASS, /* type */
4596 "ehcleanup", /* name */
4597 OPTGROUP_NONE, /* optinfo_flags */
4598 TV_TREE_EH, /* tv_id */
4599 PROP_gimple_lcf, /* properties_required */
4600 0, /* properties_provided */
4601 0, /* properties_destroyed */
4602 0, /* todo_flags_start */
4603 0, /* todo_flags_finish */
4604 };
4605
4606 class pass_cleanup_eh : public gimple_opt_pass
4607 {
4608 public:
4609 pass_cleanup_eh (gcc::context *ctxt)
4610 : gimple_opt_pass (pass_data_cleanup_eh, ctxt)
4611 {}
4612
4613 /* opt_pass methods: */
4614 opt_pass * clone () { return new pass_cleanup_eh (m_ctxt); }
4615 virtual bool gate (function *fun)
4616 {
4617 return fun->eh != NULL && fun->eh->region_tree != NULL;
4618 }
4619
4620 virtual unsigned int execute (function *);
4621
4622 }; // class pass_cleanup_eh
4623
4624 unsigned int
4625 pass_cleanup_eh::execute (function *fun)
4626 {
4627 int ret = execute_cleanup_eh_1 ();
4628
4629 /* If the function no longer needs an EH personality routine,
4630 clear it. This exposes cross-language inlining opportunities
4631 and avoids references to a never defined personality routine. */
4632 if (DECL_FUNCTION_PERSONALITY (current_function_decl)
4633 && function_needs_eh_personality (fun) != eh_personality_lang)
4634 DECL_FUNCTION_PERSONALITY (current_function_decl) = NULL_TREE;
4635
4636 return ret;
4637 }
4638
4639 } // anon namespace
4640
4641 gimple_opt_pass *
4642 make_pass_cleanup_eh (gcc::context *ctxt)
4643 {
4644 return new pass_cleanup_eh (ctxt);
4645 }
4646 \f
4647 /* Verify that the BB containing STMT as its last statement has precisely
4648 the edges that make_eh_edges would create. */
4649
4650 DEBUG_FUNCTION bool
4651 verify_eh_edges (gimple stmt)
4652 {
4653 basic_block bb = gimple_bb (stmt);
4654 eh_landing_pad lp = NULL;
4655 int lp_nr;
4656 edge_iterator ei;
4657 edge e, eh_edge;
4658
4659 lp_nr = lookup_stmt_eh_lp (stmt);
4660 if (lp_nr > 0)
4661 lp = get_eh_landing_pad_from_number (lp_nr);
4662
4663 eh_edge = NULL;
4664 FOR_EACH_EDGE (e, ei, bb->succs)
4665 {
4666 if (e->flags & EDGE_EH)
4667 {
4668 if (eh_edge)
4669 {
4670 error ("BB %i has multiple EH edges", bb->index);
4671 return true;
4672 }
4673 else
4674 eh_edge = e;
4675 }
4676 }
4677
4678 if (lp == NULL)
4679 {
4680 if (eh_edge)
4681 {
4682 error ("BB %i can not throw but has an EH edge", bb->index);
4683 return true;
4684 }
4685 return false;
4686 }
4687
4688 if (!stmt_could_throw_p (stmt))
4689 {
4690 error ("BB %i last statement has incorrectly set lp", bb->index);
4691 return true;
4692 }
4693
4694 if (eh_edge == NULL)
4695 {
4696 error ("BB %i is missing an EH edge", bb->index);
4697 return true;
4698 }
4699
4700 if (eh_edge->dest != label_to_block (lp->post_landing_pad))
4701 {
4702 error ("Incorrect EH edge %i->%i", bb->index, eh_edge->dest->index);
4703 return true;
4704 }
4705
4706 return false;
4707 }
4708
4709 /* Similarly, but handle GIMPLE_EH_DISPATCH specifically. */
4710
4711 DEBUG_FUNCTION bool
4712 verify_eh_dispatch_edge (geh_dispatch *stmt)
4713 {
4714 eh_region r;
4715 eh_catch c;
4716 basic_block src, dst;
4717 bool want_fallthru = true;
4718 edge_iterator ei;
4719 edge e, fall_edge;
4720
4721 r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
4722 src = gimple_bb (stmt);
4723
4724 FOR_EACH_EDGE (e, ei, src->succs)
4725 gcc_assert (e->aux == NULL);
4726
4727 switch (r->type)
4728 {
4729 case ERT_TRY:
4730 for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
4731 {
4732 dst = label_to_block (c->label);
4733 e = find_edge (src, dst);
4734 if (e == NULL)
4735 {
4736 error ("BB %i is missing an edge", src->index);
4737 return true;
4738 }
4739 e->aux = (void *)e;
4740
4741 /* A catch-all handler doesn't have a fallthru. */
4742 if (c->type_list == NULL)
4743 {
4744 want_fallthru = false;
4745 break;
4746 }
4747 }
4748 break;
4749
4750 case ERT_ALLOWED_EXCEPTIONS:
4751 dst = label_to_block (r->u.allowed.label);
4752 e = find_edge (src, dst);
4753 if (e == NULL)
4754 {
4755 error ("BB %i is missing an edge", src->index);
4756 return true;
4757 }
4758 e->aux = (void *)e;
4759 break;
4760
4761 default:
4762 gcc_unreachable ();
4763 }
4764
4765 fall_edge = NULL;
4766 FOR_EACH_EDGE (e, ei, src->succs)
4767 {
4768 if (e->flags & EDGE_FALLTHRU)
4769 {
4770 if (fall_edge != NULL)
4771 {
4772 error ("BB %i too many fallthru edges", src->index);
4773 return true;
4774 }
4775 fall_edge = e;
4776 }
4777 else if (e->aux)
4778 e->aux = NULL;
4779 else
4780 {
4781 error ("BB %i has incorrect edge", src->index);
4782 return true;
4783 }
4784 }
4785 if ((fall_edge != NULL) ^ want_fallthru)
4786 {
4787 error ("BB %i has incorrect fallthru edge", src->index);
4788 return true;
4789 }
4790
4791 return false;
4792 }