/* GIMPLE lowering pass.  Converts High GIMPLE into Low GIMPLE.

   Copyright (C) 2003-2019 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "tree-pass.h"
#include "fold-const.h"
#include "tree-nested.h"
#include "calls.h"
#include "gimple-iterator.h"
#include "gimple-low.h"
#include "predict.h"
#include "gimple-predict.h"
#include "gimple-fold.h"

/* The differences between High GIMPLE and Low GIMPLE are the
   following:

   1- Lexical scopes are removed (i.e., GIMPLE_BIND disappears).

   2- GIMPLE_TRY and GIMPLE_CATCH are converted to abnormal control
      flow and exception regions are built as an on-the-side region
      hierarchy (See tree-eh.c:lower_eh_constructs).

   3- Multiple identical return statements are grouped into a single
      return statement, with duplicates replaced by gotos to the unique
      return site.  */
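
/* An illustrative sketch of the overall effect (hand-written, not an
   actual -fdump-tree dump; the labels and variable names are made up).
   High GIMPLE of the form

     gimple_bind <
       int b;

       b = a + 1;
       if (a > 0) goto <L0>; else goto <L1>;
       <L0>:
       return b;
       <L1>:
       return b;
     >

   lowers to roughly

     b = a + 1;
     if (a > 0) goto <L0>; else goto <L1>;
     <L0>:
     goto <retlab>;
     <L1>:
     goto <retlab>;
     <retlab>:
     return b;

   i.e. the GIMPLE_BIND wrapper is gone (1) and the two identical
   returns have been merged into a single return site (3).  */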

/* Match a return statement with a label.  During lowering, we identify
   identical return statements and replace duplicates with a jump to
   the corresponding label.  */
struct return_statements_t
{
  tree label;
  greturn *stmt;
};
typedef struct return_statements_t return_statements_t;


struct lower_data
{
  /* Block the current statement belongs to.  */
  tree block;

  /* A vector of label and return statements to be moved to the end
     of the function.  */
  vec<return_statements_t> return_statements;

  /* True if the current statement cannot fall through.  */
  bool cannot_fallthru;
};

static void lower_stmt (gimple_stmt_iterator *, struct lower_data *);
static void lower_gimple_bind (gimple_stmt_iterator *, struct lower_data *);
static void lower_try_catch (gimple_stmt_iterator *, struct lower_data *);
static void lower_gimple_return (gimple_stmt_iterator *, struct lower_data *);
static void lower_builtin_setjmp (gimple_stmt_iterator *);
static void lower_builtin_posix_memalign (gimple_stmt_iterator *);


/* Lower the body of current_function_decl from High GIMPLE into Low
   GIMPLE.  */

static unsigned int
lower_function_body (void)
{
  struct lower_data data;
  gimple_seq body = gimple_body (current_function_decl);
  gimple_seq lowered_body;
  gimple_stmt_iterator i;
  gimple *bind;
  gimple *x;

  /* The gimplifier should've left a body of exactly one statement,
     namely a GIMPLE_BIND.  */
  gcc_assert (gimple_seq_first (body) == gimple_seq_last (body)
              && gimple_code (gimple_seq_first_stmt (body)) == GIMPLE_BIND);

  memset (&data, 0, sizeof (data));
  data.block = DECL_INITIAL (current_function_decl);
  BLOCK_SUBBLOCKS (data.block) = NULL_TREE;
  BLOCK_CHAIN (data.block) = NULL_TREE;
  TREE_ASM_WRITTEN (data.block) = 1;
  data.return_statements.create (8);

  bind = gimple_seq_first_stmt (body);
  lowered_body = NULL;
  gimple_seq_add_stmt (&lowered_body, bind);
  i = gsi_start (lowered_body);
  lower_gimple_bind (&i, &data);

  i = gsi_last (lowered_body);

  /* If we had begin stmt markers from e.g. PCH, but this compilation
     doesn't want them, lower_stmt will have cleaned them up; we can
     now clear the flag that indicates we had them.  */
  if (!MAY_HAVE_DEBUG_MARKER_STMTS && cfun->debug_nonbind_markers)
    {
      /* This counter need not be exact, but before lowering it will
         most certainly be.  */
      gcc_assert (cfun->debug_marker_count == 0);
      cfun->debug_nonbind_markers = false;
    }

  /* If the function falls off the end, we need a null return statement.
     If we've already got one in the return_statements vector, we don't
     need to do anything special.  Otherwise build one by hand.  */
  bool may_fallthru = gimple_seq_may_fallthru (lowered_body);
  if (may_fallthru
      && (data.return_statements.is_empty ()
          || (gimple_return_retval (data.return_statements.last().stmt)
              != NULL)))
    {
      x = gimple_build_return (NULL);
      gimple_set_location (x, cfun->function_end_locus);
      gimple_set_block (x, DECL_INITIAL (current_function_decl));
      gsi_insert_after (&i, x, GSI_CONTINUE_LINKING);
      may_fallthru = false;
    }

  /* If we lowered any return statements, emit the representative
     at the end of the function.  */
  while (!data.return_statements.is_empty ())
    {
      return_statements_t t = data.return_statements.pop ();
      x = gimple_build_label (t.label);
      gsi_insert_after (&i, x, GSI_CONTINUE_LINKING);
      gsi_insert_after (&i, t.stmt, GSI_CONTINUE_LINKING);
      if (may_fallthru)
        {
          /* Remove the line number from the representative return statement.
             It now fills in for the fallthru too.  Failure to remove this
             will result in incorrect results for coverage analysis.  */
          gimple_set_location (t.stmt, UNKNOWN_LOCATION);
          may_fallthru = false;
        }
    }

  /* Once the old body has been lowered, replace it with the new
     lowered sequence.  */
  gimple_set_body (current_function_decl, lowered_body);

  gcc_assert (data.block == DECL_INITIAL (current_function_decl));
  BLOCK_SUBBLOCKS (data.block)
    = blocks_nreverse (BLOCK_SUBBLOCKS (data.block));

  clear_block_marks (data.block);
  data.return_statements.release ();
  return 0;
}

namespace {

const pass_data pass_data_lower_cf =
{
  GIMPLE_PASS, /* type */
  "lower", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  PROP_gimple_any, /* properties_required */
  PROP_gimple_lcf, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_lower_cf : public gimple_opt_pass
{
public:
  pass_lower_cf (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_lower_cf, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *) { return lower_function_body (); }

}; // class pass_lower_cf

} // anon namespace

gimple_opt_pass *
make_pass_lower_cf (gcc::context *ctxt)
{
  return new pass_lower_cf (ctxt);
}

/* Lower sequence SEQ.  Unlike gimplification, the statements are not
   relowered when they are changed -- if this has to be done, the lowering
   routine must do it explicitly.  DATA is passed through the recursion.  */

static void
lower_sequence (gimple_seq *seq, struct lower_data *data)
{
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start (*seq); !gsi_end_p (gsi); )
    lower_stmt (&gsi, data);
}


/* Lower the OpenMP directive statement pointed to by GSI.  DATA is
   passed through the recursion.  */

static void
lower_omp_directive (gimple_stmt_iterator *gsi, struct lower_data *data)
{
  gimple *stmt;

  stmt = gsi_stmt (*gsi);

  lower_sequence (gimple_omp_body_ptr (stmt), data);
  gsi_insert_seq_after (gsi, gimple_omp_body (stmt), GSI_CONTINUE_LINKING);
  gimple_omp_set_body (stmt, NULL);
  gsi_next (gsi);
}
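
/* A sketch of the effect of lower_omp_directive (illustrative only;
   by this point the omplower pass has already run, so the attached
   body ends in a GIMPLE_OMP_RETURN marker):

     before:  GIMPLE_OMP_PARALLEL <body: S1; S2; GIMPLE_OMP_RETURN>
     after:   GIMPLE_OMP_PARALLEL <body: (null)>
              S1
              S2
              GIMPLE_OMP_RETURN

   i.e. the lowered body is spliced into the enclosing sequence right
   after the directive statement, which no longer owns it.  */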


/* Lower statement GSI.  DATA is passed through the recursion.  We try to
   track the fallthruness of statements and get rid of unreachable return
   statements in order to prevent the EH lowering pass from adding useless
   edges that can cause bogus warnings to be issued later; this guess need
   not be 100% accurate, simply be conservative and reset cannot_fallthru
   to false if we don't know.  */

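/* An illustrative sketch of the unreachable-return cleanup (hand-written
   example, labels made up): in a sequence such as

     goto <L0>;
     return;
     <L0>:
     ...

   the "return" follows a statement that cannot fall through, so it is
   simply removed rather than being lowered into a goto to the shared
   return site.  */
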
static void
lower_stmt (gimple_stmt_iterator *gsi, struct lower_data *data)
{
  gimple *stmt = gsi_stmt (*gsi);

  gimple_set_block (stmt, data->block);

  switch (gimple_code (stmt))
    {
    case GIMPLE_BIND:
      lower_gimple_bind (gsi, data);
      /* Propagate fallthruness.  */
      return;

    case GIMPLE_COND:
    case GIMPLE_GOTO:
    case GIMPLE_SWITCH:
      data->cannot_fallthru = true;
      gsi_next (gsi);
      return;

    case GIMPLE_RETURN:
      if (data->cannot_fallthru)
        {
          gsi_remove (gsi, false);
          /* Propagate fallthruness.  */
        }
      else
        {
          lower_gimple_return (gsi, data);
          data->cannot_fallthru = true;
        }
      return;

    case GIMPLE_TRY:
      if (gimple_try_kind (stmt) == GIMPLE_TRY_CATCH)
        lower_try_catch (gsi, data);
      else
        {
          /* It must be a GIMPLE_TRY_FINALLY.  */
          bool cannot_fallthru;
          lower_sequence (gimple_try_eval_ptr (stmt), data);
          cannot_fallthru = data->cannot_fallthru;

          /* The finally clause is always executed after the try clause,
             so if it does not fall through, then the try-finally will not
             fall through.  Otherwise, if the try clause does not fall
             through, then when the finally clause falls through it will
             resume execution wherever the try clause was going.  So the
             whole try-finally will only fall through if both the try
             clause and the finally clause fall through.  */
          data->cannot_fallthru = false;
          lower_sequence (gimple_try_cleanup_ptr (stmt), data);
          data->cannot_fallthru |= cannot_fallthru;
          gsi_next (gsi);
        }
      return;

    case GIMPLE_EH_ELSE:
      {
        geh_else *eh_else_stmt = as_a <geh_else *> (stmt);
        lower_sequence (gimple_eh_else_n_body_ptr (eh_else_stmt), data);
        lower_sequence (gimple_eh_else_e_body_ptr (eh_else_stmt), data);
      }
      break;

    case GIMPLE_DEBUG:
      gcc_checking_assert (cfun->debug_nonbind_markers);
      /* We can't possibly have debug bind stmts before lowering; we
         first emit them when entering SSA.  */
      gcc_checking_assert (gimple_debug_nonbind_marker_p (stmt));
      /* Propagate fallthruness.  */
      /* If the function (e.g. from PCH) had debug stmts, but they're
         disabled for this compilation, remove them.  */
      if (!MAY_HAVE_DEBUG_MARKER_STMTS)
        gsi_remove (gsi, true);
      else
        gsi_next (gsi);
      return;

    case GIMPLE_NOP:
    case GIMPLE_ASM:
    case GIMPLE_ASSIGN:
    case GIMPLE_PREDICT:
    case GIMPLE_LABEL:
    case GIMPLE_EH_MUST_NOT_THROW:
    case GIMPLE_OMP_FOR:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SECTIONS_SWITCH:
    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_TASKGROUP:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
    case GIMPLE_OMP_RETURN:
    case GIMPLE_OMP_ATOMIC_LOAD:
    case GIMPLE_OMP_ATOMIC_STORE:
    case GIMPLE_OMP_CONTINUE:
      break;

    case GIMPLE_CALL:
      {
        tree decl = gimple_call_fndecl (stmt);
        unsigned i;

        for (i = 0; i < gimple_call_num_args (stmt); i++)
          {
            tree arg = gimple_call_arg (stmt, i);
            if (EXPR_P (arg))
              TREE_SET_BLOCK (arg, data->block);
          }

        if (decl
            && fndecl_built_in_p (decl, BUILT_IN_NORMAL))
          {
            if (DECL_FUNCTION_CODE (decl) == BUILT_IN_SETJMP)
              {
                lower_builtin_setjmp (gsi);
                data->cannot_fallthru = false;
                return;
              }
            else if (DECL_FUNCTION_CODE (decl) == BUILT_IN_POSIX_MEMALIGN
                     && flag_tree_bit_ccp
                     && gimple_builtin_call_types_compatible_p (stmt, decl))
              {
                lower_builtin_posix_memalign (gsi);
                return;
              }
          }

        if (decl && (flags_from_decl_or_type (decl) & ECF_NORETURN))
          {
            data->cannot_fallthru = true;
            gsi_next (gsi);
            return;
          }

        /* We delay folding of builtin calls from gimplification to
           here so the IL is in a consistent state for the diagnostic
           machinery's job.  */
        if (gimple_call_builtin_p (stmt))
          fold_stmt (gsi);
      }
      break;

    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
    case GIMPLE_OMP_TARGET:
    case GIMPLE_OMP_TEAMS:
    case GIMPLE_OMP_GRID_BODY:
      data->cannot_fallthru = false;
      lower_omp_directive (gsi, data);
      data->cannot_fallthru = false;
      return;

    case GIMPLE_TRANSACTION:
      lower_sequence (gimple_transaction_body_ptr (
                        as_a <gtransaction *> (stmt)),
                      data);
      break;

    default:
      gcc_unreachable ();
    }

  data->cannot_fallthru = false;
  gsi_next (gsi);
}

/* Lower a GIMPLE_BIND GSI.  DATA is passed through the recursion.  */

static void
lower_gimple_bind (gimple_stmt_iterator *gsi, struct lower_data *data)
{
  tree old_block = data->block;
  gbind *stmt = as_a <gbind *> (gsi_stmt (*gsi));
  tree new_block = gimple_bind_block (stmt);

  if (new_block)
    {
      if (new_block == old_block)
        {
          /* The outermost block of the original function may not be the
             outermost statement chain of the gimplified function.  So we
             may see the outermost block just inside the function.  */
          gcc_assert (new_block == DECL_INITIAL (current_function_decl));
          new_block = NULL;
        }
      else
        {
          /* We do not expect to handle duplicate blocks.  */
          gcc_assert (!TREE_ASM_WRITTEN (new_block));
          TREE_ASM_WRITTEN (new_block) = 1;

          /* Block tree may get clobbered by inlining.  Normally this would
             be fixed in rest_of_decl_compilation using block notes, but
             since we are not going to emit them, it is up to us.  */
          BLOCK_CHAIN (new_block) = BLOCK_SUBBLOCKS (old_block);
          BLOCK_SUBBLOCKS (old_block) = new_block;
          BLOCK_SUBBLOCKS (new_block) = NULL_TREE;
          BLOCK_SUPERCONTEXT (new_block) = old_block;

          data->block = new_block;
        }
    }

  record_vars (gimple_bind_vars (stmt));

  /* Scrap DECL_CHAIN up to BLOCK_VARS to ease GC after we no longer
     need gimple_bind_vars.  */
  tree next;
  /* BLOCK_VARS and gimple_bind_vars share a common sub-chain.  Find
     it by marking all BLOCK_VARS.  */
  if (gimple_bind_block (stmt))
    for (tree t = BLOCK_VARS (gimple_bind_block (stmt)); t; t = DECL_CHAIN (t))
      TREE_VISITED (t) = 1;
  for (tree var = gimple_bind_vars (stmt);
       var && ! TREE_VISITED (var); var = next)
    {
      next = DECL_CHAIN (var);
      DECL_CHAIN (var) = NULL_TREE;
    }
  /* Unmark BLOCK_VARS.  */
  if (gimple_bind_block (stmt))
    for (tree t = BLOCK_VARS (gimple_bind_block (stmt)); t; t = DECL_CHAIN (t))
      TREE_VISITED (t) = 0;

  lower_sequence (gimple_bind_body_ptr (stmt), data);

  if (new_block)
    {
      gcc_assert (data->block == new_block);

      BLOCK_SUBBLOCKS (new_block)
        = blocks_nreverse (BLOCK_SUBBLOCKS (new_block));
      data->block = old_block;
    }

  /* The GIMPLE_BIND no longer carries any useful information -- kill it.  */
  gsi_insert_seq_before (gsi, gimple_bind_body (stmt), GSI_SAME_STMT);
  gsi_remove (gsi, false);
}

/* Same as above, but for a GIMPLE_TRY_CATCH.  */

static void
lower_try_catch (gimple_stmt_iterator *gsi, struct lower_data *data)
{
  bool cannot_fallthru;
  gimple *stmt = gsi_stmt (*gsi);
  gimple_stmt_iterator i;

  /* We don't handle GIMPLE_TRY_FINALLY.  */
  gcc_assert (gimple_try_kind (stmt) == GIMPLE_TRY_CATCH);

  lower_sequence (gimple_try_eval_ptr (stmt), data);
  cannot_fallthru = data->cannot_fallthru;

  i = gsi_start (*gimple_try_cleanup_ptr (stmt));
  switch (gimple_code (gsi_stmt (i)))
    {
    case GIMPLE_CATCH:
      /* We expect to see a sequence of GIMPLE_CATCH stmts, each with a
         catch expression and a body.  The whole try/catch may fall
         through iff any of the catch bodies falls through.  */
      for (; !gsi_end_p (i); gsi_next (&i))
        {
          data->cannot_fallthru = false;
          lower_sequence (gimple_catch_handler_ptr (
                            as_a <gcatch *> (gsi_stmt (i))),
                          data);
          if (!data->cannot_fallthru)
            cannot_fallthru = false;
        }
      break;

    case GIMPLE_EH_FILTER:
      /* The exception filter expression only matters if there is an
         exception.  If the exception does not match EH_FILTER_TYPES,
         we will execute EH_FILTER_FAILURE, and we will fall through
         if that falls through.  If the exception does match
         EH_FILTER_TYPES, the stack unwinder will continue up the
         stack, so we will not fall through.  We don't know whether we
         will throw an exception which matches EH_FILTER_TYPES or not,
         so we just ignore EH_FILTER_TYPES and assume that we might
         throw an exception which doesn't match.  */
      data->cannot_fallthru = false;
      lower_sequence (gimple_eh_filter_failure_ptr (gsi_stmt (i)), data);
      if (!data->cannot_fallthru)
        cannot_fallthru = false;
      break;

    case GIMPLE_DEBUG:
      gcc_checking_assert (gimple_debug_begin_stmt_p (gsi_stmt (i)));
      break;

    default:
      /* This case represents statements to be executed when an
         exception occurs.  Those statements are implicitly followed
         by a GIMPLE_RESX to resume execution after the exception.  So
         in this case the try/catch never falls through.  */
      data->cannot_fallthru = false;
      lower_sequence (gimple_try_cleanup_ptr (stmt), data);
      break;
    }

  data->cannot_fallthru = cannot_fallthru;
  gsi_next (gsi);
}


/* Try to determine whether a TRY_CATCH expression can fall through.
   This is a subroutine of gimple_stmt_may_fallthru.  */

static bool
gimple_try_catch_may_fallthru (gtry *stmt)
{
  gimple_stmt_iterator i;

  /* We don't handle GIMPLE_TRY_FINALLY.  */
  gcc_assert (gimple_try_kind (stmt) == GIMPLE_TRY_CATCH);

  /* If the TRY block can fall through, the whole TRY_CATCH can
     fall through.  */
  if (gimple_seq_may_fallthru (gimple_try_eval (stmt)))
    return true;

  i = gsi_start (*gimple_try_cleanup_ptr (stmt));
  switch (gimple_code (gsi_stmt (i)))
    {
    case GIMPLE_CATCH:
      /* We expect to see a sequence of GIMPLE_CATCH stmts, each with a
         catch expression and a body.  The whole try/catch may fall
         through iff any of the catch bodies falls through.  */
      for (; !gsi_end_p (i); gsi_next (&i))
        {
          if (gimple_seq_may_fallthru (gimple_catch_handler (
                                         as_a <gcatch *> (gsi_stmt (i)))))
            return true;
        }
      return false;

    case GIMPLE_EH_FILTER:
      /* The exception filter expression only matters if there is an
         exception.  If the exception does not match EH_FILTER_TYPES,
         we will execute EH_FILTER_FAILURE, and we will fall through
         if that falls through.  If the exception does match
         EH_FILTER_TYPES, the stack unwinder will continue up the
         stack, so we will not fall through.  We don't know whether we
         will throw an exception which matches EH_FILTER_TYPES or not,
         so we just ignore EH_FILTER_TYPES and assume that we might
         throw an exception which doesn't match.  */
      return gimple_seq_may_fallthru (gimple_eh_filter_failure (gsi_stmt (i)));

    default:
      /* This case represents statements to be executed when an
         exception occurs.  Those statements are implicitly followed
         by a GIMPLE_RESX to resume execution after the exception.  So
         in this case the try/catch never falls through.  */
      return false;
    }
}
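
/* For example (an illustrative source-level sketch; the rule is applied
   to the GIMPLE form): in

     try { may_throw (); return; }
     catch (A &) { return; }
     catch (B &) { }

   the try block and the first handler cannot fall through, but the
   second handler can, so the whole try/catch may fall through.  */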


/* Try to determine if we can continue executing the statement
   immediately following STMT.  This guess need not be 100% accurate;
   simply be conservative and return true if we don't know.  This is
   used only to avoid stupidly generating extra code.  If we're wrong,
   we'll just delete the extra code later.  */

bool
gimple_stmt_may_fallthru (gimple *stmt)
{
  if (!stmt)
    return true;

  switch (gimple_code (stmt))
    {
    case GIMPLE_GOTO:
    case GIMPLE_RETURN:
    case GIMPLE_RESX:
      /* Easy cases.  If the last statement of the seq implies
         control transfer, then we can't fall through.  */
      return false;

    case GIMPLE_SWITCH:
      /* Switch has already been lowered and represents a branch
         to a selected label and hence can't fall through.  */
      return false;

    case GIMPLE_COND:
      /* GIMPLE_CONDs are already lowered into a two-way branch.  They
         can't fall through.  */
      return false;

    case GIMPLE_BIND:
      return gimple_seq_may_fallthru (
               gimple_bind_body (as_a <gbind *> (stmt)));

    case GIMPLE_TRY:
      if (gimple_try_kind (stmt) == GIMPLE_TRY_CATCH)
        return gimple_try_catch_may_fallthru (as_a <gtry *> (stmt));

      /* It must be a GIMPLE_TRY_FINALLY.  */

      /* The finally clause is always executed after the try clause,
         so if it does not fall through, then the try-finally will not
         fall through.  Otherwise, if the try clause does not fall
         through, then when the finally clause falls through it will
         resume execution wherever the try clause was going.  So the
         whole try-finally will only fall through if both the try
         clause and the finally clause fall through.  */
      return (gimple_seq_may_fallthru (gimple_try_eval (stmt))
              && gimple_seq_may_fallthru (gimple_try_cleanup (stmt)));

    case GIMPLE_EH_ELSE:
      {
        geh_else *eh_else_stmt = as_a <geh_else *> (stmt);
        return (gimple_seq_may_fallthru (gimple_eh_else_n_body (eh_else_stmt))
                || gimple_seq_may_fallthru (gimple_eh_else_e_body (
                                              eh_else_stmt)));
      }

    case GIMPLE_CALL:
      /* Functions that do not return do not fall through.  */
      return !gimple_call_noreturn_p (stmt);

    default:
      return true;
    }
}
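
/* For instance (an illustrative sketch; "log" is a made-up call): in

     try { return; } finally { log (); }

   the finally clause falls through, but the try clause does not; once
   "log ()" finishes, control resumes where the try clause was headed,
   namely at the return, so the whole construct cannot fall through.  */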


/* Same as gimple_stmt_may_fallthru, but for the gimple sequence SEQ.  */

bool
gimple_seq_may_fallthru (gimple_seq seq)
{
  return gimple_stmt_may_fallthru (gimple_seq_last_nondebug_stmt (seq));
}


/* Lower a GIMPLE_RETURN GSI.  DATA is passed through the recursion.  */

static void
lower_gimple_return (gimple_stmt_iterator *gsi, struct lower_data *data)
{
  greturn *stmt = as_a <greturn *> (gsi_stmt (*gsi));
  gimple *t;
  int i;
  return_statements_t tmp_rs;

  /* Match this up with an existing return statement that's been created.  */
  for (i = data->return_statements.length () - 1;
       i >= 0; i--)
    {
      tmp_rs = data->return_statements[i];

      if (gimple_return_retval (stmt) == gimple_return_retval (tmp_rs.stmt))
        {
          /* Remove the line number from the representative return statement.
             It now fills in for many such returns.  Failure to remove this
             will result in incorrect results for coverage analysis.  */
          gimple_set_location (tmp_rs.stmt, UNKNOWN_LOCATION);

          goto found;
        }
    }

  /* Not found.  Create a new label and record the return statement.  */
  tmp_rs.label = create_artificial_label (cfun->function_end_locus);
  tmp_rs.stmt = stmt;
  data->return_statements.safe_push (tmp_rs);

  /* Generate a goto statement and remove the return statement.  */
 found:
  /* When not optimizing, make sure user returns are preserved.  */
  if (!optimize && gimple_has_location (stmt))
    DECL_ARTIFICIAL (tmp_rs.label) = 0;
  t = gimple_build_goto (tmp_rs.label);
  /* location includes block.  */
  gimple_set_location (t, gimple_location (stmt));
  gsi_insert_before (gsi, t, GSI_SAME_STMT);
  gsi_remove (gsi, false);
}

/* Lower a __builtin_setjmp GSI.

   __builtin_setjmp is passed a pointer to an array of five words (not
   all will be used on all machines).  It operates similarly to the C
   library function of the same name, but is more efficient.

   It is lowered into two other builtins, namely __builtin_setjmp_setup
   and __builtin_setjmp_receiver.

   After full lowering, the body of the function should look like:

    {
      int D.1844;
      int D.2844;

      [...]

      __builtin_setjmp_setup (&buf, &<D1847>);
      D.1844 = 0;
      goto <D1846>;
      <D1847>:;
      __builtin_setjmp_receiver (&<D1847>);
      D.1844 = 1;
      <D1846>:;
      if (D.1844 == 0) goto <D1848>; else goto <D1849>;

      [...]

      __builtin_setjmp_setup (&buf, &<D2847>);
      D.2844 = 0;
      goto <D2846>;
      <D2847>:;
      __builtin_setjmp_receiver (&<D2847>);
      D.2844 = 1;
      <D2846>:;
      if (D.2844 == 0) goto <D2848>; else goto <D2849>;

      [...]

      <D3850>:;
      return;
    }

   During CFG creation an extra per-function (or per-OpenMP-region)
   block with an ABNORMAL_DISPATCHER internal call is added; it is the
   unique destination of all the abnormal call edges and the unique
   source of all the abnormal edges to the receivers, thus keeping the
   complexity explosion localized.  */

static void
lower_builtin_setjmp (gimple_stmt_iterator *gsi)
{
  gimple *stmt = gsi_stmt (*gsi);
  location_t loc = gimple_location (stmt);
  tree cont_label = create_artificial_label (loc);
  tree next_label = create_artificial_label (loc);
  tree dest, t, arg;
  gimple *g;

  /* __builtin_setjmp_{setup,receiver} aren't ECF_RETURNS_TWICE, and for
     RTL these builtins are modelled as non-local label jumps to the label
     that is passed to these two builtins, so pretend we have a non-local
     label during GIMPLE passes too.  See PR60003.  */
  cfun->has_nonlocal_label = 1;

  /* NEXT_LABEL is the label __builtin_longjmp will jump to.  Its address is
     passed to both __builtin_setjmp_setup and __builtin_setjmp_receiver.  */
  FORCED_LABEL (next_label) = 1;

  tree orig_dest = dest = gimple_call_lhs (stmt);
  if (orig_dest && TREE_CODE (orig_dest) == SSA_NAME)
    dest = create_tmp_reg (TREE_TYPE (orig_dest));

  /* Build '__builtin_setjmp_setup (BUF, NEXT_LABEL)' and insert.  */
  arg = build_addr (next_label);
  t = builtin_decl_implicit (BUILT_IN_SETJMP_SETUP);
  g = gimple_build_call (t, 2, gimple_call_arg (stmt, 0), arg);
  /* location includes block.  */
  gimple_set_location (g, loc);
  gsi_insert_before (gsi, g, GSI_SAME_STMT);

  /* Build 'DEST = 0' and insert.  */
  if (dest)
    {
      g = gimple_build_assign (dest, build_zero_cst (TREE_TYPE (dest)));
      gimple_set_location (g, loc);
      gsi_insert_before (gsi, g, GSI_SAME_STMT);
    }

  /* Build 'goto CONT_LABEL' and insert.  */
  g = gimple_build_goto (cont_label);
  gsi_insert_before (gsi, g, GSI_SAME_STMT);

  /* Build 'NEXT_LABEL:' and insert.  */
  g = gimple_build_label (next_label);
  gsi_insert_before (gsi, g, GSI_SAME_STMT);

  /* Build '__builtin_setjmp_receiver (NEXT_LABEL)' and insert.  */
  arg = build_addr (next_label);
  t = builtin_decl_implicit (BUILT_IN_SETJMP_RECEIVER);
  g = gimple_build_call (t, 1, arg);
  gimple_set_location (g, loc);
  gsi_insert_before (gsi, g, GSI_SAME_STMT);

  /* Build 'DEST = 1' and insert.  */
  if (dest)
    {
      g = gimple_build_assign (dest, fold_convert_loc (loc, TREE_TYPE (dest),
                                                       integer_one_node));
      gimple_set_location (g, loc);
      gsi_insert_before (gsi, g, GSI_SAME_STMT);
    }

  /* Build 'CONT_LABEL:' and insert.  */
  g = gimple_build_label (cont_label);
  gsi_insert_before (gsi, g, GSI_SAME_STMT);

  /* Build orig_dest = dest if necessary.  */
  if (dest != orig_dest)
    {
      g = gimple_build_assign (orig_dest, dest);
      gsi_insert_before (gsi, g, GSI_SAME_STMT);
    }

  /* Remove the call to __builtin_setjmp.  */
  gsi_remove (gsi, false);
}

/* Lower calls to posix_memalign to
     res = posix_memalign (ptr, align, size);
     if (res == 0)
       *ptr = __builtin_assume_aligned (*ptr, align);
   or to
     void *tem;
     res = posix_memalign (&tem, align, size);
     if (res == 0)
       ptr = __builtin_assume_aligned (tem, align);
   in case the first argument was &ptr.  That way we can get at the
   alignment of the heap pointer in CCP.  */

static void
lower_builtin_posix_memalign (gimple_stmt_iterator *gsi)
{
  gimple *stmt, *call = gsi_stmt (*gsi);
  tree pptr = gimple_call_arg (call, 0);
  tree align = gimple_call_arg (call, 1);
  tree res = gimple_call_lhs (call);
  tree ptr = create_tmp_reg (ptr_type_node);
  if (TREE_CODE (pptr) == ADDR_EXPR)
    {
      tree tem = create_tmp_var (ptr_type_node);
      TREE_ADDRESSABLE (tem) = 1;
      gimple_call_set_arg (call, 0, build_fold_addr_expr (tem));
      stmt = gimple_build_assign (ptr, tem);
    }
  else
    stmt = gimple_build_assign (ptr,
                                fold_build2 (MEM_REF, ptr_type_node, pptr,
                                             build_int_cst (ptr_type_node, 0)));
  if (res == NULL_TREE)
    {
      res = create_tmp_reg (integer_type_node);
      gimple_call_set_lhs (call, res);
    }
  tree align_label = create_artificial_label (UNKNOWN_LOCATION);
  tree noalign_label = create_artificial_label (UNKNOWN_LOCATION);
  gimple *cond = gimple_build_cond (EQ_EXPR, res, integer_zero_node,
                                    align_label, noalign_label);
  gsi_insert_after (gsi, cond, GSI_NEW_STMT);
  gsi_insert_after (gsi, gimple_build_label (align_label), GSI_NEW_STMT);
  gsi_insert_after (gsi, stmt, GSI_NEW_STMT);
  stmt = gimple_build_call (builtin_decl_implicit (BUILT_IN_ASSUME_ALIGNED),
                            2, ptr, align);
  gimple_call_set_lhs (stmt, ptr);
  gsi_insert_after (gsi, stmt, GSI_NEW_STMT);
  stmt = gimple_build_assign (fold_build2 (MEM_REF, ptr_type_node, pptr,
                                           build_int_cst (ptr_type_node, 0)),
                              ptr);
  gsi_insert_after (gsi, stmt, GSI_NEW_STMT);
  gsi_insert_after (gsi, gimple_build_label (noalign_label), GSI_NEW_STMT);
}
\f

/* Record the variables in VARS into function FN.  */

void
record_vars_into (tree vars, tree fn)
{
  for (; vars; vars = DECL_CHAIN (vars))
    {
      tree var = vars;

      /* BIND_EXPRs also contain function/type/constant declarations
         we don't need to care about.  */
      if (!VAR_P (var))
        continue;

      /* Nothing to do in this case.  */
      if (DECL_EXTERNAL (var))
        continue;

      /* Record the variable.  */
      add_local_decl (DECL_STRUCT_FUNCTION (fn), var);
    }
}


/* Record the variables in VARS into current_function_decl.  */

void
record_vars (tree vars)
{
  record_vars_into (vars, current_function_decl);
}