/* CPU mode switching
   Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "basic-block.h"
#include "output.h"
#include "tm_p.h"
#include "function.h"
#include "tree-pass.h"
#include "timevar.h"
#include "df.h"

/* We want target macros for the mode switching code to be able to refer
   to instruction attribute values.  */
#include "insn-attr.h"

#ifdef OPTIMIZE_MODE_SWITCHING

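/* A target enables this pass by defining OPTIMIZE_MODE_SWITCHING together
   with the macros used below: NUM_MODES_FOR_MODE_SWITCHING, MODE_NEEDED,
   MODE_PRIORITY_TO_MODE and EMIT_MODE_SET, and optionally MODE_ENTRY,
   MODE_EXIT and MODE_AFTER.  As a purely illustrative sketch (the values
   below are hypothetical and not taken from any particular port), a target
   with a single entity that has two real modes plus a final "don't care"
   mode might provide something like:

     #define OPTIMIZE_MODE_SWITCHING(ENTITY) 1
     #define NUM_MODES_FOR_MODE_SWITCHING { 3 }
     #define MODE_NEEDED(ENTITY, INSN) (get_attr_my_mode (INSN))
     #define MODE_PRIORITY_TO_MODE(ENTITY, N) (N)
     #define EMIT_MODE_SET(ENTITY, MODE, HARD_REGS_LIVE) \
       my_emit_mode_set (MODE)

   where get_attr_my_mode and my_emit_mode_set stand in for whatever the
   port uses to classify insns and to emit the actual mode-setting
   instructions.  */
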
/* The algorithm for setting the modes consists of scanning the insn list
   and finding all the insns which require a specific mode.  Each insn gets
   a unique struct seginfo element.  These structures are inserted into a list
   for each basic block.  For each entity, there is an array of bb_info over
   the flow graph basic blocks (local var 'bb_info'); each element contains a
   list of all the insns within that basic block, in the order in which they
   are encountered.

   For each entity, any basic block WITHOUT any insns requiring a specific
   mode is given a single entry without a mode.  (Each basic block
   in the flow graph must have at least one entry in the segment table.)

   The LCM algorithm is then run over the flow graph to determine where to
   place the sets to the highest-priority mode with respect to the first
   insn in any one block.  Any adjustments required to the transparency
   vectors are made, then the next iteration starts for the next-lower
   priority mode, till for each entity all modes are exhausted.

   More details are located in the code for optimize_mode_switching().  */
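
/* As a small, purely illustrative example (ignoring MODE_AFTER): if the
   insns of one basic block need modes A, A, B, <none>, B for some entity,
   the scan below creates two seginfo elements for that block - one for the
   first insn needing A and one for the first insn needing B - and records
   B as the mode the block is left computing.  A block with no requirement
   at all gets a single no-mode entry attached to its last insn.  */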
\f
/* This structure contains the information for each insn which requires
   a specific mode to be set.
   MODE is the mode this insn must be executed in.
   INSN_PTR is the insn to be executed (may be the note that marks the
   beginning of a basic block).
   BBNUM is the flow graph basic block this insn occurs in.
   NEXT is the next insn in the same basic block.
   REGS_LIVE is the set of hard registers live before INSN_PTR; it is
   passed to EMIT_MODE_SET when a mode switch is emitted here.  */
struct seginfo
{
  int mode;
  rtx insn_ptr;
  int bbnum;
  struct seginfo *next;
  HARD_REG_SET regs_live;
};

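/* Per-basic-block, per-entity information.  SEGINFO is the head of the
   list of seginfo elements for the block (see above); COMPUTING is the
   mode, as far as this pass can tell, that the block leaves the entity
   in, or the entity's "no mode" value if the block imposes no mode.  */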
struct bb_info
{
  struct seginfo *seginfo;
  int computing;
};

/* These bitmaps are used for the LCM algorithm.  Each is indexed by basic
   block, with one bit per entity J: TRANSP has bit J set if the block
   neither requires nor changes entity J's mode; ANTIC has it set if the
   mode currently being placed is anticipatable at the start of the block;
   COMP has it set if the block computes that mode.  */

static sbitmap *antic;
static sbitmap *transp;
static sbitmap *comp;

static struct seginfo * new_seginfo (int, rtx, int, HARD_REG_SET);
static void add_seginfo (struct bb_info *, struct seginfo *);
static void reg_dies (rtx, HARD_REG_SET *);
static void reg_becomes_live (rtx, rtx, void *);
static void make_preds_opaque (basic_block, int);
\f

/* Allocate a new SEGINFO structure, initialized with MODE, INSN, the
   basic block index BB, and the live hard register set REGS_LIVE.  */

static struct seginfo *
new_seginfo (int mode, rtx insn, int bb, HARD_REG_SET regs_live)
{
  struct seginfo *ptr;
  ptr = XNEW (struct seginfo);
  ptr->mode = mode;
  ptr->insn_ptr = insn;
  ptr->bbnum = bb;
  ptr->next = NULL;
  COPY_HARD_REG_SET (ptr->regs_live, regs_live);
  return ptr;
}

/* Add a seginfo element to the end of a list.
   HEAD is a pointer to the list beginning.
   INFO is the structure to be linked in.  */

static void
add_seginfo (struct bb_info *head, struct seginfo *info)
{
  struct seginfo *ptr;

  if (head->seginfo == NULL)
    head->seginfo = info;
  else
    {
      ptr = head->seginfo;
      while (ptr->next != NULL)
        ptr = ptr->next;
      ptr->next = info;
    }
}

/* Make all predecessors of basic block B opaque, recursively, till we hit
   some that are already non-transparent, or an edge where aux is set; that
   denotes that a mode set is to be done on that edge.
   J is the bit number in the bitmaps that corresponds to the entity that
   we are currently handling mode-switching for.  */

static void
make_preds_opaque (basic_block b, int j)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, b->preds)
    {
      basic_block pb = e->src;

      if (e->aux || ! TEST_BIT (transp[pb->index], j))
        continue;

      RESET_BIT (transp[pb->index], j);
      make_preds_opaque (pb, j);
    }
}

/* Record in LIVE that register REG died.  */

static void
reg_dies (rtx reg, HARD_REG_SET *live)
{
  int regno;

  if (!REG_P (reg))
    return;

  regno = REGNO (reg);
  if (regno < FIRST_PSEUDO_REGISTER)
    remove_from_hard_reg_set (live, GET_MODE (reg), regno);
}

/* Record in LIVE that register REG became live.
   This is called via note_stores.  */

static void
reg_becomes_live (rtx reg, rtx setter ATTRIBUTE_UNUSED, void *live)
{
  int regno;

  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  if (!REG_P (reg))
    return;

  regno = REGNO (reg);
  if (regno < FIRST_PSEUDO_REGISTER)
    add_to_hard_reg_set ((HARD_REG_SET *) live, GET_MODE (reg), regno);
}

/* Make sure that if MODE_ENTRY is defined, MODE_EXIT is defined as well,
   and vice versa.  */
#if defined (MODE_ENTRY) != defined (MODE_EXIT)
#error "Both MODE_ENTRY and MODE_EXIT must be defined"
#endif
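
/* When MODE_ENTRY and MODE_EXIT are defined, MODE_ENTRY (E) is the mode
   entity E is known to be in when the function is entered and MODE_EXIT (E)
   the mode it has to be in when the function returns; the pass then splits
   dedicated blocks off the entry and exit edges so that those requirements
   can be recorded like any other (see create_pre_exit below and the
   MODE_ENTRY handling in optimize_mode_switching).  */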

#if defined (MODE_ENTRY) && defined (MODE_EXIT)
/* Split the fallthrough edge to the exit block, so that we can note
   that NORMAL_MODE is required there.  Return the new block if it's
   inserted before the exit block.  Otherwise return null.  */

static basic_block
create_pre_exit (int n_entities, int *entity_map, const int *num_modes)
{
  edge eg;
  edge_iterator ei;
  basic_block pre_exit;

  /* The only non-call predecessor at this stage is a block with a
     fallthrough edge; there can be at most one, but there could be
     none at all, e.g. when exit is called.  */
  pre_exit = 0;
  FOR_EACH_EDGE (eg, ei, EXIT_BLOCK_PTR->preds)
    if (eg->flags & EDGE_FALLTHRU)
      {
        basic_block src_bb = eg->src;
        rtx last_insn, ret_reg;

        gcc_assert (!pre_exit);
        /* If this function returns a value at the end, we have to
           insert the final mode switch before the return value copy
           to its hard register.  */
        if (EDGE_COUNT (EXIT_BLOCK_PTR->preds) == 1
            && NONJUMP_INSN_P ((last_insn = BB_END (src_bb)))
            && GET_CODE (PATTERN (last_insn)) == USE
            && GET_CODE ((ret_reg = XEXP (PATTERN (last_insn), 0))) == REG)
          {
            int ret_start = REGNO (ret_reg);
            int nregs = hard_regno_nregs[ret_start][GET_MODE (ret_reg)];
            int ret_end = ret_start + nregs;
            int short_block = 0;
            int maybe_builtin_apply = 0;
            int forced_late_switch = 0;
            rtx before_return_copy;

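            /* Walk backwards from the final USE of the return register,
               past the insns that copy the return value into its hard
               register(s), so that the mode switch can later be placed
               before those copies; NREGS counts the return value hard
               registers for which no copy has been seen yet.  */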
            do
              {
                rtx return_copy = PREV_INSN (last_insn);
                rtx return_copy_pat, copy_reg;
                int copy_start, copy_num;
                int j;

                if (INSN_P (return_copy))
                  {
                    return_copy_pat = PATTERN (return_copy);
                    switch (GET_CODE (return_copy_pat))
                      {
                      case USE:
                        /* Skip __builtin_apply pattern.  */
                        if (GET_CODE (XEXP (return_copy_pat, 0)) == REG
                            && (FUNCTION_VALUE_REGNO_P
                                (REGNO (XEXP (return_copy_pat, 0)))))
                          {
                            maybe_builtin_apply = 1;
                            last_insn = return_copy;
                            continue;
                          }
                        break;

                      case ASM_OPERANDS:
                        /* Skip barrier insns.  */
                        if (!MEM_VOLATILE_P (return_copy_pat))
                          break;

                        /* Fall through.  */

                      case ASM_INPUT:
                      case UNSPEC_VOLATILE:
                        last_insn = return_copy;
                        continue;

                      default:
                        break;
                      }

                    /* If the return register is not (in its entirety)
                       likely spilled, the return copy might be
                       partially or completely optimized away.  */
                    return_copy_pat = single_set (return_copy);
                    if (!return_copy_pat)
                      {
                        return_copy_pat = PATTERN (return_copy);
                        if (GET_CODE (return_copy_pat) != CLOBBER)
                          break;
                        else if (!optimize)
                          {
                            /* This might be (clobber (reg [<result>]))
                               when not optimizing.  Then check if
                               the previous insn is the clobber for
                               the return register.  */
                            copy_reg = SET_DEST (return_copy_pat);
                            if (GET_CODE (copy_reg) == REG
                                && !HARD_REGISTER_NUM_P (REGNO (copy_reg)))
                              {
                                if (INSN_P (PREV_INSN (return_copy)))
                                  {
                                    return_copy = PREV_INSN (return_copy);
                                    return_copy_pat = PATTERN (return_copy);
                                    if (GET_CODE (return_copy_pat) != CLOBBER)
                                      break;
                                  }
                              }
                          }
                      }
                    copy_reg = SET_DEST (return_copy_pat);
                    if (GET_CODE (copy_reg) == REG)
                      copy_start = REGNO (copy_reg);
                    else if (GET_CODE (copy_reg) == SUBREG
                             && GET_CODE (SUBREG_REG (copy_reg)) == REG)
                      copy_start = REGNO (SUBREG_REG (copy_reg));
                    else
                      break;
                    if (copy_start >= FIRST_PSEUDO_REGISTER)
                      break;
                    copy_num
                      = hard_regno_nregs[copy_start][GET_MODE (copy_reg)];

                    /* If the return register is not likely spilled - as is
                       the case for floating point on SH4 - then it might
                       be set by an arithmetic operation that needs a
                       different mode than the exit block.  */
                    for (j = n_entities - 1; j >= 0; j--)
                      {
                        int e = entity_map[j];
                        int mode = MODE_NEEDED (e, return_copy);

                        if (mode != num_modes[e] && mode != MODE_EXIT (e))
                          break;
                      }
                    if (j >= 0)
                      {
                        /* For the SH4, floating point loads depend on fpscr,
                           thus we might need to put the final mode switch
                           after the return value copy.  That is still OK,
                           because a floating point return value does not
                           conflict with address reloads.  */
                        if (copy_start >= ret_start
                            && copy_start + copy_num <= ret_end
                            && OBJECT_P (SET_SRC (return_copy_pat)))
                          forced_late_switch = 1;
                        break;
                      }

                    if (copy_start >= ret_start
                        && copy_start + copy_num <= ret_end)
                      nregs -= copy_num;
                    else if (!maybe_builtin_apply
                             || !FUNCTION_VALUE_REGNO_P (copy_start))
                      break;
                    last_insn = return_copy;
                  }
                /* ??? Exception handling can lead to the return value
                   copy being already separated from the return value use,
                   as in unwind-dw2.c .
                   Similarly, conditionally returning without a value,
                   and conditionally using builtin_return can lead to an
                   isolated use.  */
                if (return_copy == BB_HEAD (src_bb))
                  {
                    short_block = 1;
                    break;
                  }
                last_insn = return_copy;
              }
            while (nregs);

            /* If we didn't see a full return value copy, verify that there
               is a plausible reason for this.  If some, but not all of the
               return register is likely spilled, we can expect that there
               is a copy for the likely spilled part.  */
            gcc_assert (!nregs
                        || forced_late_switch
                        || short_block
                        || !(CLASS_LIKELY_SPILLED_P
                             (REGNO_REG_CLASS (ret_start)))
                        || (nregs
                            != hard_regno_nregs[ret_start][GET_MODE (ret_reg)])
                        /* For multi-hard-register floating point
                           values, sometimes the likely-spilled part
                           is ordinarily copied first, then the other
                           part is set with an arithmetic operation.
                           This doesn't actually cause reload
                           failures, so let it pass.  */
                        || (GET_MODE_CLASS (GET_MODE (ret_reg)) != MODE_INT
                            && nregs != 1));

            if (INSN_P (last_insn))
              {
                before_return_copy
                  = emit_note_before (NOTE_INSN_DELETED, last_insn);
                /* Instructions preceding LAST_INSN in the same block might
                   require a different mode than MODE_EXIT, so if we might
                   have such instructions, keep them in a separate block
                   from pre_exit.  */
                if (last_insn != BB_HEAD (src_bb))
                  src_bb = split_block (src_bb,
                                        PREV_INSN (before_return_copy))->dest;
              }
            else
              before_return_copy = last_insn;
            pre_exit = split_block (src_bb, before_return_copy)->src;
          }
        else
          {
            pre_exit = split_edge (eg);
          }
      }

  return pre_exit;
}
#endif

/* Find all insns that need a particular mode setting, and insert the
   necessary mode switches.  Return true if we did work.  */

static int
optimize_mode_switching (void)
{
  rtx insn;
  int e;
  basic_block bb;
  int need_commit = 0;
  sbitmap *kill;
  struct edge_list *edge_list;
  static const int num_modes[] = NUM_MODES_FOR_MODE_SWITCHING;
#define N_ENTITIES ARRAY_SIZE (num_modes)
  int entity_map[N_ENTITIES];
  struct bb_info *bb_info[N_ENTITIES];
  int i, j;
  int n_entities;
  int max_num_modes = 0;
  bool emitted = false;
  basic_block post_entry ATTRIBUTE_UNUSED, pre_exit ATTRIBUTE_UNUSED;

  for (e = N_ENTITIES - 1, n_entities = 0; e >= 0; e--)
    if (OPTIMIZE_MODE_SWITCHING (e))
      {
        int entry_exit_extra = 0;

        /* Create the list of segments within each basic block.
           If MODE_ENTRY and MODE_EXIT are defined, allow for the extra
           blocks that are split off from the entry and exit blocks.  */
#if defined (MODE_ENTRY) && defined (MODE_EXIT)
        entry_exit_extra = 3;
#endif
        bb_info[n_entities]
          = XCNEWVEC (struct bb_info, last_basic_block + entry_exit_extra);
        entity_map[n_entities++] = e;
        if (num_modes[e] > max_num_modes)
          max_num_modes = num_modes[e];
      }

  if (! n_entities)
    return 0;

#if defined (MODE_ENTRY) && defined (MODE_EXIT)
  /* Split the edge from the entry block, so that we can note that
     NORMAL_MODE is supplied there.  */
  post_entry = split_edge (single_succ_edge (ENTRY_BLOCK_PTR));
  pre_exit = create_pre_exit (n_entities, entity_map, num_modes);
#endif

  df_analyze ();

  /* Create the bitmap vectors.  */

  antic = sbitmap_vector_alloc (last_basic_block, n_entities);
  transp = sbitmap_vector_alloc (last_basic_block, n_entities);
  comp = sbitmap_vector_alloc (last_basic_block, n_entities);

  sbitmap_vector_ones (transp, last_basic_block);

  for (j = n_entities - 1; j >= 0; j--)
    {
      int e = entity_map[j];
      int no_mode = num_modes[e];
      struct bb_info *info = bb_info[j];

      /* Determine the mode, if any, that the first use of entity E in
         each block needs.  This is the mode that is anticipatable for
         the block.  Also compute the initial transparency settings.  */
      FOR_EACH_BB (bb)
        {
          struct seginfo *ptr;
          int last_mode = no_mode;
          HARD_REG_SET live_now;

          REG_SET_TO_HARD_REG_SET (live_now, df_get_live_in (bb));

          /* Pretend the mode is clobbered across abnormal edges.  */
          {
            edge_iterator ei;
            edge e;
            FOR_EACH_EDGE (e, ei, bb->preds)
              if (e->flags & EDGE_COMPLEX)
                break;
            if (e)
              {
                ptr = new_seginfo (no_mode, BB_HEAD (bb), bb->index, live_now);
                add_seginfo (info + bb->index, ptr);
                RESET_BIT (transp[bb->index], j);
              }
          }

          for (insn = BB_HEAD (bb);
               insn != NULL && insn != NEXT_INSN (BB_END (bb));
               insn = NEXT_INSN (insn))
            {
              if (INSN_P (insn))
                {
                  int mode = MODE_NEEDED (e, insn);
                  rtx link;

                  if (mode != no_mode && mode != last_mode)
                    {
                      last_mode = mode;
                      ptr = new_seginfo (mode, insn, bb->index, live_now);
                      add_seginfo (info + bb->index, ptr);
                      RESET_BIT (transp[bb->index], j);
                    }
#ifdef MODE_AFTER
                  last_mode = MODE_AFTER (last_mode, insn);
#endif
                  /* Update LIVE_NOW.  */
                  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
                    if (REG_NOTE_KIND (link) == REG_DEAD)
                      reg_dies (XEXP (link, 0), &live_now);

                  note_stores (PATTERN (insn), reg_becomes_live, &live_now);
                  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
                    if (REG_NOTE_KIND (link) == REG_UNUSED)
                      reg_dies (XEXP (link, 0), &live_now);
                }
            }

          info[bb->index].computing = last_mode;
          /* Check for blocks without ANY mode requirements.  */
          if (last_mode == no_mode)
            {
              ptr = new_seginfo (no_mode, BB_END (bb), bb->index, live_now);
              add_seginfo (info + bb->index, ptr);
            }
        }
#if defined (MODE_ENTRY) && defined (MODE_EXIT)
      {
        int mode = MODE_ENTRY (e);

        if (mode != no_mode)
          {
            bb = post_entry;

            /* By always making this nontransparent, we save
               an extra check in make_preds_opaque.  We also
               need this to avoid confusing pre_edge_lcm when
               antic is cleared but transp and comp are set.  */
            RESET_BIT (transp[bb->index], j);

            /* Insert a fake computing definition of MODE into entry
               blocks which compute no mode.  This represents the mode on
               entry.  */
            info[bb->index].computing = mode;

            if (pre_exit)
              info[pre_exit->index].seginfo->mode = MODE_EXIT (e);
          }
      }
#endif /* MODE_ENTRY && MODE_EXIT */
    }

  kill = sbitmap_vector_alloc (last_basic_block, n_entities);
  for (i = 0; i < max_num_modes; i++)
    {
      int current_mode[N_ENTITIES];
      sbitmap *delete;
      sbitmap *insert;

      /* Set the anticipatable and computing arrays.  */
      sbitmap_vector_zero (antic, last_basic_block);
      sbitmap_vector_zero (comp, last_basic_block);
      for (j = n_entities - 1; j >= 0; j--)
        {
          int m = current_mode[j] = MODE_PRIORITY_TO_MODE (entity_map[j], i);
          struct bb_info *info = bb_info[j];

          FOR_EACH_BB (bb)
            {
              if (info[bb->index].seginfo->mode == m)
                SET_BIT (antic[bb->index], j);

              if (info[bb->index].computing == m)
                SET_BIT (comp[bb->index], j);
            }
        }

      /* Calculate the optimal locations for placing mode switches to
         modes with priority I.  */

      FOR_EACH_BB (bb)
        sbitmap_not (kill[bb->index], transp[bb->index]);
      edge_list = pre_edge_lcm (n_entities, transp, comp, antic,
                                kill, &insert, &delete);
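
      /* pre_edge_lcm tells us, for each entity bit J, on which edges a
         mode set has to be inserted (INSERT, indexed by edge number) and
         in which blocks the existing mode computation has become
         redundant (DELETE, indexed by block); both are consumed below.  */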

      for (j = n_entities - 1; j >= 0; j--)
        {
          /* Insert all mode sets that have been requested by LCM.  */
          int no_mode = num_modes[entity_map[j]];

          /* Wherever we have moved a mode setting upwards in the flow graph,
             the blocks between the new setting site and the now redundant
             computation cease to be transparent for any lower-priority
             mode of the same entity.  First mark each insertion site edge
             by setting its aux field, then propagate the new
             non-transparency from the redundant computation upwards till
             we hit an insertion site or an already non-transparent block.  */
          for (e = NUM_EDGES (edge_list) - 1; e >= 0; e--)
            {
              edge eg = INDEX_EDGE (edge_list, e);
              int mode;
              basic_block src_bb;
              HARD_REG_SET live_at_edge;
              rtx mode_set;

              eg->aux = 0;

              if (! TEST_BIT (insert[e], j))
                continue;

              eg->aux = (void *)1;

              mode = current_mode[j];
              src_bb = eg->src;

              REG_SET_TO_HARD_REG_SET (live_at_edge, df_get_live_out (src_bb));

              start_sequence ();
              EMIT_MODE_SET (entity_map[j], mode, live_at_edge);
              mode_set = get_insns ();
              end_sequence ();

              /* Do not bother to insert an empty sequence.  */
              if (mode_set == NULL_RTX)
                continue;

              /* We should not get an abnormal edge here.  */
              gcc_assert (! (eg->flags & EDGE_ABNORMAL));

              need_commit = 1;
              insert_insn_on_edge (mode_set, eg);
            }

          FOR_EACH_BB_REVERSE (bb)
            if (TEST_BIT (delete[bb->index], j))
              {
                make_preds_opaque (bb, j);
                /* Cancel the 'deleted' mode set.  */
                bb_info[j][bb->index].seginfo->mode = no_mode;
              }
        }

      sbitmap_vector_free (delete);
      sbitmap_vector_free (insert);
      clear_aux_for_edges ();
      free_edge_list (edge_list);
    }

  /* Now output the remaining mode sets in all the segments.  */
  for (j = n_entities - 1; j >= 0; j--)
    {
      int no_mode = num_modes[entity_map[j]];

      FOR_EACH_BB_REVERSE (bb)
        {
          struct seginfo *ptr, *next;
          for (ptr = bb_info[j][bb->index].seginfo; ptr; ptr = next)
            {
              next = ptr->next;
              if (ptr->mode != no_mode)
                {
                  rtx mode_set;

                  start_sequence ();
                  EMIT_MODE_SET (entity_map[j], ptr->mode, ptr->regs_live);
                  mode_set = get_insns ();
                  end_sequence ();

                  /* Insert MODE_SET only if it is nonempty.  */
                  if (mode_set != NULL_RTX)
                    {
                      emitted = true;
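                      /* If INSN_PTR is the note that marks the start of the
                         basic block, the mode set has to go after it so that
                         the new insns stay inside the block; otherwise emit
                         it before the insn that needs the mode.  */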
                      if (NOTE_INSN_BASIC_BLOCK_P (ptr->insn_ptr))
                        emit_insn_after (mode_set, ptr->insn_ptr);
                      else
                        emit_insn_before (mode_set, ptr->insn_ptr);
                    }
                }

              free (ptr);
            }
        }

      free (bb_info[j]);
    }

  /* Finished.  Free up all the things we've allocated.  */
  sbitmap_vector_free (kill);
  sbitmap_vector_free (antic);
  sbitmap_vector_free (transp);
  sbitmap_vector_free (comp);

  if (need_commit)
    commit_edge_insertions ();

#if defined (MODE_ENTRY) && defined (MODE_EXIT)
  cleanup_cfg (CLEANUP_NO_INSN_DEL);
#else
  if (!need_commit && !emitted)
    return 0;
#endif

  return 1;
}

#endif /* OPTIMIZE_MODE_SWITCHING */
\f
static bool
gate_mode_switching (void)
{
#ifdef OPTIMIZE_MODE_SWITCHING
  return true;
#else
  return false;
#endif
}

static unsigned int
rest_of_handle_mode_switching (void)
{
#ifdef OPTIMIZE_MODE_SWITCHING
  optimize_mode_switching ();
#endif /* OPTIMIZE_MODE_SWITCHING */
  return 0;
}


struct tree_opt_pass pass_mode_switching =
{
  "mode-sw",                            /* name */
  gate_mode_switching,                  /* gate */
  rest_of_handle_mode_switching,        /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_MODE_SWITCH,                       /* tv_id */
  0,                                    /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_df_finish |
  TODO_dump_func,                       /* todo_flags_finish */
  0                                     /* letter */
};