gcc/bb-reorder.c
1 /* Basic block reordering routines for the GNU compiler.
2 Copyright (C) 2000, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2010, 2011,
3 2012 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 /* This (greedy) algorithm constructs traces in several rounds.
22 The construction starts from "seeds". The seed for the first round
23 is the entry point of the function. When there is more than one seed,
24 the one with the lowest key in the heap is selected first
25 (see function bb_to_key). Then the algorithm repeatedly adds the most
26 probable successor to the end of a trace. Finally it connects the traces.
27
28 There are two parameters: Branch Threshold and Exec Threshold.
29 If the probability of an edge to a successor of the current basic block is
30 lower than Branch Threshold, or the frequency of the successor is lower than
31 Exec Threshold, the successor will be a seed in one of the next rounds.
32 Each round has these parameters lower than the previous one.
33 The last round has to have these parameters set to zero
34 so that the remaining blocks are picked up.
35
36 The algorithm selects the most probable successor from all unvisited
37 successors and successors that have been added to this trace.
38 The other successors (those that have not been "sent" to the next round) will
39 be other seeds for this round, and the secondary traces will start from them.
40 If the successor has not been visited in this trace, it is added to the trace
41 (however, there is some heuristic for simple branches).
42 If the successor has been visited in this trace, a loop has been found.
43 If the loop has many iterations, the loop is rotated so that the
44 source block of the most probable edge going out of the loop
45 is the last block of the trace.
46 If the loop has few iterations and there is no edge from the last block of
47 the loop going out of the loop, the loop header is duplicated.
48 Finally, the construction of the trace is terminated.
49
50 When connecting traces it first checks whether there is an edge from the
51 last block of one trace to the first block of another trace.
52 When there are still some unconnected traces it checks whether there exists
53 a basic block BB such that BB is a successor of the last bb of one trace
54 and BB is a predecessor of the first block of another trace. In this case,
55 BB is duplicated and the traces are connected through this duplicate.
56 The remaining traces are simply connected, so there will be a jump to the
57 beginning of each remaining trace.
58
59
60 References:
61
62 "Software Trace Cache"
63 A. Ramirez, J. Larriba-Pey, C. Navarro, J. Torrellas and M. Valero; 1999
64 http://citeseer.nj.nec.com/15361.html
65
66 */
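/* A simplified, illustrative sketch of the flow described above (pseudocode
   only; the names below are placeholders, not identifiers used in this file):

       for (round = 0; round < N_ROUNDS; round++)
         while (the seed heap is not empty)
           {
             bb = extract the seed with the lowest key (see bb_to_key);
             if (bb is too cold for this round)
               move bb to the next round's heap;
             else
               grow a trace from bb, repeatedly following the most probable
               EDGE_CAN_FALLTHRU successor that passes this round's thresholds;
           }
       connect_traces ();   */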
67
68 #include "config.h"
69 #include "system.h"
70 #include "coretypes.h"
71 #include "tm.h"
72 #include "rtl.h"
73 #include "regs.h"
74 #include "flags.h"
75 #include "timevar.h"
76 #include "output.h"
77 #include "cfglayout.h"
78 #include "fibheap.h"
79 #include "target.h"
80 #include "function.h"
81 #include "tm_p.h"
82 #include "obstack.h"
83 #include "expr.h"
84 #include "params.h"
85 #include "diagnostic-core.h"
86 #include "toplev.h" /* user_defined_section_attribute */
87 #include "tree-pass.h"
88 #include "df.h"
89 #include "bb-reorder.h"
90 #include "except.h"
91
92 /* The number of rounds. In most cases there will only be 4 rounds, but
93 when partitioning hot and cold basic blocks into separate sections of
94 the .o file there will be an extra round.*/
95 #define N_ROUNDS 5
96
97 /* Stubs in case we don't have a return insn.
98 We have to check at runtime too, not only at compile time. */
99
100 #ifndef HAVE_return
101 #define HAVE_return 0
102 #define gen_return() NULL_RTX
103 #endif
104
105
106 struct target_bb_reorder default_target_bb_reorder;
107 #if SWITCHABLE_TARGET
108 struct target_bb_reorder *this_target_bb_reorder = &default_target_bb_reorder;
109 #endif
110
111 #define uncond_jump_length \
112 (this_target_bb_reorder->x_uncond_jump_length)
113
114 /* Branch thresholds in thousandths (per mille) of the REG_BR_PROB_BASE. */
115 static int branch_threshold[N_ROUNDS] = {400, 200, 100, 0, 0};
116
117 /* Exec thresholds in thousandths (per mille) of the frequency of bb 0. */
118 static int exec_threshold[N_ROUNDS] = {500, 200, 50, 0, 0};
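/* For illustration: in round 1 the branch threshold is
   REG_BR_PROB_BASE * 400 / 1000, i.e. with REG_BR_PROB_BASE of 10000 an edge
   whose probability is below 40% sends its destination to a later round, and
   the execution threshold is half (500 per mille) of the entry-point
   frequency.  The final rounds use zero thresholds, so every remaining block
   is eventually placed in some trace.  */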
119
120 /* If an edge's frequency is lower than DUPLICATION_THRESHOLD per mille of the
121 entry block's frequency, the edge destination is not duplicated while connecting traces. */
122 #define DUPLICATION_THRESHOLD 100
123
124 /* Structure to hold needed information for each basic block. */
125 typedef struct bbro_basic_block_data_def
126 {
127 /* Which trace is the bb start of (-1 means it is not a start of a trace). */
128 int start_of_trace;
129
130 /* Which trace is the bb end of (-1 means it is not an end of a trace). */
131 int end_of_trace;
132
133 /* Which trace is the bb in? */
134 int in_trace;
135
136 /* Which trace was this bb visited in? */
137 int visited;
138
139 /* Which heap is BB in (if any)? */
140 fibheap_t heap;
141
142 /* Which heap node is BB in (if any)? */
143 fibnode_t node;
144 } bbro_basic_block_data;
145
146 /* The current size of the following dynamic array. */
147 static int array_size;
148
149 /* The array which holds needed information for basic blocks. */
150 static bbro_basic_block_data *bbd;
151
152 /* To avoid frequent reallocation the size of arrays is greater than needed,
153 the number of elements is (not less than) 1.25 * size_wanted. */
154 #define GET_ARRAY_SIZE(X) ((((X) / 4) + 1) * 5)
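/* For example, GET_ARRAY_SIZE (100) = ((100 / 4) + 1) * 5 = 130, which is
   at least 1.25 * 100 = 125; the "+ 1" also keeps the result above the
   requested size when the integer division rounds down.  */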
155
156 /* Free the memory and set the pointer to NULL. */
157 #define FREE(P) (gcc_assert (P), free (P), P = 0)
158
159 /* Structure for holding information about a trace. */
160 struct trace
161 {
162 /* First and last basic block of the trace. */
163 basic_block first, last;
164
165 /* The round of the STC creation which this trace was found in. */
166 int round;
167
168 /* The length (i.e. the number of basic blocks) of the trace. */
169 int length;
170 };
171
172 /* Maximum frequency and count of one of the entry blocks. */
173 static int max_entry_frequency;
174 static gcov_type max_entry_count;
175
176 /* Local function prototypes. */
177 static void find_traces (int *, struct trace *);
178 static basic_block rotate_loop (edge, struct trace *, int);
179 static void mark_bb_visited (basic_block, int);
180 static void find_traces_1_round (int, int, gcov_type, struct trace *, int *,
181 int, fibheap_t *, int);
182 static basic_block copy_bb (basic_block, edge, basic_block, int);
183 static fibheapkey_t bb_to_key (basic_block);
184 static bool better_edge_p (const_basic_block, const_edge, int, int, int, int, const_edge);
185 static void connect_traces (int, struct trace *);
186 static bool copy_bb_p (const_basic_block, int);
187 static bool push_to_next_round_p (const_basic_block, int, int, int, gcov_type);
188 \f
189 /* Return the trace number in which BB was visited. */
190
191 static int
192 bb_visited_trace (const_basic_block bb)
193 {
194 gcc_assert (bb->index < array_size);
195 return bbd[bb->index].visited;
196 }
197
198 /* This function marks BB as visited in trace number TRACE. */
199
200 static void
201 mark_bb_visited (basic_block bb, int trace)
202 {
203 bbd[bb->index].visited = trace;
204 if (bbd[bb->index].heap)
205 {
206 fibheap_delete_node (bbd[bb->index].heap, bbd[bb->index].node);
207 bbd[bb->index].heap = NULL;
208 bbd[bb->index].node = NULL;
209 }
210 }
211
212 /* Check to see if bb should be pushed into the next round of trace
213 collections or not. Reasons for pushing the block forward are: (1)
214 the block is cold, we are doing partitioning, and there will be
215 another round (cold partition blocks are not supposed to be
216 collected into traces until the very last round); or (2) there will
217 be another round, and the basic block is not "hot enough" for the
218 current round of trace collection. */
219
220 static bool
221 push_to_next_round_p (const_basic_block bb, int round, int number_of_rounds,
222 int exec_th, gcov_type count_th)
223 {
224 bool there_exists_another_round;
225 bool block_not_hot_enough;
226
227 there_exists_another_round = round < number_of_rounds - 1;
228
229 block_not_hot_enough = (bb->frequency < exec_th
230 || bb->count < count_th
231 || probably_never_executed_bb_p (bb));
232
233 if (there_exists_another_round
234 && block_not_hot_enough)
235 return true;
236 else
237 return false;
238 }
239
240 /* Find the traces for Software Trace Cache. Chain each trace through the
241 basic blocks' aux fields. Store the number of traces in *N_TRACES and the
242 descriptions of the traces in TRACES. */
243
244 static void
245 find_traces (int *n_traces, struct trace *traces)
246 {
247 int i;
248 int number_of_rounds;
249 edge e;
250 edge_iterator ei;
251 fibheap_t heap;
252
253 /* Add one extra round of trace collection when partitioning hot/cold
254 basic blocks into separate sections. The last round is for all the
255 cold blocks (and ONLY the cold blocks). */
256
257 number_of_rounds = N_ROUNDS - 1;
258
259 /* Insert entry points of function into heap. */
260 heap = fibheap_new ();
261 max_entry_frequency = 0;
262 max_entry_count = 0;
263 FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
264 {
265 bbd[e->dest->index].heap = heap;
266 bbd[e->dest->index].node = fibheap_insert (heap, bb_to_key (e->dest),
267 e->dest);
268 if (e->dest->frequency > max_entry_frequency)
269 max_entry_frequency = e->dest->frequency;
270 if (e->dest->count > max_entry_count)
271 max_entry_count = e->dest->count;
272 }
273
274 /* Find the traces. */
275 for (i = 0; i < number_of_rounds; i++)
276 {
277 gcov_type count_threshold;
278
279 if (dump_file)
280 fprintf (dump_file, "STC - round %d\n", i + 1);
281
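/* Scale the profile-count threshold for this round; when max_entry_count
   is large, divide before multiplying so that the intermediate product
   cannot overflow.  */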
282 if (max_entry_count < INT_MAX / 1000)
283 count_threshold = max_entry_count * exec_threshold[i] / 1000;
284 else
285 count_threshold = max_entry_count / 1000 * exec_threshold[i];
286
287 find_traces_1_round (REG_BR_PROB_BASE * branch_threshold[i] / 1000,
288 max_entry_frequency * exec_threshold[i] / 1000,
289 count_threshold, traces, n_traces, i, &heap,
290 number_of_rounds);
291 }
292 fibheap_delete (heap);
293
294 if (dump_file)
295 {
296 for (i = 0; i < *n_traces; i++)
297 {
298 basic_block bb;
299 fprintf (dump_file, "Trace %d (round %d): ", i + 1,
300 traces[i].round + 1);
301 for (bb = traces[i].first; bb != traces[i].last; bb = (basic_block) bb->aux)
302 fprintf (dump_file, "%d [%d] ", bb->index, bb->frequency);
303 fprintf (dump_file, "%d [%d]\n", bb->index, bb->frequency);
304 }
305 fflush (dump_file);
306 }
307 }
308
309 /* Rotate loop whose back edge is BACK_EDGE in the tail of trace TRACE
310 (with sequential number TRACE_N). */
311
312 static basic_block
313 rotate_loop (edge back_edge, struct trace *trace, int trace_n)
314 {
315 basic_block bb;
316
317 /* Information about the best end (end after rotation) of the loop. */
318 basic_block best_bb = NULL;
319 edge best_edge = NULL;
320 int best_freq = -1;
321 gcov_type best_count = -1;
322 /* The best edge is preferred when its destination is not visited yet
323 or is a start block of some trace. */
324 bool is_preferred = false;
325
326 /* Find the most frequent edge that goes out from current trace. */
327 bb = back_edge->dest;
328 do
329 {
330 edge e;
331 edge_iterator ei;
332
333 FOR_EACH_EDGE (e, ei, bb->succs)
334 if (e->dest != EXIT_BLOCK_PTR
335 && bb_visited_trace (e->dest) != trace_n
336 && (e->flags & EDGE_CAN_FALLTHRU)
337 && !(e->flags & EDGE_COMPLEX))
338 {
339 if (is_preferred)
340 {
341 /* The best edge is preferred. */
342 if (!bb_visited_trace (e->dest)
343 || bbd[e->dest->index].start_of_trace >= 0)
344 {
345 /* The current edge E is also preferred. */
346 int freq = EDGE_FREQUENCY (e);
347 if (freq > best_freq || e->count > best_count)
348 {
349 best_freq = freq;
350 best_count = e->count;
351 best_edge = e;
352 best_bb = bb;
353 }
354 }
355 }
356 else
357 {
358 if (!bb_visited_trace (e->dest)
359 || bbd[e->dest->index].start_of_trace >= 0)
360 {
361 /* The current edge E is preferred. */
362 is_preferred = true;
363 best_freq = EDGE_FREQUENCY (e);
364 best_count = e->count;
365 best_edge = e;
366 best_bb = bb;
367 }
368 else
369 {
370 int freq = EDGE_FREQUENCY (e);
371 if (!best_edge || freq > best_freq || e->count > best_count)
372 {
373 best_freq = freq;
374 best_count = e->count;
375 best_edge = e;
376 best_bb = bb;
377 }
378 }
379 }
380 }
381 bb = (basic_block) bb->aux;
382 }
383 while (bb != back_edge->dest);
384
385 if (best_bb)
386 {
387 /* Rotate the loop so that the BEST_EDGE goes out from the last block of
388 the trace. */
389 if (back_edge->dest == trace->first)
390 {
391 trace->first = (basic_block) best_bb->aux;
392 }
393 else
394 {
395 basic_block prev_bb;
396
397 for (prev_bb = trace->first;
398 prev_bb->aux != back_edge->dest;
399 prev_bb = (basic_block) prev_bb->aux)
400 ;
401 prev_bb->aux = best_bb->aux;
402
403 /* Try to get rid of uncond jump to cond jump. */
404 if (single_succ_p (prev_bb))
405 {
406 basic_block header = single_succ (prev_bb);
407
408 /* Duplicate HEADER if it is a small block containing cond jump
409 in the end. */
410 if (any_condjump_p (BB_END (header)) && copy_bb_p (header, 0)
411 && !find_reg_note (BB_END (header), REG_CROSSING_JUMP,
412 NULL_RTX))
413 copy_bb (header, single_succ_edge (prev_bb), prev_bb, trace_n);
414 }
415 }
416 }
417 else
418 {
419 /* We have not found suitable loop tail so do no rotation. */
420 best_bb = back_edge->src;
421 }
422 best_bb->aux = NULL;
423 return best_bb;
424 }
425
426 /* One round of finding traces. Find traces for BRANCH_TH and EXEC_TH, i.e. do
427 not include in traces basic blocks whose probability is lower than BRANCH_TH,
428 whose frequency is lower than EXEC_TH, or whose count is lower than
429 COUNT_TH. It stores the new traces into TRACES and modifies the number of
430 traces *N_TRACES. Sets the round (which the trace belongs to) to ROUND. It
431 expects that starting basic blocks are in *HEAP, and at the end it deletes
432 *HEAP and stores starting points for the next round into a new *HEAP. */
433
434 static void
435 find_traces_1_round (int branch_th, int exec_th, gcov_type count_th,
436 struct trace *traces, int *n_traces, int round,
437 fibheap_t *heap, int number_of_rounds)
438 {
439 /* Heap for discarded basic blocks which are possible starting points for
440 the next round. */
441 fibheap_t new_heap = fibheap_new ();
442
443 while (!fibheap_empty (*heap))
444 {
445 basic_block bb;
446 struct trace *trace;
447 edge best_edge, e;
448 fibheapkey_t key;
449 edge_iterator ei;
450
451 bb = (basic_block) fibheap_extract_min (*heap);
452 bbd[bb->index].heap = NULL;
453 bbd[bb->index].node = NULL;
454
455 if (dump_file)
456 fprintf (dump_file, "Getting bb %d\n", bb->index);
457
458 /* If the BB's frequency is too low send BB to the next round. When
459 partitioning hot/cold blocks into separate sections, make sure all
460 the cold blocks (and ONLY the cold blocks) go into the (extra) final
461 round. */
462
463 if (push_to_next_round_p (bb, round, number_of_rounds, exec_th,
464 count_th))
465 {
466 int key = bb_to_key (bb);
467 bbd[bb->index].heap = new_heap;
468 bbd[bb->index].node = fibheap_insert (new_heap, key, bb);
469
470 if (dump_file)
471 fprintf (dump_file,
472 " Possible start point of next round: %d (key: %d)\n",
473 bb->index, key);
474 continue;
475 }
476
477 trace = traces + *n_traces;
478 trace->first = bb;
479 trace->round = round;
480 trace->length = 0;
481 bbd[bb->index].in_trace = *n_traces;
482 (*n_traces)++;
483
484 do
485 {
486 int prob, freq;
487 bool ends_in_call;
488
489 /* The probability and frequency of the best edge. */
490 int best_prob = INT_MIN / 2;
491 int best_freq = INT_MIN / 2;
492
493 best_edge = NULL;
494 mark_bb_visited (bb, *n_traces);
495 trace->length++;
496
497 if (dump_file)
498 fprintf (dump_file, "Basic block %d was visited in trace %d\n",
499 bb->index, *n_traces - 1);
500
501 ends_in_call = block_ends_with_call_p (bb);
502
503 /* Select the successor that will be placed after BB. */
504 FOR_EACH_EDGE (e, ei, bb->succs)
505 {
506 gcc_assert (!(e->flags & EDGE_FAKE));
507
508 if (e->dest == EXIT_BLOCK_PTR)
509 continue;
510
511 if (bb_visited_trace (e->dest)
512 && bb_visited_trace (e->dest) != *n_traces)
513 continue;
514
515 if (BB_PARTITION (e->dest) != BB_PARTITION (bb))
516 continue;
517
518 prob = e->probability;
519 freq = e->dest->frequency;
520
521 /* The only sensible preference for a call instruction is the
522 fallthru edge. Don't bother selecting anything else. */
523 if (ends_in_call)
524 {
525 if (e->flags & EDGE_CAN_FALLTHRU)
526 {
527 best_edge = e;
528 best_prob = prob;
529 best_freq = freq;
530 }
531 continue;
532 }
533
534 /* Skip an edge that cannot be a fallthru, or an improbable or
535 infrequent successor (i.e. an unsuitable successor). */
536 if (!(e->flags & EDGE_CAN_FALLTHRU) || (e->flags & EDGE_COMPLEX)
537 || prob < branch_th || EDGE_FREQUENCY (e) < exec_th
538 || e->count < count_th)
539 continue;
540
541 /* If partitioning hot/cold basic blocks, don't consider edges
542 that cross section boundaries. */
543
544 if (better_edge_p (bb, e, prob, freq, best_prob, best_freq,
545 best_edge))
546 {
547 best_edge = e;
548 best_prob = prob;
549 best_freq = freq;
550 }
551 }
552
553 /* If the best destination has multiple predecessors, and can be
554 duplicated cheaper than a jump, don't allow it to be added
555 to a trace. We'll duplicate it when connecting traces. */
556 if (best_edge && EDGE_COUNT (best_edge->dest->preds) >= 2
557 && copy_bb_p (best_edge->dest, 0))
558 best_edge = NULL;
559
560 /* Add all non-selected successors to the heaps. */
561 FOR_EACH_EDGE (e, ei, bb->succs)
562 {
563 if (e == best_edge
564 || e->dest == EXIT_BLOCK_PTR
565 || bb_visited_trace (e->dest))
566 continue;
567
568 key = bb_to_key (e->dest);
569
570 if (bbd[e->dest->index].heap)
571 {
572 /* E->DEST is already in some heap. */
573 if (key != bbd[e->dest->index].node->key)
574 {
575 if (dump_file)
576 {
577 fprintf (dump_file,
578 "Changing key for bb %d from %ld to %ld.\n",
579 e->dest->index,
580 (long) bbd[e->dest->index].node->key,
581 key);
582 }
583 fibheap_replace_key (bbd[e->dest->index].heap,
584 bbd[e->dest->index].node, key);
585 }
586 }
587 else
588 {
589 fibheap_t which_heap = *heap;
590
591 prob = e->probability;
592 freq = EDGE_FREQUENCY (e);
593
594 if (!(e->flags & EDGE_CAN_FALLTHRU)
595 || (e->flags & EDGE_COMPLEX)
596 || prob < branch_th || freq < exec_th
597 || e->count < count_th)
598 {
599 /* When partitioning hot/cold basic blocks, make sure
600 the cold blocks (and only the cold blocks) all get
601 pushed to the last round of trace collection. */
602
603 if (push_to_next_round_p (e->dest, round,
604 number_of_rounds,
605 exec_th, count_th))
606 which_heap = new_heap;
607 }
608
609 bbd[e->dest->index].heap = which_heap;
610 bbd[e->dest->index].node = fibheap_insert (which_heap,
611 key, e->dest);
612
613 if (dump_file)
614 {
615 fprintf (dump_file,
616 " Possible start of %s round: %d (key: %ld)\n",
617 (which_heap == new_heap) ? "next" : "this",
618 e->dest->index, (long) key);
619 }
620
621 }
622 }
623
624 if (best_edge) /* Suitable successor was found. */
625 {
626 if (bb_visited_trace (best_edge->dest) == *n_traces)
627 {
628 /* We do nothing with one basic block loops. */
629 if (best_edge->dest != bb)
630 {
631 if (EDGE_FREQUENCY (best_edge)
632 > 4 * best_edge->dest->frequency / 5)
633 {
634 /* The loop has at least 4 iterations. If the loop
635 header is not the first block of the function
636 we can rotate the loop. */
637
638 if (best_edge->dest != ENTRY_BLOCK_PTR->next_bb)
639 {
640 if (dump_file)
641 {
642 fprintf (dump_file,
643 "Rotating loop %d - %d\n",
644 best_edge->dest->index, bb->index);
645 }
646 bb->aux = best_edge->dest;
647 bbd[best_edge->dest->index].in_trace =
648 (*n_traces) - 1;
649 bb = rotate_loop (best_edge, trace, *n_traces);
650 }
651 }
652 else
653 {
654 /* The loop has less than 4 iterations. */
655
656 if (single_succ_p (bb)
657 && copy_bb_p (best_edge->dest,
658 optimize_edge_for_speed_p (best_edge)))
659 {
660 bb = copy_bb (best_edge->dest, best_edge, bb,
661 *n_traces);
662 trace->length++;
663 }
664 }
665 }
666
667 /* Terminate the trace. */
668 break;
669 }
670 else
671 {
672 /* Check for a situation
673
674 A
675 /|
676 B |
677 \|
678 C
679
680 where
681 EDGE_FREQUENCY (AB) + EDGE_FREQUENCY (BC)
682 >= EDGE_FREQUENCY (AC).
683 (i.e. 2 * B->frequency >= EDGE_FREQUENCY (AC) )
684 Best ordering is then A B C.
685
686 This situation is created for example by:
687
688 if (A) B;
689 C;
690
691 */
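/* Illustrative numbers: if EDGE_FREQUENCY (AC) is 600 and B->frequency
   is 400, then 2 * 400 = 800 >= 600 and the edge A->B is selected, giving
   the layout A B C; with B->frequency of only 250 the test fails and the
   original best edge A->C is kept.  */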
692
693 FOR_EACH_EDGE (e, ei, bb->succs)
694 if (e != best_edge
695 && (e->flags & EDGE_CAN_FALLTHRU)
696 && !(e->flags & EDGE_COMPLEX)
697 && !bb_visited_trace (e->dest)
698 && single_pred_p (e->dest)
699 && !(e->flags & EDGE_CROSSING)
700 && single_succ_p (e->dest)
701 && (single_succ_edge (e->dest)->flags
702 & EDGE_CAN_FALLTHRU)
703 && !(single_succ_edge (e->dest)->flags & EDGE_COMPLEX)
704 && single_succ (e->dest) == best_edge->dest
705 && 2 * e->dest->frequency >= EDGE_FREQUENCY (best_edge))
706 {
707 best_edge = e;
708 if (dump_file)
709 fprintf (dump_file, "Selecting BB %d\n",
710 best_edge->dest->index);
711 break;
712 }
713
714 bb->aux = best_edge->dest;
715 bbd[best_edge->dest->index].in_trace = (*n_traces) - 1;
716 bb = best_edge->dest;
717 }
718 }
719 }
720 while (best_edge);
721 trace->last = bb;
722 bbd[trace->first->index].start_of_trace = *n_traces - 1;
723 bbd[trace->last->index].end_of_trace = *n_traces - 1;
724
725 /* The trace is terminated, so we have to recompute the keys in the heap
726 (some blocks can have a lower key because now one of their predecessors
727 is an end of the trace). */
728 FOR_EACH_EDGE (e, ei, bb->succs)
729 {
730 if (e->dest == EXIT_BLOCK_PTR
731 || bb_visited_trace (e->dest))
732 continue;
733
734 if (bbd[e->dest->index].heap)
735 {
736 key = bb_to_key (e->dest);
737 if (key != bbd[e->dest->index].node->key)
738 {
739 if (dump_file)
740 {
741 fprintf (dump_file,
742 "Changing key for bb %d from %ld to %ld.\n",
743 e->dest->index,
744 (long) bbd[e->dest->index].node->key, key);
745 }
746 fibheap_replace_key (bbd[e->dest->index].heap,
747 bbd[e->dest->index].node,
748 key);
749 }
750 }
751 }
752 }
753
754 fibheap_delete (*heap);
755
756 /* "Return" the new heap. */
757 *heap = new_heap;
758 }
759
760 /* Create a duplicate of the basic block OLD_BB and redirect edge E to it, add
761 it to the trace after BB, mark the duplicate visited and update the pass's
762 data structures (TRACE is the number of the trace OLD_BB is duplicated into). */
763
764 static basic_block
765 copy_bb (basic_block old_bb, edge e, basic_block bb, int trace)
766 {
767 basic_block new_bb;
768
769 new_bb = duplicate_block (old_bb, e, bb);
770 BB_COPY_PARTITION (new_bb, old_bb);
771
772 gcc_assert (e->dest == new_bb);
773
774 if (dump_file)
775 fprintf (dump_file,
776 "Duplicated bb %d (created bb %d)\n",
777 old_bb->index, new_bb->index);
778
779 if (new_bb->index >= array_size || last_basic_block > array_size)
780 {
781 int i;
782 int new_size;
783
784 new_size = MAX (last_basic_block, new_bb->index + 1);
785 new_size = GET_ARRAY_SIZE (new_size);
786 bbd = XRESIZEVEC (bbro_basic_block_data, bbd, new_size);
787 for (i = array_size; i < new_size; i++)
788 {
789 bbd[i].start_of_trace = -1;
790 bbd[i].end_of_trace = -1;
791 bbd[i].in_trace = -1;
792 bbd[i].visited = 0;
793 bbd[i].heap = NULL;
794 bbd[i].node = NULL;
795 }
796 array_size = new_size;
797
798 if (dump_file)
799 {
800 fprintf (dump_file,
801 "Growing the dynamic array to %d elements.\n",
802 array_size);
803 }
804 }
805
806 gcc_assert (!bb_visited_trace (e->dest));
807 mark_bb_visited (new_bb, trace);
808 new_bb->aux = bb->aux;
809 bb->aux = new_bb;
810
811 bbd[new_bb->index].in_trace = trace;
812
813 return new_bb;
814 }
815
816 /* Compute and return the key (for the heap) of the basic block BB. */
817
818 static fibheapkey_t
819 bb_to_key (basic_block bb)
820 {
821 edge e;
822 edge_iterator ei;
823 int priority = 0;
824
825 /* Do not start in probably never executed blocks. */
826
827 if (BB_PARTITION (bb) == BB_COLD_PARTITION
828 || probably_never_executed_bb_p (bb))
829 return BB_FREQ_MAX;
830
831 /* Prefer blocks whose predecessor is an end of some trace
832 or whose predecessor edge is EDGE_DFS_BACK. */
833 FOR_EACH_EDGE (e, ei, bb->preds)
834 {
835 if ((e->src != ENTRY_BLOCK_PTR && bbd[e->src->index].end_of_trace >= 0)
836 || (e->flags & EDGE_DFS_BACK))
837 {
838 int edge_freq = EDGE_FREQUENCY (e);
839
840 if (edge_freq > priority)
841 priority = edge_freq;
842 }
843 }
844
845 if (priority)
846 /* The block with priority should have significantly lower key. */
847 return -(100 * BB_FREQ_MAX + 100 * priority + bb->frequency);
848 return -bb->frequency;
849 }
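/* A worked example of the keys above (illustration only): with BB_FREQ_MAX
   of 10000, a block of frequency 500 whose most frequent trace-ending
   predecessor edge has frequency 300 gets key
   -(100 * 10000 + 100 * 300 + 500) = -1030500, a block of frequency 500
   with no such predecessor gets -500, and a probably-never-executed block
   gets 10000.  Since the fibonacci heap extracts the minimum key first, the
   first block is chosen as a trace seed well before the other two.  */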
850
851 /* Return true when the edge E from basic block BB is better than the temporary
852 best edge (details are in function). The probability of edge E is PROB. The
853 frequency of the successor is FREQ. The current best probability is
854 BEST_PROB, the best frequency is BEST_FREQ.
855 The edge is considered to be equivalent when PROB does not differ much from
856 BEST_PROB; similarly for frequency. */
857
858 static bool
859 better_edge_p (const_basic_block bb, const_edge e, int prob, int freq, int best_prob,
860 int best_freq, const_edge cur_best_edge)
861 {
862 bool is_better_edge;
863
864 /* The BEST_* values do not have to be best, but can be a bit smaller than
865 maximum values. */
866 int diff_prob = best_prob / 10;
867 int diff_freq = best_freq / 10;
868
869 if (prob > best_prob + diff_prob)
870 /* The edge has higher probability than the temporary best edge. */
871 is_better_edge = true;
872 else if (prob < best_prob - diff_prob)
873 /* The edge has lower probability than the temporary best edge. */
874 is_better_edge = false;
875 else if (freq < best_freq - diff_freq)
876 /* The edge and the temporary best edge have almost equivalent
877 probabilities. The higher frequency of a successor now means
878 that there is another edge going into that successor.
879 This successor has lower frequency so it is better. */
880 is_better_edge = true;
881 else if (freq > best_freq + diff_freq)
882 /* This successor has higher frequency so it is worse. */
883 is_better_edge = false;
884 else if (e->dest->prev_bb == bb)
885 /* The edges have equivalent probabilities and the successors
886 have equivalent frequencies. Select the previous successor. */
887 is_better_edge = true;
888 else
889 is_better_edge = false;
890
891 /* If we are doing hot/cold partitioning, make sure that we always favor
892 non-crossing edges over crossing edges. */
893
894 if (!is_better_edge
895 && flag_reorder_blocks_and_partition
896 && cur_best_edge
897 && (cur_best_edge->flags & EDGE_CROSSING)
898 && !(e->flags & EDGE_CROSSING))
899 is_better_edge = true;
900
901 return is_better_edge;
902 }
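/* A numeric illustration of the 10% "equivalence" bands used above: with
   BEST_PROB = 6000 and BEST_FREQ = 2000, DIFF_PROB is 600 and DIFF_FREQ is
   200, so an edge with probability 6400 is treated as having an equivalent
   probability; if its successor's frequency is 1700 (below 2000 - 200), the
   edge is considered better, because the lower-frequency successor is less
   likely to be entered through some other edge as well.  */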
903
904 /* Connect traces in array TRACES, N_TRACES is the count of traces. */
905
906 static void
907 connect_traces (int n_traces, struct trace *traces)
908 {
909 int i;
910 bool *connected;
911 bool two_passes;
912 int last_trace;
913 int current_pass;
914 int current_partition;
915 int freq_threshold;
916 gcov_type count_threshold;
917
918 freq_threshold = max_entry_frequency * DUPLICATION_THRESHOLD / 1000;
919 if (max_entry_count < INT_MAX / 1000)
920 count_threshold = max_entry_count * DUPLICATION_THRESHOLD / 1000;
921 else
922 count_threshold = max_entry_count / 1000 * DUPLICATION_THRESHOLD;
923
924 connected = XCNEWVEC (bool, n_traces);
925 last_trace = -1;
926 current_pass = 1;
927 current_partition = BB_PARTITION (traces[0].first);
928 two_passes = false;
929
930 if (flag_reorder_blocks_and_partition)
931 for (i = 0; i < n_traces && !two_passes; i++)
932 if (BB_PARTITION (traces[0].first)
933 != BB_PARTITION (traces[i].first))
934 two_passes = true;
935
936 for (i = 0; i < n_traces || (two_passes && current_pass == 1) ; i++)
937 {
938 int t = i;
939 int t2;
940 edge e, best;
941 int best_len;
942
943 if (i >= n_traces)
944 {
945 gcc_assert (two_passes && current_pass == 1);
946 i = 0;
947 t = i;
948 current_pass = 2;
949 if (current_partition == BB_HOT_PARTITION)
950 current_partition = BB_COLD_PARTITION;
951 else
952 current_partition = BB_HOT_PARTITION;
953 }
954
955 if (connected[t])
956 continue;
957
958 if (two_passes
959 && BB_PARTITION (traces[t].first) != current_partition)
960 continue;
961
962 connected[t] = true;
963
964 /* Find the predecessor traces. */
965 for (t2 = t; t2 > 0;)
966 {
967 edge_iterator ei;
968 best = NULL;
969 best_len = 0;
970 FOR_EACH_EDGE (e, ei, traces[t2].first->preds)
971 {
972 int si = e->src->index;
973
974 if (e->src != ENTRY_BLOCK_PTR
975 && (e->flags & EDGE_CAN_FALLTHRU)
976 && !(e->flags & EDGE_COMPLEX)
977 && bbd[si].end_of_trace >= 0
978 && !connected[bbd[si].end_of_trace]
979 && (BB_PARTITION (e->src) == current_partition)
980 && (!best
981 || e->probability > best->probability
982 || (e->probability == best->probability
983 && traces[bbd[si].end_of_trace].length > best_len)))
984 {
985 best = e;
986 best_len = traces[bbd[si].end_of_trace].length;
987 }
988 }
989 if (best)
990 {
991 best->src->aux = best->dest;
992 t2 = bbd[best->src->index].end_of_trace;
993 connected[t2] = true;
994
995 if (dump_file)
996 {
997 fprintf (dump_file, "Connection: %d %d\n",
998 best->src->index, best->dest->index);
999 }
1000 }
1001 else
1002 break;
1003 }
1004
1005 if (last_trace >= 0)
1006 traces[last_trace].last->aux = traces[t2].first;
1007 last_trace = t;
1008
1009 /* Find the successor traces. */
1010 while (1)
1011 {
1012 /* Find the continuation of the chain. */
1013 edge_iterator ei;
1014 best = NULL;
1015 best_len = 0;
1016 FOR_EACH_EDGE (e, ei, traces[t].last->succs)
1017 {
1018 int di = e->dest->index;
1019
1020 if (e->dest != EXIT_BLOCK_PTR
1021 && (e->flags & EDGE_CAN_FALLTHRU)
1022 && !(e->flags & EDGE_COMPLEX)
1023 && bbd[di].start_of_trace >= 0
1024 && !connected[bbd[di].start_of_trace]
1025 && (BB_PARTITION (e->dest) == current_partition)
1026 && (!best
1027 || e->probability > best->probability
1028 || (e->probability == best->probability
1029 && traces[bbd[di].start_of_trace].length > best_len)))
1030 {
1031 best = e;
1032 best_len = traces[bbd[di].start_of_trace].length;
1033 }
1034 }
1035
1036 if (best)
1037 {
1038 if (dump_file)
1039 {
1040 fprintf (dump_file, "Connection: %d %d\n",
1041 best->src->index, best->dest->index);
1042 }
1043 t = bbd[best->dest->index].start_of_trace;
1044 traces[last_trace].last->aux = traces[t].first;
1045 connected[t] = true;
1046 last_trace = t;
1047 }
1048 else
1049 {
1050 /* Try to connect the traces by duplication of 1 block. */
1051 edge e2;
1052 basic_block next_bb = NULL;
1053 bool try_copy = false;
1054
1055 FOR_EACH_EDGE (e, ei, traces[t].last->succs)
1056 if (e->dest != EXIT_BLOCK_PTR
1057 && (e->flags & EDGE_CAN_FALLTHRU)
1058 && !(e->flags & EDGE_COMPLEX)
1059 && (!best || e->probability > best->probability))
1060 {
1061 edge_iterator ei;
1062 edge best2 = NULL;
1063 int best2_len = 0;
1064
1065 /* If the destination is a start of a trace which is only
1066 one block long, then no need to search the successor
1067 blocks of the trace. Accept it. */
1068 if (bbd[e->dest->index].start_of_trace >= 0
1069 && traces[bbd[e->dest->index].start_of_trace].length
1070 == 1)
1071 {
1072 best = e;
1073 try_copy = true;
1074 continue;
1075 }
1076
1077 FOR_EACH_EDGE (e2, ei, e->dest->succs)
1078 {
1079 int di = e2->dest->index;
1080
1081 if (e2->dest == EXIT_BLOCK_PTR
1082 || ((e2->flags & EDGE_CAN_FALLTHRU)
1083 && !(e2->flags & EDGE_COMPLEX)
1084 && bbd[di].start_of_trace >= 0
1085 && !connected[bbd[di].start_of_trace]
1086 && (BB_PARTITION (e2->dest) == current_partition)
1087 && (EDGE_FREQUENCY (e2) >= freq_threshold)
1088 && (e2->count >= count_threshold)
1089 && (!best2
1090 || e2->probability > best2->probability
1091 || (e2->probability == best2->probability
1092 && traces[bbd[di].start_of_trace].length
1093 > best2_len))))
1094 {
1095 best = e;
1096 best2 = e2;
1097 if (e2->dest != EXIT_BLOCK_PTR)
1098 best2_len = traces[bbd[di].start_of_trace].length;
1099 else
1100 best2_len = INT_MAX;
1101 next_bb = e2->dest;
1102 try_copy = true;
1103 }
1104 }
1105 }
1106
1107 if (flag_reorder_blocks_and_partition)
1108 try_copy = false;
1109
1110 /* Copy tiny blocks always; copy larger blocks only when the
1111 edge is traversed frequently enough. */
1112 if (try_copy
1113 && copy_bb_p (best->dest,
1114 optimize_edge_for_speed_p (best)
1115 && EDGE_FREQUENCY (best) >= freq_threshold
1116 && best->count >= count_threshold))
1117 {
1118 basic_block new_bb;
1119
1120 if (dump_file)
1121 {
1122 fprintf (dump_file, "Connection: %d %d ",
1123 traces[t].last->index, best->dest->index);
1124 if (!next_bb)
1125 fputc ('\n', dump_file);
1126 else if (next_bb == EXIT_BLOCK_PTR)
1127 fprintf (dump_file, "exit\n");
1128 else
1129 fprintf (dump_file, "%d\n", next_bb->index);
1130 }
1131
1132 new_bb = copy_bb (best->dest, best, traces[t].last, t);
1133 traces[t].last = new_bb;
1134 if (next_bb && next_bb != EXIT_BLOCK_PTR)
1135 {
1136 t = bbd[next_bb->index].start_of_trace;
1137 traces[last_trace].last->aux = traces[t].first;
1138 connected[t] = true;
1139 last_trace = t;
1140 }
1141 else
1142 break; /* Stop finding the successor traces. */
1143 }
1144 else
1145 break; /* Stop finding the successor traces. */
1146 }
1147 }
1148 }
1149
1150 if (dump_file)
1151 {
1152 basic_block bb;
1153
1154 fprintf (dump_file, "Final order:\n");
1155 for (bb = traces[0].first; bb; bb = (basic_block) bb->aux)
1156 fprintf (dump_file, "%d ", bb->index);
1157 fprintf (dump_file, "\n");
1158 fflush (dump_file);
1159 }
1160
1161 FREE (connected);
1162 }
1163
1164 /* Return true when BB can and should be copied. CODE_MAY_GROW is true
1165 when code size is allowed to grow by duplication. */
1166
1167 static bool
1168 copy_bb_p (const_basic_block bb, int code_may_grow)
1169 {
1170 int size = 0;
1171 int max_size = uncond_jump_length;
1172 rtx insn;
1173
1174 if (!bb->frequency)
1175 return false;
1176 if (EDGE_COUNT (bb->preds) < 2)
1177 return false;
1178 if (!can_duplicate_block_p (bb))
1179 return false;
1180
1181 /* Avoid duplicating blocks which have many successors (PR/13430). */
1182 if (EDGE_COUNT (bb->succs) > 8)
1183 return false;
1184
1185 if (code_may_grow && optimize_bb_for_speed_p (bb))
1186 max_size *= PARAM_VALUE (PARAM_MAX_GROW_COPY_BB_INSNS);
1187
1188 FOR_BB_INSNS (bb, insn)
1189 {
1190 if (INSN_P (insn))
1191 size += get_attr_min_length (insn);
1192 }
1193
1194 if (size <= max_size)
1195 return true;
1196
1197 if (dump_file)
1198 {
1199 fprintf (dump_file,
1200 "Block %d can't be copied because its size = %d.\n",
1201 bb->index, size);
1202 }
1203
1204 return false;
1205 }
1206
1207 /* Return the length of unconditional jump instruction. */
1208
1209 int
1210 get_uncond_jump_length (void)
1211 {
1212 rtx label, jump;
1213 int length;
1214
1215 label = emit_label_before (gen_label_rtx (), get_insns ());
1216 jump = emit_jump_insn (gen_jump (label));
1217
1218 length = get_attr_min_length (jump);
1219
1220 delete_insn (jump);
1221 delete_insn (label);
1222 return length;
1223 }
1224
1225 /* Emit a barrier into the footer of BB. */
1226
1227 static void
1228 emit_barrier_after_bb (basic_block bb)
1229 {
1230 rtx barrier = emit_barrier_after (BB_END (bb));
1231 BB_FOOTER (bb) = unlink_insn_chain (barrier, barrier);
1232 }
1233
1234 /* The landing pad OLD_LP, in block OLD_BB, has edges from both partitions.
1235 Duplicate the landing pad and split the edges so that no EH edge
1236 crosses partitions. */
1237
1238 static void
1239 fix_up_crossing_landing_pad (eh_landing_pad old_lp, basic_block old_bb)
1240 {
1241 eh_landing_pad new_lp;
1242 basic_block new_bb, last_bb, post_bb;
1243 rtx new_label, jump, post_label;
1244 unsigned new_partition;
1245 edge_iterator ei;
1246 edge e;
1247
1248 /* Generate the new landing-pad structure. */
1249 new_lp = gen_eh_landing_pad (old_lp->region);
1250 new_lp->post_landing_pad = old_lp->post_landing_pad;
1251 new_lp->landing_pad = gen_label_rtx ();
1252 LABEL_PRESERVE_P (new_lp->landing_pad) = 1;
1253
1254 /* Put appropriate instructions in new bb. */
1255 new_label = emit_label (new_lp->landing_pad);
1256
1257 expand_dw2_landing_pad_for_region (old_lp->region);
1258
1259 post_bb = BLOCK_FOR_INSN (old_lp->landing_pad);
1260 post_bb = single_succ (post_bb);
1261 post_label = block_label (post_bb);
1262 jump = emit_jump_insn (gen_jump (post_label));
1263 JUMP_LABEL (jump) = post_label;
1264
1265 /* Create new basic block to be dest for lp. */
1266 last_bb = EXIT_BLOCK_PTR->prev_bb;
1267 new_bb = create_basic_block (new_label, jump, last_bb);
1268 new_bb->aux = last_bb->aux;
1269 last_bb->aux = new_bb;
1270
1271 emit_barrier_after_bb (new_bb);
1272
1273 make_edge (new_bb, post_bb, 0);
1274
1275 /* Make sure new bb is in the other partition. */
1276 new_partition = BB_PARTITION (old_bb);
1277 new_partition ^= BB_HOT_PARTITION | BB_COLD_PARTITION;
1278 BB_SET_PARTITION (new_bb, new_partition);
1279
1280 /* Fix up the edges. */
1281 for (ei = ei_start (old_bb->preds); (e = ei_safe_edge (ei)) != NULL; )
1282 if (BB_PARTITION (e->src) == new_partition)
1283 {
1284 rtx insn = BB_END (e->src);
1285 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
1286
1287 gcc_assert (note != NULL);
1288 gcc_checking_assert (INTVAL (XEXP (note, 0)) == old_lp->index);
1289 XEXP (note, 0) = GEN_INT (new_lp->index);
1290
1291 /* Adjust the edge to the new destination. */
1292 redirect_edge_succ (e, new_bb);
1293 }
1294 else
1295 ei_next (&ei);
1296 }
1297
1298 /* Find the basic blocks that are rarely executed and need to be moved to
1299 a separate section of the .o file (to cut down on paging and improve
1300 cache locality). Return a vector of all edges that cross. */
1301
1302 static VEC(edge, heap) *
1303 find_rarely_executed_basic_blocks_and_crossing_edges (void)
1304 {
1305 VEC(edge, heap) *crossing_edges = NULL;
1306 basic_block bb;
1307 edge e;
1308 edge_iterator ei;
1309
1310 /* Mark which partition (hot/cold) each basic block belongs in. */
1311 FOR_EACH_BB (bb)
1312 {
1313 if (probably_never_executed_bb_p (bb))
1314 BB_SET_PARTITION (bb, BB_COLD_PARTITION);
1315 else
1316 BB_SET_PARTITION (bb, BB_HOT_PARTITION);
1317 }
1318
1319 /* The format of .gcc_except_table does not allow landing pads to
1320 be in a different partition from the throw. Fix this by either
1321 moving or duplicating the landing pads. */
1322 if (cfun->eh->lp_array)
1323 {
1324 unsigned i;
1325 eh_landing_pad lp;
1326
1327 FOR_EACH_VEC_ELT (eh_landing_pad, cfun->eh->lp_array, i, lp)
1328 {
1329 bool all_same, all_diff;
1330
1331 if (lp == NULL
1332 || lp->landing_pad == NULL_RTX
1333 || !LABEL_P (lp->landing_pad))
1334 continue;
1335
1336 all_same = all_diff = true;
1337 bb = BLOCK_FOR_INSN (lp->landing_pad);
1338 FOR_EACH_EDGE (e, ei, bb->preds)
1339 {
1340 gcc_assert (e->flags & EDGE_EH);
1341 if (BB_PARTITION (bb) == BB_PARTITION (e->src))
1342 all_diff = false;
1343 else
1344 all_same = false;
1345 }
1346
1347 if (all_same)
1348 ;
1349 else if (all_diff)
1350 {
1351 int which = BB_PARTITION (bb);
1352 which ^= BB_HOT_PARTITION | BB_COLD_PARTITION;
1353 BB_SET_PARTITION (bb, which);
1354 }
1355 else
1356 fix_up_crossing_landing_pad (lp, bb);
1357 }
1358 }
1359
1360 /* Mark every edge that crosses between sections. */
1361
1362 FOR_EACH_BB (bb)
1363 FOR_EACH_EDGE (e, ei, bb->succs)
1364 {
1365 unsigned int flags = e->flags;
1366
1367 /* We should never have EDGE_CROSSING set yet. */
1368 gcc_checking_assert ((flags & EDGE_CROSSING) == 0);
1369
1370 if (e->src != ENTRY_BLOCK_PTR
1371 && e->dest != EXIT_BLOCK_PTR
1372 && BB_PARTITION (e->src) != BB_PARTITION (e->dest))
1373 {
1374 VEC_safe_push (edge, heap, crossing_edges, e);
1375 flags |= EDGE_CROSSING;
1376 }
1377
1378 /* Now that we've split eh edges as appropriate, allow landing pads
1379 to be merged with the post-landing pads. */
1380 flags &= ~EDGE_PRESERVE;
1381
1382 e->flags = flags;
1383 }
1384
1385 return crossing_edges;
1386 }
1387
1388 /* If any destination of a crossing edge does not have a label, add a label;
1389 convert any easy fall-through crossing edges to unconditional jumps. */
1390
1391 static void
1392 add_labels_and_missing_jumps (VEC(edge, heap) *crossing_edges)
1393 {
1394 size_t i;
1395 edge e;
1396
1397 FOR_EACH_VEC_ELT (edge, crossing_edges, i, e)
1398 {
1399 basic_block src = e->src;
1400 basic_block dest = e->dest;
1401 rtx label, new_jump;
1402
1403 if (dest == EXIT_BLOCK_PTR)
1404 continue;
1405
1406 /* Make sure dest has a label. */
1407 label = block_label (dest);
1408
1409 /* Nothing to do for non-fallthru edges. */
1410 if (src == ENTRY_BLOCK_PTR)
1411 continue;
1412 if ((e->flags & EDGE_FALLTHRU) == 0)
1413 continue;
1414
1415 /* If the block does not end with a control flow insn, then we
1416 can trivially add a jump to the end to fixup the crossing.
1417 Otherwise the jump will have to go in a new bb, which will
1418 be handled by fix_up_fall_thru_edges function. */
1419 if (control_flow_insn_p (BB_END (src)))
1420 continue;
1421
1422 /* Make sure there's only one successor. */
1423 gcc_assert (single_succ_p (src));
1424
1425 new_jump = emit_jump_insn_after (gen_jump (label), BB_END (src));
1426 BB_END (src) = new_jump;
1427 JUMP_LABEL (new_jump) = label;
1428 LABEL_NUSES (label) += 1;
1429
1430 emit_barrier_after_bb (src);
1431
1432 /* Mark edge as non-fallthru. */
1433 e->flags &= ~EDGE_FALLTHRU;
1434 }
1435 }
1436
1437 /* Find any bb's where the fall-through edge is a crossing edge (note that
1438 these bb's must also contain a conditional jump or end with a call
1439 instruction; we've already dealt with fall-through edges for blocks
1440 that didn't have a conditional jump or didn't end with call instruction
1441 in the call to add_labels_and_missing_jumps). Convert the fall-through
1442 edge to non-crossing edge by inserting a new bb to fall-through into.
1443 The new bb will contain an unconditional jump (crossing edge) to the
1444 original fall through destination. */
1445
1446 static void
1447 fix_up_fall_thru_edges (void)
1448 {
1449 basic_block cur_bb;
1450 basic_block new_bb;
1451 edge succ1;
1452 edge succ2;
1453 edge fall_thru;
1454 edge cond_jump = NULL;
1455 edge e;
1456 bool cond_jump_crosses;
1457 int invert_worked;
1458 rtx old_jump;
1459 rtx fall_thru_label;
1460
1461 FOR_EACH_BB (cur_bb)
1462 {
1463 fall_thru = NULL;
1464 if (EDGE_COUNT (cur_bb->succs) > 0)
1465 succ1 = EDGE_SUCC (cur_bb, 0);
1466 else
1467 succ1 = NULL;
1468
1469 if (EDGE_COUNT (cur_bb->succs) > 1)
1470 succ2 = EDGE_SUCC (cur_bb, 1);
1471 else
1472 succ2 = NULL;
1473
1474 /* Find the fall-through edge. */
1475
1476 if (succ1
1477 && (succ1->flags & EDGE_FALLTHRU))
1478 {
1479 fall_thru = succ1;
1480 cond_jump = succ2;
1481 }
1482 else if (succ2
1483 && (succ2->flags & EDGE_FALLTHRU))
1484 {
1485 fall_thru = succ2;
1486 cond_jump = succ1;
1487 }
1488 else if (succ1
1489 && (block_ends_with_call_p (cur_bb)
1490 || can_throw_internal (BB_END (cur_bb))))
1491 {
1492 edge e;
1493 edge_iterator ei;
1494
1495 /* Find EDGE_CAN_FALLTHRU edge. */
1496 FOR_EACH_EDGE (e, ei, cur_bb->succs)
1497 if (e->flags & EDGE_CAN_FALLTHRU)
1498 {
1499 fall_thru = e;
1500 break;
1501 }
1502 }
1503
1504 if (fall_thru && (fall_thru->dest != EXIT_BLOCK_PTR))
1505 {
1506 /* Check to see if the fall-thru edge is a crossing edge. */
1507
1508 if (fall_thru->flags & EDGE_CROSSING)
1509 {
1510 /* The fall_thru edge crosses; now check the cond jump edge, if
1511 it exists. */
1512
1513 cond_jump_crosses = true;
1514 invert_worked = 0;
1515 old_jump = BB_END (cur_bb);
1516
1517 /* Find the jump instruction, if there is one. */
1518
1519 if (cond_jump)
1520 {
1521 if (!(cond_jump->flags & EDGE_CROSSING))
1522 cond_jump_crosses = false;
1523
1524 /* We know the fall-thru edge crosses; if the cond
1525 jump edge does NOT cross, and its destination is the
1526 next block in the bb order, invert the jump
1527 (i.e. fix it so the fall through does not cross and
1528 the cond jump does). */
1529
1530 if (!cond_jump_crosses
1531 && cur_bb->aux == cond_jump->dest)
1532 {
1533 /* Find label in fall_thru block. We've already added
1534 any missing labels, so there must be one. */
1535
1536 fall_thru_label = block_label (fall_thru->dest);
1537
1538 if (old_jump && JUMP_P (old_jump) && fall_thru_label)
1539 invert_worked = invert_jump (old_jump,
1540 fall_thru_label,0);
1541 if (invert_worked)
1542 {
1543 fall_thru->flags &= ~EDGE_FALLTHRU;
1544 cond_jump->flags |= EDGE_FALLTHRU;
1545 update_br_prob_note (cur_bb);
1546 e = fall_thru;
1547 fall_thru = cond_jump;
1548 cond_jump = e;
1549 cond_jump->flags |= EDGE_CROSSING;
1550 fall_thru->flags &= ~EDGE_CROSSING;
1551 }
1552 }
1553 }
1554
1555 if (cond_jump_crosses || !invert_worked)
1556 {
1557 /* This is the case where both edges out of the basic
1558 block are crossing edges. Here we will fix up the
1559 fall through edge. The jump edge will be taken care
1560 of later. The EDGE_CROSSING flag of fall_thru edge
1561 is unset before the call to force_nonfallthru
1562 function because if a new basic-block is created
1563 this edge remains in the current section boundary
1564 while the edge between new_bb and the fall_thru->dest
1565 becomes EDGE_CROSSING. */
1566
1567 fall_thru->flags &= ~EDGE_CROSSING;
1568 new_bb = force_nonfallthru (fall_thru);
1569
1570 if (new_bb)
1571 {
1572 new_bb->aux = cur_bb->aux;
1573 cur_bb->aux = new_bb;
1574
1575 /* Make sure new fall-through bb is in same
1576 partition as bb it's falling through from. */
1577
1578 BB_COPY_PARTITION (new_bb, cur_bb);
1579 single_succ_edge (new_bb)->flags |= EDGE_CROSSING;
1580 }
1581 else
1582 {
1583 /* If a new basic-block was not created; restore
1584 the EDGE_CROSSING flag. */
1585 fall_thru->flags |= EDGE_CROSSING;
1586 }
1587
1588 /* Add barrier after new jump */
1589 emit_barrier_after_bb (new_bb ? new_bb : cur_bb);
1590 }
1591 }
1592 }
1593 }
1594 }
1595
1596 /* This function checks the destination block of a "crossing jump" to
1597 see if it has any crossing predecessors that begin with a code label
1598 and end with an unconditional jump. If so, it returns that predecessor
1599 block. (This is to avoid creating lots of new basic blocks that all
1600 contain unconditional jumps to the same destination). */
1601
1602 static basic_block
1603 find_jump_block (basic_block jump_dest)
1604 {
1605 basic_block source_bb = NULL;
1606 edge e;
1607 rtx insn;
1608 edge_iterator ei;
1609
1610 FOR_EACH_EDGE (e, ei, jump_dest->preds)
1611 if (e->flags & EDGE_CROSSING)
1612 {
1613 basic_block src = e->src;
1614
1615 /* Check each predecessor to see if it has a label, and contains
1616 only one executable instruction, which is an unconditional jump.
1617 If so, we can use it. */
1618
1619 if (LABEL_P (BB_HEAD (src)))
1620 for (insn = BB_HEAD (src);
1621 !INSN_P (insn) && insn != NEXT_INSN (BB_END (src));
1622 insn = NEXT_INSN (insn))
1623 {
1624 if (INSN_P (insn)
1625 && insn == BB_END (src)
1626 && JUMP_P (insn)
1627 && !any_condjump_p (insn))
1628 {
1629 source_bb = src;
1630 break;
1631 }
1632 }
1633
1634 if (source_bb)
1635 break;
1636 }
1637
1638 return source_bb;
1639 }
1640
1641 /* Find all BB's with conditional jumps that are crossing edges;
1642 insert a new bb and make the conditional jump branch to the new
1643 bb instead (make the new bb the same color so the conditional branch won't
1644 be a 'crossing' edge). Insert an unconditional jump from the
1645 new bb to the original destination of the conditional jump. */
1646
1647 static void
1648 fix_crossing_conditional_branches (void)
1649 {
1650 basic_block cur_bb;
1651 basic_block new_bb;
1652 basic_block dest;
1653 edge succ1;
1654 edge succ2;
1655 edge crossing_edge;
1656 edge new_edge;
1657 rtx old_jump;
1658 rtx set_src;
1659 rtx old_label = NULL_RTX;
1660 rtx new_label;
1661
1662 FOR_EACH_BB (cur_bb)
1663 {
1664 crossing_edge = NULL;
1665 if (EDGE_COUNT (cur_bb->succs) > 0)
1666 succ1 = EDGE_SUCC (cur_bb, 0);
1667 else
1668 succ1 = NULL;
1669
1670 if (EDGE_COUNT (cur_bb->succs) > 1)
1671 succ2 = EDGE_SUCC (cur_bb, 1);
1672 else
1673 succ2 = NULL;
1674
1675 /* We already took care of fall-through edges, so only one successor
1676 can be a crossing edge. */
1677
1678 if (succ1 && (succ1->flags & EDGE_CROSSING))
1679 crossing_edge = succ1;
1680 else if (succ2 && (succ2->flags & EDGE_CROSSING))
1681 crossing_edge = succ2;
1682
1683 if (crossing_edge)
1684 {
1685 old_jump = BB_END (cur_bb);
1686
1687 /* Check to make sure the jump instruction is a
1688 conditional jump. */
1689
1690 set_src = NULL_RTX;
1691
1692 if (any_condjump_p (old_jump))
1693 {
1694 if (GET_CODE (PATTERN (old_jump)) == SET)
1695 set_src = SET_SRC (PATTERN (old_jump));
1696 else if (GET_CODE (PATTERN (old_jump)) == PARALLEL)
1697 {
1698 set_src = XVECEXP (PATTERN (old_jump), 0,0);
1699 if (GET_CODE (set_src) == SET)
1700 set_src = SET_SRC (set_src);
1701 else
1702 set_src = NULL_RTX;
1703 }
1704 }
1705
1706 if (set_src && (GET_CODE (set_src) == IF_THEN_ELSE))
1707 {
1708 if (GET_CODE (XEXP (set_src, 1)) == PC)
1709 old_label = XEXP (set_src, 2);
1710 else if (GET_CODE (XEXP (set_src, 2)) == PC)
1711 old_label = XEXP (set_src, 1);
1712
1713 /* Check to see if new bb for jumping to that dest has
1714 already been created; if so, use it; if not, create
1715 a new one. */
1716
1717 new_bb = find_jump_block (crossing_edge->dest);
1718
1719 if (new_bb)
1720 new_label = block_label (new_bb);
1721 else
1722 {
1723 basic_block last_bb;
1724 rtx new_jump;
1725
1726 /* Create new basic block to be dest for
1727 conditional jump. */
1728
1729 /* Put appropriate instructions in new bb. */
1730
1731 new_label = gen_label_rtx ();
1732 emit_label (new_label);
1733
1734 gcc_assert (GET_CODE (old_label) == LABEL_REF);
1735 old_label = JUMP_LABEL (old_jump);
1736 new_jump = emit_jump_insn (gen_jump (old_label));
1737 JUMP_LABEL (new_jump) = old_label;
1738
1739 last_bb = EXIT_BLOCK_PTR->prev_bb;
1740 new_bb = create_basic_block (new_label, new_jump, last_bb);
1741 new_bb->aux = last_bb->aux;
1742 last_bb->aux = new_bb;
1743
1744 emit_barrier_after_bb (new_bb);
1745
1746 /* Make sure new bb is in same partition as source
1747 of conditional branch. */
1748 BB_COPY_PARTITION (new_bb, cur_bb);
1749 }
1750
1751 /* Make old jump branch to new bb. */
1752
1753 redirect_jump (old_jump, new_label, 0);
1754
1755 /* Remove crossing_edge as predecessor of 'dest'. */
1756
1757 dest = crossing_edge->dest;
1758
1759 redirect_edge_succ (crossing_edge, new_bb);
1760
1761 /* Make a new edge from new_bb to old dest; new edge
1762 will be a successor for new_bb and a predecessor
1763 for 'dest'. */
1764
1765 if (EDGE_COUNT (new_bb->succs) == 0)
1766 new_edge = make_edge (new_bb, dest, 0);
1767 else
1768 new_edge = EDGE_SUCC (new_bb, 0);
1769
1770 crossing_edge->flags &= ~EDGE_CROSSING;
1771 new_edge->flags |= EDGE_CROSSING;
1772 }
1773 }
1774 }
1775 }
1776
1777 /* Find any unconditional branches that cross between hot and cold
1778 sections. Convert them into indirect jumps instead. */
1779
1780 static void
1781 fix_crossing_unconditional_branches (void)
1782 {
1783 basic_block cur_bb;
1784 rtx last_insn;
1785 rtx label;
1786 rtx label_addr;
1787 rtx indirect_jump_sequence;
1788 rtx jump_insn = NULL_RTX;
1789 rtx new_reg;
1790 rtx cur_insn;
1791 edge succ;
1792
1793 FOR_EACH_BB (cur_bb)
1794 {
1795 last_insn = BB_END (cur_bb);
1796
1797 if (EDGE_COUNT (cur_bb->succs) < 1)
1798 continue;
1799
1800 succ = EDGE_SUCC (cur_bb, 0);
1801
1802 /* Check to see if bb ends in a crossing (unconditional) jump. At
1803 this point, no crossing jumps should be conditional. */
1804
1805 if (JUMP_P (last_insn)
1806 && (succ->flags & EDGE_CROSSING))
1807 {
1808 rtx label2, table;
1809
1810 gcc_assert (!any_condjump_p (last_insn));
1811
1812 /* Make sure the jump is not already an indirect or table jump. */
1813
1814 if (!computed_jump_p (last_insn)
1815 && !tablejump_p (last_insn, &label2, &table))
1816 {
1817 /* We have found a "crossing" unconditional branch. Now
1818 we must convert it to an indirect jump. First create
1819 reference of label, as target for jump. */
1820
1821 label = JUMP_LABEL (last_insn);
1822 label_addr = gen_rtx_LABEL_REF (Pmode, label);
1823 LABEL_NUSES (label) += 1;
1824
1825 /* Get a register to use for the indirect jump. */
1826
1827 new_reg = gen_reg_rtx (Pmode);
1828
1829 /* Generate the indirect jump sequence. */
1830
1831 start_sequence ();
1832 emit_move_insn (new_reg, label_addr);
1833 emit_indirect_jump (new_reg);
1834 indirect_jump_sequence = get_insns ();
1835 end_sequence ();
1836
1837 /* Make sure every instruction in the new jump sequence has
1838 its basic block set to be cur_bb. */
1839
1840 for (cur_insn = indirect_jump_sequence; cur_insn;
1841 cur_insn = NEXT_INSN (cur_insn))
1842 {
1843 if (!BARRIER_P (cur_insn))
1844 BLOCK_FOR_INSN (cur_insn) = cur_bb;
1845 if (JUMP_P (cur_insn))
1846 jump_insn = cur_insn;
1847 }
1848
1849 /* Insert the new (indirect) jump sequence immediately before
1850 the unconditional jump, then delete the unconditional jump. */
1851
1852 emit_insn_before (indirect_jump_sequence, last_insn);
1853 delete_insn (last_insn);
1854
1855 /* Make BB_END for cur_bb be the jump instruction (NOT the
1856 barrier instruction at the end of the sequence...). */
1857
1858 BB_END (cur_bb) = jump_insn;
1859 }
1860 }
1861 }
1862 }
1863
1864 /* Add REG_CROSSING_JUMP note to all crossing jump insns. */
1865
1866 static void
1867 add_reg_crossing_jump_notes (void)
1868 {
1869 basic_block bb;
1870 edge e;
1871 edge_iterator ei;
1872
1873 FOR_EACH_BB (bb)
1874 FOR_EACH_EDGE (e, ei, bb->succs)
1875 if ((e->flags & EDGE_CROSSING)
1876 && JUMP_P (BB_END (e->src)))
1877 add_reg_note (BB_END (e->src), REG_CROSSING_JUMP, NULL_RTX);
1878 }
1879
1880 /* Verify, in the basic block chain, that there is at most one switch
1881 between hot/cold partitions. This is modelled on
1882 rtl_verify_flow_info_1, but it cannot go inside that function
1883 because this condition will not be true until after
1884 reorder_basic_blocks is called. */
1885
1886 static void
1887 verify_hot_cold_block_grouping (void)
1888 {
1889 basic_block bb;
1890 int err = 0;
1891 bool switched_sections = false;
1892 int current_partition = 0;
1893
1894 FOR_EACH_BB (bb)
1895 {
1896 if (!current_partition)
1897 current_partition = BB_PARTITION (bb);
1898 if (BB_PARTITION (bb) != current_partition)
1899 {
1900 if (switched_sections)
1901 {
1902 error ("multiple hot/cold transitions found (bb %i)",
1903 bb->index);
1904 err = 1;
1905 }
1906 else
1907 {
1908 switched_sections = true;
1909 current_partition = BB_PARTITION (bb);
1910 }
1911 }
1912 }
1913
1914 gcc_assert(!err);
1915 }
1916
1917 /* Reorder basic blocks. The main entry point to this file. */
1919
1920 static void
1921 reorder_basic_blocks (void)
1922 {
1923 int n_traces;
1924 int i;
1925 struct trace *traces;
1926
1927 gcc_assert (current_ir_type () == IR_RTL_CFGLAYOUT);
1928
1929 if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1)
1930 return;
1931
1932 set_edge_can_fallthru_flag ();
1933 mark_dfs_back_edges ();
1934
1935 /* We are estimating the length of uncond jump insn only once since the code
1936 for getting the insn length always returns the minimal length now. */
1937 if (uncond_jump_length == 0)
1938 uncond_jump_length = get_uncond_jump_length ();
1939
1940 /* We need to know some information for each basic block. */
1941 array_size = GET_ARRAY_SIZE (last_basic_block);
1942 bbd = XNEWVEC (bbro_basic_block_data, array_size);
1943 for (i = 0; i < array_size; i++)
1944 {
1945 bbd[i].start_of_trace = -1;
1946 bbd[i].end_of_trace = -1;
1947 bbd[i].in_trace = -1;
1948 bbd[i].visited = 0;
1949 bbd[i].heap = NULL;
1950 bbd[i].node = NULL;
1951 }
1952
1953 traces = XNEWVEC (struct trace, n_basic_blocks);
1954 n_traces = 0;
1955 find_traces (&n_traces, traces);
1956 connect_traces (n_traces, traces);
1957 FREE (traces);
1958 FREE (bbd);
1959
1960 relink_block_chain (/*stay_in_cfglayout_mode=*/true);
1961
1962 if (dump_file)
1963 dump_flow_info (dump_file, dump_flags);
1964
1965 if (flag_reorder_blocks_and_partition)
1966 verify_hot_cold_block_grouping ();
1967 }
1968
1969 /* Determine which partition the first basic block in the function
1970 belongs to, then find the first basic block in the current function
1971 that belongs to a different section, and insert a
1972 NOTE_INSN_SWITCH_TEXT_SECTIONS note immediately before it in the
1973 instruction stream. When the assembly code is written out,
1974 encountering this note makes the compiler switch between the
1975 hot and cold text sections. */
1976
1977 static void
1978 insert_section_boundary_note (void)
1979 {
1980 basic_block bb;
1981 rtx new_note;
1982 int first_partition = 0;
1983
1984 if (!flag_reorder_blocks_and_partition)
1985 return;
1986
1987 FOR_EACH_BB (bb)
1988 {
1989 if (!first_partition)
1990 first_partition = BB_PARTITION (bb);
1991 if (BB_PARTITION (bb) != first_partition)
1992 {
1993 new_note = emit_note_before (NOTE_INSN_SWITCH_TEXT_SECTIONS,
1994 BB_HEAD (bb));
1995 /* ??? This kind of note always lives between basic blocks,
1996 but add_insn_before will set BLOCK_FOR_INSN anyway. */
1997 BLOCK_FOR_INSN (new_note) = NULL;
1998 break;
1999 }
2000 }
2001 }
2002
2003 /* Duplicate the blocks containing computed gotos. This basically unfactors
2004 computed gotos that were factored early on in the compilation process to
2005 speed up edge based data flow. We used to not unfactor them again,
2006 which can seriously pessimize code with many computed jumps in the source
2007 code, such as interpreters. See e.g. PR15242. */
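
/* As an illustrative sketch only (the opcode labels and variable names
   below are invented, not taken from this file): a threaded interpreter
   typically ends every handler with its own computed goto,

       op_add: ...; goto *dispatch_table[*pc++];
       op_sub: ...; goto *dispatch_table[*pc++];

   which early CFG construction factors into one shared dispatch block,

       op_add: ...; target = dispatch_table[*pc++]; goto dispatch;
       op_sub: ...; target = dispatch_table[*pc++]; goto dispatch;
       dispatch: goto *target;

   This pass copies the small "dispatch" block back into each
   predecessor, so every handler once again ends in its own indirect
   jump and the branch predictor sees distinct jump sites.  */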
2008
2009 static bool
2010 gate_duplicate_computed_gotos (void)
2011 {
2012 if (targetm.cannot_modify_jumps_p ())
2013 return false;
2014 return (optimize > 0
2015 && flag_expensive_optimizations
2016 && ! optimize_function_for_size_p (cfun));
2017 }
2018
2019
2020 static unsigned int
2021 duplicate_computed_gotos (void)
2022 {
2023 basic_block bb, new_bb;
2024 bitmap candidates;
2025 int max_size;
2026
2027 if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1)
2028 return 0;
2029
2030 clear_bb_flags ();
2031 cfg_layout_initialize (0);
2032
2033 /* We estimate the length of the unconditional jump insn only once,
2034 since the code for computing insn lengths now always returns
2035 the minimal length. */
2036 if (uncond_jump_length == 0)
2037 uncond_jump_length = get_uncond_jump_length ();
2038
2039 max_size = uncond_jump_length * PARAM_VALUE (PARAM_MAX_GOTO_DUPLICATION_INSNS);
2040 candidates = BITMAP_ALLOC (NULL);
2041
2042 /* Look for blocks that end in a computed jump, and see if such blocks
2043 are suitable for unfactoring. If a block is a candidate for unfactoring,
2044 mark it in the candidates. */
2045 FOR_EACH_BB (bb)
2046 {
2047 rtx insn;
2048 edge e;
2049 edge_iterator ei;
2050 int size, all_flags;
2051
2052 /* Build the reorder chain for the original order of blocks. */
2053 if (bb->next_bb != EXIT_BLOCK_PTR)
2054 bb->aux = bb->next_bb;
2055
2056 /* Obviously the block has to end in a computed jump. */
2057 if (!computed_jump_p (BB_END (bb)))
2058 continue;
2059
2060 /* Only consider blocks that can be duplicated. */
2061 if (find_reg_note (BB_END (bb), REG_CROSSING_JUMP, NULL_RTX)
2062 || !can_duplicate_block_p (bb))
2063 continue;
2064
2065 /* Make sure that the block is small enough. */
2066 size = 0;
2067 FOR_BB_INSNS (bb, insn)
2068 if (INSN_P (insn))
2069 {
2070 size += get_attr_min_length (insn);
2071 if (size > max_size)
2072 break;
2073 }
2074 if (size > max_size)
2075 continue;
2076
2077 /* Final check: there must not be any incoming abnormal edges. */
2078 all_flags = 0;
2079 FOR_EACH_EDGE (e, ei, bb->preds)
2080 all_flags |= e->flags;
2081 if (all_flags & EDGE_COMPLEX)
2082 continue;
2083
2084 bitmap_set_bit (candidates, bb->index);
2085 }
2086
2087 /* Nothing to do if there is no computed jump here. */
2088 if (bitmap_empty_p (candidates))
2089 goto done;
2090
2091 /* Duplicate computed gotos. */
2092 FOR_EACH_BB (bb)
2093 {
2094 if (bb->flags & BB_VISITED)
2095 continue;
2096
2097 bb->flags |= BB_VISITED;
2098
2099 /* BB must have one outgoing edge. That edge must not lead to
2100 the exit block or the next block.
2101 The destination must have more than one predecessor. */
2102 if (!single_succ_p (bb)
2103 || single_succ (bb) == EXIT_BLOCK_PTR
2104 || single_succ (bb) == bb->next_bb
2105 || single_pred_p (single_succ (bb)))
2106 continue;
2107
2108 /* The successor block has to be a duplication candidate. */
2109 if (!bitmap_bit_p (candidates, single_succ (bb)->index))
2110 continue;
2111
2112 new_bb = duplicate_block (single_succ (bb), single_succ_edge (bb), bb);
2113 new_bb->aux = bb->aux;
2114 bb->aux = new_bb;
2115 new_bb->flags |= BB_VISITED;
2116 }
2117
2118 done:
2119 cfg_layout_finalize ();
2120
2121 BITMAP_FREE (candidates);
2122 return 0;
2123 }
2124
2125 struct rtl_opt_pass pass_duplicate_computed_gotos =
2126 {
2127 {
2128 RTL_PASS,
2129 "compgotos", /* name */
2130 gate_duplicate_computed_gotos, /* gate */
2131 duplicate_computed_gotos, /* execute */
2132 NULL, /* sub */
2133 NULL, /* next */
2134 0, /* static_pass_number */
2135 TV_REORDER_BLOCKS, /* tv_id */
2136 0, /* properties_required */
2137 0, /* properties_provided */
2138 0, /* properties_destroyed */
2139 0, /* todo_flags_start */
2140 TODO_verify_rtl_sharing,/* todo_flags_finish */
2141 }
2142 };
2143
2144
2145 /* This function is the main entry point for the optimization that
2146 partitions hot and cold basic blocks into separate sections of the
2147 .o file (to improve performance and cache locality). Ideally it
2148 would run after all optimizations that rearrange the CFG have
2149 completed. However, part of this optimization may introduce new
2150 register usage, so it must run before register allocation. This
2151 means that this optimization actually runs well before the
2152 optimization that reorders basic blocks (see the function above).
2154
2155 This optimization checks the feedback information to determine
2156 which basic blocks are hot/cold and updates flags on the basic blocks
2157 to indicate which section they belong in. This information is
2158 later used for writing out sections in the .o file. Because hot
2159 and cold sections can be arbitrarily large (within the bounds of
2160 memory), far beyond the size of a single function, it is necessary
2161 to fix up all edges that cross section boundaries, to make sure the
2162 instructions used can actually span the required distance. The
2163 fixes are described below.
2164
2165 Fall-through edges must be changed into jumps; it is not safe or
2166 legal to fall through across a section boundary. Whenever a
2167 fall-through edge crossing a section boundary is encountered, a new
2168 basic block is inserted (in the same section as the fall-through
2169 source), and the fall through edge is redirected to the new basic
2170 block. The new basic block contains an unconditional jump to the
2171 original fall-through target. (If the unconditional jump is
2172 insufficient to cross section boundaries, that is dealt with a
2173 little later, see below).
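
   As a sketch (block numbers and label names below are invented for
   illustration; they are not produced by this code):

       bb 3 (hot):   if (cond) goto L2;
                     ;; would fall through into the cold section

   becomes

       bb 3 (hot):   if (cond) goto L2;
                     ;; now falls through to the new hot block below
       bb 7 (hot):   goto L_cold;          ;; new block, same section
       bb 5 (cold):  L_cold: ...

   so the only instruction that actually crosses the section boundary
   is the unconditional jump in the new block.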
2174
2175 In order to deal with architectures that have short conditional
2176 branches (which cannot span all of memory) we take any conditional
2177 jump that attempts to cross a section boundary and add a level of
2178 indirection: it becomes a conditional jump to a new basic block, in
2179 the same section. The new basic block contains an unconditional
2180 jump to the original target, in the other section.
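
   Schematically (again just a sketch with invented labels):

       ;; before: a short conditional branch tries to cross sections
       bb 4 (hot):   if (cond) goto L_cold;

       ;; after: the conditional branch stays within its own section
       bb 4 (hot):   if (cond) goto L_new;
       bb 8 (hot):   L_new: goto L_cold;

   The limited-range conditional branch now only needs to reach a block
   in the same section, while the unconditional jump (fixed up further
   below if it too cannot span all of memory) does the actual crossing.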
2181
2182 For those architectures whose unconditional branch is also
2183 incapable of reaching all of memory, those unconditional jumps are
2184 converted into indirect jumps, through a register.
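
   Roughly (a sketch using GNU C's address-of-label extension; the
   names are invented):

       ;; before: a direct jump that may not reach the other section
       goto L_cold;

       ;; after: load the target address into a register and jump
       ;; through the register, which can reach anywhere in memory
       target = &&L_cold;
       goto *target;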
2185
2186 IMPORTANT NOTE: This optimization causes some messy interactions
2187 with the cfg cleanup optimizations; those optimizations want to
2188 merge blocks wherever possible, and to collapse indirect jump
2189 sequences (change "A jumps to B jumps to C" directly into "A jumps
2190 to C"). Those optimizations can undo the jump fixes that
2191 partitioning is required to make (see above), in order to ensure
2192 that jumps attempting to cross section boundaries are really able
2193 to cover whatever distance the jump requires (on many architectures
2194 conditional or unconditional jumps are not able to reach all of
2195 memory). Therefore tests have to be inserted into each such
2196 optimization to make sure that it does not undo the fixes necessary
2197 for crossing partition boundaries. This would be much less of a problem
2198 if we could perform this optimization later in the compilation, but
2199 unfortunately the fact that we may need to create indirect jumps
2200 (through registers) requires that this optimization be performed
2201 before register allocation.
2202
2203 Hot and cold basic blocks are partitioned and put in separate
2204 sections of the .o file, to reduce paging and improve cache
2205 performance (hopefully). This can result in bits of code from the
2206 same function being widely separated in the .o file. However, this
2207 separation is not visible to the current bb structure. Therefore we must take
2208 care to ensure that: 1). There are no fall_thru edges that cross
2209 between sections; 2). For those architectures which have "short"
2210 conditional branches, all conditional branches that attempt to
2211 cross between sections are converted to unconditional branches;
2212 and, 3). For those architectures which have "short" unconditional
2213 branches, all unconditional branches that attempt to cross between
2214 sections are converted to indirect jumps.
2215
2216 The code for fixing up fall_thru edges that cross between hot and
2217 cold basic blocks does so by creating new basic blocks containing
2218 unconditional branches to the appropriate label in the "other"
2219 section. The new basic block is then put in the same (hot or cold)
2220 section as the original conditional branch, and the fall_thru edge
2221 is modified to fall into the new basic block instead. By adding
2222 this level of indirection we end up with only unconditional branches
2223 crossing between hot and cold sections.
2224
2225 Conditional branches are dealt with by adding a level of indirection.
2226 A new basic block is added in the same (hot/cold) section as the
2227 conditional branch, and the conditional branch is retargeted to the
2228 new basic block. The new basic block contains an unconditional branch
2229 to the original target of the conditional branch (in the other section).
2230
2231 Unconditional branches are dealt with by converting them into
2232 indirect jumps. */
2233
2234 static unsigned
2235 partition_hot_cold_basic_blocks (void)
2236 {
2237 VEC(edge, heap) *crossing_edges;
2238
2239 if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1)
2240 return 0;
2241
2242 df_set_flags (DF_DEFER_INSN_RESCAN);
2243
2244 crossing_edges = find_rarely_executed_basic_blocks_and_crossing_edges ();
2245 if (crossing_edges == NULL)
2246 return 0;
2247
2248 /* Make sure the source of any crossing edge ends in a jump and the
2249 destination of any crossing edge has a label. */
2250 add_labels_and_missing_jumps (crossing_edges);
2251
2252 /* Convert all crossing fall_thru edges into non-crossing fall
2253 thrus followed by unconditional jumps (that jump to the original
2254 fall through dest). */
2255 fix_up_fall_thru_edges ();
2256
2257 /* If the architecture does not have conditional branches that can
2258 span all of memory, convert crossing conditional branches into
2259 crossing unconditional branches. */
2260 if (!HAS_LONG_COND_BRANCH)
2261 fix_crossing_conditional_branches ();
2262
2263 /* If the architecture does not have unconditional branches that
2264 can span all of memory, convert crossing unconditional branches
2265 into indirect jumps. Since adding an indirect jump also adds
2266 a new register usage, update the register usage information as
2267 well. */
2268 if (!HAS_LONG_UNCOND_BRANCH)
2269 fix_crossing_unconditional_branches ();
2270
2271 add_reg_crossing_jump_notes ();
2272
2273 /* Clear bb->aux fields that the above routines were using. */
2274 clear_aux_for_blocks ();
2275
2276 VEC_free (edge, heap, crossing_edges);
2277
2278 /* ??? FIXME: DF generates the bb info for a block immediately.
2279 And by immediately, I mean *during* creation of the block.
2280
2281 #0 df_bb_refs_collect
2282 #1 in df_bb_refs_record
2283 #2 in create_basic_block_structure
2284
2285 Which means that the bb_has_eh_pred test in df_bb_refs_collect
2286 will *always* fail, because no edges can have been added to the
2287 block yet. Which of course means we don't add the right
2288 artificial refs, which means we fail df_verify (much) later.
2289
2290 The cleanest solution would seem to be to make DF_DEFER_INSN_RESCAN
2291 also imply that we should not grab data from the new blocks that
2292 those new insns are in. That way one could create the block, link
2293 it up properly, and have everything Just Work later, when the
2294 deferred insns are processed.
2295
2296 In the meantime, we have no other option but to throw away all
2297 of the DF data and recompute it all. */
2298 if (cfun->eh->lp_array)
2299 {
2300 df_finish_pass (true);
2301 df_scan_alloc (NULL);
2302 df_scan_blocks ();
2303 /* Not all post-landing pads use all of the EH_RETURN_DATA_REGNO
2304 data. We blindly generated all of them when creating the new
2305 landing pad. Delete those assignments we don't use. */
2306 df_set_flags (DF_LR_RUN_DCE);
2307 df_analyze ();
2308 }
2309
2310 return TODO_verify_flow | TODO_verify_rtl_sharing;
2311 }
2312 \f
2313 static bool
2314 gate_handle_reorder_blocks (void)
2315 {
2316 if (targetm.cannot_modify_jumps_p ())
2317 return false;
2318 /* Don't reorder blocks when optimizing for size, because extra jump insns may
2319 be created; also, barriers may create extra padding.
2320
2321 More correctly, we should have a block reordering mode that tries to
2322 minimize the combined size of all the jumps. This would more or less
2323 automatically remove extra jumps, and would also try to use short
2324 jumps instead of long jumps. */
2325 if (!optimize_function_for_speed_p (cfun))
2326 return false;
2327 return (optimize > 0
2328 && (flag_reorder_blocks || flag_reorder_blocks_and_partition));
2329 }
2330
2331
2332 /* Reorder basic blocks. */
2333 static unsigned int
2334 rest_of_handle_reorder_blocks (void)
2335 {
2336 basic_block bb;
2337
2338 /* Last attempt to optimize CFG, as scheduling, peepholing and insn
2339 splitting possibly introduced more crossjumping opportunities. */
2340 cfg_layout_initialize (CLEANUP_EXPENSIVE);
2341
2342 reorder_basic_blocks ();
2343 cleanup_cfg (CLEANUP_EXPENSIVE);
2344
2345 FOR_EACH_BB (bb)
2346 if (bb->next_bb != EXIT_BLOCK_PTR)
2347 bb->aux = bb->next_bb;
2348 cfg_layout_finalize ();
2349
2350 /* Add NOTE_INSN_SWITCH_TEXT_SECTIONS notes. */
2351 insert_section_boundary_note ();
2352 return 0;
2353 }
2354
2355 struct rtl_opt_pass pass_reorder_blocks =
2356 {
2357 {
2358 RTL_PASS,
2359 "bbro", /* name */
2360 gate_handle_reorder_blocks, /* gate */
2361 rest_of_handle_reorder_blocks, /* execute */
2362 NULL, /* sub */
2363 NULL, /* next */
2364 0, /* static_pass_number */
2365 TV_REORDER_BLOCKS, /* tv_id */
2366 0, /* properties_required */
2367 0, /* properties_provided */
2368 0, /* properties_destroyed */
2369 0, /* todo_flags_start */
2370 TODO_verify_rtl_sharing, /* todo_flags_finish */
2371 }
2372 };
2373
2374 static bool
2375 gate_handle_partition_blocks (void)
2376 {
2377 /* The optimization to partition hot/cold basic blocks into separate
2378 sections of the .o file does not work well with linkonce or with
2379 user defined section attributes. Don't call it if either case
2380 arises. */
2381 return (flag_reorder_blocks_and_partition
2382 && optimize
2383 /* See gate_handle_reorder_blocks. We should not partition if
2384 we are going to omit the reordering. */
2385 && optimize_function_for_speed_p (cfun)
2386 && !DECL_ONE_ONLY (current_function_decl)
2387 && !user_defined_section_attribute);
2388 }
2389
2390 struct rtl_opt_pass pass_partition_blocks =
2391 {
2392 {
2393 RTL_PASS,
2394 "bbpart", /* name */
2395 gate_handle_partition_blocks, /* gate */
2396 partition_hot_cold_basic_blocks, /* execute */
2397 NULL, /* sub */
2398 NULL, /* next */
2399 0, /* static_pass_number */
2400 TV_REORDER_BLOCKS, /* tv_id */
2401 PROP_cfglayout, /* properties_required */
2402 0, /* properties_provided */
2403 0, /* properties_destroyed */
2404 0, /* todo_flags_start */
2405 0 /* todo_flags_finish */
2406 }
2407 };