/* CPU mode switching
   Copyright (C) 1998-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "target.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "insn-config.h"
#include "recog.h"
#include "predict.h"
#include "vec.h"
#include "hashtab.h"
#include "hash-set.h"
#include "machmode.h"
#include "input.h"
#include "function.h"
#include "dominance.h"
#include "cfg.h"
#include "cfgrtl.h"
#include "cfganal.h"
#include "lcm.h"
#include "cfgcleanup.h"
#include "basic-block.h"
#include "tm_p.h"
#include "tree-pass.h"
#include "df.h"
#include "emit-rtl.h"

/* We want target macros for the mode switching code to be able to refer
   to instruction attribute values.  */
#include "insn-attr.h"

#ifdef OPTIMIZE_MODE_SWITCHING

/* The algorithm for setting the modes consists of scanning the insn list
   and finding all the insns which require a specific mode.  Each insn gets
   a unique struct seginfo element.  These structures are inserted into a list
   for each basic block.  For each entity, there is an array of bb_info over
   the flow graph basic blocks (local var 'bb_info'), which contains a list
   of all insns within that basic block, in the order they are encountered.

   For each entity, any basic block WITHOUT any insns requiring a specific
   mode is given a single entry without a mode (each basic block in the
   flow graph must have at least one entry in the segment table).

   The LCM algorithm is then run over the flow graph to determine where to
   place the sets to the highest-priority mode with respect to the first
   insn in any one block.  Any adjustments required to the transparency
   vectors are made, then the next iteration starts for the next-lower
   priority mode, until for each entity all modes are exhausted.

   More details can be found in the code of optimize_mode_switching.  */
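/* As a rough illustration of the target-facing side (hypothetical target,
   not part of this file): a port with one entity and two modes might
   provide something like

       #define OPTIMIZE_MODE_SWITCHING(ENTITY) 1
       #define NUM_MODES_FOR_MODE_SWITCHING { 2 }

   and implement the targetm.mode_switching.needed, .priority and .emit
   hooks (plus optionally .entry, .exit and .after) that this pass calls
   below; the pass itself only consumes those macros and hooks.  */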
\f
/* This structure contains the information for each insn which requires
   either single or double mode to be set.
   MODE is the mode this insn must be executed in.
   INSN_PTR is the insn to be executed (may be the note that marks the
   beginning of a basic block).
   BBNUM is the flow graph basic block this insn occurs in.
   NEXT is the next insn in the same basic block.  */
struct seginfo
{
  int mode;
  rtx_insn *insn_ptr;
  int bbnum;
  struct seginfo *next;
  HARD_REG_SET regs_live;
};

struct bb_info
{
  struct seginfo *seginfo;
  int computing;
  int mode_out;
  int mode_in;
};

static struct seginfo * new_seginfo (int, rtx_insn *, int, HARD_REG_SET);
static void add_seginfo (struct bb_info *, struct seginfo *);
static void reg_dies (rtx, HARD_REG_SET *);
static void reg_becomes_live (rtx, const_rtx, void *);

/* Clear mode I from entity J in bitmap B.  */
#define clear_mode_bit(b, j, i) \
  bitmap_clear_bit (b, (j * max_num_modes) + i)

/* Test mode I from entity J in bitmap B.  */
#define mode_bit_p(b, j, i) \
  bitmap_bit_p (b, (j * max_num_modes) + i)

/* Set mode I from entity J in bitmap B.  */
#define set_mode_bit(b, j, i) \
  bitmap_set_bit (b, (j * max_num_modes) + i)
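/* A worked example of the layout these macros assume (illustrative
   numbers only): with two entities and max_num_modes == 4, each
   per-basic-block bitmap row holds n_entities * max_num_modes == 8 bits,
   and mode I of entity J lives at bit J * max_num_modes + I, so mode 2
   of entity 1 maps to bit 6.  The rows are allocated with exactly that
   many bits in optimize_mode_switching below.  */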

/* Emit mode segments from EDGE_LIST associated with entity E.
   INFO gives mode availability for each mode.  */

static bool
commit_mode_sets (struct edge_list *edge_list, int e, struct bb_info *info)
{
  bool need_commit = false;

  for (int ed = NUM_EDGES (edge_list) - 1; ed >= 0; ed--)
    {
      edge eg = INDEX_EDGE (edge_list, ed);
      int mode;

      if ((mode = (int)(intptr_t)(eg->aux)) != -1)
        {
          HARD_REG_SET live_at_edge;
          basic_block src_bb = eg->src;
          int cur_mode = info[src_bb->index].mode_out;
          rtx mode_set;

          REG_SET_TO_HARD_REG_SET (live_at_edge, df_get_live_out (src_bb));

          rtl_profile_for_edge (eg);
          start_sequence ();

          targetm.mode_switching.emit (e, mode, cur_mode, live_at_edge);

          mode_set = get_insns ();
          end_sequence ();
          default_rtl_profile ();

          /* Do not bother to insert an empty sequence.  */
          if (mode_set == NULL_RTX)
            continue;

          /* We should not get an abnormal edge here.  */
          gcc_assert (! (eg->flags & EDGE_ABNORMAL));

          need_commit = true;
          insert_insn_on_edge (mode_set, eg);
        }
    }

  return need_commit;
}

/* Allocate a new SEGINFO structure, initialized with the MODE, INSN,
   and basic block BB parameters.
   INSN may not be a NOTE_INSN_BASIC_BLOCK, unless it is an empty
   basic block; that allows us later to insert instructions in a FIFO-like
   manner.  */

static struct seginfo *
new_seginfo (int mode, rtx_insn *insn, int bb, HARD_REG_SET regs_live)
{
  struct seginfo *ptr;

  gcc_assert (!NOTE_INSN_BASIC_BLOCK_P (insn)
              || insn == BB_END (NOTE_BASIC_BLOCK (insn)));
  ptr = XNEW (struct seginfo);
  ptr->mode = mode;
  ptr->insn_ptr = insn;
  ptr->bbnum = bb;
  ptr->next = NULL;
  COPY_HARD_REG_SET (ptr->regs_live, regs_live);
  return ptr;
}

/* Add a seginfo element to the end of a list.
   HEAD is a pointer to the list beginning.
   INFO is the structure to be linked in.  */

static void
add_seginfo (struct bb_info *head, struct seginfo *info)
{
  struct seginfo *ptr;

  if (head->seginfo == NULL)
    head->seginfo = info;
  else
    {
      ptr = head->seginfo;
      while (ptr->next != NULL)
        ptr = ptr->next;
      ptr->next = info;
    }
}

/* Record in LIVE that register REG died.  */

static void
reg_dies (rtx reg, HARD_REG_SET *live)
{
  int regno;

  if (!REG_P (reg))
    return;

  regno = REGNO (reg);
  if (regno < FIRST_PSEUDO_REGISTER)
    remove_from_hard_reg_set (live, GET_MODE (reg), regno);
}

/* Record in LIVE that register REG became live.
   This is called via note_stores.  */

static void
reg_becomes_live (rtx reg, const_rtx setter ATTRIBUTE_UNUSED, void *live)
{
  int regno;

  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  if (!REG_P (reg))
    return;

  regno = REGNO (reg);
  if (regno < FIRST_PSEUDO_REGISTER)
    add_to_hard_reg_set ((HARD_REG_SET *) live, GET_MODE (reg), regno);
}

/* Split the fallthrough edge to the exit block, so that we can note
   that there NORMAL_MODE is required.  Return the new block if it's
   inserted before the exit block.  Otherwise return null.  */

static basic_block
create_pre_exit (int n_entities, int *entity_map, const int *num_modes)
{
  edge eg;
  edge_iterator ei;
  basic_block pre_exit;

  /* The only non-call predecessor at this stage is a block with a
     fallthrough edge; there can be at most one, but there could be
     none at all, e.g. when exit is called.  */
  pre_exit = 0;
  FOR_EACH_EDGE (eg, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
    if (eg->flags & EDGE_FALLTHRU)
      {
        basic_block src_bb = eg->src;
        rtx_insn *last_insn;
        rtx ret_reg;

        gcc_assert (!pre_exit);
        /* If this function returns a value at the end, we have to
           insert the final mode switch before the return value copy
           to its hard register.  */
        if (EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) == 1
            && NONJUMP_INSN_P ((last_insn = BB_END (src_bb)))
            && GET_CODE (PATTERN (last_insn)) == USE
            && GET_CODE ((ret_reg = XEXP (PATTERN (last_insn), 0))) == REG)
          {
            int ret_start = REGNO (ret_reg);
            int nregs = hard_regno_nregs[ret_start][GET_MODE (ret_reg)];
            int ret_end = ret_start + nregs;
            bool short_block = false;
            bool multi_reg_return = false;
            bool forced_late_switch = false;
            rtx_insn *before_return_copy;

            do
              {
                rtx_insn *return_copy = PREV_INSN (last_insn);
                rtx return_copy_pat, copy_reg;
                int copy_start, copy_num;
                int j;

                if (NONDEBUG_INSN_P (return_copy))
                  {
                    /* When using SJLJ exceptions, the call to the
                       unregister function is inserted between the
                       clobber of the return value and the copy.
                       We do not want to split the block before this
                       or any other call; if we have not found the
                       copy yet, the copy must have been deleted.  */
                    if (CALL_P (return_copy))
                      {
                        short_block = true;
                        break;
                      }
                    return_copy_pat = PATTERN (return_copy);
                    switch (GET_CODE (return_copy_pat))
                      {
                      case USE:
                        /* Skip USEs of multiple return registers.
                           __builtin_apply pattern is also handled here.  */
                        if (GET_CODE (XEXP (return_copy_pat, 0)) == REG
                            && (targetm.calls.function_value_regno_p
                                (REGNO (XEXP (return_copy_pat, 0)))))
                          {
                            multi_reg_return = true;
                            last_insn = return_copy;
                            continue;
                          }
                        break;

                      case ASM_OPERANDS:
                        /* Skip barrier insns.  */
                        if (!MEM_VOLATILE_P (return_copy_pat))
                          break;

                        /* Fall through.  */

                      case ASM_INPUT:
                      case UNSPEC_VOLATILE:
                        last_insn = return_copy;
                        continue;

                      default:
                        break;
                      }

                    /* If the return register is not (in its entirety)
                       likely spilled, the return copy might be
                       partially or completely optimized away.  */
                    return_copy_pat = single_set (return_copy);
                    if (!return_copy_pat)
                      {
                        return_copy_pat = PATTERN (return_copy);
                        if (GET_CODE (return_copy_pat) != CLOBBER)
                          break;
                        else if (!optimize)
                          {
                            /* This might be (clobber (reg [<result>]))
                               when not optimizing.  Then check if
                               the previous insn is the clobber for
                               the return register.  */
                            copy_reg = SET_DEST (return_copy_pat);
                            if (GET_CODE (copy_reg) == REG
                                && !HARD_REGISTER_NUM_P (REGNO (copy_reg)))
                              {
                                if (INSN_P (PREV_INSN (return_copy)))
                                  {
                                    return_copy = PREV_INSN (return_copy);
                                    return_copy_pat = PATTERN (return_copy);
                                    if (GET_CODE (return_copy_pat) != CLOBBER)
                                      break;
                                  }
                              }
                          }
                      }
                    copy_reg = SET_DEST (return_copy_pat);
                    if (GET_CODE (copy_reg) == REG)
                      copy_start = REGNO (copy_reg);
                    else if (GET_CODE (copy_reg) == SUBREG
                             && GET_CODE (SUBREG_REG (copy_reg)) == REG)
                      copy_start = REGNO (SUBREG_REG (copy_reg));
                    else
                      {
                        /* When control reaches end of non-void function,
                           there are no return copy insns at all.  This
                           avoids an ice on that invalid function.  */
                        if (ret_start + nregs == ret_end)
                          short_block = true;
                        break;
                      }
                    if (!targetm.calls.function_value_regno_p (copy_start))
                      copy_num = 0;
                    else
                      copy_num
                        = hard_regno_nregs[copy_start][GET_MODE (copy_reg)];

                    /* If the return register is not likely spilled, - as is
                       the case for floating point on SH4 - then it might
                       be set by an arithmetic operation that needs a
                       different mode than the exit block.  */
                    for (j = n_entities - 1; j >= 0; j--)
                      {
                        int e = entity_map[j];
                        int mode =
                          targetm.mode_switching.needed (e, return_copy);

                        if (mode != num_modes[e]
                            && mode != targetm.mode_switching.exit (e))
                          break;
                      }
                    if (j >= 0)
                      {
                        /* __builtin_return emits a sequence of loads to all
                           return registers.  One of them might require
                           another mode than MODE_EXIT, even if it is
                           unrelated to the return value, so we want to put
                           the final mode switch after it.  */
                        if (multi_reg_return
                            && targetm.calls.function_value_regno_p
                               (copy_start))
                          forced_late_switch = true;

                        /* For the SH4, floating point loads depend on fpscr,
                           thus we might need to put the final mode switch
                           after the return value copy.  That is still OK,
                           because a floating point return value does not
                           conflict with address reloads.  */
                        if (copy_start >= ret_start
                            && copy_start + copy_num <= ret_end
                            && OBJECT_P (SET_SRC (return_copy_pat)))
                          forced_late_switch = true;
                        break;
                      }
                    if (copy_num == 0)
                      {
                        last_insn = return_copy;
                        continue;
                      }

                    if (copy_start >= ret_start
                        && copy_start + copy_num <= ret_end)
                      nregs -= copy_num;
                    else if (!multi_reg_return
                             || !targetm.calls.function_value_regno_p
                                (copy_start))
                      break;
                    last_insn = return_copy;
                  }
                /* ??? Exception handling can lead to the return value
                   copy being already separated from the return value use,
                   as in unwind-dw2.c .
                   Similarly, conditionally returning without a value,
                   and conditionally using builtin_return can lead to an
                   isolated use.  */
                if (return_copy == BB_HEAD (src_bb))
                  {
                    short_block = true;
                    break;
                  }
                last_insn = return_copy;
              }
            while (nregs);

            /* If we didn't see a full return value copy, verify that there
               is a plausible reason for this.  If some, but not all of the
               return register is likely spilled, we can expect that there
               is a copy for the likely spilled part.  */
            gcc_assert (!nregs
                        || forced_late_switch
                        || short_block
                        || !(targetm.class_likely_spilled_p
                             (REGNO_REG_CLASS (ret_start)))
                        || (nregs
                            != hard_regno_nregs[ret_start][GET_MODE (ret_reg)])
                        /* For multi-hard-register floating point
                           values, sometimes the likely-spilled part
                           is ordinarily copied first, then the other
                           part is set with an arithmetic operation.
                           This doesn't actually cause reload
                           failures, so let it pass.  */
                        || (GET_MODE_CLASS (GET_MODE (ret_reg)) != MODE_INT
                            && nregs != 1));

            if (!NOTE_INSN_BASIC_BLOCK_P (last_insn))
              {
                before_return_copy
                  = emit_note_before (NOTE_INSN_DELETED, last_insn);
                /* Instructions preceding LAST_INSN in the same block might
                   require a different mode than MODE_EXIT, so if we might
                   have such instructions, keep them in a separate block
                   from pre_exit.  */
                src_bb = split_block (src_bb,
                                      PREV_INSN (before_return_copy))->dest;
              }
            else
              before_return_copy = last_insn;
            pre_exit = split_block (src_bb, before_return_copy)->src;
          }
        else
          {
            pre_exit = split_edge (eg);
          }
      }

  return pre_exit;
}

/* Find all insns that need a particular mode setting, and insert the
   necessary mode switches.  Return true if we did work.  */

static int
optimize_mode_switching (void)
{
  int e;
  basic_block bb;
  bool need_commit = false;
  static const int num_modes[] = NUM_MODES_FOR_MODE_SWITCHING;
#define N_ENTITIES ARRAY_SIZE (num_modes)
  int entity_map[N_ENTITIES];
  struct bb_info *bb_info[N_ENTITIES];
  int i, j;
  int n_entities = 0;
  int max_num_modes = 0;
  bool emitted ATTRIBUTE_UNUSED = false;
  basic_block post_entry = 0;
  basic_block pre_exit = 0;
  struct edge_list *edge_list = 0;

  /* These bitmaps are used for the LCM algorithm.  */
  sbitmap *kill, *del, *insert, *antic, *transp, *comp;
  sbitmap *avin, *avout;

  for (e = N_ENTITIES - 1; e >= 0; e--)
    if (OPTIMIZE_MODE_SWITCHING (e))
      {
        int entry_exit_extra = 0;

        /* Create the list of segments within each basic block.
           If NORMAL_MODE is defined, allow for two extra
           blocks split from the entry and exit block.  */
        if (targetm.mode_switching.entry && targetm.mode_switching.exit)
          entry_exit_extra = 3;

        bb_info[n_entities]
          = XCNEWVEC (struct bb_info,
                      last_basic_block_for_fn (cfun) + entry_exit_extra);
        entity_map[n_entities++] = e;
        if (num_modes[e] > max_num_modes)
          max_num_modes = num_modes[e];
      }

  if (! n_entities)
    return 0;

  /* Make sure that if MODE_ENTRY is defined, MODE_EXIT is defined too.  */
  gcc_assert ((targetm.mode_switching.entry && targetm.mode_switching.exit)
              || (!targetm.mode_switching.entry
                  && !targetm.mode_switching.exit));

  if (targetm.mode_switching.entry && targetm.mode_switching.exit)
    {
      /* Split the edge from the entry block, so that we can note that
         there NORMAL_MODE is supplied.  */
      post_entry = split_edge (single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
      pre_exit = create_pre_exit (n_entities, entity_map, num_modes);
    }

  df_analyze ();

  /* Create the bitmap vectors.  */
  antic = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
                                n_entities * max_num_modes);
  transp = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
                                 n_entities * max_num_modes);
  comp = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
                               n_entities * max_num_modes);
  avin = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
                               n_entities * max_num_modes);
  avout = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
                                n_entities * max_num_modes);
  kill = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
                               n_entities * max_num_modes);

  bitmap_vector_ones (transp, last_basic_block_for_fn (cfun));
  bitmap_vector_clear (antic, last_basic_block_for_fn (cfun));
  bitmap_vector_clear (comp, last_basic_block_for_fn (cfun));

  for (j = n_entities - 1; j >= 0; j--)
    {
      int e = entity_map[j];
      int no_mode = num_modes[e];
      struct bb_info *info = bb_info[j];
      rtx_insn *insn;

      /* Determine what the first mode (if any) needed by entity E is.
         This will be the mode that is anticipatable for this block.
         Also compute the initial transparency settings.  */
      FOR_EACH_BB_FN (bb, cfun)
        {
          struct seginfo *ptr;
          int last_mode = no_mode;
          bool any_set_required = false;
          HARD_REG_SET live_now;

          info[bb->index].mode_out = info[bb->index].mode_in = no_mode;

          REG_SET_TO_HARD_REG_SET (live_now, df_get_live_in (bb));

          /* Pretend the mode is clobbered across abnormal edges.  */
          {
            edge_iterator ei;
            edge eg;
            FOR_EACH_EDGE (eg, ei, bb->preds)
              if (eg->flags & EDGE_COMPLEX)
                break;
            if (eg)
              {
                rtx_insn *ins_pos = BB_HEAD (bb);
                if (LABEL_P (ins_pos))
                  ins_pos = NEXT_INSN (ins_pos);
                gcc_assert (NOTE_INSN_BASIC_BLOCK_P (ins_pos));
                if (ins_pos != BB_END (bb))
                  ins_pos = NEXT_INSN (ins_pos);
                ptr = new_seginfo (no_mode, ins_pos, bb->index, live_now);
                add_seginfo (info + bb->index, ptr);
                for (i = 0; i < no_mode; i++)
                  clear_mode_bit (transp[bb->index], j, i);
              }
          }

          FOR_BB_INSNS (bb, insn)
            {
              if (INSN_P (insn))
                {
                  int mode = targetm.mode_switching.needed (e, insn);
                  rtx link;

                  if (mode != no_mode && mode != last_mode)
                    {
                      any_set_required = true;
                      last_mode = mode;
                      ptr = new_seginfo (mode, insn, bb->index, live_now);
                      add_seginfo (info + bb->index, ptr);
                      for (i = 0; i < no_mode; i++)
                        clear_mode_bit (transp[bb->index], j, i);
                    }

                  if (targetm.mode_switching.after)
                    last_mode = targetm.mode_switching.after (e, last_mode,
                                                              insn);

                  /* Update LIVE_NOW.  */
                  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
                    if (REG_NOTE_KIND (link) == REG_DEAD)
                      reg_dies (XEXP (link, 0), &live_now);

                  note_stores (PATTERN (insn), reg_becomes_live, &live_now);
                  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
                    if (REG_NOTE_KIND (link) == REG_UNUSED)
                      reg_dies (XEXP (link, 0), &live_now);
                }
            }

          info[bb->index].computing = last_mode;
          /* Check for blocks without ANY mode requirements.
             N.B. because of MODE_AFTER, last_mode might still
             be different from no_mode, in which case we need to
             mark the block as nontransparent.  */
          if (!any_set_required)
            {
              ptr = new_seginfo (no_mode, BB_END (bb), bb->index, live_now);
              add_seginfo (info + bb->index, ptr);
              if (last_mode != no_mode)
                for (i = 0; i < no_mode; i++)
                  clear_mode_bit (transp[bb->index], j, i);
            }
        }
      if (targetm.mode_switching.entry && targetm.mode_switching.exit)
        {
          int mode = targetm.mode_switching.entry (e);

          info[post_entry->index].mode_out =
            info[post_entry->index].mode_in = no_mode;
          if (pre_exit)
            {
              info[pre_exit->index].mode_out =
                info[pre_exit->index].mode_in = no_mode;
            }

          if (mode != no_mode)
            {
              bb = post_entry;

              /* By always making this nontransparent, we save
                 an extra check in make_preds_opaque.  We also
                 need this to avoid confusing pre_edge_lcm when
                 antic is cleared but transp and comp are set.  */
              for (i = 0; i < no_mode; i++)
                clear_mode_bit (transp[bb->index], j, i);

              /* Insert a fake computing definition of MODE into entry
                 blocks which compute no mode.  This represents the mode on
                 entry.  */
              info[bb->index].computing = mode;

              if (pre_exit)
                info[pre_exit->index].seginfo->mode =
                  targetm.mode_switching.exit (e);
            }
        }

      /* Set the anticipatable and computing arrays.  */
      for (i = 0; i < no_mode; i++)
        {
          int m = targetm.mode_switching.priority (entity_map[j], i);

          FOR_EACH_BB_FN (bb, cfun)
            {
              if (info[bb->index].seginfo->mode == m)
                set_mode_bit (antic[bb->index], j, m);

              if (info[bb->index].computing == m)
                set_mode_bit (comp[bb->index], j, m);
            }
        }
    }

  /* Calculate the optimal locations for placing mode switches to
     modes with priority I.  */

  FOR_EACH_BB_FN (bb, cfun)
    bitmap_not (kill[bb->index], transp[bb->index]);

  edge_list = pre_edge_lcm_avs (n_entities * max_num_modes, transp, comp, antic,
                                kill, avin, avout, &insert, &del);

  for (j = n_entities - 1; j >= 0; j--)
    {
      int no_mode = num_modes[entity_map[j]];

      /* Insert all mode sets that have been inserted by lcm.  */

      for (int ed = NUM_EDGES (edge_list) - 1; ed >= 0; ed--)
        {
          edge eg = INDEX_EDGE (edge_list, ed);

          eg->aux = (void *)(intptr_t)-1;

          for (i = 0; i < no_mode; i++)
            {
              int m = targetm.mode_switching.priority (entity_map[j], i);
              if (mode_bit_p (insert[ed], j, m))
                {
                  eg->aux = (void *)(intptr_t)m;
                  break;
                }
            }
        }

      FOR_EACH_BB_FN (bb, cfun)
        {
          struct bb_info *info = bb_info[j];
          int last_mode = no_mode;

          /* Initialize mode-out availability for bb.  */
          for (i = 0; i < no_mode; i++)
            if (mode_bit_p (avout[bb->index], j, i))
              {
                if (last_mode == no_mode)
                  last_mode = i;
                if (last_mode != i)
                  {
                    last_mode = no_mode;
                    break;
                  }
              }
          info[bb->index].mode_out = last_mode;

          /* Initialize mode-in availability for bb.  */
          last_mode = no_mode;
          for (i = 0; i < no_mode; i++)
            if (mode_bit_p (avin[bb->index], j, i))
              {
                if (last_mode == no_mode)
                  last_mode = i;
                if (last_mode != i)
                  {
                    last_mode = no_mode;
                    break;
                  }
              }
          info[bb->index].mode_in = last_mode;

          for (i = 0; i < no_mode; i++)
            if (mode_bit_p (del[bb->index], j, i))
              info[bb->index].seginfo->mode = no_mode;
        }

      /* Now output the remaining mode sets in all the segments.  */

      /* In case there was no mode inserted, the mode information on the edge
         might not be complete.
         Update mode info on edges and commit pending mode sets.  */
      need_commit |= commit_mode_sets (edge_list, entity_map[j], bb_info[j]);

      /* Reset modes for next entity.  */
      clear_aux_for_edges ();

      FOR_EACH_BB_FN (bb, cfun)
        {
          struct seginfo *ptr, *next;
          int cur_mode = bb_info[j][bb->index].mode_in;

          for (ptr = bb_info[j][bb->index].seginfo; ptr; ptr = next)
            {
              next = ptr->next;
              if (ptr->mode != no_mode)
                {
                  rtx_insn *mode_set;

                  rtl_profile_for_bb (bb);
                  start_sequence ();

                  targetm.mode_switching.emit (entity_map[j], ptr->mode,
                                               cur_mode, ptr->regs_live);
                  mode_set = get_insns ();
                  end_sequence ();

                  /* Modes kill each other inside a basic block.  */
                  cur_mode = ptr->mode;

                  /* Insert MODE_SET only if it is nonempty.  */
                  if (mode_set != NULL_RTX)
                    {
                      emitted = true;
                      if (NOTE_INSN_BASIC_BLOCK_P (ptr->insn_ptr))
                        /* We need to emit the insns in a FIFO-like manner,
                           i.e. the first to be emitted at our insertion
                           point ends up first in the instruction stream.
                           Because we made sure that NOTE_INSN_BASIC_BLOCK is
                           only used for initially empty basic blocks, we
                           can achieve this by appending at the end of
                           the block.  */
                        emit_insn_after
                          (mode_set, BB_END (NOTE_BASIC_BLOCK (ptr->insn_ptr)));
                      else
                        emit_insn_before (mode_set, ptr->insn_ptr);
                    }

                  default_rtl_profile ();
                }

              free (ptr);
            }
        }

      free (bb_info[j]);
    }

  free_edge_list (edge_list);

  /* Finished.  Free up all the things we've allocated.  */
  sbitmap_vector_free (del);
  sbitmap_vector_free (insert);
  sbitmap_vector_free (kill);
  sbitmap_vector_free (antic);
  sbitmap_vector_free (transp);
  sbitmap_vector_free (comp);
  sbitmap_vector_free (avin);
  sbitmap_vector_free (avout);

  if (need_commit)
    commit_edge_insertions ();

  if (targetm.mode_switching.entry && targetm.mode_switching.exit)
    cleanup_cfg (CLEANUP_NO_INSN_DEL);
  else if (!need_commit && !emitted)
    return 0;

  return 1;
}

#endif /* OPTIMIZE_MODE_SWITCHING */
\f
namespace {

const pass_data pass_data_mode_switching =
{
  RTL_PASS, /* type */
  "mode_sw", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_MODE_SWITCH, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_df_finish, /* todo_flags_finish */
};

class pass_mode_switching : public rtl_opt_pass
{
public:
  pass_mode_switching (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_mode_switching, ctxt)
  {}

  /* opt_pass methods: */
  /* The epiphany backend creates a second instance of this pass, so we need
     a clone method.  */
  opt_pass * clone () { return new pass_mode_switching (m_ctxt); }
  virtual bool gate (function *)
    {
#ifdef OPTIMIZE_MODE_SWITCHING
      return true;
#else
      return false;
#endif
    }

  virtual unsigned int execute (function *)
    {
#ifdef OPTIMIZE_MODE_SWITCHING
      optimize_mode_switching ();
#endif /* OPTIMIZE_MODE_SWITCHING */
      return 0;
    }

}; // class pass_mode_switching

} // anon namespace

rtl_opt_pass *
make_pass_mode_switching (gcc::context *ctxt)
{
  return new pass_mode_switching (ctxt);
}