/* CPU mode switching
   Copyright (C) 1998-2019 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "cfghooks.h"
#include "df.h"
#include "memmodel.h"
#include "tm_p.h"
#include "regs.h"
#include "emit-rtl.h"
#include "cfgrtl.h"
#include "cfganal.h"
#include "lcm.h"
#include "cfgcleanup.h"
#include "tree-pass.h"

/* We want target macros for the mode switching code to be able to refer
   to instruction attribute values.  */
#include "insn-attr.h"

#ifdef OPTIMIZE_MODE_SWITCHING

/* The algorithm for setting the modes consists of scanning the insn list
   and finding all the insns which require a specific mode.  Each insn gets
   a unique struct seginfo element.  These structures are inserted into a list
   for each basic block.  For each entity, there is an array of bb_info over
   the flow graph basic blocks (local var 'bb_info'), which contains a list
   of all insns within that basic block, in the order they are encountered.

   For each entity, any basic block WITHOUT any insns requiring a specific
   mode is given a single entry without a mode (each basic block in the
   flow graph must have at least one entry in the segment table).

   The LCM algorithm is then run over the flow graph to determine where to
   place the sets to the highest-priority mode with respect to the first
   insn in any one block.  Any adjustments required to the transparency
   vectors are made, then the next iteration starts for the next-lower
   priority mode, till for each entity all modes are exhausted.

   More details can be found in the code of optimize_mode_switching.  */

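/* For orientation, a minimal sketch of the target-side interface this
   pass drives; all "EXAMPLE_*" names below are hypothetical, not taken
   from any real back end.  A target enables the pass by defining, in its
   target headers, something like

       #define OPTIMIZE_MODE_SWITCHING(ENTITY) 1
       #define NUM_MODES_FOR_MODE_SWITCHING { EXAMPLE_MODE_COUNT }

   and by implementing at least the TARGET_MODE_NEEDED, TARGET_MODE_PRIORITY
   and TARGET_MODE_EMIT hooks, e.g.

       static int
       example_mode_needed (int entity, rtx_insn *insn)
       {
         gcc_assert (entity == 0);
         if (recog_memoized (insn) >= 0)
           return (int) get_attr_example_mode (insn);
         return EXAMPLE_MODE_COUNT;  // no specific mode required
       }

   where returning num_modes[e] (here EXAMPLE_MODE_COUNT) means "no
   specific mode required", and get_attr_example_mode would be generated
   from a hypothetical "example_mode" insn attribute.  */
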
/* This structure contains the information for each insn which requires
   either single or double mode to be set.
   MODE is the mode this insn must be executed in.
   INSN_PTR is the insn to be executed (may be the note that marks the
   beginning of a basic block).
   BBNUM is the flow graph basic block this insn occurs in.
   NEXT is the next insn in the same basic block.  */
struct seginfo
{
  int mode;
  rtx_insn *insn_ptr;
  int bbnum;
  struct seginfo *next;
  HARD_REG_SET regs_live;
};

struct bb_info
{
  struct seginfo *seginfo;
  int computing;
  int mode_out;
  int mode_in;
};

static struct seginfo * new_seginfo (int, rtx_insn *, int, HARD_REG_SET);
static void add_seginfo (struct bb_info *, struct seginfo *);
static void reg_dies (rtx, HARD_REG_SET *);
static void reg_becomes_live (rtx, const_rtx, void *);

/* Clear mode I from entity J in bitmap B.  */
#define clear_mode_bit(b, j, i) \
       bitmap_clear_bit (b, (j * max_num_modes) + i)

/* Test mode I from entity J in bitmap B.  */
#define mode_bit_p(b, j, i) \
       bitmap_bit_p (b, (j * max_num_modes) + i)

/* Set mode I from entity J in bitmap B.  */
#define set_mode_bit(b, j, i) \
       bitmap_set_bit (b, (j * max_num_modes) + i)
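
/* A worked example of the layout these macros encode: with two entities
   (J = 0 and 1) and max_num_modes == 4, mode I of entity J lives at bit
   J * 4 + I, so bits 0-3 hold the four modes of entity 0 and bits 4-7
   hold the four modes of entity 1.  The numbers are illustrative only;
   the real counts come from NUM_MODES_FOR_MODE_SWITCHING.  */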

/* Emit mode-setting sequences on the edges in EDGE_LIST for entity E.
   INFO gives the mode availability for each basic block.  */

static bool
commit_mode_sets (struct edge_list *edge_list, int e, struct bb_info *info)
{
  bool need_commit = false;

  for (int ed = NUM_EDGES (edge_list) - 1; ed >= 0; ed--)
    {
      edge eg = INDEX_EDGE (edge_list, ed);
      int mode;

      if ((mode = (int)(intptr_t)(eg->aux)) != -1)
	{
	  HARD_REG_SET live_at_edge;
	  basic_block src_bb = eg->src;
	  int cur_mode = info[src_bb->index].mode_out;
	  rtx_insn *mode_set;

	  REG_SET_TO_HARD_REG_SET (live_at_edge, df_get_live_out (src_bb));

	  rtl_profile_for_edge (eg);
	  start_sequence ();

	  targetm.mode_switching.emit (e, mode, cur_mode, live_at_edge);

	  mode_set = get_insns ();
	  end_sequence ();
	  default_rtl_profile ();

	  /* Do not bother to insert an empty sequence.  */
	  if (mode_set == NULL)
	    continue;

	  /* We should not get an abnormal edge here.  */
	  gcc_assert (! (eg->flags & EDGE_ABNORMAL));

	  need_commit = true;
	  insert_insn_on_edge (mode_set, eg);
	}
    }

  return need_commit;
}

/* Allocate a new seginfo structure, initialized with the MODE, INSN,
   and basic block BB parameters.
   INSN may not be a NOTE_INSN_BASIC_BLOCK, unless it is an empty
   basic block; that allows us later to insert instructions in a FIFO-like
   manner.  */

static struct seginfo *
new_seginfo (int mode, rtx_insn *insn, int bb, HARD_REG_SET regs_live)
{
  struct seginfo *ptr;

  gcc_assert (!NOTE_INSN_BASIC_BLOCK_P (insn)
	      || insn == BB_END (NOTE_BASIC_BLOCK (insn)));
  ptr = XNEW (struct seginfo);
  ptr->mode = mode;
  ptr->insn_ptr = insn;
  ptr->bbnum = bb;
  ptr->next = NULL;
  COPY_HARD_REG_SET (ptr->regs_live, regs_live);
  return ptr;
}

/* Add a seginfo element to the end of a list.
   HEAD is a pointer to the list beginning.
   INFO is the structure to be linked in.  */

static void
add_seginfo (struct bb_info *head, struct seginfo *info)
{
  struct seginfo *ptr;

  if (head->seginfo == NULL)
    head->seginfo = info;
  else
    {
      ptr = head->seginfo;
      while (ptr->next != NULL)
	ptr = ptr->next;
      ptr->next = info;
    }
}

/* Record in LIVE that register REG died.  */

static void
reg_dies (rtx reg, HARD_REG_SET *live)
{
  int regno;

  if (!REG_P (reg))
    return;

  regno = REGNO (reg);
  if (regno < FIRST_PSEUDO_REGISTER)
    remove_from_hard_reg_set (live, GET_MODE (reg), regno);
}

/* Record in LIVE that register REG became live.
   This is called via note_stores.  */

static void
reg_becomes_live (rtx reg, const_rtx setter ATTRIBUTE_UNUSED, void *live)
{
  int regno;

  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  if (!REG_P (reg))
    return;

  regno = REGNO (reg);
  if (regno < FIRST_PSEUDO_REGISTER)
    add_to_hard_reg_set ((HARD_REG_SET *) live, GET_MODE (reg), regno);
}

/* Split the fallthrough edge to the exit block, so that we can note
   that NORMAL_MODE is required there.  Return the new block if it's
   inserted before the exit block.  Otherwise return null.  */
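
/* In the simple case, the split performed below turns

       src_bb  --fallthru-->  EXIT

   into

       src_bb  -->  pre_exit  --fallthru-->  EXIT

   so that a final mode switch can be placed in PRE_EXIT, after any return
   value copy left in SRC_BB.  (Illustrative; the names match the local
   variables in create_pre_exit.)  */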

static basic_block
create_pre_exit (int n_entities, int *entity_map, const int *num_modes)
{
  edge eg;
  edge_iterator ei;
  basic_block pre_exit;

  /* The only non-call predecessor at this stage is a block with a
     fallthrough edge; there can be at most one, but there could be
     none at all, e.g. when exit is called.  */
  pre_exit = 0;
  FOR_EACH_EDGE (eg, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
    if (eg->flags & EDGE_FALLTHRU)
      {
	basic_block src_bb = eg->src;
	rtx_insn *last_insn;
	rtx ret_reg;

	gcc_assert (!pre_exit);
	/* If this function returns a value at the end, we have to
	   insert the final mode switch before the return value copy
	   to its hard register.

	   x86 targets use mode-switching infrastructure to
	   conditionally insert vzeroupper instruction at the exit
	   from the function where there is no need to switch the
	   mode before the return value copy.  The vzeroupper insertion
	   pass runs after reload, so use !reload_completed as a stand-in
	   for x86 to skip the search for the return value copy insn.

	   N.b.: the code below assumes that the return copy insn
	   immediately precedes its corresponding use insn.  This
	   assumption does not hold after reload, since the sched1 pass
	   can schedule the return copy insn away from its
	   corresponding use insn.  */
	if (!reload_completed
	    && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) == 1
	    && NONJUMP_INSN_P ((last_insn = BB_END (src_bb)))
	    && GET_CODE (PATTERN (last_insn)) == USE
	    && GET_CODE ((ret_reg = XEXP (PATTERN (last_insn), 0))) == REG)
	  {
	    int ret_start = REGNO (ret_reg);
	    int nregs = REG_NREGS (ret_reg);
	    int ret_end = ret_start + nregs;
	    bool short_block = false;
	    bool multi_reg_return = false;
	    bool forced_late_switch = false;
	    rtx_insn *before_return_copy;

	    do
	      {
		rtx_insn *return_copy = PREV_INSN (last_insn);
		rtx return_copy_pat, copy_reg;
		int copy_start, copy_num;
		int j;

		if (NONDEBUG_INSN_P (return_copy))
		  {
		    /* When using SJLJ exceptions, the call to the
		       unregister function is inserted between the
		       clobber of the return value and the copy.
		       We do not want to split the block before this
		       or any other call; if we have not found the
		       copy yet, the copy must have been deleted.  */
		    if (CALL_P (return_copy))
		      {
			short_block = true;
			break;
		      }
		    return_copy_pat = PATTERN (return_copy);
		    switch (GET_CODE (return_copy_pat))
		      {
		      case USE:
			/* Skip USEs of multiple return registers.
			   __builtin_apply pattern is also handled here.  */
			if (GET_CODE (XEXP (return_copy_pat, 0)) == REG
			    && (targetm.calls.function_value_regno_p
				(REGNO (XEXP (return_copy_pat, 0)))))
			  {
			    multi_reg_return = true;
			    last_insn = return_copy;
			    continue;
			  }
			break;

		      case ASM_OPERANDS:
			/* Skip barrier insns.  */
			if (!MEM_VOLATILE_P (return_copy_pat))
			  break;

			/* Fall through.  */

		      case ASM_INPUT:
		      case UNSPEC_VOLATILE:
			last_insn = return_copy;
			continue;

		      default:
			break;
		      }

		    /* If the return register is not (in its entirety)
		       likely spilled, the return copy might be
		       partially or completely optimized away.  */
		    return_copy_pat = single_set (return_copy);
		    if (!return_copy_pat)
		      {
			return_copy_pat = PATTERN (return_copy);
			if (GET_CODE (return_copy_pat) != CLOBBER)
			  break;
			else if (!optimize)
			  {
			    /* This might be (clobber (reg [<result>]))
			       when not optimizing.  Then check if
			       the previous insn is the clobber for
			       the return register.  */
			    copy_reg = SET_DEST (return_copy_pat);
			    if (GET_CODE (copy_reg) == REG
				&& !HARD_REGISTER_NUM_P (REGNO (copy_reg)))
			      {
				if (INSN_P (PREV_INSN (return_copy)))
				  {
				    return_copy = PREV_INSN (return_copy);
				    return_copy_pat = PATTERN (return_copy);
				    if (GET_CODE (return_copy_pat) != CLOBBER)
				      break;
				  }
			      }
			  }
		      }
		    copy_reg = SET_DEST (return_copy_pat);
		    if (GET_CODE (copy_reg) == REG)
		      copy_start = REGNO (copy_reg);
		    else if (GET_CODE (copy_reg) == SUBREG
			     && GET_CODE (SUBREG_REG (copy_reg)) == REG)
		      copy_start = REGNO (SUBREG_REG (copy_reg));
		    else
		      {
			/* When control reaches the end of a non-void
			   function, there are no return copy insns at all.
			   This avoids an ICE on such invalid functions.  */
			if (ret_start + nregs == ret_end)
			  short_block = true;
			break;
		      }
		    if (!targetm.calls.function_value_regno_p (copy_start))
		      copy_num = 0;
		    else
		      copy_num = hard_regno_nregs (copy_start,
						   GET_MODE (copy_reg));

		    /* If the return register is not likely spilled - as is
		       the case for floating point on the SH4 - then it might
		       be set by an arithmetic operation that needs a
		       different mode than the exit block.  */
		    for (j = n_entities - 1; j >= 0; j--)
		      {
			int e = entity_map[j];
			int mode =
			  targetm.mode_switching.needed (e, return_copy);

			if (mode != num_modes[e]
			    && mode != targetm.mode_switching.exit (e))
			  break;
		      }
		    if (j >= 0)
		      {
			/* __builtin_return emits a sequence of loads to all
			   return registers.  One of them might require
			   another mode than MODE_EXIT, even if it is
			   unrelated to the return value, so we want to put
			   the final mode switch after it.  */
			if (multi_reg_return
			    && targetm.calls.function_value_regno_p
			       (copy_start))
			  forced_late_switch = true;

			/* For the SH4, floating point loads depend on fpscr,
			   thus we might need to put the final mode switch
			   after the return value copy.  That is still OK,
			   because a floating point return value does not
			   conflict with address reloads.  */
			if (copy_start >= ret_start
			    && copy_start + copy_num <= ret_end
			    && OBJECT_P (SET_SRC (return_copy_pat)))
			  forced_late_switch = true;
			break;
		      }
		    if (copy_num == 0)
		      {
			last_insn = return_copy;
			continue;
		      }

		    if (copy_start >= ret_start
			&& copy_start + copy_num <= ret_end)
		      nregs -= copy_num;
		    else if (!multi_reg_return
			     || !targetm.calls.function_value_regno_p
				(copy_start))
		      break;
		    last_insn = return_copy;
		  }
		/* ??? Exception handling can lead to the return value
		   copy being already separated from the return value use,
		   as in unwind-dw2.c.
		   Similarly, conditionally returning without a value,
		   and conditionally using builtin_return can lead to an
		   isolated use.  */
		if (return_copy == BB_HEAD (src_bb))
		  {
		    short_block = true;
		    break;
		  }
		last_insn = return_copy;
	      }
	    while (nregs);

	    /* If we didn't see a full return value copy, verify that there
	       is a plausible reason for this.  If some, but not all of the
	       return register is likely spilled, we can expect that there
	       is a copy for the likely spilled part.  */
	    gcc_assert (!nregs
			|| forced_late_switch
			|| short_block
			|| !(targetm.class_likely_spilled_p
			     (REGNO_REG_CLASS (ret_start)))
			|| nregs != REG_NREGS (ret_reg)
			/* For multi-hard-register floating point
			   values, sometimes the likely-spilled part
			   is ordinarily copied first, then the other
			   part is set with an arithmetic operation.
			   This doesn't actually cause reload
			   failures, so let it pass.  */
			|| (GET_MODE_CLASS (GET_MODE (ret_reg)) != MODE_INT
			    && nregs != 1));

	    if (!NOTE_INSN_BASIC_BLOCK_P (last_insn))
	      {
		before_return_copy
		  = emit_note_before (NOTE_INSN_DELETED, last_insn);
		/* Instructions preceding LAST_INSN in the same block might
		   require a different mode than MODE_EXIT, so if we might
		   have such instructions, keep them in a separate block
		   from pre_exit.  */
		src_bb = split_block (src_bb,
				      PREV_INSN (before_return_copy))->dest;
	      }
	    else
	      before_return_copy = last_insn;
	    pre_exit = split_block (src_bb, before_return_copy)->src;
	  }
	else
	  {
	    pre_exit = split_edge (eg);
	  }
      }

  return pre_exit;
}

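/* A rough summary of the LCM problem set up below, for orientation; see
   lcm.c for the precise definitions.  For each (entity, mode) bit and
   each basic block BB:

       TRANSP[BB]  - BB neither requires nor sets the mode;
       COMP[BB]    - BB computes (sets) the mode;
       ANTIC[BB]   - the mode is wanted at the start of BB;
       KILL[BB]    - ~TRANSP[BB], i.e. BB clobbers the mode.

   pre_edge_lcm_avs then returns, via INSERT and DEL, the edges on which
   mode sets should be inserted and the blocks whose local mode sets have
   become redundant.  */
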
/* Find all insns that need a particular mode setting, and insert the
   necessary mode switches.  Return true if we did work.  */

static int
optimize_mode_switching (void)
{
  int e;
  basic_block bb;
  bool need_commit = false;
  static const int num_modes[] = NUM_MODES_FOR_MODE_SWITCHING;
#define N_ENTITIES ARRAY_SIZE (num_modes)
  int entity_map[N_ENTITIES];
  struct bb_info *bb_info[N_ENTITIES];
  int i, j;
  int n_entities = 0;
  int max_num_modes = 0;
  bool emitted ATTRIBUTE_UNUSED = false;
  basic_block post_entry = 0;
  basic_block pre_exit = 0;
  struct edge_list *edge_list = 0;

  /* These bitmaps are used for the LCM algorithm.  */
  sbitmap *kill, *del, *insert, *antic, *transp, *comp;
  sbitmap *avin, *avout;

  for (e = N_ENTITIES - 1; e >= 0; e--)
    if (OPTIMIZE_MODE_SWITCHING (e))
      {
	int entry_exit_extra = 0;

	/* Create the list of segments within each basic block.
	   If NORMAL_MODE is defined, allow for two extra
	   blocks split from the entry and exit block.  */
	if (targetm.mode_switching.entry && targetm.mode_switching.exit)
	  entry_exit_extra = 3;

	bb_info[n_entities]
	  = XCNEWVEC (struct bb_info,
		      last_basic_block_for_fn (cfun) + entry_exit_extra);
	entity_map[n_entities++] = e;
	if (num_modes[e] > max_num_modes)
	  max_num_modes = num_modes[e];
      }

  if (! n_entities)
    return 0;

  /* Make sure that MODE_ENTRY is defined if and only if MODE_EXIT is.  */
  gcc_assert ((targetm.mode_switching.entry && targetm.mode_switching.exit)
	      || (!targetm.mode_switching.entry
		  && !targetm.mode_switching.exit));

  if (targetm.mode_switching.entry && targetm.mode_switching.exit)
    {
      /* Split the edge from the entry block, so that we can note that
	 NORMAL_MODE is supplied there.  */
      post_entry = split_edge (single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
      pre_exit = create_pre_exit (n_entities, entity_map, num_modes);
    }

  df_analyze ();

  /* Create the bitmap vectors.  */
  antic = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
				n_entities * max_num_modes);
  transp = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
				 n_entities * max_num_modes);
  comp = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
			       n_entities * max_num_modes);
  avin = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
			       n_entities * max_num_modes);
  avout = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
				n_entities * max_num_modes);
  kill = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
			       n_entities * max_num_modes);

  bitmap_vector_ones (transp, last_basic_block_for_fn (cfun));
  bitmap_vector_clear (antic, last_basic_block_for_fn (cfun));
  bitmap_vector_clear (comp, last_basic_block_for_fn (cfun));

  for (j = n_entities - 1; j >= 0; j--)
    {
      int e = entity_map[j];
      int no_mode = num_modes[e];
      struct bb_info *info = bb_info[j];
      rtx_insn *insn;

      /* Determine the mode (if any) that the first use of entity E in each
	 basic block needs.  This will be the mode that is anticipatable for
	 that block.  Also compute the initial transparency settings.  */
      FOR_EACH_BB_FN (bb, cfun)
	{
	  struct seginfo *ptr;
	  int last_mode = no_mode;
	  bool any_set_required = false;
	  HARD_REG_SET live_now;

	  info[bb->index].mode_out = info[bb->index].mode_in = no_mode;

	  REG_SET_TO_HARD_REG_SET (live_now, df_get_live_in (bb));

	  /* Pretend the mode is clobbered across abnormal edges.  */
	  {
	    edge_iterator ei;
	    edge eg;
	    FOR_EACH_EDGE (eg, ei, bb->preds)
	      if (eg->flags & EDGE_COMPLEX)
		break;
	    if (eg)
	      {
		rtx_insn *ins_pos = BB_HEAD (bb);
		if (LABEL_P (ins_pos))
		  ins_pos = NEXT_INSN (ins_pos);
		gcc_assert (NOTE_INSN_BASIC_BLOCK_P (ins_pos));
		if (ins_pos != BB_END (bb))
		  ins_pos = NEXT_INSN (ins_pos);
		ptr = new_seginfo (no_mode, ins_pos, bb->index, live_now);
		add_seginfo (info + bb->index, ptr);
		for (i = 0; i < no_mode; i++)
		  clear_mode_bit (transp[bb->index], j, i);
	      }
	  }

	  FOR_BB_INSNS (bb, insn)
	    {
	      if (INSN_P (insn))
		{
		  int mode = targetm.mode_switching.needed (e, insn);
		  rtx link;

		  if (mode != no_mode && mode != last_mode)
		    {
		      any_set_required = true;
		      last_mode = mode;
		      ptr = new_seginfo (mode, insn, bb->index, live_now);
		      add_seginfo (info + bb->index, ptr);
		      for (i = 0; i < no_mode; i++)
			clear_mode_bit (transp[bb->index], j, i);
		    }

		  if (targetm.mode_switching.after)
		    last_mode = targetm.mode_switching.after (e, last_mode,
							      insn);

		  /* Update LIVE_NOW.  */
		  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
		    if (REG_NOTE_KIND (link) == REG_DEAD)
		      reg_dies (XEXP (link, 0), &live_now);

		  note_stores (PATTERN (insn), reg_becomes_live, &live_now);
		  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
		    if (REG_NOTE_KIND (link) == REG_UNUSED)
		      reg_dies (XEXP (link, 0), &live_now);
		}
	    }

	  info[bb->index].computing = last_mode;
	  /* Check for blocks without ANY mode requirements.
	     N.B. because of MODE_AFTER, last_mode might still
	     be different from no_mode, in which case we need to
	     mark the block as nontransparent.  */
	  if (!any_set_required)
	    {
	      ptr = new_seginfo (no_mode, BB_END (bb), bb->index, live_now);
	      add_seginfo (info + bb->index, ptr);
	      if (last_mode != no_mode)
		for (i = 0; i < no_mode; i++)
		  clear_mode_bit (transp[bb->index], j, i);
	    }
	}
      if (targetm.mode_switching.entry && targetm.mode_switching.exit)
	{
	  int mode = targetm.mode_switching.entry (e);

	  info[post_entry->index].mode_out =
	    info[post_entry->index].mode_in = no_mode;
	  if (pre_exit)
	    {
	      info[pre_exit->index].mode_out =
		info[pre_exit->index].mode_in = no_mode;
	    }

	  if (mode != no_mode)
	    {
	      bb = post_entry;

	      /* By always making this nontransparent, we save
		 an extra check in make_preds_opaque.  We also
		 need this to avoid confusing pre_edge_lcm when
		 antic is cleared but transp and comp are set.  */
	      for (i = 0; i < no_mode; i++)
		clear_mode_bit (transp[bb->index], j, i);

	      /* Insert a fake computing definition of MODE into entry
		 blocks which compute no mode.  This represents the mode on
		 entry.  */
	      info[bb->index].computing = mode;

	      if (pre_exit)
		info[pre_exit->index].seginfo->mode =
		  targetm.mode_switching.exit (e);
	    }
	}

      /* Set the anticipatable and computing arrays.  */
      for (i = 0; i < no_mode; i++)
	{
	  int m = targetm.mode_switching.priority (entity_map[j], i);

	  FOR_EACH_BB_FN (bb, cfun)
	    {
	      if (info[bb->index].seginfo->mode == m)
		set_mode_bit (antic[bb->index], j, m);

	      if (info[bb->index].computing == m)
		set_mode_bit (comp[bb->index], j, m);
	    }
	}
    }

  /* Calculate the optimal locations for placing the mode switches.  */

  FOR_EACH_BB_FN (bb, cfun)
    bitmap_not (kill[bb->index], transp[bb->index]);

  edge_list = pre_edge_lcm_avs (n_entities * max_num_modes, transp, comp, antic,
				kill, avin, avout, &insert, &del);

  for (j = n_entities - 1; j >= 0; j--)
    {
      int no_mode = num_modes[entity_map[j]];

      /* Insert all mode sets that have been requested by LCM.  */

      for (int ed = NUM_EDGES (edge_list) - 1; ed >= 0; ed--)
	{
	  edge eg = INDEX_EDGE (edge_list, ed);

	  eg->aux = (void *)(intptr_t)-1;

	  for (i = 0; i < no_mode; i++)
	    {
	      int m = targetm.mode_switching.priority (entity_map[j], i);
	      if (mode_bit_p (insert[ed], j, m))
		{
		  eg->aux = (void *)(intptr_t)m;
		  break;
		}
	    }
	}

      FOR_EACH_BB_FN (bb, cfun)
	{
	  struct bb_info *info = bb_info[j];
	  int last_mode = no_mode;

	  /* Initialize mode-out availability for bb.  */
	  for (i = 0; i < no_mode; i++)
	    if (mode_bit_p (avout[bb->index], j, i))
	      {
		if (last_mode == no_mode)
		  last_mode = i;
		if (last_mode != i)
		  {
		    last_mode = no_mode;
		    break;
		  }
	      }
	  info[bb->index].mode_out = last_mode;

	  /* Initialize mode-in availability for bb.  */
	  last_mode = no_mode;
	  for (i = 0; i < no_mode; i++)
	    if (mode_bit_p (avin[bb->index], j, i))
	      {
		if (last_mode == no_mode)
		  last_mode = i;
		if (last_mode != i)
		  {
		    last_mode = no_mode;
		    break;
		  }
	      }
	  info[bb->index].mode_in = last_mode;

	  for (i = 0; i < no_mode; i++)
	    if (mode_bit_p (del[bb->index], j, i))
	      info[bb->index].seginfo->mode = no_mode;
	}

      /* Now output the remaining mode sets in all the segments.  */

      /* If no mode set was inserted, the mode information on the edges
	 might not be complete.
	 Update mode info on the edges and commit pending mode sets.  */
      need_commit |= commit_mode_sets (edge_list, entity_map[j], bb_info[j]);

      /* Reset modes for the next entity.  */
      clear_aux_for_edges ();

      FOR_EACH_BB_FN (bb, cfun)
	{
	  struct seginfo *ptr, *next;
	  int cur_mode = bb_info[j][bb->index].mode_in;

	  for (ptr = bb_info[j][bb->index].seginfo; ptr; ptr = next)
	    {
	      next = ptr->next;
	      if (ptr->mode != no_mode)
		{
		  rtx_insn *mode_set;

		  rtl_profile_for_bb (bb);
		  start_sequence ();

		  targetm.mode_switching.emit (entity_map[j], ptr->mode,
					       cur_mode, ptr->regs_live);
		  mode_set = get_insns ();
		  end_sequence ();

		  /* Modes kill each other inside a basic block.  */
		  cur_mode = ptr->mode;

		  /* Insert MODE_SET only if it is nonempty.  */
		  if (mode_set != NULL_RTX)
		    {
		      emitted = true;
		      if (NOTE_INSN_BASIC_BLOCK_P (ptr->insn_ptr))
			/* We need to emit the insns in a FIFO-like manner,
			   i.e. the first to be emitted at our insertion
			   point ends up first in the instruction stream.
			   Because we made sure that NOTE_INSN_BASIC_BLOCK is
			   only used for initially empty basic blocks, we
			   can achieve this by appending at the end of
			   the block.  */
			emit_insn_after
			  (mode_set, BB_END (NOTE_BASIC_BLOCK (ptr->insn_ptr)));
		      else
			emit_insn_before (mode_set, ptr->insn_ptr);
		    }

		  default_rtl_profile ();
		}

	      free (ptr);
	    }
	}

      free (bb_info[j]);
    }

  free_edge_list (edge_list);

  /* Finished.  Free up all the things we've allocated.  */
  sbitmap_vector_free (del);
  sbitmap_vector_free (insert);
  sbitmap_vector_free (kill);
  sbitmap_vector_free (antic);
  sbitmap_vector_free (transp);
  sbitmap_vector_free (comp);
  sbitmap_vector_free (avin);
  sbitmap_vector_free (avout);

  if (need_commit)
    commit_edge_insertions ();

  if (targetm.mode_switching.entry && targetm.mode_switching.exit)
    {
      free_dominance_info (CDI_DOMINATORS);
      cleanup_cfg (CLEANUP_NO_INSN_DEL);
    }
  else if (!need_commit && !emitted)
    return 0;

  return 1;
}

#endif /* OPTIMIZE_MODE_SWITCHING */

namespace {

const pass_data pass_data_mode_switching =
{
  RTL_PASS, /* type */
  "mode_sw", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_MODE_SWITCH, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_df_finish, /* todo_flags_finish */
};

class pass_mode_switching : public rtl_opt_pass
{
public:
  pass_mode_switching (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_mode_switching, ctxt)
  {}

  /* opt_pass methods: */
  /* The epiphany backend creates a second instance of this pass, so we need
     a clone method.  */
  opt_pass * clone () { return new pass_mode_switching (m_ctxt); }
  virtual bool gate (function *)
    {
#ifdef OPTIMIZE_MODE_SWITCHING
      return true;
#else
      return false;
#endif
    }

  virtual unsigned int execute (function *)
    {
#ifdef OPTIMIZE_MODE_SWITCHING
      optimize_mode_switching ();
#endif /* OPTIMIZE_MODE_SWITCHING */
      return 0;
    }

}; // class pass_mode_switching

} // anon namespace

rtl_opt_pass *
make_pass_mode_switching (gcc::context *ctxt)
{
  return new pass_mode_switching (ctxt);
}
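
/* A back end that needs a second instance of this pass (as epiphany does,
   hence the clone method above) can register one at initialization time.
   A minimal sketch; the reference pass name "split4" and the call site
   are illustrative only:

       static struct register_pass_info second_mode_sw_info
	 = { make_pass_mode_switching (g), "split4",
	     1, PASS_POS_INSERT_AFTER };
       register_pass (&second_mode_sw_info);  */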