Commit | Line | Data |
---|---|---|
e1ab7874 | 1 | /* Instruction scheduling pass. Selective scheduler and pipeliner. |
d353bf18 | 2 | Copyright (C) 2006-2015 Free Software Foundation, Inc. |
e1ab7874 | 3 | |
4 | This file is part of GCC. | |
5 | ||
6 | GCC is free software; you can redistribute it and/or modify it under | |
7 | the terms of the GNU General Public License as published by the Free | |
8 | Software Foundation; either version 3, or (at your option) any later | |
9 | version. | |
10 | ||
11 | GCC is distributed in the hope that it will be useful, but WITHOUT ANY | |
12 | WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
13 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
14 | for more details. | |
15 | ||
16 | You should have received a copy of the GNU General Public License | |
17 | along with GCC; see the file COPYING3. If not see | |
18 | <http://www.gnu.org/licenses/>. */ | |
19 | ||
20 | #include "config.h" | |
21 | #include "system.h" | |
22 | #include "coretypes.h" | |
23 | #include "tm.h" | |
d7091a76 | 24 | #include "rtl-error.h" |
e1ab7874 | 25 | #include "tm_p.h" |
26 | #include "hard-reg-set.h" | |
27 | #include "regs.h" | |
28 | #include "function.h" | |
94ea8568 | 29 | #include "predict.h" |
30 | #include "dominance.h" | |
31 | #include "cfg.h" | |
32 | #include "cfgbuild.h" | |
33 | #include "basic-block.h" | |
e1ab7874 | 34 | #include "flags.h" |
35 | #include "insn-config.h" | |
36 | #include "insn-attr.h" | |
37 | #include "except.h" | |
e1ab7874 | 38 | #include "recog.h" |
39 | #include "params.h" | |
40 | #include "target.h" | |
41 | #include "output.h" | |
e1ab7874 | 42 | #include "sched-int.h" |
b20a8bb4 | 43 | #include "symtab.h" |
e1ab7874 | 44 | #include "tree.h" |
e1ab7874 | 45 | #include "langhooks.h" |
46 | #include "rtlhooks-def.h" | |
06f9d6ef | 47 | #include "emit-rtl.h" |
3e1f1f1a | 48 | #include "ira.h" |
6121af96 | 49 | #include "rtl-iter.h" |
e1ab7874 | 50 | |
51 | #ifdef INSN_SCHEDULING | |
52 | #include "sel-sched-ir.h" | |
53 | #include "sel-sched-dump.h" | |
54 | #include "sel-sched.h" | |
55 | #include "dbgcnt.h" | |
56 | ||
57 | /* Implementation of selective scheduling approach. | |
58 | The below implementation follows the original approach with the following | |
59 | changes: | |
60 | ||
48e1416a | 61 | o the scheduler works after register allocation (but can also be tuned |
e1ab7874 | 62 | to work before RA); |
63 | o some instructions are not copied or register renamed; | |
64 | o conditional jumps are not moved with code duplication; | |
65 | o several jumps in one parallel group are not supported; | |
66 | o when pipelining outer loops, code motion through inner loops | |
67 | is not supported; | |
68 | o control and data speculation are supported; | |
69 | o some improvements for better compile time/performance were made. | |
70 | ||
71 | Terminology | |
72 | =========== | |
73 | ||
48e1416a | 74 | A vinsn, or virtual insn, is an insn with additional data characterizing |
75 | insn pattern, such as LHS, RHS, register sets used/set/clobbered, etc. | |
76 | Vinsns also act as smart pointers to save memory by reusing them in | |
e1ab7874 | 77 | different expressions. A vinsn is described by vinsn_t type. |
78 | ||
79 | An expression is a vinsn with additional data characterizing its properties | |
48e1416a | 80 | at some point in the control flow graph. The data may be its usefulness, |
e1ab7874 | 81 | priority, speculative status, whether it was renamed/substituted, etc. |
82 | An expression is described by expr_t type. | |
83 | ||
48e1416a | 84 | Availability set (av_set) is a set of expressions at a given control flow |
e1ab7874 | 85 | point. It is represented as av_set_t. The expressions in av sets are kept |
48e1416a | 86 | sorted in terms of the expr_greater_p function. This allows truncating |
e1ab7874 | 87 | the set while leaving the best expressions. |
48e1416a | 88 | |
e1ab7874 | 89 | A fence is a point through which code motion is prohibited. On each step, |
90 | we gather a parallel group of insns at a fence. It is possible to have | |
91 | multiple fences. A fence is represented via fence_t. | |
92 | ||
93 | A boundary is the border between the fence group and the rest of the code. | |
94 | Currently, we never have more than one boundary per fence, as we finalize | |
48e1416a | 95 | the fence group when a jump is scheduled. A boundary is represented |
e1ab7874 | 96 | via bnd_t. |
97 | ||
98 | High-level overview | |
99 | =================== | |
100 | ||
101 | The scheduler finds regions to schedule, schedules each one, and finalizes. | |
48e1416a | 102 | The regions are formed starting from innermost loops, so that when the inner |
e1ab7874 | 103 | loop is pipelined, its prologue can be scheduled together with the yet |
48e1416a | 104 | unprocessed outer loop. The remaining acyclic regions are found using extend_rgns: |
e1ab7874 | 105 | the blocks that are not yet allocated to any regions are traversed in top-down |
48e1416a | 106 | order, and a block is added to a region to which all its predecessors belong; |
e1ab7874 | 107 | otherwise, the block starts its own region. |
108 | ||
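The acyclic part of this assignment can be pictured with the minimal sketch below. This is not the actual extend_rgns code; the array-based CFG representation and every name in it are simplified stand-ins introduced only for illustration.

```c
/* Sketch only: blocks are assumed to be visited in topological order,
   loop regions are assumed to have been formed earlier, and fresh
   region ids are assumed not to clash with existing ones.  */
static void
assign_acyclic_regions_sketch (int n_blocks, const int *n_preds,
                               int *const *preds, int *region_of,
                               int next_free_region)
{
  for (int bb = 0; bb < n_blocks; bb++)
    {
      if (region_of[bb] >= 0)
        continue;                       /* Already inside a loop region.  */

      /* If all predecessors sit in one region, join it; otherwise
         (or when there are no predecessors) start a new region.  */
      int rgn = n_preds[bb] > 0 ? region_of[preds[bb][0]] : -1;
      for (int i = 1; i < n_preds[bb]; i++)
        if (region_of[preds[bb][i]] != rgn)
          {
            rgn = -1;
            break;
          }

      region_of[bb] = rgn >= 0 ? rgn : next_free_region++;
    }
}
```
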
109 | The main scheduling loop (sel_sched_region_2) consists of just | |
110 | scheduling on each fence and updating fences. For each fence, | |
111 | we fill a parallel group of insns (fill_insns) while insns can still be added. | |
48e1416a | 112 | First, we compute available exprs (av-set) at the boundary of the current |
113 | group. Second, we choose the best expression from it. If the stall is | |
e1ab7874 | 114 | required to schedule any of the expressions, we advance the current cycle |
48e1416a | 115 | appropriately. So, the final group does not exactly correspond to a VLIW |
e1ab7874 | 116 | word. Third, we move the chosen expression to the boundary (move_op) |
117 | and update the intermediate av sets and liveness sets. We quit fill_insns | |
118 | when either no insns are left for scheduling or we have scheduled enough insns | |
48e1416a | 119 | that we want to advance the scheduling point. |
e1ab7874 | 120 | |
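In outline, the per-fence part of that loop has roughly the shape sketched below. fence_t, expr_t and av_set_t are the real types and advance_one_cycle is defined later in this file, but every helper with a _sketch suffix is a hypothetical stand-in for the corresponding pieces of fill_insns, not the actual interface.

```c
/* Rough shape of the fill_insns loop; _sketch helpers are hypothetical.  */
static void
fill_group_at_fence_sketch (fence_t fence)
{
  while (!enough_insns_scheduled_sketch (fence))
    {
      /* 1. Compute the availability set at the current boundary.  */
      av_set_t av = compute_boundary_av_sketch (fence);

      /* 2. Pick the best expression; it may require a stall first.  */
      int need_stall = 0;
      expr_t best = choose_best_expr_sketch (av, fence, &need_stall);

      if (best == NULL && !need_stall)
        break;                          /* Nothing left to schedule.  */
      if (best == NULL)
        {
          advance_one_cycle (fence);    /* Pay the stall, then retry.  */
          continue;
        }

      /* 3. Move it to the boundary and update av/liveness sets.  */
      move_expr_to_boundary_sketch (best, fence);
    }
}
```
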
121 | Computing available expressions | |
122 | =============================== | |
123 | ||
124 | The computation (compute_av_set) is a bottom-up traversal. At each insn, | |
48e1416a | 125 | we're moving the union of its successors' sets through it via |
126 | moveup_expr_set. The dependent expressions are removed. Local | |
127 | transformations (substitution, speculation) are applied to move more | |
e1ab7874 | 128 | exprs. Then the expr corresponding to the current insn is added. |
129 | The result is saved on each basic block header. | |
130 | ||
131 | When traversing the CFG, we're moving down for no more than max_ws insns. | |
132 | Also, we do not move down to ineligible successors (is_ineligible_successor), | |
133 | which include moving along a back-edge, moving to already scheduled code, | |
48e1416a | 134 | and moving to another fence. The first two restrictions are lifted during |
e1ab7874 | 135 | pipelining, which allows us to move insns along a back-edge. We always have |
136 | an acyclic region for scheduling because we forbid motion through fences. | |
137 | ||
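The recursion can be summarized by the sketch below, a simplification of compute_av_set/compute_av_set_inside_bb. Every name with a _sketch suffix (including the successor-iteration macro) is hypothetical; max_ws is the real window-size variable declared further down in this file.

```c
/* Sketch of the bottom-up av_set computation; _sketch names are
   hypothetical stand-ins.  */
static av_set_t
compute_av_sketch (insn_t insn, int ws)
{
  av_set_t av = NULL;

  /* Union the successors' sets, skipping ineligible successors
     (back-edges, already scheduled code, other fences) and stopping
     when the lookahead window of max_ws insns is exhausted.  */
  if (ws < max_ws)
    {
      insn_t succ;
      FOR_EACH_SUCC_SKETCH (succ, insn)
        if (!ineligible_successor_sketch (succ))
          av_set_union_sketch (&av, compute_av_sketch (succ, ws + 1));
    }

  /* Move the union up through INSN: drop dependent exprs, apply
     substitution/speculation, then add INSN's own expression.  */
  moveup_set_through_insn_sketch (&av, insn);
  av_set_add_insn_expr_sketch (&av, insn);

  /* Cache the result on the basic block header for later reuse.  */
  if (bb_header_p_sketch (insn))
    save_av_on_bb_header_sketch (insn, av);

  return av;
}
```
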
138 | Choosing the best expression | |
139 | ============================ | |
140 | ||
141 | We sort the final availability set via sel_rank_for_schedule, then we remove | |
142 | expressions which are not yet ready (tick_check_p) or whose destination registers | |
48e1416a | 143 | cannot be used. For some of them, we choose another register via |
144 | find_best_reg. To do this, we run find_used_regs to calculate the set of | |
e1ab7874 | 145 | registers which cannot be used. The find_used_regs function performs |
146 | a traversal of code motion paths for an expr. We consider for renaming | |
48e1416a | 147 | only registers which are from the same regclass as the original one and |
e1ab7874 | 148 | whose use does not interfere with any live ranges. Finally, we convert |
149 | the resulting set to the ready list format and use max_issue and reorder* | |
150 | hooks similarly to the Haifa scheduler. | |
151 | ||
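A deliberately simplified, self-contained model of this filter-and-rank step is sketched below. The struct and all helpers are illustrative inventions; the comments only point at the real routines they loosely correspond to.

```c
#include <stdbool.h>
#include <stdlib.h>

/* Toy candidate record standing in for an expr_t.  */
struct cand_sketch
{
  int priority;        /* cf. expr priority / sel_rank_for_schedule  */
  int ready_cycle;     /* cf. tick_check_p                           */
  bool dest_reg_ok;    /* cf. find_used_regs / find_best_reg         */
};

/* Higher priority first, mimicking the intent of sel_rank_for_schedule.  */
static int
rank_cmp_sketch (const void *a, const void *b)
{
  return ((const struct cand_sketch *) b)->priority
         - ((const struct cand_sketch *) a)->priority;
}

/* Return the index of the best schedulable candidate, or -1 if the
   caller should stall a cycle or give up.  */
static int
choose_best_sketch (struct cand_sketch *av, int n, int cycle)
{
  qsort (av, n, sizeof *av, rank_cmp_sketch);
  for (int i = 0; i < n; i++)
    if (av[i].ready_cycle <= cycle && av[i].dest_reg_ok)
      return i;
  return -1;
}
```
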
152 | Scheduling the best expression | |
153 | ============================== | |
154 | ||
48e1416a | 155 | We run the move_op routine to perform the same type of code motion paths |
e1ab7874 | 156 | traversal as in find_used_regs. (These are working via the same driver, |
157 | code_motion_path_driver.) When moving down the CFG, we look for the original | |
48e1416a | 158 | instruction that gave birth to the chosen expression. We undo |
e1ab7874 | 159 | the transformations performed on an expression via the history saved in it. |
48e1416a | 160 | When found, we remove the instruction or leave a reg-reg copy/speculation |
161 | check if needed. On the way up, we insert bookkeeping copies at each join | |
162 | point. If a copy is not needed, it will be removed later during this | |
e1ab7874 | 163 | traversal. We update the saved av sets and liveness sets on the way up, too. |
164 | ||
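The shape of that traversal is roughly the recursive sketch below, an abstraction of move_op as driven by code_motion_path_driver. All _sketch helpers and the successor-iteration macro are hypothetical stand-ins for the move_op_* hooks.

```c
/* Sketch of the move_op walk; _sketch names are hypothetical.  */
static bool
move_op_sketch (insn_t insn, expr_t expr)
{
  if (insn_is_original_of_expr_sketch (insn, expr))
    {
      /* Found a birth point of EXPR: remove the insn, or leave a
         reg-reg copy / speculation check behind when needed.  */
      remove_insn_or_leave_copy_sketch (insn, expr);
      return true;
    }

  /* Descend into the normal successors only (SUCCS_NORMAL).  */
  bool found = false;
  insn_t succ;
  FOR_EACH_SUCC_SKETCH (succ, insn)
    found |= move_op_sketch (succ, expr);

  if (found)
    {
      /* Ascending pass: create bookkeeping copies at join points and
         keep the cached av sets and liveness sets up to date.  */
      maybe_insert_bookkeeping_sketch (insn, expr);
      update_av_and_liveness_sketch (insn);
    }
  return found;
}
```
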
165 | Finalizing the schedule | |
166 | ======================= | |
167 | ||
48e1416a | 168 | When pipelining, we reschedule the blocks from which insns were pipelined |
169 | to get a tighter schedule. On Itanium, we also perform bundling via | |
170 | the same routine from ia64.c. | |
e1ab7874 | 171 | |
172 | Dependence analysis changes | |
173 | =========================== | |
174 | ||
175 | We augmented the sched-deps.c with hooks that get called when a particular | |
176 | dependence is found in a particular part of an insn. Using these hooks, we | |
177 | can do several actions such as: determine whether an insn can be moved through | |
48e1416a | 178 | another (has_dependence_p, moveup_expr); find out whether an insn can be |
179 | scheduled on the current cycle (tick_check_p); find out registers that | |
180 | are set/used/clobbered by an insn and find out all the strange stuff that | |
181 | restricts its movement, like SCHED_GROUP_P or CANT_MOVE (done in | |
e1ab7874 | 182 | init_global_and_expr_for_insn). |
183 | ||
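The hook interface can be pictured as follows. The real callbacks live in sched_deps_info (see the Initialization section below); the struct and field names in this sketch are simplified inventions, not the actual layout.

```c
/* Simplified picture of per-dependence callbacks; not the real layout.  */
struct deps_hooks_sketch
{
  /* Called once per discovered dependence, with its status DS, so a
     client such as has_dependence_p or tick_check_p can record it.  */
  void (*note_dep) (insn_t producer, ds_t ds);

  /* Called for every register the analyzed insn sets, uses or
     clobbers, feeding e.g. init_global_and_expr_for_insn.  */
  void (*note_reg_set) (int regno);
  void (*note_reg_use) (int regno);
  void (*note_reg_clobber) (int regno);
};
```
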
184 | Initialization changes | |
185 | ====================== | |
186 | ||
48e1416a | 187 | There are parts of haifa-sched.c, sched-deps.c, and sched-rgn.c that are |
e1ab7874 | 188 | reused in all of the schedulers. We have split up the initialization of data |
48e1416a | 189 | of such parts into different functions prefixed with scheduler type and |
e1ab7874 | 190 | postfixed with the type of data initialized: {,sel_,haifa_}sched_{init,finish}, |
191 | sched_rgn_init/finish, sched_deps_init/finish, sched_init_{luids/bbs}, etc. | |
48e1416a | 192 | The same splitting is done with current_sched_info structure: |
193 | dependence-related parts are in sched_deps_info, common part is in | |
e1ab7874 | 194 | common_sched_info, and haifa/sel/etc part is in current_sched_info. |
48e1416a | 195 | |
e1ab7874 | 196 | Target contexts |
197 | =============== | |
198 | ||
199 | As we now schedule at multiple points at once, such scheduling would not work | |
48e1416a | 200 | with backends that save some of the scheduler state to use it in the target hooks. |
201 | For this purpose, we introduce a concept of target contexts, which | |
e1ab7874 | 202 | encapsulate such information. The backend should implement simple routines |
203 | of allocating/freeing/setting such a context. The scheduler calls these | |
204 | as target hooks and handles the target context as an opaque pointer (similar | |
205 | to the DFA state type, state_t). | |
206 | ||
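In other words, the backend provides a small set of operations over an opaque pointer, along the lines of the sketch below. All names here are paraphrased placeholders; the actual hooks are the scheduling-context members of the target hook vector, and the scheduler can then keep one such pointer per fence.

```c
/* Paraphrased interface; every name below is illustrative.  */
typedef void *target_context_sketch_t;      /* opaque to the scheduler */

struct target_context_ops_sketch
{
  target_context_sketch_t (*alloc) (void);              /* new backend state */
  void (*init) (target_context_sketch_t tc, bool clean);/* (re)initialize it */
  void (*set) (target_context_sketch_t tc);   /* make TC the current state   */
  void (*free) (target_context_sketch_t tc);  /* release it                  */
};
```
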
207 | Various speedups | |
208 | ================ | |
209 | ||
210 | As the correct data dependence graph is not supported during scheduling (which | |
48e1416a | 211 | is to be changed in mid-term), we cache as much of the dependence analysis |
212 | results as possible to avoid reanalyzing. This includes: bitmap caches on | |
213 | each insn in the region's insn stream saying yes/no for a query with a pair of | |
e1ab7874 | 214 | UIDs; hashtables with the previously done transformations on each insn in |
215 | the stream; a vector keeping a history of transformations on each expr. | |
216 | ||
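The per-insn yes/no caches can be modeled by the self-contained sketch below; the two arrays and the helper are illustrative only (the real caches are bitmaps attached to the scheduler's per-insn data).

```c
#include <stdbool.h>

/* Toy cache: for one insn, remember per-UID whether the dependence
   question was already answered, and what the answer was.  */
struct dep_cache_sketch
{
  bool *analyzed;   /* analyzed[uid]: the pair was already analyzed  */
  bool *has_dep;    /* has_dep[uid]: the cached yes/no answer        */
};

static bool
cached_has_dependence_sketch (struct dep_cache_sketch *cache, int uid,
                              bool (*analyze) (int))
{
  if (!cache->analyzed[uid])
    {
      /* Fall back to the full dependence analysis exactly once.  */
      cache->has_dep[uid] = analyze (uid);
      cache->analyzed[uid] = true;
    }
  return cache->has_dep[uid];
}
```
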
217 | Also, we try to minimize the dependence context used on each fence to check | |
218 | whether the given expression is ready for scheduling by removing from it | |
48e1416a | 219 | insns that have definitely completed execution. The results of |
e1ab7874 | 220 | tick_check_p checks are also cached in a vector on each fence. |
221 | ||
48e1416a | 222 | We keep a valid liveness set on each insn in a region to avoid the high |
e1ab7874 | 223 | cost of recomputation on large basic blocks. |
224 | ||
225 | Finally, we try to minimize the number of needed updates to the availability | |
48e1416a | 226 | sets. The updates happen in two cases: when fill_insns terminates, |
e1ab7874 | 227 | we advance all fences and increase the stage number to show that the region |
228 | has changed and the sets are to be recomputed; and when the next iteration | |
229 | of a loop in fill_insns happens (but this one reuses the saved av sets | |
230 | on bb headers.) Thus, we try to break the fill_insns loop only when | |
231 | "significant" number of insns from the current scheduling window was | |
232 | scheduled. This should be made a target param. | |
48e1416a | 233 | |
e1ab7874 | 234 | |
235 | TODO: correctly support the data dependence graph at all stages and get rid | |
236 | of all caches. This should speed up the scheduler. | |
237 | TODO: implement moving cond jumps with bookkeeping copies on both targets. | |
238 | TODO: tune the scheduler before RA so it does not create too many pseudos. | |
239 | ||
240 | ||
241 | References: | |
242 | S.-M. Moon and K. Ebcioglu. Parallelizing nonnumerical code with | |
48e1416a | 243 | selective scheduling and software pipelining. |
244 | ACM TOPLAS, Vol 19, No. 6, pages 853--898, Nov. 1997. | |
e1ab7874 | 245 | |
48e1416a | 246 | Andrey Belevantsev, Maxim Kuvyrkov, Vladimir Makarov, Dmitry Melnik, |
247 | and Dmitry Zhurikhin. An interblock VLIW-targeted instruction scheduler | |
e1ab7874 | 248 | for GCC. In Proceedings of GCC Developers' Summit 2006. |
249 | ||
48e1416a | 250 | Arutyun Avetisyan, Andrey Belevantsev, and Dmitry Melnik. GCC Instruction |
e1ab7874 | 251 | Scheduler and Software Pipeliner on the Itanium Platform. EPIC-7 Workshop. |
252 | http://rogue.colorado.edu/EPIC7/. | |
48e1416a | 253 | |
e1ab7874 | 254 | */ |
255 | ||
256 | /* True when pipelining is enabled. */ | |
257 | bool pipelining_p; | |
258 | ||
259 | /* True if bookkeeping is enabled. */ | |
260 | bool bookkeeping_p; | |
261 | ||
262 | /* Maximum number of insns that are eligible for renaming. */ | |
263 | int max_insns_to_rename; | |
264 | \f | |
265 | ||
266 | /* Definitions of local types and macros. */ | |
267 | ||
268 | /* Represents possible outcomes of moving an expression through an insn. */ | |
48e1416a | 269 | enum MOVEUP_EXPR_CODE |
270 | { | |
e1ab7874 | 271 | /* The expression is not changed. */ |
48e1416a | 272 | MOVEUP_EXPR_SAME, |
e1ab7874 | 273 | |
274 | /* Not changed, but requires a new destination register. */ | |
48e1416a | 275 | MOVEUP_EXPR_AS_RHS, |
e1ab7874 | 276 | |
277 | /* Cannot be moved. */ | |
48e1416a | 278 | MOVEUP_EXPR_NULL, |
e1ab7874 | 279 | |
280 | /* Changed (substituted or speculated). */ | |
48e1416a | 281 | MOVEUP_EXPR_CHANGED |
e1ab7874 | 282 | }; |
283 | ||
284 | /* The container to be passed into rtx search & replace functions. */ | |
285 | struct rtx_search_arg | |
286 | { | |
287 | /* What we are searching for. */ | |
288 | rtx x; | |
289 | ||
9d75589a | 290 | /* The occurrence counter. */ |
e1ab7874 | 291 | int n; |
292 | }; | |
293 | ||
294 | typedef struct rtx_search_arg *rtx_search_arg_p; | |
295 | ||
48e1416a | 296 | /* This struct contains precomputed hard reg sets that are needed when |
e1ab7874 | 297 | computing registers available for renaming. */ |
48e1416a | 298 | struct hard_regs_data |
e1ab7874 | 299 | { |
48e1416a | 300 | /* For every mode, this stores registers available for use with |
e1ab7874 | 301 | that mode. */ |
302 | HARD_REG_SET regs_for_mode[NUM_MACHINE_MODES]; | |
303 | ||
304 | /* True when regs_for_mode[mode] is initialized. */ | |
305 | bool regs_for_mode_ok[NUM_MACHINE_MODES]; | |
306 | ||
307 | /* For every register, this stores regs that are ok to rename into it. | |
308 | The register in question is always set. If it is not, this means | |
309 | that the whole set is not computed yet. */ | |
310 | HARD_REG_SET regs_for_rename[FIRST_PSEUDO_REGISTER]; | |
311 | ||
48e1416a | 312 | /* For every mode, this stores registers not available due to |
e1ab7874 | 313 | call clobbering. */ |
314 | HARD_REG_SET regs_for_call_clobbered[NUM_MACHINE_MODES]; | |
315 | ||
316 | /* All registers that are used or call used. */ | |
317 | HARD_REG_SET regs_ever_used; | |
318 | ||
319 | #ifdef STACK_REGS | |
320 | /* Stack registers. */ | |
321 | HARD_REG_SET stack_regs; | |
322 | #endif | |
323 | }; | |
324 | ||
325 | /* Holds the results of computation of available for renaming and | |
326 | unavailable hard registers. */ | |
327 | struct reg_rename | |
328 | { | |
329 | /* These are unavailable due to calls crossing, globalness, etc. */ | |
330 | HARD_REG_SET unavailable_hard_regs; | |
331 | ||
332 | /* These are *available* for renaming. */ | |
333 | HARD_REG_SET available_for_renaming; | |
334 | ||
335 | /* Whether this code motion path crosses a call. */ | |
336 | bool crosses_call; | |
337 | }; | |
338 | ||
48e1416a | 339 | /* A global structure that contains the needed information about hard |
e1ab7874 | 340 | regs. */ |
341 | static struct hard_regs_data sel_hrd; | |
342 | \f | |
343 | ||
48e1416a | 344 | /* This structure holds local data used in code_motion_path_driver hooks on |
345 | the same or adjacent levels of recursion. Here we keep those parameters | |
346 | that are not used in code_motion_path_driver routine itself, but only in | |
347 | its hooks. Moreover, all parameters that can be modified in hooks are | |
348 | in this structure, so all other parameters passed explicitly to hooks are | |
e1ab7874 | 349 | read-only. */ |
350 | struct cmpd_local_params | |
351 | { | |
352 | /* Local params used in move_op_* functions. */ | |
353 | ||
354 | /* Edges for bookkeeping generation. */ | |
355 | edge e1, e2; | |
356 | ||
357 | /* C_EXPR merged from all successors and locally allocated temporary C_EXPR. */ | |
358 | expr_t c_expr_merged, c_expr_local; | |
359 | ||
360 | /* Local params used in fur_* functions. */ | |
361 | /* Copy of the ORIGINAL_INSN list, stores the original insns already | |
362 | found before entering the current level of code_motion_path_driver. */ | |
363 | def_list_t old_original_insns; | |
364 | ||
365 | /* Local params used in move_op_* functions. */ | |
48e1416a | 366 | /* True when we have removed the last insn in the block which was |
e1ab7874 | 367 | also a boundary. Do not update anything or create bookkeeping copies. */ |
368 | BOOL_BITFIELD removed_last_insn : 1; | |
369 | }; | |
370 | ||
371 | /* Stores the static parameters for move_op_* calls. */ | |
372 | struct moveop_static_params | |
373 | { | |
374 | /* Destination register. */ | |
375 | rtx dest; | |
376 | ||
377 | /* Current C_EXPR. */ | |
378 | expr_t c_expr; | |
379 | ||
380 | /* An UID of expr_vliw which is to be moved up. If we find other exprs, | |
381 | they are to be removed. */ | |
382 | int uid; | |
383 | ||
384 | #ifdef ENABLE_CHECKING | |
385 | /* This is initialized to the insn on which the driver stopped its traversal. */ | |
386 | insn_t failed_insn; | |
387 | #endif | |
388 | ||
389 | /* True if we scheduled an insn with a different register. */ | |
390 | bool was_renamed; | |
391 | }; | |
392 | ||
393 | /* Stores the static parameters for fur_* calls. */ | |
394 | struct fur_static_params | |
395 | { | |
396 | /* Set of registers unavailable on the code motion path. */ | |
397 | regset used_regs; | |
398 | ||
399 | /* Pointer to the list of original insns definitions. */ | |
400 | def_list_t *original_insns; | |
401 | ||
402 | /* True if a code motion path contains a CALL insn. */ | |
403 | bool crosses_call; | |
404 | }; | |
405 | ||
406 | typedef struct fur_static_params *fur_static_params_p; | |
407 | typedef struct cmpd_local_params *cmpd_local_params_p; | |
408 | typedef struct moveop_static_params *moveop_static_params_p; | |
409 | ||
410 | /* Set of hooks and parameters that determine behaviour specific to | |
411 | move_op or find_used_regs functions. */ | |
412 | struct code_motion_path_driver_info_def | |
413 | { | |
414 | /* Called on enter to the basic block. */ | |
415 | int (*on_enter) (insn_t, cmpd_local_params_p, void *, bool); | |
416 | ||
417 | /* Called when original expr is found. */ | |
418 | void (*orig_expr_found) (insn_t, expr_t, cmpd_local_params_p, void *); | |
419 | ||
420 | /* Called while descending current basic block if current insn is not | |
421 | the original EXPR we're searching for. */ | |
422 | bool (*orig_expr_not_found) (insn_t, av_set_t, void *); | |
423 | ||
424 | /* Function to merge C_EXPRes from different successors. */ | |
425 | void (*merge_succs) (insn_t, insn_t, int, cmpd_local_params_p, void *); | |
426 | ||
427 | /* Function to finalize merge from different successors and possibly | |
428 | deallocate temporary data structures used for merging. */ | |
429 | void (*after_merge_succs) (cmpd_local_params_p, void *); | |
430 | ||
431 | /* Called on the backward stage of recursion to do moveup_expr. | |
432 | Used only with move_op_*. */ | |
433 | void (*ascend) (insn_t, void *); | |
434 | ||
48e1416a | 435 | /* Called on the ascending pass, before returning from the current basic |
e1ab7874 | 436 | block or from the whole traversal. */ |
437 | void (*at_first_insn) (insn_t, cmpd_local_params_p, void *); | |
438 | ||
48e1416a | 439 | /* When processing successors in move_op we need only descend into |
e1ab7874 | 440 | SUCCS_NORMAL successors, while in find_used_regs we need SUCCS_ALL. */ |
441 | int succ_flags; | |
442 | ||
443 | /* The routine name to print in dumps ("move_op" or "find_used_regs"). */ | |
444 | const char *routine_name; | |
445 | }; | |
446 | ||
447 | /* Global pointer to current hooks, either points to MOVE_OP_HOOKS or | |
448 | FUR_HOOKS. */ | |
449 | struct code_motion_path_driver_info_def *code_motion_path_driver_info; | |
450 | ||
451 | /* Set of hooks for performing move_op and find_used_regs routines with | |
452 | code_motion_path_driver. */ | |
40b15760 | 453 | extern struct code_motion_path_driver_info_def move_op_hooks, fur_hooks; |
e1ab7874 | 454 | |
48e1416a | 455 | /* True if/when we want to emulate Haifa scheduler in the common code. |
456 | This is used in sched_rgn_local_init and in various places in | |
e1ab7874 | 457 | sched-deps.c. */ |
458 | int sched_emulate_haifa_p; | |
459 | ||
460 | /* GLOBAL_LEVEL is used to discard information stored in basic block headers | |
461 | av_sets. An av_set of a bb header is valid if its (the bb header's) level is | |
462 | equal to GLOBAL_LEVEL, and invalid if it is less. This is primarily used to | |
463 | advance the scheduling window. */ | |
464 | int global_level; | |
465 | ||
466 | /* Current fences. */ | |
467 | flist_t fences; | |
468 | ||
469 | /* True when separable insns should be scheduled as RHSes. */ | |
470 | static bool enable_schedule_as_rhs_p; | |
471 | ||
472 | /* Used in verify_target_availability to assert that target reg is reported | |
473 | unavailable by both TARGET_UNAVAILABLE and find_used_regs only if | |
48e1416a | 474 | we haven't scheduled anything on the previous fence. |
e1ab7874 | 475 | If scheduled_something_on_previous_fence is true, TARGET_UNAVAILABLE can |
48e1416a | 476 | have a more conservative value than the one returned by |
e1ab7874 | 477 | find_used_regs, thus we shouldn't assert that these values are equal. */ |
478 | static bool scheduled_something_on_previous_fence; | |
479 | ||
480 | /* All newly emitted insns will have their uids greater than this value. */ | |
481 | static int first_emitted_uid; | |
482 | ||
483 | /* Set of basic blocks that are forced to start new ebbs. This is a subset | |
484 | of all the ebb heads. */ | |
485 | static bitmap_head _forced_ebb_heads; | |
486 | bitmap_head *forced_ebb_heads = &_forced_ebb_heads; | |
487 | ||
488 | /* Blocks that need to be rescheduled after pipelining. */ | |
489 | bitmap blocks_to_reschedule = NULL; | |
490 | ||
491 | /* True when the first lv set should be ignored when updating liveness. */ | |
492 | static bool ignore_first = false; | |
493 | ||
494 | /* Number of insns max_issue has initialized data structures for. */ | |
495 | static int max_issue_size = 0; | |
496 | ||
497 | /* Whether we can issue more instructions. */ | |
498 | static int can_issue_more; | |
499 | ||
500 | /* Maximum software lookahead window size, reduced when rescheduling after | |
501 | pipelining. */ | |
502 | static int max_ws; | |
503 | ||
504 | /* Number of insns scheduled in current region. */ | |
505 | static int num_insns_scheduled; | |
506 | ||
507 | /* A vector of expressions, used so that they can be sorted. */ | |
1e094109 | 508 | static vec<expr_t> vec_av_set = vNULL; |
e1ab7874 | 509 | |
510 | /* A vector of vinsns is used to hold temporary lists of vinsns. */ | |
f1f41a6c | 511 | typedef vec<vinsn_t> vinsn_vec_t; |
e1ab7874 | 512 | |
513 | /* This vector has the exprs which may still be present in av_sets, but actually | |
514 | can't be moved up due to bookkeeping created during code motion to another | |
515 | fence. See comment near the call to update_and_record_unavailable_insns | |
516 | for the detailed explanations. */ | |
9af5ce0c | 517 | static vinsn_vec_t vec_bookkeeping_blocked_vinsns = vinsn_vec_t (); |
e1ab7874 | 518 | |
48e1416a | 519 | /* This vector has vinsns which are scheduled with renaming on the first fence |
e1ab7874 | 520 | and then seen on the second. For expressions with such vinsns, target |
521 | availability information may be wrong. */ | |
9af5ce0c | 522 | static vinsn_vec_t vec_target_unavailable_vinsns = vinsn_vec_t (); |
e1ab7874 | 523 | |
524 | /* Vector to store temporary nops inserted in move_op to prevent removal | |
525 | of empty bbs. */ | |
1e094109 | 526 | static vec<insn_t> vec_temp_moveop_nops = vNULL; |
e1ab7874 | 527 | |
48e1416a | 528 | /* These bitmaps record original instructions scheduled on the current |
529 | iteration and bookkeeping copies created by them. */ | |
e1ab7874 | 530 | static bitmap current_originators = NULL; |
531 | static bitmap current_copies = NULL; | |
532 | ||
533 | /* This bitmap marks the blocks visited by code_motion_path_driver so we don't | |
534 | visit them afterwards. */ | |
535 | static bitmap code_motion_visited_blocks = NULL; | |
536 | ||
537 | /* Variables to accumulate different statistics. */ | |
538 | ||
539 | /* The number of bookkeeping copies created. */ | |
540 | static int stat_bookkeeping_copies; | |
541 | ||
542 | /* The number of insns that required bookkeeping for their scheduling. */ | |
543 | static int stat_insns_needed_bookkeeping; | |
544 | ||
545 | /* The number of insns that got renamed. */ | |
546 | static int stat_renamed_scheduled; | |
547 | ||
548 | /* The number of substitutions made during scheduling. */ | |
549 | static int stat_substitutions_total; | |
550 | \f | |
551 | ||
552 | /* Forward declarations of static functions. */ | |
553 | static bool rtx_ok_for_substitution_p (rtx, rtx); | |
554 | static int sel_rank_for_schedule (const void *, const void *); | |
555 | static av_set_t find_sequential_best_exprs (bnd_t, expr_t, bool); | |
9845d120 | 556 | static basic_block find_block_for_bookkeeping (edge e1, edge e2, bool lax); |
e1ab7874 | 557 | |
558 | static rtx get_dest_from_orig_ops (av_set_t); | |
559 | static basic_block generate_bookkeeping_insn (expr_t, edge, edge); | |
48e1416a | 560 | static bool find_used_regs (insn_t, av_set_t, regset, struct reg_rename *, |
e1ab7874 | 561 | def_list_t *); |
de353418 | 562 | static bool move_op (insn_t, av_set_t, expr_t, rtx, expr_t, bool*); |
563 | static int code_motion_path_driver (insn_t, av_set_t, ilist_t, | |
564 | cmpd_local_params_p, void *); | |
e1ab7874 | 565 | static void sel_sched_region_1 (void); |
566 | static void sel_sched_region_2 (int); | |
567 | static av_set_t compute_av_set_inside_bb (insn_t, ilist_t, int, bool); | |
568 | ||
569 | static void debug_state (state_t); | |
570 | \f | |
571 | ||
572 | /* Functions that work with fences. */ | |
573 | ||
574 | /* Advance one cycle on FENCE. */ | |
575 | static void | |
576 | advance_one_cycle (fence_t fence) | |
577 | { | |
578 | unsigned i; | |
579 | int cycle; | |
2f3c9801 | 580 | rtx_insn *insn; |
48e1416a | 581 | |
e1ab7874 | 582 | advance_state (FENCE_STATE (fence)); |
583 | cycle = ++FENCE_CYCLE (fence); | |
584 | FENCE_ISSUED_INSNS (fence) = 0; | |
585 | FENCE_STARTS_CYCLE_P (fence) = 1; | |
586 | can_issue_more = issue_rate; | |
abb9c563 | 587 | FENCE_ISSUE_MORE (fence) = can_issue_more; |
e1ab7874 | 588 | |
f1f41a6c | 589 | for (i = 0; vec_safe_iterate (FENCE_EXECUTING_INSNS (fence), i, &insn); ) |
e1ab7874 | 590 | { |
591 | if (INSN_READY_CYCLE (insn) < cycle) | |
592 | { | |
593 | remove_from_deps (FENCE_DC (fence), insn); | |
f1f41a6c | 594 | FENCE_EXECUTING_INSNS (fence)->unordered_remove (i); |
e1ab7874 | 595 | continue; |
596 | } | |
597 | i++; | |
598 | } | |
599 | if (sched_verbose >= 2) | |
600 | { | |
601 | sel_print ("Finished a cycle. Current cycle = %d\n", FENCE_CYCLE (fence)); | |
602 | debug_state (FENCE_STATE (fence)); | |
603 | } | |
604 | } | |
605 | ||
606 | /* Returns true when SUCC is in a fallthru bb of INSN, possibly | |
607 | skipping empty basic blocks. */ | |
608 | static bool | |
71ce7f59 | 609 | in_fallthru_bb_p (rtx_insn *insn, rtx succ) |
e1ab7874 | 610 | { |
611 | basic_block bb = BLOCK_FOR_INSN (insn); | |
7f58c05e | 612 | edge e; |
e1ab7874 | 613 | |
614 | if (bb == BLOCK_FOR_INSN (succ)) | |
615 | return true; | |
616 | ||
7f58c05e | 617 | e = find_fallthru_edge_from (bb); |
618 | if (e) | |
619 | bb = e->dest; | |
e1ab7874 | 620 | else |
621 | return false; | |
622 | ||
623 | while (sel_bb_empty_p (bb)) | |
624 | bb = bb->next_bb; | |
625 | ||
626 | return bb == BLOCK_FOR_INSN (succ); | |
627 | } | |
628 | ||
48e1416a | 629 | /* Construct successor fences from OLD_FENCEs and put them in NEW_FENCES. |
e1ab7874 | 630 | When a successor will continue an ebb, transfer all parameters of a fence |
631 | to the new fence. ORIG_MAX_SEQNO is the maximal seqno before this round | |
632 | of scheduling helping to distinguish between the old and the new code. */ | |
633 | static void | |
634 | extract_new_fences_from (flist_t old_fences, flist_tail_t new_fences, | |
635 | int orig_max_seqno) | |
636 | { | |
637 | bool was_here_p = false; | |
2f3c9801 | 638 | insn_t insn = NULL; |
e1ab7874 | 639 | insn_t succ; |
640 | succ_iterator si; | |
641 | ilist_iterator ii; | |
642 | fence_t fence = FLIST_FENCE (old_fences); | |
643 | basic_block bb; | |
644 | ||
645 | /* Get the only element of FENCE_BNDS (fence). */ | |
646 | FOR_EACH_INSN (insn, ii, FENCE_BNDS (fence)) | |
647 | { | |
648 | gcc_assert (!was_here_p); | |
649 | was_here_p = true; | |
650 | } | |
651 | gcc_assert (was_here_p && insn != NULL_RTX); | |
652 | ||
48e1416a | 653 | /* When in the "middle" of the block, just move this fence |
e1ab7874 | 654 | to the new list. */ |
655 | bb = BLOCK_FOR_INSN (insn); | |
656 | if (! sel_bb_end_p (insn) | |
48e1416a | 657 | || (single_succ_p (bb) |
e1ab7874 | 658 | && single_pred_p (single_succ (bb)))) |
659 | { | |
660 | insn_t succ; | |
661 | ||
48e1416a | 662 | succ = (sel_bb_end_p (insn) |
e1ab7874 | 663 | ? sel_bb_head (single_succ (bb)) |
664 | : NEXT_INSN (insn)); | |
665 | ||
48e1416a | 666 | if (INSN_SEQNO (succ) > 0 |
e1ab7874 | 667 | && INSN_SEQNO (succ) <= orig_max_seqno |
668 | && INSN_SCHED_TIMES (succ) <= 0) | |
669 | { | |
670 | FENCE_INSN (fence) = succ; | |
671 | move_fence_to_fences (old_fences, new_fences); | |
672 | ||
673 | if (sched_verbose >= 1) | |
48e1416a | 674 | sel_print ("Fence %d continues as %d[%d] (state continue)\n", |
e1ab7874 | 675 | INSN_UID (insn), INSN_UID (succ), BLOCK_NUM (succ)); |
676 | } | |
677 | return; | |
678 | } | |
679 | ||
680 | /* Otherwise copy fence's structures to (possibly) multiple successors. */ | |
681 | FOR_EACH_SUCC_1 (succ, si, insn, SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS) | |
682 | { | |
683 | int seqno = INSN_SEQNO (succ); | |
684 | ||
685 | if (0 < seqno && seqno <= orig_max_seqno | |
686 | && (pipelining_p || INSN_SCHED_TIMES (succ) <= 0)) | |
687 | { | |
688 | bool b = (in_same_ebb_p (insn, succ) | |
48e1416a | 689 | || in_fallthru_bb_p (insn, succ)); |
e1ab7874 | 690 | |
691 | if (sched_verbose >= 1) | |
48e1416a | 692 | sel_print ("Fence %d continues as %d[%d] (state %s)\n", |
693 | INSN_UID (insn), INSN_UID (succ), | |
e1ab7874 | 694 | BLOCK_NUM (succ), b ? "continue" : "reset"); |
695 | ||
696 | if (b) | |
697 | add_dirty_fence_to_fences (new_fences, succ, fence); | |
698 | else | |
699 | { | |
700 | /* Mark block of the SUCC as head of the new ebb. */ | |
701 | bitmap_set_bit (forced_ebb_heads, BLOCK_NUM (succ)); | |
702 | add_clean_fence_to_fences (new_fences, succ, fence); | |
703 | } | |
704 | } | |
705 | } | |
706 | } | |
707 | \f | |
708 | ||
709 | /* Functions to support substitution. */ | |
710 | ||
48e1416a | 711 | /* Returns whether INSN with dependence status DS is eligible for |
712 | substitution, i.e. it's a copy operation x := y, and the RHS that is | |
e1ab7874 | 713 | moved up through this insn should be substituted. */ |
714 | static bool | |
715 | can_substitute_through_p (insn_t insn, ds_t ds) | |
716 | { | |
717 | /* We can substitute only true dependencies. */ | |
718 | if ((ds & DEP_OUTPUT) | |
719 | || (ds & DEP_ANTI) | |
720 | || ! INSN_RHS (insn) | |
721 | || ! INSN_LHS (insn)) | |
722 | return false; | |
723 | ||
48e1416a | 724 | /* Now we just need to make sure the INSN_RHS consists of only one |
e1ab7874 | 725 | simple REG rtx. */ |
48e1416a | 726 | if (REG_P (INSN_LHS (insn)) |
e1ab7874 | 727 | && REG_P (INSN_RHS (insn))) |
48e1416a | 728 | return true; |
e1ab7874 | 729 | return false; |
730 | } | |
731 | ||
9d75589a | 732 | /* Substitute all occurrences of INSN's destination in EXPR's vinsn with INSN's |
e1ab7874 | 733 | source (if INSN is eligible for substitution). Returns TRUE if |
734 | substitution was actually performed, FALSE otherwise. Substitution might | |
735 | not be performed because either EXPR's vinsn doesn't contain INSN's | |
48e1416a | 736 | destination or the resulting insn is invalid for the target machine. |
e1ab7874 | 737 | When UNDO is true, perform unsubstitution instead (the difference is in |
738 | the part of rtx on which validate_replace_rtx is called). */ | |
739 | static bool | |
740 | substitute_reg_in_expr (expr_t expr, insn_t insn, bool undo) | |
741 | { | |
742 | rtx *where; | |
743 | bool new_insn_valid; | |
744 | vinsn_t *vi = &EXPR_VINSN (expr); | |
745 | bool has_rhs = VINSN_RHS (*vi) != NULL; | |
746 | rtx old, new_rtx; | |
747 | ||
748 | /* Do not try to replace in SET_DEST. Although we'll choose a new | |
48e1416a | 749 | register for the RHS, we don't want to change RHS' original reg. |
e1ab7874 | 750 | If the insn is not SET, we may still be able to substitute something |
48e1416a | 751 | in it, and if we're here (don't have deps), it doesn't write INSN's |
e1ab7874 | 752 | dest. */ |
753 | where = (has_rhs | |
754 | ? &VINSN_RHS (*vi) | |
755 | : &PATTERN (VINSN_INSN_RTX (*vi))); | |
756 | old = undo ? INSN_RHS (insn) : INSN_LHS (insn); | |
757 | ||
758 | /* Substitute if INSN has a form of x:=y and LHS(INSN) occurs in *VI. */ | |
759 | if (rtx_ok_for_substitution_p (old, *where)) | |
760 | { | |
9c4c93d0 | 761 | rtx_insn *new_insn; |
e1ab7874 | 762 | rtx *where_replace; |
763 | ||
764 | /* We should copy these rtxes before substitution. */ | |
765 | new_rtx = copy_rtx (undo ? INSN_LHS (insn) : INSN_RHS (insn)); | |
766 | new_insn = create_copy_of_insn_rtx (VINSN_INSN_RTX (*vi)); | |
767 | ||
48e1416a | 768 | /* Where we'll replace. |
e1ab7874 | 769 | WHERE_REPLACE should point inside NEW_INSN, so INSN_RHS couldn't be |
770 | used instead of SET_SRC. */ | |
771 | where_replace = (has_rhs | |
772 | ? &SET_SRC (PATTERN (new_insn)) | |
773 | : &PATTERN (new_insn)); | |
774 | ||
48e1416a | 775 | new_insn_valid |
776 | = validate_replace_rtx_part_nosimplify (old, new_rtx, where_replace, | |
e1ab7874 | 777 | new_insn); |
778 | ||
779 | /* ??? Actually, constrain_operands result depends upon choice of | |
780 | destination register. E.g. if we allow single register to be an rhs, | |
48e1416a | 781 | and if we try to move dx=ax (as rhs) through ax=dx, we'll end up |
e1ab7874 | 782 | with the invalid insn dx=dx, so we'll lose this rhs here. |
783 | Just can't come up with a significant testcase for this, so just | |
784 | leaving it for now. */ | |
785 | if (new_insn_valid) | |
786 | { | |
48e1416a | 787 | change_vinsn_in_expr (expr, |
e1ab7874 | 788 | create_vinsn_from_insn_rtx (new_insn, false)); |
789 | ||
48e1416a | 790 | /* Do not allow clobbering the address register of speculative |
e1ab7874 | 791 | insns. */ |
792 | if ((EXPR_SPEC_DONE_DS (expr) & SPECULATIVE) | |
1f53e226 | 793 | && register_unavailable_p (VINSN_REG_USES (EXPR_VINSN (expr)), |
794 | expr_dest_reg (expr))) | |
e1ab7874 | 795 | EXPR_TARGET_AVAILABLE (expr) = false; |
796 | ||
797 | return true; | |
798 | } | |
799 | else | |
800 | return false; | |
801 | } | |
802 | else | |
803 | return false; | |
804 | } | |
805 | ||
48e1416a | 806 | /* Return the number of places WHAT appears within WHERE. |
e1ab7874 | 807 | Bail out when we find a reference occupying several hard registers. */ |
48e1416a | 808 | static int |
6121af96 | 809 | count_occurrences_equiv (const_rtx what, const_rtx where) |
e1ab7874 | 810 | { |
6121af96 | 811 | int count = 0; |
812 | subrtx_iterator::array_type array; | |
813 | FOR_EACH_SUBRTX (iter, array, where, NONCONST) | |
814 | { | |
815 | const_rtx x = *iter; | |
816 | if (REG_P (x) && REGNO (x) == REGNO (what)) | |
817 | { | |
818 | /* Bail out if mode is different or more than one register is | |
819 | used. */ | |
0933f1d9 | 820 | if (GET_MODE (x) != GET_MODE (what) || REG_NREGS (x) > 1) |
6121af96 | 821 | return 0; |
822 | count += 1; | |
823 | } | |
824 | else if (GET_CODE (x) == SUBREG | |
825 | && (!REG_P (SUBREG_REG (x)) | |
826 | || REGNO (SUBREG_REG (x)) == REGNO (what))) | |
827 | /* ??? Do not support substituting regs inside subregs. In that case, | |
828 | simplify_subreg will be called by validate_replace_rtx, and | |
829 | unsubstitution will fail later. */ | |
830 | return 0; | |
831 | } | |
832 | return count; | |
e1ab7874 | 833 | } |
834 | ||
835 | /* Returns TRUE if WHAT is found in WHERE rtx tree. */ | |
836 | static bool | |
837 | rtx_ok_for_substitution_p (rtx what, rtx where) | |
838 | { | |
839 | return (count_occurrences_equiv (what, where) > 0); | |
840 | } | |
841 | \f | |
842 | ||
843 | /* Functions to support register renaming. */ | |
844 | ||
845 | /* Substitute VI's set source with RHS_RTX. Returns the newly created insn | |
846 | that has RHS_RTX as its source. */ | |
9c4c93d0 | 847 | static rtx_insn * |
e1ab7874 | 848 | create_insn_rtx_with_rhs (vinsn_t vi, rtx rhs_rtx) |
849 | { | |
850 | rtx lhs_rtx; | |
851 | rtx pattern; | |
9c4c93d0 | 852 | rtx_insn *insn_rtx; |
e1ab7874 | 853 | |
854 | lhs_rtx = copy_rtx (VINSN_LHS (vi)); | |
855 | ||
d1f9b275 | 856 | pattern = gen_rtx_SET (lhs_rtx, rhs_rtx); |
e1ab7874 | 857 | insn_rtx = create_insn_rtx_from_pattern (pattern, NULL_RTX); |
858 | ||
859 | return insn_rtx; | |
860 | } | |
861 | ||
48e1416a | 862 | /* Returns whether INSN's src can be replaced with register |
e1ab7874 | 863 | NEW_SRC_REG. E.g. the following insn is valid for i386: |
864 | ||
48e1416a | 865 | (insn:HI 2205 6585 2207 727 ../../gcc/libiberty/regex.c:3337 |
e1ab7874 | 866 | (set (mem/s:QI (plus:SI (plus:SI (reg/f:SI 7 sp) |
867 | (reg:SI 0 ax [orig:770 c1 ] [770])) | |
868 | (const_int 288 [0x120])) [0 str S1 A8]) | |
869 | (const_int 0 [0x0])) 43 {*movqi_1} (nil) | |
870 | (nil)) | |
871 | ||
872 | But if we change (const_int 0 [0x0]) to (reg:QI 4 si), it will be invalid | |
48e1416a | 873 | because of operand constraints: |
e1ab7874 | 874 | |
875 | (define_insn "*movqi_1" | |
876 | [(set (match_operand:QI 0 "nonimmediate_operand" "=q,q ,q ,r,r ,?r,m") | |
877 | (match_operand:QI 1 "general_operand" " q,qn,qm,q,rn,qm,qn") | |
878 | )] | |
48e1416a | 879 | |
880 | So do constrain_operands here, before choosing NEW_SRC_REG as best | |
e1ab7874 | 881 | reg for rhs. */ |
882 | ||
883 | static bool | |
884 | replace_src_with_reg_ok_p (insn_t insn, rtx new_src_reg) | |
885 | { | |
886 | vinsn_t vi = INSN_VINSN (insn); | |
3754d046 | 887 | machine_mode mode; |
e1ab7874 | 888 | rtx dst_loc; |
889 | bool res; | |
890 | ||
891 | gcc_assert (VINSN_SEPARABLE_P (vi)); | |
892 | ||
893 | get_dest_and_mode (insn, &dst_loc, &mode); | |
894 | gcc_assert (mode == GET_MODE (new_src_reg)); | |
895 | ||
896 | if (REG_P (dst_loc) && REGNO (new_src_reg) == REGNO (dst_loc)) | |
897 | return true; | |
898 | ||
899 | /* See whether SET_SRC can be replaced with this register. */ | |
900 | validate_change (insn, &SET_SRC (PATTERN (insn)), new_src_reg, 1); | |
901 | res = verify_changes (0); | |
902 | cancel_changes (0); | |
903 | ||
904 | return res; | |
905 | } | |
906 | ||
907 | /* Returns whether INSN will still be valid after replacing its DEST with | |
908 | register NEW_REG. */ | |
909 | static bool | |
910 | replace_dest_with_reg_ok_p (insn_t insn, rtx new_reg) | |
911 | { | |
912 | vinsn_t vi = INSN_VINSN (insn); | |
913 | bool res; | |
914 | ||
915 | /* We should deal here only with separable insns. */ | |
916 | gcc_assert (VINSN_SEPARABLE_P (vi)); | |
917 | gcc_assert (GET_MODE (VINSN_LHS (vi)) == GET_MODE (new_reg)); | |
918 | ||
919 | /* See whether SET_DEST can be replaced with this register. */ | |
920 | validate_change (insn, &SET_DEST (PATTERN (insn)), new_reg, 1); | |
921 | res = verify_changes (0); | |
922 | cancel_changes (0); | |
923 | ||
924 | return res; | |
925 | } | |
926 | ||
927 | /* Create an insn rtx with the rhs of VI and lhs of LHS_RTX. */ | |
9c4c93d0 | 928 | static rtx_insn * |
e1ab7874 | 929 | create_insn_rtx_with_lhs (vinsn_t vi, rtx lhs_rtx) |
930 | { | |
931 | rtx rhs_rtx; | |
932 | rtx pattern; | |
9c4c93d0 | 933 | rtx_insn *insn_rtx; |
e1ab7874 | 934 | |
935 | rhs_rtx = copy_rtx (VINSN_RHS (vi)); | |
936 | ||
d1f9b275 | 937 | pattern = gen_rtx_SET (lhs_rtx, rhs_rtx); |
e1ab7874 | 938 | insn_rtx = create_insn_rtx_from_pattern (pattern, NULL_RTX); |
939 | ||
940 | return insn_rtx; | |
941 | } | |
942 | ||
48e1416a | 943 | /* Substitute lhs in the given expression EXPR for the register with number |
e1ab7874 | 944 | NEW_REGNO. SET_DEST may be arbitrary rtx, not only register. */ |
945 | static void | |
946 | replace_dest_with_reg_in_expr (expr_t expr, rtx new_reg) | |
947 | { | |
2f3c9801 | 948 | rtx_insn *insn_rtx; |
e1ab7874 | 949 | vinsn_t vinsn; |
950 | ||
951 | insn_rtx = create_insn_rtx_with_lhs (EXPR_VINSN (expr), new_reg); | |
952 | vinsn = create_vinsn_from_insn_rtx (insn_rtx, false); | |
953 | ||
954 | change_vinsn_in_expr (expr, vinsn); | |
955 | EXPR_WAS_RENAMED (expr) = 1; | |
956 | EXPR_TARGET_AVAILABLE (expr) = 1; | |
957 | } | |
958 | ||
959 | /* Returns whether VI writes either one of the USED_REGS registers or, | |
960 | if a register is a hard one, one of the UNAVAILABLE_HARD_REGS registers. */ | |
961 | static bool | |
48e1416a | 962 | vinsn_writes_one_of_regs_p (vinsn_t vi, regset used_regs, |
e1ab7874 | 963 | HARD_REG_SET unavailable_hard_regs) |
964 | { | |
965 | unsigned regno; | |
966 | reg_set_iterator rsi; | |
967 | ||
968 | EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_SETS (vi), 0, regno, rsi) | |
969 | { | |
970 | if (REGNO_REG_SET_P (used_regs, regno)) | |
971 | return true; | |
972 | if (HARD_REGISTER_NUM_P (regno) | |
973 | && TEST_HARD_REG_BIT (unavailable_hard_regs, regno)) | |
974 | return true; | |
975 | } | |
976 | ||
977 | EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_CLOBBERS (vi), 0, regno, rsi) | |
978 | { | |
979 | if (REGNO_REG_SET_P (used_regs, regno)) | |
980 | return true; | |
981 | if (HARD_REGISTER_NUM_P (regno) | |
982 | && TEST_HARD_REG_BIT (unavailable_hard_regs, regno)) | |
983 | return true; | |
984 | } | |
985 | ||
986 | return false; | |
987 | } | |
988 | ||
48e1416a | 989 | /* Returns register class of the output register in INSN. |
e1ab7874 | 990 | Returns NO_REGS for call insns because some targets have constraints on |
991 | destination register of a call insn. | |
48e1416a | 992 | |
e1ab7874 | 993 | Code adapted from regrename.c::build_def_use. */ |
994 | static enum reg_class | |
ed3e6e5d | 995 | get_reg_class (rtx_insn *insn) |
e1ab7874 | 996 | { |
757fefec | 997 | int i, n_ops; |
e1ab7874 | 998 | |
835b8178 | 999 | extract_constrain_insn (insn); |
8eaaac4d | 1000 | preprocess_constraints (insn); |
e1ab7874 | 1001 | n_ops = recog_data.n_operands; |
1002 | ||
89a7a6a5 | 1003 | const operand_alternative *op_alt = which_op_alt (); |
e1ab7874 | 1004 | if (asm_noperands (PATTERN (insn)) > 0) |
1005 | { | |
1006 | for (i = 0; i < n_ops; i++) | |
1007 | if (recog_data.operand_type[i] == OP_OUT) | |
1008 | { | |
1009 | rtx *loc = recog_data.operand_loc[i]; | |
1010 | rtx op = *loc; | |
89a7a6a5 | 1011 | enum reg_class cl = alternative_class (op_alt, i); |
e1ab7874 | 1012 | |
1013 | if (REG_P (op) | |
1014 | && REGNO (op) == ORIGINAL_REGNO (op)) | |
1015 | continue; | |
1016 | ||
1017 | return cl; | |
1018 | } | |
1019 | } | |
1020 | else if (!CALL_P (insn)) | |
1021 | { | |
1022 | for (i = 0; i < n_ops + recog_data.n_dups; i++) | |
1023 | { | |
1024 | int opn = i < n_ops ? i : recog_data.dup_num[i - n_ops]; | |
89a7a6a5 | 1025 | enum reg_class cl = alternative_class (op_alt, opn); |
48e1416a | 1026 | |
e1ab7874 | 1027 | if (recog_data.operand_type[opn] == OP_OUT || |
1028 | recog_data.operand_type[opn] == OP_INOUT) | |
1029 | return cl; | |
1030 | } | |
1031 | } | |
1032 | ||
1033 | /* Insns like | |
1034 | (insn (set (reg:CCZ 17 flags) (compare:CCZ ...))) | |
1035 | may result in returning NO_REGS, because flags is written implicitly through the | |
1036 | CMP insn, which has no OP_OUT | OP_INOUT operands. */ | |
1037 | return NO_REGS; | |
1038 | } | |
1039 | ||
e1ab7874 | 1040 | /* Calculate HARD_REGNO_RENAME_OK data for REGNO. */ |
1041 | static void | |
1042 | init_hard_regno_rename (int regno) | |
1043 | { | |
1044 | int cur_reg; | |
1045 | ||
1046 | SET_HARD_REG_BIT (sel_hrd.regs_for_rename[regno], regno); | |
1047 | ||
1048 | for (cur_reg = 0; cur_reg < FIRST_PSEUDO_REGISTER; cur_reg++) | |
1049 | { | |
1050 | /* We are not interested in renaming into other regs. */ | |
1051 | if (!TEST_HARD_REG_BIT (sel_hrd.regs_ever_used, cur_reg)) | |
1052 | continue; | |
1053 | ||
1054 | if (HARD_REGNO_RENAME_OK (regno, cur_reg)) | |
1055 | SET_HARD_REG_BIT (sel_hrd.regs_for_rename[regno], cur_reg); | |
1056 | } | |
1057 | } | |
e1ab7874 | 1058 | |
48e1416a | 1059 | /* A wrapper around HARD_REGNO_RENAME_OK that will look into the hard regs |
e1ab7874 | 1060 | data first. */ |
1061 | static inline bool | |
34ff3f78 | 1062 | sel_hard_regno_rename_ok (int from ATTRIBUTE_UNUSED, int to ATTRIBUTE_UNUSED) |
e1ab7874 | 1063 | { |
e1ab7874 | 1064 | /* Check whether this is all calculated. */ |
1065 | if (TEST_HARD_REG_BIT (sel_hrd.regs_for_rename[from], from)) | |
1066 | return TEST_HARD_REG_BIT (sel_hrd.regs_for_rename[from], to); | |
1067 | ||
1068 | init_hard_regno_rename (from); | |
1069 | ||
1070 | return TEST_HARD_REG_BIT (sel_hrd.regs_for_rename[from], to); | |
e1ab7874 | 1071 | } |
1072 | ||
1073 | /* Calculate set of registers that are capable of holding MODE. */ | |
1074 | static void | |
3754d046 | 1075 | init_regs_for_mode (machine_mode mode) |
e1ab7874 | 1076 | { |
1077 | int cur_reg; | |
48e1416a | 1078 | |
e1ab7874 | 1079 | CLEAR_HARD_REG_SET (sel_hrd.regs_for_mode[mode]); |
1080 | CLEAR_HARD_REG_SET (sel_hrd.regs_for_call_clobbered[mode]); | |
1081 | ||
1082 | for (cur_reg = 0; cur_reg < FIRST_PSEUDO_REGISTER; cur_reg++) | |
1083 | { | |
ee331bc7 | 1084 | int nregs; |
e1ab7874 | 1085 | int i; |
48e1416a | 1086 | |
ee331bc7 | 1087 | /* See whether it accepts all modes that occur in |
1088 | original insns. */ | |
1089 | if (! HARD_REGNO_MODE_OK (cur_reg, mode)) | |
1090 | continue; | |
1091 | ||
1092 | nregs = hard_regno_nregs[cur_reg][mode]; | |
1093 | ||
e1ab7874 | 1094 | for (i = nregs - 1; i >= 0; --i) |
1095 | if (fixed_regs[cur_reg + i] | |
1096 | || global_regs[cur_reg + i] | |
48e1416a | 1097 | /* Can't use regs which aren't saved by |
e1ab7874 | 1098 | the prologue. */ |
1099 | || !TEST_HARD_REG_BIT (sel_hrd.regs_ever_used, cur_reg + i) | |
0cfef6c5 | 1100 | /* Can't use regs with non-null REG_BASE_VALUE, because adjusting |
1101 | it affects aliasing globally and invalidates all AV sets. */ | |
1102 | || get_reg_base_value (cur_reg + i) | |
e1ab7874 | 1103 | #ifdef LEAF_REGISTERS |
1104 | /* We can't use a non-leaf register if we're in a | |
1105 | leaf function. */ | |
d5bf7b64 | 1106 | || (crtl->is_leaf |
e1ab7874 | 1107 | && !LEAF_REGISTERS[cur_reg + i]) |
1108 | #endif | |
1109 | ) | |
1110 | break; | |
48e1416a | 1111 | |
1112 | if (i >= 0) | |
e1ab7874 | 1113 | continue; |
48e1416a | 1114 | |
e1ab7874 | 1115 | if (HARD_REGNO_CALL_PART_CLOBBERED (cur_reg, mode)) |
48e1416a | 1116 | SET_HARD_REG_BIT (sel_hrd.regs_for_call_clobbered[mode], |
e1ab7874 | 1117 | cur_reg); |
48e1416a | 1118 | |
1119 | /* If the CUR_REG passed all the checks above, | |
e1ab7874 | 1120 | then it's ok. */ |
1121 | SET_HARD_REG_BIT (sel_hrd.regs_for_mode[mode], cur_reg); | |
1122 | } | |
1123 | ||
1124 | sel_hrd.regs_for_mode_ok[mode] = true; | |
1125 | } | |
1126 | ||
1127 | /* Init all register sets gathered in HRD. */ | |
1128 | static void | |
1129 | init_hard_regs_data (void) | |
1130 | { | |
1131 | int cur_reg = 0; | |
8458f4ca | 1132 | int cur_mode = 0; |
e1ab7874 | 1133 | |
1134 | CLEAR_HARD_REG_SET (sel_hrd.regs_ever_used); | |
1135 | for (cur_reg = 0; cur_reg < FIRST_PSEUDO_REGISTER; cur_reg++) | |
1136 | if (df_regs_ever_live_p (cur_reg) || call_used_regs[cur_reg]) | |
1137 | SET_HARD_REG_BIT (sel_hrd.regs_ever_used, cur_reg); | |
48e1416a | 1138 | |
1139 | /* Initialize registers that are valid based on mode when this is | |
e1ab7874 | 1140 | really needed. */ |
1141 | for (cur_mode = 0; cur_mode < NUM_MACHINE_MODES; cur_mode++) | |
1142 | sel_hrd.regs_for_mode_ok[cur_mode] = false; | |
48e1416a | 1143 | |
e1ab7874 | 1144 | /* Mark that all HARD_REGNO_RENAME_OK is not calculated. */ |
1145 | for (cur_reg = 0; cur_reg < FIRST_PSEUDO_REGISTER; cur_reg++) | |
1146 | CLEAR_HARD_REG_SET (sel_hrd.regs_for_rename[cur_reg]); | |
1147 | ||
1148 | #ifdef STACK_REGS | |
1149 | CLEAR_HARD_REG_SET (sel_hrd.stack_regs); | |
1150 | ||
1151 | for (cur_reg = FIRST_STACK_REG; cur_reg <= LAST_STACK_REG; cur_reg++) | |
1152 | SET_HARD_REG_BIT (sel_hrd.stack_regs, cur_reg); | |
1153 | #endif | |
48e1416a | 1154 | } |
e1ab7874 | 1155 | |
48e1416a | 1156 | /* Mark hardware regs in REG_RENAME_P that are not suitable |
e1ab7874 | 1157 | for renaming rhs in INSN due to hardware restrictions (register class, |
1158 | modes compatibility etc). This doesn't affect original insn's dest reg, | |
1159 | if it isn't in USED_REGS. DEF is a definition insn of rhs for which the | |
1160 | destination register is sought. LHS (DEF->ORIG_INSN) may be REG or MEM. | |
1161 | Registers that are in used_regs are always marked in | |
1162 | unavailable_hard_regs as well. */ | |
1163 | ||
1164 | static void | |
1165 | mark_unavailable_hard_regs (def_t def, struct reg_rename *reg_rename_p, | |
1166 | regset used_regs ATTRIBUTE_UNUSED) | |
1167 | { | |
3754d046 | 1168 | machine_mode mode; |
e1ab7874 | 1169 | enum reg_class cl = NO_REGS; |
1170 | rtx orig_dest; | |
1171 | unsigned cur_reg, regno; | |
1172 | hard_reg_set_iterator hrsi; | |
1173 | ||
1174 | gcc_assert (GET_CODE (PATTERN (def->orig_insn)) == SET); | |
1175 | gcc_assert (reg_rename_p); | |
1176 | ||
1177 | orig_dest = SET_DEST (PATTERN (def->orig_insn)); | |
48e1416a | 1178 | |
e1ab7874 | 1179 | /* We have decided not to rename 'mem = something;' insns, as 'something' |
1180 | is usually a register. */ | |
1181 | if (!REG_P (orig_dest)) | |
1182 | return; | |
1183 | ||
1184 | regno = REGNO (orig_dest); | |
1185 | ||
1186 | /* If before reload, don't try to work with pseudos. */ | |
1187 | if (!reload_completed && !HARD_REGISTER_NUM_P (regno)) | |
1188 | return; | |
1189 | ||
ba1fc759 | 1190 | if (reload_completed) |
1191 | cl = get_reg_class (def->orig_insn); | |
e1ab7874 | 1192 | |
ba1fc759 | 1193 | /* Stop if the original register is one of the fixed_regs, global_regs or |
1194 | frame pointer, or we could not discover its class. */ | |
48e1416a | 1195 | if (fixed_regs[regno] |
e1ab7874 | 1196 | || global_regs[regno] |
5ae82d58 | 1197 | #if !HARD_FRAME_POINTER_IS_FRAME_POINTER |
ba1fc759 | 1198 | || (frame_pointer_needed && regno == HARD_FRAME_POINTER_REGNUM) |
e1ab7874 | 1199 | #else |
ba1fc759 | 1200 | || (frame_pointer_needed && regno == FRAME_POINTER_REGNUM) |
e1ab7874 | 1201 | #endif |
ba1fc759 | 1202 | || (reload_completed && cl == NO_REGS)) |
e1ab7874 | 1203 | { |
1204 | SET_HARD_REG_SET (reg_rename_p->unavailable_hard_regs); | |
1205 | ||
1206 | /* Give a chance for original register, if it isn't in used_regs. */ | |
1207 | if (!def->crosses_call) | |
1208 | CLEAR_HARD_REG_BIT (reg_rename_p->unavailable_hard_regs, regno); | |
1209 | ||
1210 | return; | |
1211 | } | |
1212 | ||
1213 | /* If something allocated on stack in this function, mark frame pointer | |
48e1416a | 1214 | register unavailable, considering also modes. |
e1ab7874 | 1215 | FIXME: it is enough to do this once per all original defs. */ |
1216 | if (frame_pointer_needed) | |
1217 | { | |
d82cf2b2 | 1218 | add_to_hard_reg_set (®_rename_p->unavailable_hard_regs, |
1219 | Pmode, FRAME_POINTER_REGNUM); | |
e1ab7874 | 1220 | |
d82cf2b2 | 1221 | if (!HARD_FRAME_POINTER_IS_FRAME_POINTER) |
1222 | add_to_hard_reg_set (®_rename_p->unavailable_hard_regs, | |
f4ce3ea7 | 1223 | Pmode, HARD_FRAME_POINTER_REGNUM); |
e1ab7874 | 1224 | } |
1225 | ||
1226 | #ifdef STACK_REGS | |
1227 | /* For the stack registers the presence of FIRST_STACK_REG in USED_REGS | |
1228 | is treated as if all stack regs were in this set. | |
1229 | I.e. no stack register can be renamed, and even if it's an original | |
48e1416a | 1230 | register here we make sure it won't be lifted over its previous def |
1231 | (its previous def will appear as if it's a FIRST_STACK_REG def). | |
e1ab7874 | 1232 | The HARD_REGNO_RENAME_OK check covers other cases in the condition below. */ |
1233 | if (IN_RANGE (REGNO (orig_dest), FIRST_STACK_REG, LAST_STACK_REG) | |
48e1416a | 1234 | && REGNO_REG_SET_P (used_regs, FIRST_STACK_REG)) |
1235 | IOR_HARD_REG_SET (reg_rename_p->unavailable_hard_regs, | |
e1ab7874 | 1236 | sel_hrd.stack_regs); |
48e1416a | 1237 | #endif |
e1ab7874 | 1238 | |
48e1416a | 1239 | /* If there's a call on this path, make regs from call_used_reg_set |
e1ab7874 | 1240 | unavailable. */ |
1241 | if (def->crosses_call) | |
48e1416a | 1242 | IOR_HARD_REG_SET (reg_rename_p->unavailable_hard_regs, |
e1ab7874 | 1243 | call_used_reg_set); |
1244 | ||
48e1416a | 1245 | /* Stop here before reload: we need FRAME_REGS, STACK_REGS, and crosses_call, |
e1ab7874 | 1246 | but not register classes. */ |
1247 | if (!reload_completed) | |
1248 | return; | |
1249 | ||
48e1416a | 1250 | /* Leave regs as 'available' only from the current |
e1ab7874 | 1251 | register class. */ |
e1ab7874 | 1252 | COPY_HARD_REG_SET (reg_rename_p->available_for_renaming, |
1253 | reg_class_contents[cl]); | |
1254 | ||
ba1fc759 | 1255 | mode = GET_MODE (orig_dest); |
1256 | ||
e1ab7874 | 1257 | /* Leave only registers available for this mode. */ |
1258 | if (!sel_hrd.regs_for_mode_ok[mode]) | |
1259 | init_regs_for_mode (mode); | |
48e1416a | 1260 | AND_HARD_REG_SET (reg_rename_p->available_for_renaming, |
e1ab7874 | 1261 | sel_hrd.regs_for_mode[mode]); |
1262 | ||
1263 | /* Exclude registers that are partially call clobbered. */ | |
1264 | if (def->crosses_call | |
1265 | && ! HARD_REGNO_CALL_PART_CLOBBERED (regno, mode)) | |
48e1416a | 1266 | AND_COMPL_HARD_REG_SET (reg_rename_p->available_for_renaming, |
e1ab7874 | 1267 | sel_hrd.regs_for_call_clobbered[mode]); |
1268 | ||
1269 | /* Leave only those that are ok to rename. */ | |
1270 | EXECUTE_IF_SET_IN_HARD_REG_SET (reg_rename_p->available_for_renaming, | |
1271 | 0, cur_reg, hrsi) | |
1272 | { | |
1273 | int nregs; | |
1274 | int i; | |
1275 | ||
1276 | nregs = hard_regno_nregs[cur_reg][mode]; | |
1277 | gcc_assert (nregs > 0); | |
1278 | ||
1279 | for (i = nregs - 1; i >= 0; --i) | |
1280 | if (! sel_hard_regno_rename_ok (regno + i, cur_reg + i)) | |
1281 | break; | |
1282 | ||
48e1416a | 1283 | if (i >= 0) |
1284 | CLEAR_HARD_REG_BIT (reg_rename_p->available_for_renaming, | |
e1ab7874 | 1285 | cur_reg); |
1286 | } | |
1287 | ||
48e1416a | 1288 | AND_COMPL_HARD_REG_SET (reg_rename_p->available_for_renaming, |
e1ab7874 | 1289 | reg_rename_p->unavailable_hard_regs); |
1290 | ||
1291 | /* Regno is always ok from the renaming point of view, but it really | |
1292 | could be in *unavailable_hard_regs already, so set it here instead | |
1293 | of there. */ | |
1294 | SET_HARD_REG_BIT (reg_rename_p->available_for_renaming, regno); | |
1295 | } | |
1296 | ||
1297 | /* reg_rename_tick[REG1] > reg_rename_tick[REG2] if REG1 was chosen as the | |
1298 | best register more recently than REG2. */ | |
1299 | static int reg_rename_tick[FIRST_PSEUDO_REGISTER]; | |
1300 | ||
1301 | /* Indicates the number of times renaming happened before the current one. */ | |
1302 | static int reg_rename_this_tick; | |
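/* An illustrative sketch of how these two counters interact (the update
   itself is not shown here, so the exact statement is an assumption):
   whenever register R is picked as the rename target, its tick is
   presumably advanced with something like
     reg_rename_tick[R] = ++reg_rename_this_tick;
   so that choose_best_reg_1 below, which prefers the candidate with the
   smallest reg_rename_tick, cycles through the register file instead of
   reusing the same register over and over.  */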
1303 | ||
48e1416a | 1304 | /* Choose a register from the free ones that is suitable for storing |
e1ab7874 | 1305 | the rhs value. |
1306 | ||
1307 | ORIGINAL_INSNS is the list of insns where the operation (rhs) | |
48e1416a | 1308 | originally appears. There could be multiple original operations |
1309 | for a single rhs since we are moving it up and merging along different | |
e1ab7874 | 1310 | paths. |
1311 | ||
1312 | Some code is adapted from regrename.c (regrename_optimize). | |
1313 | If the original register is available, the function returns it. | |
1314 | Otherwise it performs checks so that the new register | |
1315 | complies with the following: | |
48e1416a | 1316 | - it should not violate any live ranges (such registers are in |
e1ab7874 | 1317 | REG_RENAME_P->available_for_renaming set); |
1318 | - it should not be in the HARD_REGS_USED regset; | |
1319 | - it should be in the class compatible with original uses; | |
1320 | - it should not be clobbered through reference with different mode; | |
48e1416a | 1321 | - if we're in a leaf function, then the new register should |
e1ab7874 | 1322 | not be in the LEAF_REGISTERS; |
1323 | - etc. | |
1324 | ||
1325 | If several registers meet the conditions, the register with the smallest | |
1326 | tick is returned to achieve more even register allocation. | |
1327 | ||
1328 | If original register seems to be ok, we set *IS_ORIG_REG_P_PTR to true. | |
1329 | ||
1330 | If no register satisfies the above conditions, NULL_RTX is returned. */ | |
1331 | static rtx | |
48e1416a | 1332 | choose_best_reg_1 (HARD_REG_SET hard_regs_used, |
1333 | struct reg_rename *reg_rename_p, | |
e1ab7874 | 1334 | def_list_t original_insns, bool *is_orig_reg_p_ptr) |
1335 | { | |
1336 | int best_new_reg; | |
1337 | unsigned cur_reg; | |
3754d046 | 1338 | machine_mode mode = VOIDmode; |
e1ab7874 | 1339 | unsigned regno, i, n; |
1340 | hard_reg_set_iterator hrsi; | |
1341 | def_list_iterator di; | |
1342 | def_t def; | |
1343 | ||
1344 | /* If original register is available, return it. */ | |
1345 | *is_orig_reg_p_ptr = true; | |
1346 | ||
1347 | FOR_EACH_DEF (def, di, original_insns) | |
1348 | { | |
1349 | rtx orig_dest = SET_DEST (PATTERN (def->orig_insn)); | |
1350 | ||
1351 | gcc_assert (REG_P (orig_dest)); | |
1352 | ||
48e1416a | 1353 | /* Check that all original operations have the same mode. |
e1ab7874 | 1354 | This is done for the next loop; if we returned from this |
48e1416a | 1355 | loop, we'd check only part of them, but in this case |
e1ab7874 | 1356 | it doesn't matter. */ |
1357 | if (mode == VOIDmode) | |
1358 | mode = GET_MODE (orig_dest); | |
1359 | gcc_assert (mode == GET_MODE (orig_dest)); | |
1360 | ||
1361 | regno = REGNO (orig_dest); | |
1362 | for (i = 0, n = hard_regno_nregs[regno][mode]; i < n; i++) | |
1363 | if (TEST_HARD_REG_BIT (hard_regs_used, regno + i)) | |
1364 | break; | |
1365 | ||
1366 | /* All hard registers are available. */ | |
1367 | if (i == n) | |
1368 | { | |
1369 | gcc_assert (mode != VOIDmode); | |
48e1416a | 1370 | |
e1ab7874 | 1371 | /* Hard registers should not be shared. */ |
1372 | return gen_rtx_REG (mode, regno); | |
1373 | } | |
1374 | } | |
48e1416a | 1375 | |
e1ab7874 | 1376 | *is_orig_reg_p_ptr = false; |
1377 | best_new_reg = -1; | |
48e1416a | 1378 | |
1379 | /* Among all available regs choose the register that was | |
e1ab7874 | 1380 | allocated earliest. */ |
1381 | EXECUTE_IF_SET_IN_HARD_REG_SET (reg_rename_p->available_for_renaming, | |
1382 | 0, cur_reg, hrsi) | |
1383 | if (! TEST_HARD_REG_BIT (hard_regs_used, cur_reg)) | |
1384 | { | |
936f065e | 1385 | /* Check that all hard regs for mode are available. */ |
1386 | for (i = 1, n = hard_regno_nregs[cur_reg][mode]; i < n; i++) | |
1387 | if (TEST_HARD_REG_BIT (hard_regs_used, cur_reg + i) | |
1388 | || !TEST_HARD_REG_BIT (reg_rename_p->available_for_renaming, | |
1389 | cur_reg + i)) | |
1390 | break; | |
1391 | ||
1392 | if (i < n) | |
1393 | continue; | |
1394 | ||
e1ab7874 | 1395 | /* All hard registers are available. */ |
1396 | if (best_new_reg < 0 | |
1397 | || reg_rename_tick[cur_reg] < reg_rename_tick[best_new_reg]) | |
1398 | { | |
1399 | best_new_reg = cur_reg; | |
48e1416a | 1400 | |
e1ab7874 | 1401 | /* Return immediately when we know there's no better reg. */ |
1402 | if (! reg_rename_tick[best_new_reg]) | |
1403 | break; | |
1404 | } | |
1405 | } | |
1406 | ||
1407 | if (best_new_reg >= 0) | |
1408 | { | |
1409 | /* Use the check from the above loop. */ | |
1410 | gcc_assert (mode != VOIDmode); | |
1411 | return gen_rtx_REG (mode, best_new_reg); | |
1412 | } | |
1413 | ||
1414 | return NULL_RTX; | |
1415 | } | |
1416 | ||
1417 | /* A wrapper around choose_best_reg_1 () to verify that we make correct | |
1418 | assumptions about available registers in the function. */ | |
1419 | static rtx | |
48e1416a | 1420 | choose_best_reg (HARD_REG_SET hard_regs_used, struct reg_rename *reg_rename_p, |
e1ab7874 | 1421 | def_list_t original_insns, bool *is_orig_reg_p_ptr) |
1422 | { | |
48e1416a | 1423 | rtx best_reg = choose_best_reg_1 (hard_regs_used, reg_rename_p, |
e1ab7874 | 1424 | original_insns, is_orig_reg_p_ptr); |
1425 | ||
936f065e | 1426 | /* FIXME loop over hard_regno_nregs here. */ |
e1ab7874 | 1427 | gcc_assert (best_reg == NULL_RTX |
1428 | || TEST_HARD_REG_BIT (sel_hrd.regs_ever_used, REGNO (best_reg))); | |
1429 | ||
1430 | return best_reg; | |
1431 | } | |
1432 | ||
48e1416a | 1433 | /* Choose a pseudo register for storing the rhs value. As this is supposed |
e1ab7874 | 1434 | to work before reload, we return either the original register or create |
48e1416a | 1435 | a new one. The parameters are the same as in the choose_best_reg_1 |
1436 | function, except that USED_REGS may contain pseudos. | |
e1ab7874 | 1437 | If we work with hard regs, check also REG_RENAME_P->UNAVAILABLE_HARD_REGS. |
1438 | ||
48e1416a | 1439 | TODO: take into account register pressure while doing this. Up to this |
1440 | moment, this function would never return NULL for pseudos, but we should | |
e1ab7874 | 1441 | not rely on this. */ |
1442 | static rtx | |
48e1416a | 1443 | choose_best_pseudo_reg (regset used_regs, |
1444 | struct reg_rename *reg_rename_p, | |
e1ab7874 | 1445 | def_list_t original_insns, bool *is_orig_reg_p_ptr) |
1446 | { | |
1447 | def_list_iterator i; | |
1448 | def_t def; | |
3754d046 | 1449 | machine_mode mode = VOIDmode; |
e1ab7874 | 1450 | bool bad_hard_regs = false; |
48e1416a | 1451 | |
e1ab7874 | 1452 | /* We should not use this after reload. */ |
1453 | gcc_assert (!reload_completed); | |
1454 | ||
1455 | /* If original register is available, return it. */ | |
1456 | *is_orig_reg_p_ptr = true; | |
1457 | ||
1458 | FOR_EACH_DEF (def, i, original_insns) | |
1459 | { | |
1460 | rtx dest = SET_DEST (PATTERN (def->orig_insn)); | |
1461 | int orig_regno; | |
48e1416a | 1462 | |
e1ab7874 | 1463 | gcc_assert (REG_P (dest)); |
48e1416a | 1464 | |
e1ab7874 | 1465 | /* Check that all original operations have the same mode. */ |
1466 | if (mode == VOIDmode) | |
1467 | mode = GET_MODE (dest); | |
1468 | else | |
1469 | gcc_assert (mode == GET_MODE (dest)); | |
1470 | orig_regno = REGNO (dest); | |
48e1416a | 1471 | |
e1ab7874 | 1472 | if (!REGNO_REG_SET_P (used_regs, orig_regno)) |
1473 | { | |
1474 | if (orig_regno < FIRST_PSEUDO_REGISTER) | |
1475 | { | |
1476 | gcc_assert (df_regs_ever_live_p (orig_regno)); | |
48e1416a | 1477 | |
1478 | /* For hard registers, we have to check hardware imposed | |
e1ab7874 | 1479 | limitations (frame/stack registers, calls crossed). */ |
48e1416a | 1480 | if (!TEST_HARD_REG_BIT (reg_rename_p->unavailable_hard_regs, |
e1ab7874 | 1481 | orig_regno)) |
1482 | { | |
48e1416a | 1483 | /* Don't let register cross a call if it doesn't already |
1484 | cross one. This condition is written in accordance with | |
e1ab7874 | 1485 | that in sched-deps.c sched_analyze_reg(). */ |
48e1416a | 1486 | if (!reg_rename_p->crosses_call |
e1ab7874 | 1487 | || REG_N_CALLS_CROSSED (orig_regno) > 0) |
48e1416a | 1488 | return gen_rtx_REG (mode, orig_regno); |
e1ab7874 | 1489 | } |
48e1416a | 1490 | |
e1ab7874 | 1491 | bad_hard_regs = true; |
1492 | } | |
1493 | else | |
1494 | return dest; | |
1495 | } | |
1496 | } | |
1497 | ||
1498 | *is_orig_reg_p_ptr = false; | |
48e1416a | 1499 | |
e1ab7874 | 1500 | /* We had some original hard registers that couldn't be used. |
1501 | Those were likely special. Don't try to create a pseudo. */ | |
1502 | if (bad_hard_regs) | |
1503 | return NULL_RTX; | |
48e1416a | 1504 | |
1505 | /* We haven't found a register from original operations. Get a new one. | |
e1ab7874 | 1506 | FIXME: control register pressure somehow. */ |
1507 | { | |
1508 | rtx new_reg = gen_reg_rtx (mode); | |
1509 | ||
1510 | gcc_assert (mode != VOIDmode); | |
1511 | ||
1512 | max_regno = max_reg_num (); | |
1513 | maybe_extend_reg_info_p (); | |
1514 | REG_N_CALLS_CROSSED (REGNO (new_reg)) = reg_rename_p->crosses_call ? 1 : 0; | |
1515 | ||
1516 | return new_reg; | |
1517 | } | |
1518 | } | |
1519 | ||
1520 | /* Verify that the target availability recorded in EXPR_TARGET_AVAILABLE of EXPR | |
1521 | is consistent with USED_REGS and REG_RENAME_P->UNAVAILABLE_HARD_REGS. */ | |
1522 | static void | |
48e1416a | 1523 | verify_target_availability (expr_t expr, regset used_regs, |
e1ab7874 | 1524 | struct reg_rename *reg_rename_p) |
1525 | { | |
1526 | unsigned n, i, regno; | |
3754d046 | 1527 | machine_mode mode; |
e1ab7874 | 1528 | bool target_available, live_available, hard_available; |
1529 | ||
1530 | if (!REG_P (EXPR_LHS (expr)) || EXPR_TARGET_AVAILABLE (expr) < 0) | |
1531 | return; | |
48e1416a | 1532 | |
e1ab7874 | 1533 | regno = expr_dest_regno (expr); |
1534 | mode = GET_MODE (EXPR_LHS (expr)); | |
1535 | target_available = EXPR_TARGET_AVAILABLE (expr) == 1; | |
619459a3 | 1536 | n = HARD_REGISTER_NUM_P (regno) ? hard_regno_nregs[regno][mode] : 1; |
e1ab7874 | 1537 | |
1538 | live_available = hard_available = true; | |
1539 | for (i = 0; i < n; i++) | |
1540 | { | |
1541 | if (bitmap_bit_p (used_regs, regno + i)) | |
1542 | live_available = false; | |
1543 | if (TEST_HARD_REG_BIT (reg_rename_p->unavailable_hard_regs, regno + i)) | |
1544 | hard_available = false; | |
1545 | } | |
1546 | ||
48e1416a | 1547 | /* When target is not available, it may be due to hard register |
e1ab7874 | 1548 | restrictions, e.g. crosses calls, so we check hard_available too. */ |
1549 | if (target_available) | |
1550 | gcc_assert (live_available); | |
1551 | else | |
48e1416a | 1552 | /* Check only if we haven't scheduled something on the previous fence, |
e1ab7874 | 1553 | because due to MAX_SOFTWARE_LOOKAHEAD_WINDOW_SIZE issues |
1554 | and having more than one fence, we may end up with the target unavailable in a block | |
48e1416a | 1555 | in whose successors the target register is actually available. |
e1ab7874 | 1556 | |
1557 | The last condition handles the case when a dependence from a call insn | |
48e1416a | 1558 | was created in sched-deps.c for insns with destination registers that |
1559 | never crossed a call before, but do cross one after our code motion. | |
e1ab7874 | 1560 | |
48e1416a | 1561 | FIXME: in the latter case, we just uselessly called find_used_regs, |
1562 | because we can't move this expression with any other register | |
e1ab7874 | 1563 | either. */ |
48e1416a | 1564 | gcc_assert (scheduled_something_on_previous_fence || !live_available |
1565 | || !hard_available | |
1566 | || (!reload_completed && reg_rename_p->crosses_call | |
e1ab7874 | 1567 | && REG_N_CALLS_CROSSED (regno) == 0)); |
1568 | } | |
1569 | ||
48e1416a | 1570 | /* Collect unavailable registers due to liveness for EXPR from BNDS |
1571 | into USED_REGS. Save additional information about available | |
e1ab7874 | 1572 | registers and registers unavailable due to hardware restrictions |
1573 | into the REG_RENAME_P structure. Save original insns into the ORIGINAL_INSNS | |
1574 | list. */ | |
1575 | static void | |
1576 | collect_unavailable_regs_from_bnds (expr_t expr, blist_t bnds, regset used_regs, | |
1577 | struct reg_rename *reg_rename_p, | |
1578 | def_list_t *original_insns) | |
1579 | { | |
1580 | for (; bnds; bnds = BLIST_NEXT (bnds)) | |
1581 | { | |
1582 | bool res; | |
1583 | av_set_t orig_ops = NULL; | |
1584 | bnd_t bnd = BLIST_BND (bnds); | |
1585 | ||
1586 | /* If the chosen best expr doesn't belong to current boundary, | |
1587 | skip it. */ | |
1588 | if (!av_set_is_in_p (BND_AV1 (bnd), EXPR_VINSN (expr))) | |
1589 | continue; | |
1590 | ||
1591 | /* Put in ORIG_OPS all exprs from this boundary that became | |
1592 | RES on top. */ | |
1593 | orig_ops = find_sequential_best_exprs (bnd, expr, false); | |
1594 | ||
1595 | /* Compute used regs and OR it into the USED_REGS. */ | |
1596 | res = find_used_regs (BND_TO (bnd), orig_ops, used_regs, | |
1597 | reg_rename_p, original_insns); | |
1598 | ||
1599 | /* FIXME: the assert is true until we'd have several boundaries. */ | |
1600 | gcc_assert (res); | |
1601 | av_set_clear (&orig_ops); | |
1602 | } | |
1603 | } | |
1604 | ||
1605 | /* Return TRUE if it is possible to replace LHSes of ORIG_INSNS with BEST_REG. | |
1606 | If BEST_REG is valid, replace LHS of EXPR with it. */ | |
1607 | static bool | |
1608 | try_replace_dest_reg (ilist_t orig_insns, rtx best_reg, expr_t expr) | |
1609 | { | |
e1ab7874 | 1610 | /* Try whether we'll be able to generate the insn |
1611 | 'dest := best_reg' at the place of the original operation. */ | |
1612 | for (; orig_insns; orig_insns = ILIST_NEXT (orig_insns)) | |
1613 | { | |
1614 | insn_t orig_insn = DEF_LIST_DEF (orig_insns)->orig_insn; | |
1615 | ||
1616 | gcc_assert (EXPR_SEPARABLE_P (INSN_EXPR (orig_insn))); | |
1617 | ||
28abb7ee | 1618 | if (REGNO (best_reg) != REGNO (INSN_LHS (orig_insn)) |
1619 | && (! replace_src_with_reg_ok_p (orig_insn, best_reg) | |
1620 | || ! replace_dest_with_reg_ok_p (orig_insn, best_reg))) | |
e1ab7874 | 1621 | return false; |
1622 | } | |
1623 | ||
1624 | /* Make sure that EXPR has the right destination | |
1625 | register. */ | |
28abb7ee | 1626 | if (expr_dest_regno (expr) != REGNO (best_reg)) |
1627 | replace_dest_with_reg_in_expr (expr, best_reg); | |
1628 | else | |
1629 | EXPR_TARGET_AVAILABLE (expr) = 1; | |
1630 | ||
e1ab7874 | 1631 | return true; |
1632 | } | |
1633 | ||
48e1416a | 1634 | /* Select and assign best register to EXPR searching from BNDS. |
1635 | Set *IS_ORIG_REG_P to TRUE if original register was selected. | |
e1ab7874 | 1636 | Return FALSE if no register can be chosen, which could happen when: |
1637 | * EXPR_SEPARABLE_P is true but we were unable to find suitable register; | |
1638 | * EXPR_SEPARABLE_P is false but the insn sets/clobbers one of the registers | |
1639 | that are used on the moving path. */ | |
1640 | static bool | |
1641 | find_best_reg_for_expr (expr_t expr, blist_t bnds, bool *is_orig_reg_p) | |
1642 | { | |
1643 | static struct reg_rename reg_rename_data; | |
1644 | ||
1645 | regset used_regs; | |
1646 | def_list_t original_insns = NULL; | |
1647 | bool reg_ok; | |
1648 | ||
1649 | *is_orig_reg_p = false; | |
1650 | ||
1651 | /* Don't bother to do anything if this insn doesn't set any registers. */ | |
1652 | if (bitmap_empty_p (VINSN_REG_SETS (EXPR_VINSN (expr))) | |
1653 | && bitmap_empty_p (VINSN_REG_CLOBBERS (EXPR_VINSN (expr)))) | |
1654 | return true; | |
1655 | ||
1656 | used_regs = get_clear_regset_from_pool (); | |
1657 | CLEAR_HARD_REG_SET (reg_rename_data.unavailable_hard_regs); | |
1658 | ||
1659 | collect_unavailable_regs_from_bnds (expr, bnds, used_regs, ®_rename_data, | |
1660 | &original_insns); | |
1661 | ||
1662 | #ifdef ENABLE_CHECKING | |
1663 | /* If after reload, make sure we're working with hard regs here. */ | |
48e1416a | 1664 | if (reload_completed) |
e1ab7874 | 1665 | { |
1666 | reg_set_iterator rsi; | |
1667 | unsigned i; | |
48e1416a | 1668 | |
e1ab7874 | 1669 | EXECUTE_IF_SET_IN_REG_SET (used_regs, FIRST_PSEUDO_REGISTER, i, rsi) |
1670 | gcc_unreachable (); | |
1671 | } | |
1672 | #endif | |
1673 | ||
1674 | if (EXPR_SEPARABLE_P (expr)) | |
1675 | { | |
1676 | rtx best_reg = NULL_RTX; | |
1677 | /* Check that we have computed availability of a target register | |
1678 | correctly. */ | |
1679 | verify_target_availability (expr, used_regs, ®_rename_data); | |
1680 | ||
1681 | /* Turn everything into hard regs after reload. */ | |
1682 | if (reload_completed) | |
1683 | { | |
1684 | HARD_REG_SET hard_regs_used; | |
1685 | REG_SET_TO_HARD_REG_SET (hard_regs_used, used_regs); | |
1686 | ||
1687 | /* Join hard registers unavailable due to register class | |
1688 | restrictions and live range intersection. */ | |
1689 | IOR_HARD_REG_SET (hard_regs_used, | |
1690 | reg_rename_data.unavailable_hard_regs); | |
1691 | ||
1692 | best_reg = choose_best_reg (hard_regs_used, ®_rename_data, | |
1693 | original_insns, is_orig_reg_p); | |
1694 | } | |
1695 | else | |
1696 | best_reg = choose_best_pseudo_reg (used_regs, ®_rename_data, | |
1697 | original_insns, is_orig_reg_p); | |
1698 | ||
1699 | if (!best_reg) | |
1700 | reg_ok = false; | |
1701 | else if (*is_orig_reg_p) | |
1702 | { | |
1703 | /* In case of unification BEST_REG may be different from EXPR's LHS | |
1704 | when EXPR's LHS is unavailable, and there is another LHS among | |
1705 | ORIGINAL_INSNS. */ | |
1706 | reg_ok = try_replace_dest_reg (original_insns, best_reg, expr); | |
1707 | } | |
1708 | else | |
1709 | { | |
1710 | /* Forbid renaming of low-cost insns. */ | |
1711 | if (sel_vinsn_cost (EXPR_VINSN (expr)) < 2) | |
1712 | reg_ok = false; | |
1713 | else | |
1714 | reg_ok = try_replace_dest_reg (original_insns, best_reg, expr); | |
1715 | } | |
1716 | } | |
1717 | else | |
1718 | { | |
1719 | /* If !EXPR_SCHEDULE_AS_RHS (EXPR), just make sure INSN doesn't set | |
1720 | any of the HARD_REGS_USED set. */ | |
1721 | if (vinsn_writes_one_of_regs_p (EXPR_VINSN (expr), used_regs, | |
1722 | reg_rename_data.unavailable_hard_regs)) | |
1723 | { | |
1724 | reg_ok = false; | |
1725 | gcc_assert (EXPR_TARGET_AVAILABLE (expr) <= 0); | |
1726 | } | |
1727 | else | |
1728 | { | |
1729 | reg_ok = true; | |
1730 | gcc_assert (EXPR_TARGET_AVAILABLE (expr) != 0); | |
1731 | } | |
1732 | } | |
1733 | ||
1734 | ilist_clear (&original_insns); | |
1735 | return_regset_to_pool (used_regs); | |
1736 | ||
1737 | return reg_ok; | |
1738 | } | |
1739 | \f | |
1740 | ||
1741 | /* Return true if the dependence described by DS can be overcome. */ | |
1742 | static bool | |
1743 | can_speculate_dep_p (ds_t ds) | |
1744 | { | |
1745 | if (spec_info == NULL) | |
1746 | return false; | |
1747 | ||
1748 | /* Leave only speculative data. */ | |
1749 | ds &= SPECULATIVE; | |
1750 | ||
1751 | if (ds == 0) | |
1752 | return false; | |
1753 | ||
1754 | { | |
1755 | /* FIXME: make sched-deps.c produce only those non-hard dependencies, | |
1756 | that we can overcome. */ | |
1757 | ds_t spec_mask = spec_info->mask; | |
1758 | ||
1759 | if ((ds & spec_mask) != ds) | |
1760 | return false; | |
1761 | } | |
1762 | ||
1763 | if (ds_weak (ds) < spec_info->data_weakness_cutoff) | |
1764 | return false; | |
1765 | ||
1766 | return true; | |
1767 | } | |
1768 | ||
1769 | /* Get a speculation check instruction. | |
1770 | C_EXPR is a speculative expression, | |
1771 | CHECK_DS describes speculations that should be checked, | |
1772 | ORIG_INSN is the original non-speculative insn in the stream. */ | |
1773 | static insn_t | |
1774 | create_speculation_check (expr_t c_expr, ds_t check_ds, insn_t orig_insn) | |
1775 | { | |
1776 | rtx check_pattern; | |
9c4c93d0 | 1777 | rtx_insn *insn_rtx; |
e1ab7874 | 1778 | insn_t insn; |
1779 | basic_block recovery_block; | |
18282db0 | 1780 | rtx_insn *label; |
e1ab7874 | 1781 | |
1782 | /* Create a recovery block if the target is going to emit a branchy check, or if | |
1783 | ORIG_INSN was speculative already. */ | |
cf7898a6 | 1784 | if (targetm.sched.needs_block_p (check_ds) |
e1ab7874 | 1785 | || EXPR_SPEC_DONE_DS (INSN_EXPR (orig_insn)) != 0) |
1786 | { | |
1787 | recovery_block = sel_create_recovery_block (orig_insn); | |
1788 | label = BB_HEAD (recovery_block); | |
1789 | } | |
1790 | else | |
1791 | { | |
1792 | recovery_block = NULL; | |
18282db0 | 1793 | label = NULL; |
e1ab7874 | 1794 | } |
1795 | ||
1796 | /* Get pattern of the check. */ | |
1797 | check_pattern = targetm.sched.gen_spec_check (EXPR_INSN_RTX (c_expr), label, | |
1798 | check_ds); | |
1799 | ||
1800 | gcc_assert (check_pattern != NULL); | |
1801 | ||
1802 | /* Emit check. */ | |
1803 | insn_rtx = create_insn_rtx_from_pattern (check_pattern, label); | |
1804 | ||
1805 | insn = sel_gen_insn_from_rtx_after (insn_rtx, INSN_EXPR (orig_insn), | |
1806 | INSN_SEQNO (orig_insn), orig_insn); | |
1807 | ||
1808 | /* Make the check non-speculative. */ | |
1809 | EXPR_SPEC_DONE_DS (INSN_EXPR (insn)) = 0; | |
1810 | INSN_SPEC_CHECKED_DS (insn) = check_ds; | |
1811 | ||
1812 | /* Decrease priority of check by difference of load/check instruction | |
1813 | latencies. */ | |
1814 | EXPR_PRIORITY (INSN_EXPR (insn)) -= (sel_vinsn_cost (INSN_VINSN (orig_insn)) | |
1815 | - sel_vinsn_cost (INSN_VINSN (insn))); | |
1816 | ||
1817 | /* Emit copy of original insn (though with replaced target register, | |
1818 | if needed) to the recovery block. */ | |
1819 | if (recovery_block != NULL) | |
1820 | { | |
1821 | rtx twin_rtx; | |
e1ab7874 | 1822 | |
1823 | twin_rtx = copy_rtx (PATTERN (EXPR_INSN_RTX (c_expr))); | |
1824 | twin_rtx = create_insn_rtx_from_pattern (twin_rtx, NULL_RTX); | |
57ab8ec3 | 1825 | sel_gen_recovery_insn_from_rtx_after (twin_rtx, |
1826 | INSN_EXPR (orig_insn), | |
1827 | INSN_SEQNO (insn), | |
1828 | bb_note (recovery_block)); | |
e1ab7874 | 1829 | } |
1830 | ||
1831 | /* If we've generated a data speculation check, make sure | |
1832 | that all the bookkeeping instructions we'll create during | |
1833 | this move_op () will allocate an ALAT entry so that the | |
1834 | check won't fail. | |
1835 | In case of control speculation we must convert C_EXPR to control | |
1836 | speculative mode, because failing to do so will bring us an exception | |
1837 | thrown by the non-control-speculative load. */ | |
1838 | check_ds = ds_get_max_dep_weak (check_ds); | |
1839 | speculate_expr (c_expr, check_ds); | |
48e1416a | 1840 | |
e1ab7874 | 1841 | return insn; |
1842 | } | |
1843 | ||
1844 | /* True when INSN is a "regN = regN" copy. */ | |
1845 | static bool | |
71ce7f59 | 1846 | identical_copy_p (rtx_insn *insn) |
e1ab7874 | 1847 | { |
1848 | rtx lhs, rhs, pat; | |
1849 | ||
1850 | pat = PATTERN (insn); | |
1851 | ||
1852 | if (GET_CODE (pat) != SET) | |
1853 | return false; | |
1854 | ||
1855 | lhs = SET_DEST (pat); | |
1856 | if (!REG_P (lhs)) | |
1857 | return false; | |
1858 | ||
1859 | rhs = SET_SRC (pat); | |
1860 | if (!REG_P (rhs)) | |
1861 | return false; | |
1862 | ||
1863 | return REGNO (lhs) == REGNO (rhs); | |
1864 | } | |
1865 | ||
48e1416a | 1866 | /* Undo all transformations on *AV_PTR that were done when |
e1ab7874 | 1867 | moving through INSN. */ |
1868 | static void | |
2f3c9801 | 1869 | undo_transformations (av_set_t *av_ptr, rtx_insn *insn) |
e1ab7874 | 1870 | { |
1871 | av_set_iterator av_iter; | |
1872 | expr_t expr; | |
1873 | av_set_t new_set = NULL; | |
1874 | ||
48e1416a | 1875 | /* First, kill any EXPR that uses registers set by the insn. This is |
e1ab7874 | 1876 | required for correctness. */ |
1877 | FOR_EACH_EXPR_1 (expr, av_iter, av_ptr) | |
1878 | if (!sched_insns_conditions_mutex_p (insn, EXPR_INSN_RTX (expr)) | |
48e1416a | 1879 | && bitmap_intersect_p (INSN_REG_SETS (insn), |
e1ab7874 | 1880 | VINSN_REG_USES (EXPR_VINSN (expr))) |
1881 | /* When an insn looks like 'r1 = r1', we could substitute through | |
1882 | it, but the above condition will still hold. This happened with | |
48e1416a | 1883 | gcc.c-torture/execute/961125-1.c. */ |
e1ab7874 | 1884 | && !identical_copy_p (insn)) |
1885 | { | |
1886 | if (sched_verbose >= 6) | |
48e1416a | 1887 | sel_print ("Expr %d removed due to use/set conflict\n", |
e1ab7874 | 1888 | INSN_UID (EXPR_INSN_RTX (expr))); |
1889 | av_set_iter_remove (&av_iter); | |
1890 | } | |
1891 | ||
1892 | /* Undo transformations looking at the history vector. */ | |
1893 | FOR_EACH_EXPR (expr, av_iter, *av_ptr) | |
1894 | { | |
1895 | int index = find_in_history_vect (EXPR_HISTORY_OF_CHANGES (expr), | |
1896 | insn, EXPR_VINSN (expr), true); | |
1897 | ||
1898 | if (index >= 0) | |
1899 | { | |
1900 | expr_history_def *phist; | |
1901 | ||
f1f41a6c | 1902 | phist = &EXPR_HISTORY_OF_CHANGES (expr)[index]; |
e1ab7874 | 1903 | |
48e1416a | 1904 | switch (phist->type) |
e1ab7874 | 1905 | { |
1906 | case TRANS_SPECULATION: | |
1907 | { | |
1908 | ds_t old_ds, new_ds; | |
48e1416a | 1909 | |
e1ab7874 | 1910 | /* Compute the difference between old and new speculative |
48e1416a | 1911 | statuses: that's what we need to check. |
e1ab7874 | 1912 | Earlier we used to assert that the status will really |
1913 | change. This no longer works because only the probability | |
1914 | bits in the status may have changed during compute_av_set, | |
48e1416a | 1915 | and in the case of merging different probabilities of the |
1916 | same speculative status along different paths we do not | |
e1ab7874 | 1917 | record this in the history vector. */ |
1918 | old_ds = phist->spec_ds; | |
1919 | new_ds = EXPR_SPEC_DONE_DS (expr); | |
1920 | ||
1921 | old_ds &= SPECULATIVE; | |
1922 | new_ds &= SPECULATIVE; | |
1923 | new_ds &= ~old_ds; | |
48e1416a | 1924 | |
e1ab7874 | 1925 | EXPR_SPEC_TO_CHECK_DS (expr) |= new_ds; |
1926 | break; | |
1927 | } | |
1928 | case TRANS_SUBSTITUTION: | |
1929 | { | |
1930 | expr_def _tmp_expr, *tmp_expr = &_tmp_expr; | |
1931 | vinsn_t new_vi; | |
1932 | bool add = true; | |
48e1416a | 1933 | |
e1ab7874 | 1934 | new_vi = phist->old_expr_vinsn; |
48e1416a | 1935 | |
1936 | gcc_assert (VINSN_SEPARABLE_P (new_vi) | |
e1ab7874 | 1937 | == EXPR_SEPARABLE_P (expr)); |
1938 | copy_expr (tmp_expr, expr); | |
1939 | ||
48e1416a | 1940 | if (vinsn_equal_p (phist->new_expr_vinsn, |
e1ab7874 | 1941 | EXPR_VINSN (tmp_expr))) |
1942 | change_vinsn_in_expr (tmp_expr, new_vi); | |
1943 | else | |
1944 | /* This happens when we're unsubstituting on a bookkeeping | |
1945 | copy, which was in turn substituted. The history is wrong | |
1946 | in this case. Do it the hard way. */ | |
1947 | add = substitute_reg_in_expr (tmp_expr, insn, true); | |
1948 | if (add) | |
1949 | av_set_add (&new_set, tmp_expr); | |
1950 | clear_expr (tmp_expr); | |
1951 | break; | |
1952 | } | |
1953 | default: | |
1954 | gcc_unreachable (); | |
1955 | } | |
1956 | } | |
48e1416a | 1957 | |
e1ab7874 | 1958 | } |
1959 | ||
1960 | av_set_union_and_clear (av_ptr, &new_set, NULL); | |
1961 | } | |
1962 | \f | |
1963 | ||
1964 | /* Moveup_* helpers for code motion and computing av sets. */ | |
1965 | ||
1966 | /* Propagates EXPR inside an insn group through THROUGH_INSN. | |
48e1416a | 1967 | The difference from the below function is that only substitution is |
e1ab7874 | 1968 | performed. */ |
1969 | static enum MOVEUP_EXPR_CODE | |
1970 | moveup_expr_inside_insn_group (expr_t expr, insn_t through_insn) | |
1971 | { | |
1972 | vinsn_t vi = EXPR_VINSN (expr); | |
1973 | ds_t *has_dep_p; | |
1974 | ds_t full_ds; | |
1975 | ||
1976 | /* Do this only inside insn group. */ | |
1977 | gcc_assert (INSN_SCHED_CYCLE (through_insn) > 0); | |
1978 | ||
1979 | full_ds = has_dependence_p (expr, through_insn, &has_dep_p); | |
1980 | if (full_ds == 0) | |
1981 | return MOVEUP_EXPR_SAME; | |
1982 | ||
1983 | /* Substitution is the possible choice in this case. */ | |
1984 | if (has_dep_p[DEPS_IN_RHS]) | |
1985 | { | |
1986 | /* Can't substitute UNIQUE VINSNs. */ | |
1987 | gcc_assert (!VINSN_UNIQUE_P (vi)); | |
48e1416a | 1988 | |
1989 | if (can_substitute_through_p (through_insn, | |
e1ab7874 | 1990 | has_dep_p[DEPS_IN_RHS]) |
1991 | && substitute_reg_in_expr (expr, through_insn, false)) | |
1992 | { | |
1993 | EXPR_WAS_SUBSTITUTED (expr) = true; | |
1994 | return MOVEUP_EXPR_CHANGED; | |
1995 | } | |
1996 | ||
1997 | /* Don't care about this, as even true dependencies may be allowed | |
1998 | in an insn group. */ | |
1999 | return MOVEUP_EXPR_SAME; | |
2000 | } | |
2001 | ||
2002 | /* This can catch output dependencies in COND_EXECs. */ | |
2003 | if (has_dep_p[DEPS_IN_INSN]) | |
2004 | return MOVEUP_EXPR_NULL; | |
48e1416a | 2005 | |
e1ab7874 | 2006 | /* This is either an output or an anti dependence, which usually has |
2007 | a zero latency. Allow this here; if we're wrong, tick_check_p | |
2008 | will fix it. */ | |
2009 | gcc_assert (has_dep_p[DEPS_IN_LHS]); | |
2010 | return MOVEUP_EXPR_AS_RHS; | |
2011 | } | |
2012 | ||
2013 | /* True when a trapping EXPR cannot be moved through THROUGH_INSN. */ | |
2014 | #define CANT_MOVE_TRAPPING(expr, through_insn) \ | |
2015 | (VINSN_MAY_TRAP_P (EXPR_VINSN (expr)) \ | |
2016 | && !sel_insn_has_single_succ_p ((through_insn), SUCCS_ALL) \ | |
2017 | && !sel_insn_is_speculation_check (through_insn)) | |
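/* Explanatory note: one way to read the condition above is that a trapping
   expression must not be hoisted above a point with more than one successor,
   since on the paths where it was not originally executed the new trap would
   be spurious; speculation checks are exempted, presumably because their
   extra successor is only the edge to the recovery block.  */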
2018 | ||
2019 | /* True when a conflict on a target register was found during moveup_expr. */ | |
2020 | static bool was_target_conflict = false; | |
2021 | ||
9845d120 | 2022 | /* Return true when moving a debug INSN across THROUGH_INSN will |
2023 | create a bookkeeping block. We don't want to create such blocks, | |
2024 | for they would cause codegen differences between compilations with | |
2025 | and without debug info. */ | |
2026 | ||
2027 | static bool | |
2028 | moving_insn_creates_bookkeeping_block_p (insn_t insn, | |
2029 | insn_t through_insn) | |
2030 | { | |
2031 | basic_block bbi, bbt; | |
2032 | edge e1, e2; | |
2033 | edge_iterator ei1, ei2; | |
2034 | ||
2035 | if (!bookkeeping_can_be_created_if_moved_through_p (through_insn)) | |
2036 | { | |
2037 | if (sched_verbose >= 9) | |
2038 | sel_print ("no bookkeeping required: "); | |
2039 | return FALSE; | |
2040 | } | |
2041 | ||
2042 | bbi = BLOCK_FOR_INSN (insn); | |
2043 | ||
2044 | if (EDGE_COUNT (bbi->preds) == 1) | |
2045 | { | |
2046 | if (sched_verbose >= 9) | |
2047 | sel_print ("only one pred edge: "); | |
2048 | return TRUE; | |
2049 | } | |
2050 | ||
2051 | bbt = BLOCK_FOR_INSN (through_insn); | |
2052 | ||
2053 | FOR_EACH_EDGE (e1, ei1, bbt->succs) | |
2054 | { | |
2055 | FOR_EACH_EDGE (e2, ei2, bbi->preds) | |
2056 | { | |
2057 | if (find_block_for_bookkeeping (e1, e2, TRUE)) | |
2058 | { | |
2059 | if (sched_verbose >= 9) | |
2060 | sel_print ("found existing block: "); | |
2061 | return FALSE; | |
2062 | } | |
2063 | } | |
2064 | } | |
2065 | ||
2066 | if (sched_verbose >= 9) | |
2067 | sel_print ("would create bookkeeping block: "); | |
2068 | ||
2069 | return TRUE; | |
2070 | } | |
2071 | ||
3e1f1f1a | 2072 | /* Return true when a conflict between EXPR and THROUGH_INSN is found |
2073 | because of implicit clobbers newly created by renaming. */ | |
2074 | static bool | |
2075 | implicit_clobber_conflict_p (insn_t through_insn, expr_t expr) | |
2076 | { | |
2077 | HARD_REG_SET temp; | |
9c4c93d0 | 2078 | rtx_insn *insn; |
2079 | rtx reg, rhs, pat; | |
3e1f1f1a | 2080 | hard_reg_set_iterator hrsi; |
2081 | unsigned regno; | |
2082 | bool valid; | |
2083 | ||
2084 | /* Make a new pseudo register. */ | |
2085 | reg = gen_reg_rtx (GET_MODE (EXPR_LHS (expr))); | |
2086 | max_regno = max_reg_num (); | |
2087 | maybe_extend_reg_info_p (); | |
2088 | ||
2089 | /* Validate a change and bail out early. */ | |
2090 | insn = EXPR_INSN_RTX (expr); | |
2091 | validate_change (insn, &SET_DEST (PATTERN (insn)), reg, true); | |
2092 | valid = verify_changes (0); | |
2093 | cancel_changes (0); | |
2094 | if (!valid) | |
2095 | { | |
2096 | if (sched_verbose >= 6) | |
2097 | sel_print ("implicit clobbers failed validation, "); | |
2098 | return true; | |
2099 | } | |
2100 | ||
2101 | /* Make a new insn with it. */ | |
2102 | rhs = copy_rtx (VINSN_RHS (EXPR_VINSN (expr))); | |
d1f9b275 | 2103 | pat = gen_rtx_SET (reg, rhs); |
3e1f1f1a | 2104 | start_sequence (); |
2105 | insn = emit_insn (pat); | |
2106 | end_sequence (); | |
2107 | ||
2108 | /* Calculate implicit clobbers. */ | |
2109 | extract_insn (insn); | |
8eaaac4d | 2110 | preprocess_constraints (insn); |
3e1f1f1a | 2111 | ira_implicitly_set_insn_hard_regs (&temp); |
2112 | AND_COMPL_HARD_REG_SET (temp, ira_no_alloc_regs); | |
2113 | ||
2114 | /* If any implicit clobber registers intersect with regular ones in | |
2115 | through_insn, we have a dependency and thus bail out. */ | |
2116 | EXECUTE_IF_SET_IN_HARD_REG_SET (temp, 0, regno, hrsi) | |
2117 | { | |
2118 | vinsn_t vi = INSN_VINSN (through_insn); | |
2119 | if (bitmap_bit_p (VINSN_REG_SETS (vi), regno) | |
2120 | || bitmap_bit_p (VINSN_REG_CLOBBERS (vi), regno) | |
2121 | || bitmap_bit_p (VINSN_REG_USES (vi), regno)) | |
2122 | return true; | |
2123 | } | |
2124 | ||
2125 | return false; | |
2126 | } | |
2127 | ||
e1ab7874 | 2128 | /* Modifies EXPR so it can be moved through the THROUGH_INSN, |
48e1416a | 2129 | performing necessary transformations. Record the type of transformation |
2130 | made in PTRANS_TYPE, when it is not NULL. When INSIDE_INSN_GROUP, | |
e1ab7874 | 2131 | permit all dependencies except true ones, and try to remove those |
48e1416a | 2132 | too via forward substitution. All cases when a non-eliminable |
2133 | non-zero cost dependency exists inside an insn group will be fixed | |
e1ab7874 | 2134 | in tick_check_p instead. */ |
2135 | static enum MOVEUP_EXPR_CODE | |
2136 | moveup_expr (expr_t expr, insn_t through_insn, bool inside_insn_group, | |
2137 | enum local_trans_type *ptrans_type) | |
2138 | { | |
2139 | vinsn_t vi = EXPR_VINSN (expr); | |
2140 | insn_t insn = VINSN_INSN_RTX (vi); | |
2141 | bool was_changed = false; | |
2142 | bool as_rhs = false; | |
2143 | ds_t *has_dep_p; | |
2144 | ds_t full_ds; | |
2145 | ||
995ca335 | 2146 | /* ??? We use dependencies of non-debug insns on debug insns to |
2147 | indicate that the debug insns need to be reset if the non-debug | |
2148 | insn is pulled ahead of it. It's hard to figure out how to | |
2149 | introduce such a notion in sel-sched, but it already fails to | |
2150 | support debug insns in other ways, so we just go ahead and | |
2151 | let the debug insns go corrupt for now. */ | |
2152 | if (DEBUG_INSN_P (through_insn) && !DEBUG_INSN_P (insn)) | |
2153 | return MOVEUP_EXPR_SAME; | |
2154 | ||
e1ab7874 | 2155 | /* When inside_insn_group, delegate to the helper. */ |
2156 | if (inside_insn_group) | |
2157 | return moveup_expr_inside_insn_group (expr, through_insn); | |
2158 | ||
2159 | /* Deal with unique insns and control dependencies. */ | |
2160 | if (VINSN_UNIQUE_P (vi)) | |
2161 | { | |
2162 | /* We can move jumps without side-effects or jumps that are | |
2163 | mutually exclusive with instruction THROUGH_INSN (in all cases where | |
2164 | dependencies allow doing so and the jump is not speculative). */ | |
2165 | if (control_flow_insn_p (insn)) | |
2166 | { | |
2167 | basic_block fallthru_bb; | |
2168 | ||
48e1416a | 2169 | /* Do not move checks and do not move jumps through other |
e1ab7874 | 2170 | jumps. */ |
2171 | if (control_flow_insn_p (through_insn) | |
2172 | || sel_insn_is_speculation_check (insn)) | |
2173 | return MOVEUP_EXPR_NULL; | |
2174 | ||
2175 | /* Don't move jumps through CFG joins. */ | |
2176 | if (bookkeeping_can_be_created_if_moved_through_p (through_insn)) | |
2177 | return MOVEUP_EXPR_NULL; | |
2178 | ||
48e1416a | 2179 | /* The jump should have a clear fallthru block, and |
e1ab7874 | 2180 | this block should be in the current region. */ |
2181 | if ((fallthru_bb = fallthru_bb_of_jump (insn)) == NULL | |
2182 | || ! in_current_region_p (fallthru_bb)) | |
2183 | return MOVEUP_EXPR_NULL; | |
48e1416a | 2184 | |
afd14b63 | 2185 | /* And it should be mutually exclusive with through_insn. */ |
2186 | if (! sched_insns_conditions_mutex_p (insn, through_insn) | |
9845d120 | 2187 | && ! DEBUG_INSN_P (through_insn)) |
e1ab7874 | 2188 | return MOVEUP_EXPR_NULL; |
2189 | } | |
2190 | ||
2191 | /* Don't move what we can't move. */ | |
2192 | if (EXPR_CANT_MOVE (expr) | |
2193 | && BLOCK_FOR_INSN (through_insn) != BLOCK_FOR_INSN (insn)) | |
2194 | return MOVEUP_EXPR_NULL; | |
2195 | ||
2196 | /* Don't move SCHED_GROUP instruction through anything. | |
2197 | If we don't force this, then it will be possible to start | |
2198 | scheduling a sched_group before all its dependencies are | |
2199 | resolved. | |
2200 | ??? Haifa deals with this issue by delaying the SCHED_GROUP | |
2201 | as late as possible through rank_for_schedule. */ | |
2202 | if (SCHED_GROUP_P (insn)) | |
2203 | return MOVEUP_EXPR_NULL; | |
2204 | } | |
2205 | else | |
2206 | gcc_assert (!control_flow_insn_p (insn)); | |
2207 | ||
9845d120 | 2208 | /* Don't move debug insns if this would require bookkeeping. */ |
2209 | if (DEBUG_INSN_P (insn) | |
2210 | && BLOCK_FOR_INSN (through_insn) != BLOCK_FOR_INSN (insn) | |
2211 | && moving_insn_creates_bookkeeping_block_p (insn, through_insn)) | |
2212 | return MOVEUP_EXPR_NULL; | |
2213 | ||
e1ab7874 | 2214 | /* Deal with data dependencies. */ |
2215 | was_target_conflict = false; | |
2216 | full_ds = has_dependence_p (expr, through_insn, &has_dep_p); | |
2217 | if (full_ds == 0) | |
2218 | { | |
2219 | if (!CANT_MOVE_TRAPPING (expr, through_insn)) | |
2220 | return MOVEUP_EXPR_SAME; | |
2221 | } | |
2222 | else | |
2223 | { | |
48e1416a | 2224 | /* We can move UNIQUE insn up only as a whole and unchanged, |
e1ab7874 | 2225 | so it shouldn't have any dependencies. */ |
2226 | if (VINSN_UNIQUE_P (vi)) | |
2227 | return MOVEUP_EXPR_NULL; | |
2228 | } | |
2229 | ||
2230 | if (full_ds != 0 && can_speculate_dep_p (full_ds)) | |
2231 | { | |
2232 | int res; | |
2233 | ||
2234 | res = speculate_expr (expr, full_ds); | |
2235 | if (res >= 0) | |
2236 | { | |
2237 | /* Speculation was successful. */ | |
2238 | full_ds = 0; | |
2239 | was_changed = (res > 0); | |
2240 | if (res == 2) | |
2241 | was_target_conflict = true; | |
2242 | if (ptrans_type) | |
2243 | *ptrans_type = TRANS_SPECULATION; | |
2244 | sel_clear_has_dependence (); | |
2245 | } | |
2246 | } | |
2247 | ||
2248 | if (has_dep_p[DEPS_IN_INSN]) | |
2249 | /* We have some dependency that cannot be discarded. */ | |
2250 | return MOVEUP_EXPR_NULL; | |
2251 | ||
2252 | if (has_dep_p[DEPS_IN_LHS]) | |
48e1416a | 2253 | { |
e1ab7874 | 2254 | /* Only separable insns can be moved up with the new register. |
48e1416a | 2255 | Anyway, we should mark that the original register is |
e1ab7874 | 2256 | unavailable. */ |
2257 | if (!enable_schedule_as_rhs_p || !EXPR_SEPARABLE_P (expr)) | |
2258 | return MOVEUP_EXPR_NULL; | |
2259 | ||
3e1f1f1a | 2260 | /* When renaming a hard register to a pseudo before reload, extra |
2261 | dependencies can occur from the implicit clobbers of the insn. | |
2262 | Filter out such cases here. */ | |
2263 | if (!reload_completed && REG_P (EXPR_LHS (expr)) | |
2264 | && HARD_REGISTER_P (EXPR_LHS (expr)) | |
2265 | && implicit_clobber_conflict_p (through_insn, expr)) | |
2266 | { | |
2267 | if (sched_verbose >= 6) | |
2268 | sel_print ("implicit clobbers conflict detected, "); | |
2269 | return MOVEUP_EXPR_NULL; | |
2270 | } | |
e1ab7874 | 2271 | EXPR_TARGET_AVAILABLE (expr) = false; |
2272 | was_target_conflict = true; | |
2273 | as_rhs = true; | |
2274 | } | |
2275 | ||
2276 | /* At this point we have either separable insns, that will be lifted | |
2277 | up only as RHSes, or non-separable insns with no dependency in lhs. | |
2278 | If dependency is in RHS, then try to perform substitution and move up | |
2279 | substituted RHS: | |
2280 | ||
2281 | Ex. 1: Ex.2 | |
2282 | y = x; y = x; | |
2283 | z = y*2; y = y*2; | |
2284 | ||
48e1416a | 2285 | In Ex.1 y*2 can be replaced with x*2 and the whole operation can be |
e1ab7874 | 2286 | moved above y=x assignment as z=x*2. |
2287 | ||
48e1416a | 2288 | In Ex.2 y*2 can also be replaced with x*2, but only the right hand |
e1ab7874 | 2289 | side can be moved because of the output dependency. The operation was |
2290 | cropped to its rhs above. */ | |
2291 | if (has_dep_p[DEPS_IN_RHS]) | |
2292 | { | |
2293 | ds_t *rhs_dsp = &has_dep_p[DEPS_IN_RHS]; | |
2294 | ||
2295 | /* Can't substitute UNIQUE VINSNs. */ | |
2296 | gcc_assert (!VINSN_UNIQUE_P (vi)); | |
2297 | ||
2298 | if (can_speculate_dep_p (*rhs_dsp)) | |
2299 | { | |
2300 | int res; | |
48e1416a | 2301 | |
e1ab7874 | 2302 | res = speculate_expr (expr, *rhs_dsp); |
2303 | if (res >= 0) | |
2304 | { | |
2305 | /* Speculation was successful. */ | |
2306 | *rhs_dsp = 0; | |
2307 | was_changed = (res > 0); | |
2308 | if (res == 2) | |
2309 | was_target_conflict = true; | |
2310 | if (ptrans_type) | |
2311 | *ptrans_type = TRANS_SPECULATION; | |
2312 | } | |
2313 | else | |
2314 | return MOVEUP_EXPR_NULL; | |
2315 | } | |
2316 | else if (can_substitute_through_p (through_insn, | |
2317 | *rhs_dsp) | |
2318 | && substitute_reg_in_expr (expr, through_insn, false)) | |
2319 | { | |
2320 | /* ??? We cannot perform substitution AND speculation on the same | |
2321 | insn. */ | |
2322 | gcc_assert (!was_changed); | |
2323 | was_changed = true; | |
2324 | if (ptrans_type) | |
2325 | *ptrans_type = TRANS_SUBSTITUTION; | |
2326 | EXPR_WAS_SUBSTITUTED (expr) = true; | |
2327 | } | |
2328 | else | |
2329 | return MOVEUP_EXPR_NULL; | |
2330 | } | |
2331 | ||
2332 | /* Don't move trapping insns through jumps. | |
2333 | This check should be at the end to give a chance to control speculation | |
2334 | to perform its duties. */ | |
2335 | if (CANT_MOVE_TRAPPING (expr, through_insn)) | |
2336 | return MOVEUP_EXPR_NULL; | |
2337 | ||
48e1416a | 2338 | return (was_changed |
2339 | ? MOVEUP_EXPR_CHANGED | |
2340 | : (as_rhs | |
e1ab7874 | 2341 | ? MOVEUP_EXPR_AS_RHS |
2342 | : MOVEUP_EXPR_SAME)); | |
2343 | } | |
2344 | ||
48e1416a | 2345 | /* Try to look at bitmap caches for EXPR and INSN pair, return true |
e1ab7874 | 2346 | if successful. When INSIDE_INSN_GROUP, also try to ignore dependencies |
2347 | that can exist within a parallel group. Write to RES the resulting | |
2348 | code for moveup_expr. */ | |
48e1416a | 2349 | static bool |
e1ab7874 | 2350 | try_bitmap_cache (expr_t expr, insn_t insn, |
2351 | bool inside_insn_group, | |
2352 | enum MOVEUP_EXPR_CODE *res) | |
2353 | { | |
2354 | int expr_uid = INSN_UID (EXPR_INSN_RTX (expr)); | |
48e1416a | 2355 | |
e1ab7874 | 2356 | /* First check whether we've analyzed this situation already. */ |
2357 | if (bitmap_bit_p (INSN_ANALYZED_DEPS (insn), expr_uid)) | |
2358 | { | |
2359 | if (bitmap_bit_p (INSN_FOUND_DEPS (insn), expr_uid)) | |
2360 | { | |
2361 | if (sched_verbose >= 6) | |
2362 | sel_print ("removed (cached)\n"); | |
2363 | *res = MOVEUP_EXPR_NULL; | |
2364 | return true; | |
2365 | } | |
2366 | else | |
2367 | { | |
2368 | if (sched_verbose >= 6) | |
2369 | sel_print ("unchanged (cached)\n"); | |
2370 | *res = MOVEUP_EXPR_SAME; | |
2371 | return true; | |
2372 | } | |
2373 | } | |
2374 | else if (bitmap_bit_p (INSN_FOUND_DEPS (insn), expr_uid)) | |
2375 | { | |
2376 | if (inside_insn_group) | |
2377 | { | |
2378 | if (sched_verbose >= 6) | |
2379 | sel_print ("unchanged (as RHS, cached, inside insn group)\n"); | |
2380 | *res = MOVEUP_EXPR_SAME; | |
2381 | return true; | |
48e1416a | 2382 | |
e1ab7874 | 2383 | } |
2384 | else | |
2385 | EXPR_TARGET_AVAILABLE (expr) = false; | |
2386 | ||
48e1416a | 2387 | /* This is the only case when propagation result can change over time, |
2388 | as we can dynamically switch off scheduling as RHS. In this case, | |
e1ab7874 | 2389 | just check the flag to reach the correct decision. */ |
2390 | if (enable_schedule_as_rhs_p) | |
2391 | { | |
2392 | if (sched_verbose >= 6) | |
2393 | sel_print ("unchanged (as RHS, cached)\n"); | |
2394 | *res = MOVEUP_EXPR_AS_RHS; | |
2395 | return true; | |
2396 | } | |
2397 | else | |
2398 | { | |
2399 | if (sched_verbose >= 6) | |
2400 | sel_print ("removed (cached as RHS, but renaming" | |
2401 | " is now disabled)\n"); | |
2402 | *res = MOVEUP_EXPR_NULL; | |
2403 | return true; | |
2404 | } | |
2405 | } | |
2406 | ||
2407 | return false; | |
2408 | } | |
2409 | ||
48e1416a | 2410 | /* Try to look at bitmap caches for EXPR and INSN pair, return true |
e1ab7874 | 2411 | if successful. Write to RES the resulting code for moveup_expr. */ |
48e1416a | 2412 | static bool |
e1ab7874 | 2413 | try_transformation_cache (expr_t expr, insn_t insn, |
2414 | enum MOVEUP_EXPR_CODE *res) | |
2415 | { | |
48e1416a | 2416 | struct transformed_insns *pti |
e1ab7874 | 2417 | = (struct transformed_insns *) |
2418 | htab_find_with_hash (INSN_TRANSFORMED_INSNS (insn), | |
48e1416a | 2419 | &EXPR_VINSN (expr), |
e1ab7874 | 2420 | VINSN_HASH_RTX (EXPR_VINSN (expr))); |
2421 | if (pti) | |
2422 | { | |
48e1416a | 2423 | /* This EXPR was already moved through this insn and was |
2424 | changed as a result. Fetch the proper data from | |
e1ab7874 | 2425 | the hashtable. */ |
48e1416a | 2426 | insert_in_history_vect (&EXPR_HISTORY_OF_CHANGES (expr), |
2427 | INSN_UID (insn), pti->type, | |
2428 | pti->vinsn_old, pti->vinsn_new, | |
e1ab7874 | 2429 | EXPR_SPEC_DONE_DS (expr)); |
48e1416a | 2430 | |
e1ab7874 | 2431 | if (INSN_IN_STREAM_P (VINSN_INSN_RTX (pti->vinsn_new))) |
2432 | pti->vinsn_new = vinsn_copy (pti->vinsn_new, true); | |
2433 | change_vinsn_in_expr (expr, pti->vinsn_new); | |
2434 | if (pti->was_target_conflict) | |
2435 | EXPR_TARGET_AVAILABLE (expr) = false; | |
2436 | if (pti->type == TRANS_SPECULATION) | |
2437 | { | |
e1ab7874 | 2438 | EXPR_SPEC_DONE_DS (expr) = pti->ds; |
2439 | EXPR_NEEDS_SPEC_CHECK_P (expr) |= pti->needs_check; | |
2440 | } | |
2441 | ||
2442 | if (sched_verbose >= 6) | |
2443 | { | |
2444 | sel_print ("changed (cached): "); | |
2445 | dump_expr (expr); | |
2446 | sel_print ("\n"); | |
2447 | } | |
2448 | ||
2449 | *res = MOVEUP_EXPR_CHANGED; | |
2450 | return true; | |
2451 | } | |
2452 | ||
2453 | return false; | |
2454 | } | |
2455 | ||
2456 | /* Update bitmap caches on INSN with result RES of propagating EXPR. */ | |
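/* A compact summary of the encoding used by this function and read back by
   try_bitmap_cache above (derived from the code, stated here for clarity):
     MOVEUP_EXPR_NULL    -> ANALYZED_DEPS bit set,   FOUND_DEPS bit set
     MOVEUP_EXPR_SAME    -> ANALYZED_DEPS bit set,   FOUND_DEPS bit clear
     MOVEUP_EXPR_AS_RHS  -> ANALYZED_DEPS bit clear, FOUND_DEPS bit set
   Both bits clear means no cached result.  */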
2457 | static void | |
48e1416a | 2458 | update_bitmap_cache (expr_t expr, insn_t insn, bool inside_insn_group, |
e1ab7874 | 2459 | enum MOVEUP_EXPR_CODE res) |
2460 | { | |
2461 | int expr_uid = INSN_UID (EXPR_INSN_RTX (expr)); | |
2462 | ||
48e1416a | 2463 | /* Do not cache result of propagating jumps through an insn group, |
e1ab7874 | 2464 | as it is always true, which is not useful outside the group. */ |
2465 | if (inside_insn_group) | |
2466 | return; | |
48e1416a | 2467 | |
e1ab7874 | 2468 | if (res == MOVEUP_EXPR_NULL) |
2469 | { | |
2470 | bitmap_set_bit (INSN_ANALYZED_DEPS (insn), expr_uid); | |
2471 | bitmap_set_bit (INSN_FOUND_DEPS (insn), expr_uid); | |
2472 | } | |
2473 | else if (res == MOVEUP_EXPR_SAME) | |
2474 | { | |
2475 | bitmap_set_bit (INSN_ANALYZED_DEPS (insn), expr_uid); | |
2476 | bitmap_clear_bit (INSN_FOUND_DEPS (insn), expr_uid); | |
2477 | } | |
2478 | else if (res == MOVEUP_EXPR_AS_RHS) | |
2479 | { | |
2480 | bitmap_clear_bit (INSN_ANALYZED_DEPS (insn), expr_uid); | |
2481 | bitmap_set_bit (INSN_FOUND_DEPS (insn), expr_uid); | |
2482 | } | |
2483 | else | |
2484 | gcc_unreachable (); | |
2485 | } | |
2486 | ||
2487 | /* Update hashtable on INSN with changed EXPR, old EXPR_OLD_VINSN | |
2488 | and transformation type TRANS_TYPE. */ | |
2489 | static void | |
48e1416a | 2490 | update_transformation_cache (expr_t expr, insn_t insn, |
e1ab7874 | 2491 | bool inside_insn_group, |
48e1416a | 2492 | enum local_trans_type trans_type, |
e1ab7874 | 2493 | vinsn_t expr_old_vinsn) |
2494 | { | |
2495 | struct transformed_insns *pti; | |
2496 | ||
2497 | if (inside_insn_group) | |
2498 | return; | |
48e1416a | 2499 | |
e1ab7874 | 2500 | pti = XNEW (struct transformed_insns); |
2501 | pti->vinsn_old = expr_old_vinsn; | |
2502 | pti->vinsn_new = EXPR_VINSN (expr); | |
2503 | pti->type = trans_type; | |
2504 | pti->was_target_conflict = was_target_conflict; | |
2505 | pti->ds = EXPR_SPEC_DONE_DS (expr); | |
2506 | pti->needs_check = EXPR_NEEDS_SPEC_CHECK_P (expr); | |
2507 | vinsn_attach (pti->vinsn_old); | |
2508 | vinsn_attach (pti->vinsn_new); | |
48e1416a | 2509 | *((struct transformed_insns **) |
e1ab7874 | 2510 | htab_find_slot_with_hash (INSN_TRANSFORMED_INSNS (insn), |
2511 | pti, VINSN_HASH_RTX (expr_old_vinsn), | |
2512 | INSERT)) = pti; | |
2513 | } | |
2514 | ||
48e1416a | 2515 | /* Same as moveup_expr, but first looks up the result of |
e1ab7874 | 2516 | transformation in caches. */ |
2517 | static enum MOVEUP_EXPR_CODE | |
2518 | moveup_expr_cached (expr_t expr, insn_t insn, bool inside_insn_group) | |
2519 | { | |
2520 | enum MOVEUP_EXPR_CODE res; | |
2521 | bool got_answer = false; | |
2522 | ||
2523 | if (sched_verbose >= 6) | |
2524 | { | |
48e1416a | 2525 | sel_print ("Moving "); |
e1ab7874 | 2526 | dump_expr (expr); |
2527 | sel_print (" through %d: ", INSN_UID (insn)); | |
2528 | } | |
2529 | ||
9845d120 | 2530 | if (DEBUG_INSN_P (EXPR_INSN_RTX (expr)) |
2531 | && (sel_bb_head (BLOCK_FOR_INSN (EXPR_INSN_RTX (expr))) | |
2532 | == EXPR_INSN_RTX (expr))) | |
2533 | /* Don't use cached information for debug insns that are heads of | |
2534 | basic blocks. */; | |
2535 | else if (try_bitmap_cache (expr, insn, inside_insn_group, &res)) | |
e1ab7874 | 2536 | /* When inside an insn group, we do not want to remove stores conflicting |
2537 | with previously issued loads. */ | |
2538 | got_answer = ! inside_insn_group || res != MOVEUP_EXPR_NULL; | |
2539 | else if (try_transformation_cache (expr, insn, &res)) | |
2540 | got_answer = true; | |
2541 | ||
2542 | if (! got_answer) | |
2543 | { | |
2544 | /* Invoke moveup_expr and record the results. */ | |
2545 | vinsn_t expr_old_vinsn = EXPR_VINSN (expr); | |
2546 | ds_t expr_old_spec_ds = EXPR_SPEC_DONE_DS (expr); | |
2547 | int expr_uid = INSN_UID (VINSN_INSN_RTX (expr_old_vinsn)); | |
2548 | bool unique_p = VINSN_UNIQUE_P (expr_old_vinsn); | |
2549 | enum local_trans_type trans_type = TRANS_SUBSTITUTION; | |
2550 | ||
48e1416a | 2551 | /* ??? Invent something better than this. We can't allow old_vinsn |
e1ab7874 | 2552 | to go, we need it for the history vector. */ |
2553 | vinsn_attach (expr_old_vinsn); | |
2554 | ||
2555 | res = moveup_expr (expr, insn, inside_insn_group, | |
2556 | &trans_type); | |
2557 | switch (res) | |
2558 | { | |
2559 | case MOVEUP_EXPR_NULL: | |
2560 | update_bitmap_cache (expr, insn, inside_insn_group, res); | |
2561 | if (sched_verbose >= 6) | |
2562 | sel_print ("removed\n"); | |
2563 | break; | |
2564 | ||
2565 | case MOVEUP_EXPR_SAME: | |
2566 | update_bitmap_cache (expr, insn, inside_insn_group, res); | |
2567 | if (sched_verbose >= 6) | |
2568 | sel_print ("unchanged\n"); | |
2569 | break; | |
2570 | ||
2571 | case MOVEUP_EXPR_AS_RHS: | |
2572 | gcc_assert (!unique_p || inside_insn_group); | |
2573 | update_bitmap_cache (expr, insn, inside_insn_group, res); | |
2574 | if (sched_verbose >= 6) | |
2575 | sel_print ("unchanged (as RHS)\n"); | |
2576 | break; | |
2577 | ||
2578 | case MOVEUP_EXPR_CHANGED: | |
2579 | gcc_assert (INSN_UID (EXPR_INSN_RTX (expr)) != expr_uid | |
2580 | || EXPR_SPEC_DONE_DS (expr) != expr_old_spec_ds); | |
48e1416a | 2581 | insert_in_history_vect (&EXPR_HISTORY_OF_CHANGES (expr), |
2582 | INSN_UID (insn), trans_type, | |
2583 | expr_old_vinsn, EXPR_VINSN (expr), | |
e1ab7874 | 2584 | expr_old_spec_ds); |
2585 | update_transformation_cache (expr, insn, inside_insn_group, | |
2586 | trans_type, expr_old_vinsn); | |
2587 | if (sched_verbose >= 6) | |
2588 | { | |
2589 | sel_print ("changed: "); | |
2590 | dump_expr (expr); | |
2591 | sel_print ("\n"); | |
2592 | } | |
2593 | break; | |
2594 | default: | |
2595 | gcc_unreachable (); | |
2596 | } | |
2597 | ||
2598 | vinsn_detach (expr_old_vinsn); | |
2599 | } | |
2600 | ||
2601 | return res; | |
2602 | } | |
2603 | ||
48e1416a | 2604 | /* Moves an av set AVP up through INSN, performing necessary |
e1ab7874 | 2605 | transformations. */ |
2606 | static void | |
2607 | moveup_set_expr (av_set_t *avp, insn_t insn, bool inside_insn_group) | |
2608 | { | |
2609 | av_set_iterator i; | |
2610 | expr_t expr; | |
2611 | ||
48e1416a | 2612 | FOR_EACH_EXPR_1 (expr, i, avp) |
2613 | { | |
2614 | ||
e1ab7874 | 2615 | switch (moveup_expr_cached (expr, insn, inside_insn_group)) |
2616 | { | |
2617 | case MOVEUP_EXPR_SAME: | |
2618 | case MOVEUP_EXPR_AS_RHS: | |
2619 | break; | |
2620 | ||
2621 | case MOVEUP_EXPR_NULL: | |
2622 | av_set_iter_remove (&i); | |
2623 | break; | |
2624 | ||
2625 | case MOVEUP_EXPR_CHANGED: | |
2626 | expr = merge_with_other_exprs (avp, &i, expr); | |
2627 | break; | |
48e1416a | 2628 | |
e1ab7874 | 2629 | default: |
2630 | gcc_unreachable (); | |
2631 | } | |
2632 | } | |
2633 | } | |
2634 | ||
2635 | /* Moves AVP set along PATH. */ | |
2636 | static void | |
2637 | moveup_set_inside_insn_group (av_set_t *avp, ilist_t path) | |
2638 | { | |
2639 | int last_cycle; | |
48e1416a | 2640 | |
e1ab7874 | 2641 | if (sched_verbose >= 6) |
2642 | sel_print ("Moving expressions up in the insn group...\n"); | |
2643 | if (! path) | |
2644 | return; | |
2645 | last_cycle = INSN_SCHED_CYCLE (ILIST_INSN (path)); | |
48e1416a | 2646 | while (path |
e1ab7874 | 2647 | && INSN_SCHED_CYCLE (ILIST_INSN (path)) == last_cycle) |
2648 | { | |
2649 | moveup_set_expr (avp, ILIST_INSN (path), true); | |
2650 | path = ILIST_NEXT (path); | |
2651 | } | |
2652 | } | |
2653 | ||
2654 | /* Returns true if after moving EXPR along PATH it equals EXPR_VLIW. */ | |
2655 | static bool | |
2656 | equal_after_moveup_path_p (expr_t expr, ilist_t path, expr_t expr_vliw) | |
2657 | { | |
2658 | expr_def _tmp, *tmp = &_tmp; | |
2659 | int last_cycle; | |
2660 | bool res = true; | |
2661 | ||
2662 | copy_expr_onside (tmp, expr); | |
2663 | last_cycle = path ? INSN_SCHED_CYCLE (ILIST_INSN (path)) : 0; | |
48e1416a | 2664 | while (path |
e1ab7874 | 2665 | && res |
2666 | && INSN_SCHED_CYCLE (ILIST_INSN (path)) == last_cycle) | |
2667 | { | |
48e1416a | 2668 | res = (moveup_expr_cached (tmp, ILIST_INSN (path), true) |
e1ab7874 | 2669 | != MOVEUP_EXPR_NULL); |
2670 | path = ILIST_NEXT (path); | |
2671 | } | |
2672 | ||
2673 | if (res) | |
2674 | { | |
2675 | vinsn_t tmp_vinsn = EXPR_VINSN (tmp); | |
2676 | vinsn_t expr_vliw_vinsn = EXPR_VINSN (expr_vliw); | |
2677 | ||
2678 | if (tmp_vinsn != expr_vliw_vinsn) | |
2679 | res = vinsn_equal_p (tmp_vinsn, expr_vliw_vinsn); | |
2680 | } | |
2681 | ||
2682 | clear_expr (tmp); | |
2683 | return res; | |
2684 | } | |
2685 | \f | |
2686 | ||
2687 | /* Functions that compute av and lv sets. */ | |
2688 | ||
48e1416a | 2689 | /* Returns true if INSN is not a downward continuation of the given path P in |
e1ab7874 | 2690 | the current stage. */ |
2691 | static bool | |
2692 | is_ineligible_successor (insn_t insn, ilist_t p) | |
2693 | { | |
2694 | insn_t prev_insn; | |
2695 | ||
2696 | /* Check if insn is not deleted. */ | |
2697 | if (PREV_INSN (insn) && NEXT_INSN (PREV_INSN (insn)) != insn) | |
2698 | gcc_unreachable (); | |
2699 | else if (NEXT_INSN (insn) && PREV_INSN (NEXT_INSN (insn)) != insn) | |
2700 | gcc_unreachable (); | |
2701 | ||
2702 | /* If it's the first insn visited, then the successor is ok. */ | |
2703 | if (!p) | |
2704 | return false; | |
2705 | ||
2706 | prev_insn = ILIST_INSN (p); | |
2707 | ||
2708 | if (/* a backward edge. */ | |
2709 | INSN_SEQNO (insn) < INSN_SEQNO (prev_insn) | |
2710 | /* is already visited. */ | |
2711 | || (INSN_SEQNO (insn) == INSN_SEQNO (prev_insn) | |
2712 | && (ilist_is_in_p (p, insn) | |
48e1416a | 2713 | /* We can reach another fence here and still seqno of insn |
2714 | would be equal to seqno of prev_insn. This is possible | |
e1ab7874 | 2715 | when prev_insn is a previously created bookkeeping copy. |
2716 | In that case it'd get a seqno of insn. Thus, check here | |
2717 | whether insn is in current fence too. */ | |
2718 | || IN_CURRENT_FENCE_P (insn))) | |
2719 | /* Was already scheduled on this round. */ | |
2720 | || (INSN_SEQNO (insn) > INSN_SEQNO (prev_insn) | |
2721 | && IN_CURRENT_FENCE_P (insn)) | |
48e1416a | 2722 | /* An insn from another fence could also be |
2723 | scheduled earlier even if this insn is not in | |
e1ab7874 | 2724 | a fence list right now. Check INSN_SCHED_CYCLE instead. */ |
2725 | || (!pipelining_p | |
2726 | && INSN_SCHED_TIMES (insn) > 0)) | |
2727 | return true; | |
2728 | else | |
2729 | return false; | |
2730 | } | |
2731 | ||
48e1416a | 2732 | /* Computes the av_set below the last bb insn INSN, doing all the 'dirty work' |
2733 | of handling multiple successors and properly merging its av_sets. P is | |
2734 | the current path traversed. WS is the size of the lookahead window. | |
e1ab7874 | 2735 | Return the av set computed. */ |
2736 | static av_set_t | |
2737 | compute_av_set_at_bb_end (insn_t insn, ilist_t p, int ws) | |
2738 | { | |
2739 | struct succs_info *sinfo; | |
2740 | av_set_t expr_in_all_succ_branches = NULL; | |
2741 | int is; | |
2742 | insn_t succ, zero_succ = NULL; | |
2743 | av_set_t av1 = NULL; | |
2744 | ||
2745 | gcc_assert (sel_bb_end_p (insn)); | |
2746 | ||
48e1416a | 2747 | /* Find the different kinds of successors needed to correctly compute the
e1ab7874 | 2748 | SPEC and TARGET_AVAILABLE attributes. */ |
2749 | sinfo = compute_succs_info (insn, SUCCS_NORMAL); | |
2750 | ||
2751 | /* Debug output. */ | |
2752 | if (sched_verbose >= 6) | |
2753 | { | |
2754 | sel_print ("successors of bb end (%d): ", INSN_UID (insn)); | |
2755 | dump_insn_vector (sinfo->succs_ok); | |
2756 | sel_print ("\n"); | |
2757 | if (sinfo->succs_ok_n != sinfo->all_succs_n) | |
2758 | sel_print ("real successors num: %d\n", sinfo->all_succs_n); | |
2759 | } | |
2760 | ||
851d9296 | 2761 | /* Add insn to the tail of current path. */ |
e1ab7874 | 2762 | ilist_add (&p, insn); |
2763 | ||
f1f41a6c | 2764 | FOR_EACH_VEC_ELT (sinfo->succs_ok, is, succ) |
e1ab7874 | 2765 | { |
2766 | av_set_t succ_set; | |
2767 | ||
2768 | /* We will edit SUCC_SET and EXPR_SPEC field of its elements. */ | |
2769 | succ_set = compute_av_set_inside_bb (succ, p, ws, true); | |
2770 | ||
48e1416a | 2771 | av_set_split_usefulness (succ_set, |
f1f41a6c | 2772 | sinfo->probs_ok[is], |
e1ab7874 | 2773 | sinfo->all_prob); |
2774 | ||
fd23e508 | 2775 | if (sinfo->all_succs_n > 1) |
e1ab7874 | 2776 | { |
48e1416a | 2777 | /* Find EXPR'es that came from *all* successors and save them |
e1ab7874 | 2778 | into expr_in_all_succ_branches. This set will be used later |
2779 | for calculating speculation attributes of EXPR'es. */ | |
2780 | if (is == 0) | |
2781 | { | |
2782 | expr_in_all_succ_branches = av_set_copy (succ_set); | |
2783 | ||
2784 | /* Remember the first successor for later. */ | |
2785 | zero_succ = succ; | |
2786 | } | |
2787 | else | |
2788 | { | |
2789 | av_set_iterator i; | |
2790 | expr_t expr; | |
48e1416a | 2791 | |
e1ab7874 | 2792 | FOR_EACH_EXPR_1 (expr, i, &expr_in_all_succ_branches) |
2793 | if (!av_set_is_in_p (succ_set, EXPR_VINSN (expr))) | |
2794 | av_set_iter_remove (&i); | |
2795 | } | |
2796 | } | |
2797 | ||
2798 | /* Union the av_sets. Check liveness restrictions on target registers | |
2799 | in special case of two successors. */ | |
2800 | if (sinfo->succs_ok_n == 2 && is == 1) | |
2801 | { | |
2802 | basic_block bb0 = BLOCK_FOR_INSN (zero_succ); | |
2803 | basic_block bb1 = BLOCK_FOR_INSN (succ); | |
2804 | ||
2805 | gcc_assert (BB_LV_SET_VALID_P (bb0) && BB_LV_SET_VALID_P (bb1)); | |
48e1416a | 2806 | av_set_union_and_live (&av1, &succ_set, |
e1ab7874 | 2807 | BB_LV_SET (bb0), |
2808 | BB_LV_SET (bb1), | |
2809 | insn); | |
2810 | } | |
2811 | else | |
2812 | av_set_union_and_clear (&av1, &succ_set, insn); | |
2813 | } | |
2814 | ||
48e1416a | 2815 | /* Check liveness restrictions the hard way when there are more than
e1ab7874 | 2816 | two successors. */ |
2817 | if (sinfo->succs_ok_n > 2) | |
f1f41a6c | 2818 | FOR_EACH_VEC_ELT (sinfo->succs_ok, is, succ) |
e1ab7874 | 2819 | { |
2820 | basic_block succ_bb = BLOCK_FOR_INSN (succ); | |
48e1416a | 2821 | |
e1ab7874 | 2822 | gcc_assert (BB_LV_SET_VALID_P (succ_bb)); |
48e1416a | 2823 | mark_unavailable_targets (av1, BB_AV_SET (succ_bb), |
e1ab7874 | 2824 | BB_LV_SET (succ_bb)); |
2825 | } | |
48e1416a | 2826 | |
2827 | /* Finally, check liveness restrictions on paths leaving the region. */ | |
e1ab7874 | 2828 | if (sinfo->all_succs_n > sinfo->succs_ok_n) |
f1f41a6c | 2829 | FOR_EACH_VEC_ELT (sinfo->succs_other, is, succ) |
48e1416a | 2830 | mark_unavailable_targets |
e1ab7874 | 2831 | (av1, NULL, BB_LV_SET (BLOCK_FOR_INSN (succ))); |
2832 | ||
2833 | if (sinfo->all_succs_n > 1) | |
2834 | { | |
2835 | av_set_iterator i; | |
2836 | expr_t expr; | |
2837 | ||
48e1416a | 2838 | /* Increase the spec attribute of all EXPR'es that didn't come |
e1ab7874 | 2839 | from all successors. */ |
2840 | FOR_EACH_EXPR (expr, i, av1) | |
2841 | if (!av_set_is_in_p (expr_in_all_succ_branches, EXPR_VINSN (expr))) | |
2842 | EXPR_SPEC (expr)++; | |
2843 | ||
2844 | av_set_clear (&expr_in_all_succ_branches); | |
48e1416a | 2845 | |
2846 | /* Do not move conditional branches through other | |
2847 | conditional branches. So, remove all conditional | |
e1ab7874 | 2848 | branches from the av_set if the current insn is a conditional
2849 | branch. */ | |
2850 | av_set_substract_cond_branches (&av1); | |
2851 | } | |
48e1416a | 2852 | |
e1ab7874 | 2853 | ilist_remove (&p); |
2854 | free_succs_info (sinfo); | |
2855 | ||
2856 | if (sched_verbose >= 6) | |
2857 | { | |
2858 | sel_print ("av_succs (%d): ", INSN_UID (insn)); | |
2859 | dump_av_set (av1); | |
2860 | sel_print ("\n"); | |
2861 | } | |
2862 | ||
2863 | return av1; | |
2864 | } | |
2865 | ||
48e1416a | 2866 | /* This function computes av_set for the FIRST_INSN by dragging valid |
2867 | av_set through all basic block insns either from the end of basic block | |
2868 | (computed using compute_av_set_at_bb_end) or from the insn on which | |
e1ab7874 | 2869 | MAX_WS was exceeded. It uses compute_av_set_at_bb_end to compute av_set |
2870 | below the basic block and to handle conditional branches. | |
2871 | FIRST_INSN - the basic block head, P - path consisting of the insns | |
2872 | traversed on the way to the FIRST_INSN (the path is sparse, only bb heads | |
2873 | and bb ends are added to the path), WS - current window size, | |
2874 | NEED_COPY_P - true if we'll make a copy of av_set before returning it. */ | |
2875 | static av_set_t | |
48e1416a | 2876 | compute_av_set_inside_bb (insn_t first_insn, ilist_t p, int ws, |
e1ab7874 | 2877 | bool need_copy_p) |
2878 | { | |
2879 | insn_t cur_insn; | |
2880 | int end_ws = ws; | |
2881 | insn_t bb_end = sel_bb_end (BLOCK_FOR_INSN (first_insn)); | |
2882 | insn_t after_bb_end = NEXT_INSN (bb_end); | |
2883 | insn_t last_insn; | |
2884 | av_set_t av = NULL; | |
2885 | basic_block cur_bb = BLOCK_FOR_INSN (first_insn); | |
2886 | ||
2887 | /* Return NULL if insn is not on the legitimate downward path. */ | |
2888 | if (is_ineligible_successor (first_insn, p)) | |
2889 | { | |
2890 | if (sched_verbose >= 6) | |
2891 | sel_print ("Insn %d is ineligible_successor\n", INSN_UID (first_insn)); | |
2892 | ||
2893 | return NULL; | |
2894 | } | |
2895 | ||
48e1416a | 2896 | /* If insn already has valid av(insn) computed, just return it. */ |
e1ab7874 | 2897 | if (AV_SET_VALID_P (first_insn)) |
2898 | { | |
2899 | av_set_t av_set; | |
2900 | ||
2901 | if (sel_bb_head_p (first_insn)) | |
2902 | av_set = BB_AV_SET (BLOCK_FOR_INSN (first_insn)); | |
2903 | else | |
2904 | av_set = NULL; | |
2905 | ||
2906 | if (sched_verbose >= 6) | |
2907 | { | |
2908 | sel_print ("Insn %d has a valid av set: ", INSN_UID (first_insn)); | |
2909 | dump_av_set (av_set); | |
2910 | sel_print ("\n"); | |
2911 | } | |
2912 | ||
2913 | return need_copy_p ? av_set_copy (av_set) : av_set; | |
2914 | } | |
2915 | ||
2916 | ilist_add (&p, first_insn); | |
2917 | ||
2918 | /* As a result, after this loop has completed, LAST_INSN will hold | |
48e1416a | 2919 | the insn which has a valid av_set to start the backward computation
2920 | from: the set either will be NULL because the window size was exceeded | |
2921 | on that insn, or a valid av_set as returned by compute_av_set for the | |
e1ab7874 | 2922 | last insn of the basic block. */
2923 | for (last_insn = first_insn; last_insn != after_bb_end; | |
2924 | last_insn = NEXT_INSN (last_insn)) | |
2925 | { | |
2926 | /* We may encounter valid av_set not only on bb_head, but also on | |
2927 | those insns on which previously MAX_WS was exceeded. */ | |
2928 | if (AV_SET_VALID_P (last_insn)) | |
2929 | { | |
2930 | if (sched_verbose >= 6) | |
2931 | sel_print ("Insn %d has a valid empty av set\n", INSN_UID (last_insn)); | |
2932 | break; | |
2933 | } | |
2934 | ||
2935 | /* The special case: the last insn of the BB may be an | |
2936 | ineligible_successor because of the SEQ_NO that was assigned to | |
2937 | it as a bookkeeping copy. */ | |
48e1416a | 2938 | if (last_insn != first_insn |
e1ab7874 | 2939 | && is_ineligible_successor (last_insn, p)) |
2940 | { | |
2941 | if (sched_verbose >= 6) | |
2942 | sel_print ("Insn %d is ineligible_successor\n", INSN_UID (last_insn)); | |
48e1416a | 2943 | break; |
e1ab7874 | 2944 | } |
2945 | ||
9845d120 | 2946 | if (DEBUG_INSN_P (last_insn)) |
2947 | continue; | |
2948 | ||
e1ab7874 | 2949 | if (end_ws > max_ws) |
2950 | { | |
48e1416a | 2951 | /* We can reach max lookahead size at bb_header, so clean av_set |
e1ab7874 | 2952 | first. */ |
2953 | INSN_WS_LEVEL (last_insn) = global_level; | |
2954 | ||
2955 | if (sched_verbose >= 6) | |
2956 | sel_print ("Insn %d is beyond the software lookahead window size\n", | |
2957 | INSN_UID (last_insn)); | |
2958 | break; | |
2959 | } | |
2960 | ||
2961 | end_ws++; | |
2962 | } | |
2963 | ||
2964 | /* Get the valid av_set into AV above the LAST_INSN to start backward | |
2965 | computation from. It either will be empty av_set or av_set computed from | |
2966 | the successors on the last insn of the current bb. */ | |
2967 | if (last_insn != after_bb_end) | |
2968 | { | |
2969 | av = NULL; | |
2970 | ||
48e1416a | 2971 | /* This is needed only to obtain av_sets that are identical to |
e1ab7874 | 2972 | those computed by the old compute_av_set version. */ |
2973 | if (last_insn == first_insn && !INSN_NOP_P (last_insn)) | |
2974 | av_set_add (&av, INSN_EXPR (last_insn)); | |
2975 | } | |
2976 | else | |
2977 | /* END_WS is always already increased by 1 if LAST_INSN == AFTER_BB_END. */ | |
2978 | av = compute_av_set_at_bb_end (bb_end, p, end_ws); | |
2979 | ||
2980 | /* Compute av_set in AV starting from below the LAST_INSN up to | |
2981 | location above the FIRST_INSN. */ | |
2982 | for (cur_insn = PREV_INSN (last_insn); cur_insn != PREV_INSN (first_insn); | |
48e1416a | 2983 | cur_insn = PREV_INSN (cur_insn)) |
e1ab7874 | 2984 | if (!INSN_NOP_P (cur_insn)) |
2985 | { | |
2986 | expr_t expr; | |
48e1416a | 2987 | |
e1ab7874 | 2988 | moveup_set_expr (&av, cur_insn, false); |
48e1416a | 2989 | |
2990 | /* If the expression for CUR_INSN is already in the set, | |
e1ab7874 | 2991 | replace it by the new one. */ |
48e1416a | 2992 | expr = av_set_lookup (av, INSN_VINSN (cur_insn)); |
e1ab7874 | 2993 | if (expr != NULL) |
2994 | { | |
2995 | clear_expr (expr); | |
2996 | copy_expr (expr, INSN_EXPR (cur_insn)); | |
2997 | } | |
2998 | else | |
2999 | av_set_add (&av, INSN_EXPR (cur_insn)); | |
3000 | } | |
3001 | ||
3002 | /* Clear stale bb_av_set. */ | |
3003 | if (sel_bb_head_p (first_insn)) | |
3004 | { | |
3005 | av_set_clear (&BB_AV_SET (cur_bb)); | |
3006 | BB_AV_SET (cur_bb) = need_copy_p ? av_set_copy (av) : av; | |
3007 | BB_AV_LEVEL (cur_bb) = global_level; | |
3008 | } | |
3009 | ||
3010 | if (sched_verbose >= 6) | |
3011 | { | |
3012 | sel_print ("Computed av set for insn %d: ", INSN_UID (first_insn)); | |
3013 | dump_av_set (av); | |
3014 | sel_print ("\n"); | |
3015 | } | |
3016 | ||
3017 | ilist_remove (&p); | |
3018 | return av; | |
3019 | } | |
3020 | ||
3021 | /* Compute av set before INSN. | |
3022 | INSN - the current operation (actual rtx INSN) | |
3023 | P - the current path, which is list of insns visited so far | |
3024 | WS - software lookahead window size. | |
3025 | UNIQUE_P - TRUE, if returned av_set will be changed, hence | |
3026 | if we want to save computed av_set in s_i_d, we should make a copy of it. | |
3027 | ||
3028 | In the resulting set we will have only expressions that don't have delay | |
3029 | stalls and nonsubstitutable dependences. */ | |
3030 | static av_set_t | |
3031 | compute_av_set (insn_t insn, ilist_t p, int ws, bool unique_p) | |
3032 | { | |
3033 | return compute_av_set_inside_bb (insn, p, ws, unique_p); | |
3034 | } | |
3035 | ||
3036 | /* Propagate a liveness set LV through INSN. */ | |
3037 | static void | |
3038 | propagate_lv_set (regset lv, insn_t insn) | |
3039 | { | |
3040 | gcc_assert (INSN_P (insn)); | |
3041 | ||
3042 | if (INSN_NOP_P (insn)) | |
3043 | return; | |
3044 | ||
a1b0a968 | 3045 | df_simulate_one_insn_backwards (BLOCK_FOR_INSN (insn), insn, lv); |
e1ab7874 | 3046 | } |
3047 | ||
3048 | /* Return the liveness set at the end of BB. */ | |
3049 | static regset | |
3050 | compute_live_after_bb (basic_block bb) | |
3051 | { | |
3052 | edge e; | |
3053 | edge_iterator ei; | |
3054 | regset lv = get_clear_regset_from_pool (); | |
3055 | ||
3056 | gcc_assert (!ignore_first); | |
3057 | ||
3058 | FOR_EACH_EDGE (e, ei, bb->succs) | |
3059 | if (sel_bb_empty_p (e->dest)) | |
3060 | { | |
3061 | if (! BB_LV_SET_VALID_P (e->dest)) | |
3062 | { | |
3063 | gcc_unreachable (); | |
3064 | gcc_assert (BB_LV_SET (e->dest) == NULL); | |
3065 | BB_LV_SET (e->dest) = compute_live_after_bb (e->dest); | |
3066 | BB_LV_SET_VALID_P (e->dest) = true; | |
3067 | } | |
3068 | IOR_REG_SET (lv, BB_LV_SET (e->dest)); | |
3069 | } | |
3070 | else | |
3071 | IOR_REG_SET (lv, compute_live (sel_bb_head (e->dest))); | |
3072 | ||
3073 | return lv; | |
3074 | } | |
3075 | ||
3076 | /* Compute the set of all live registers at the point before INSN and save | |
3077 | it at INSN if INSN is bb header. */ | |
3078 | regset | |
3079 | compute_live (insn_t insn) | |
3080 | { | |
3081 | basic_block bb = BLOCK_FOR_INSN (insn); | |
3082 | insn_t final, temp; | |
3083 | regset lv; | |
3084 | ||
3085 | /* Return the valid set if we're already on it. */ | |
3086 | if (!ignore_first) | |
3087 | { | |
3088 | regset src = NULL; | |
48e1416a | 3089 | |
e1ab7874 | 3090 | if (sel_bb_head_p (insn) && BB_LV_SET_VALID_P (bb)) |
3091 | src = BB_LV_SET (bb); | |
48e1416a | 3092 | else |
e1ab7874 | 3093 | { |
3094 | gcc_assert (in_current_region_p (bb)); | |
3095 | if (INSN_LIVE_VALID_P (insn)) | |
3096 | src = INSN_LIVE (insn); | |
3097 | } | |
48e1416a | 3098 | |
e1ab7874 | 3099 | if (src) |
3100 | { | |
3101 | lv = get_regset_from_pool (); | |
3102 | COPY_REG_SET (lv, src); | |
3103 | ||
3104 | if (sel_bb_head_p (insn) && ! BB_LV_SET_VALID_P (bb)) | |
3105 | { | |
3106 | COPY_REG_SET (BB_LV_SET (bb), lv); | |
3107 | BB_LV_SET_VALID_P (bb) = true; | |
3108 | } | |
48e1416a | 3109 | |
e1ab7874 | 3110 | return_regset_to_pool (lv); |
3111 | return lv; | |
3112 | } | |
3113 | } | |
3114 | ||
3115 | /* We've skipped the wrong lv_set. Don't skip the right one. */ | |
3116 | ignore_first = false; | |
3117 | gcc_assert (in_current_region_p (bb)); | |
3118 | ||
48e1416a | 3119 | /* Find a valid LV set in this block or below, if needed. |
3120 | Start searching from the next insn: either ignore_first is true, or | |
e1ab7874 | 3121 | INSN doesn't have a correct live set. */ |
3122 | temp = NEXT_INSN (insn); | |
3123 | final = NEXT_INSN (BB_END (bb)); | |
3124 | while (temp != final && ! INSN_LIVE_VALID_P (temp)) | |
3125 | temp = NEXT_INSN (temp); | |
3126 | if (temp == final) | |
3127 | { | |
3128 | lv = compute_live_after_bb (bb); | |
3129 | temp = PREV_INSN (temp); | |
3130 | } | |
3131 | else | |
3132 | { | |
3133 | lv = get_regset_from_pool (); | |
3134 | COPY_REG_SET (lv, INSN_LIVE (temp)); | |
3135 | } | |
3136 | ||
3137 | /* Put correct lv sets on the insns which have bad sets. */ | |
3138 | final = PREV_INSN (insn); | |
3139 | while (temp != final) | |
3140 | { | |
3141 | propagate_lv_set (lv, temp); | |
3142 | COPY_REG_SET (INSN_LIVE (temp), lv); | |
3143 | INSN_LIVE_VALID_P (temp) = true; | |
3144 | temp = PREV_INSN (temp); | |
3145 | } | |
3146 | ||
3147 | /* Also put it in a BB. */ | |
3148 | if (sel_bb_head_p (insn)) | |
3149 | { | |
3150 | basic_block bb = BLOCK_FOR_INSN (insn); | |
48e1416a | 3151 | |
e1ab7874 | 3152 | COPY_REG_SET (BB_LV_SET (bb), lv); |
3153 | BB_LV_SET_VALID_P (bb) = true; | |
3154 | } | |
48e1416a | 3155 | |
e1ab7874 | 3156 | /* We return LV to the pool, but will not clear it there. Thus we can |
3157 | legitimately use LV till the next use of regset_pool_get (). */ | |
3158 | return_regset_to_pool (lv); | |
3159 | return lv; | |
3160 | } | |
3161 | ||
3162 | /* Update liveness sets for INSN. */ | |
3163 | static inline void | |
2f3c9801 | 3164 | update_liveness_on_insn (rtx_insn *insn) |
e1ab7874 | 3165 | { |
3166 | ignore_first = true; | |
3167 | compute_live (insn); | |
3168 | } | |
3169 | ||
3170 | /* Compute liveness below INSN and write it into REGS. */ | |
3171 | static inline void | |
2f3c9801 | 3172 | compute_live_below_insn (rtx_insn *insn, regset regs) |
e1ab7874 | 3173 | { |
2f3c9801 | 3174 | rtx_insn *succ; |
e1ab7874 | 3175 | succ_iterator si; |
48e1416a | 3176 | |
3177 | FOR_EACH_SUCC_1 (succ, si, insn, SUCCS_ALL) | |
e1ab7874 | 3178 | IOR_REG_SET (regs, compute_live (succ)); |
3179 | } | |
3180 | ||
3181 | /* Update the data gathered in av and lv sets starting from INSN. */ | |
3182 | static void | |
2f3c9801 | 3183 | update_data_sets (rtx_insn *insn) |
e1ab7874 | 3184 | { |
3185 | update_liveness_on_insn (insn); | |
3186 | if (sel_bb_head_p (insn)) | |
3187 | { | |
3188 | gcc_assert (AV_LEVEL (insn) != 0); | |
3189 | BB_AV_LEVEL (BLOCK_FOR_INSN (insn)) = -1; | |
3190 | compute_av_set (insn, NULL, 0, 0); | |
3191 | } | |
3192 | } | |
3193 | \f | |
3194 | ||
3195 | /* Helper for move_op () and find_used_regs (). | |
3196 | Return speculation type for which a check should be created on the place | |
3197 | of INSN. EXPR is one of the original ops we are searching for. */ | |
3198 | static ds_t | |
3199 | get_spec_check_type_for_insn (insn_t insn, expr_t expr) | |
3200 | { | |
3201 | ds_t to_check_ds; | |
3202 | ds_t already_checked_ds = EXPR_SPEC_DONE_DS (INSN_EXPR (insn)); | |
3203 | ||
3204 | to_check_ds = EXPR_SPEC_TO_CHECK_DS (expr); | |
3205 | ||
3206 | if (targetm.sched.get_insn_checked_ds) | |
3207 | already_checked_ds |= targetm.sched.get_insn_checked_ds (insn); | |
3208 | ||
3209 | if (spec_info != NULL | |
3210 | && (spec_info->flags & SEL_SCHED_SPEC_DONT_CHECK_CONTROL)) | |
3211 | already_checked_ds |= BEGIN_CONTROL; | |
3212 | ||
3213 | already_checked_ds = ds_get_speculation_types (already_checked_ds); | |
3214 | ||
3215 | to_check_ds &= ~already_checked_ds; | |
3216 | ||
3217 | return to_check_ds; | |
3218 | } | |
3219 | ||
48e1416a | 3220 | /* Find the set of registers that are unavailable for storing exprs
e1ab7874 | 3221 | while moving ORIG_OPS up on the path starting from INSN due to |
3222 | liveness (USED_REGS) or hardware restrictions (REG_RENAME_P). | |
3223 | ||
3224 | All the original operations found during the traversal are saved in the | |
3225 | ORIGINAL_INSNS list. | |
3226 | ||
3227 | REG_RENAME_P denotes the set of hardware registers that | |
3228 | cannot be used with renaming due to the register class restrictions, | |
48e1416a | 3229 | mode restrictions and others (the register we'll choose should be in a
e1ab7874 | 3230 | class compatible with the original uses, shouldn't be in call_used_regs,
3231 | should be HARD_REGNO_RENAME_OK etc). | |
3232 | ||
3233 | Returns TRUE if we've found all original insns, FALSE otherwise. | |
3234 | ||
3235 | This function utilizes code_motion_path_driver (formerly find_used_regs_1) | |
48e1416a | 3236 | to traverse the code motion paths. This helper function finds registers |
3237 | that are not available for storing exprs while moving ORIG_OPS up on the | |
e1ab7874 | 3238 | path starting from INSN. A register is considered used on the moving path
3239 | if one of the following conditions is not satisfied: | |
3240 | ||
48e1416a | 3241 | (1) a register not set or read on any path from xi to an instance of |
3242 | the original operation, | |
3243 | (2) not among the live registers of the point immediately following the | |
e1ab7874 | 3244 | first original operation on a given downward path, except for the |
3245 | original target register of the operation, | |
48e1416a | 3246 | (3) not live on the other path of any conditional branch that is passed |
e1ab7874 | 3247 | by the operation, in case original operations are not present on |
3248 | both paths of the conditional branch. | |
3249 | ||
3250 | All the original operations found during the traversal are saved in the | |
3251 | ORIGINAL_INSNS list. | |
3252 | ||
48e1416a | 3253 | REG_RENAME_P->CROSSES_CALL is true if there is a call insn on the path
3254 | from INSN to the original insn. In this case CALL_USED_REG_SET will be added | |
e1ab7874 | 3255 | to the unavailable hard regs at the point the original operation is found. */
3256 | ||
3257 | static bool | |
3258 | find_used_regs (insn_t insn, av_set_t orig_ops, regset used_regs, | |
3259 | struct reg_rename *reg_rename_p, def_list_t *original_insns) | |
3260 | { | |
3261 | def_list_iterator i; | |
3262 | def_t def; | |
3263 | int res; | |
3264 | bool needs_spec_check_p = false; | |
3265 | expr_t expr; | |
3266 | av_set_iterator expr_iter; | |
3267 | struct fur_static_params sparams; | |
3268 | struct cmpd_local_params lparams; | |
3269 | ||
3270 | /* We haven't visited any blocks yet. */ | |
3271 | bitmap_clear (code_motion_visited_blocks); | |
3272 | ||
3273 | /* Init parameters for code_motion_path_driver. */ | |
3274 | sparams.crosses_call = false; | |
3275 | sparams.original_insns = original_insns; | |
3276 | sparams.used_regs = used_regs; | |
48e1416a | 3277 | |
e1ab7874 | 3278 | /* Set the appropriate hooks and data. */ |
3279 | code_motion_path_driver_info = &fur_hooks; | |
48e1416a | 3280 | |
e1ab7874 | 3281 | res = code_motion_path_driver (insn, orig_ops, NULL, &lparams, &sparams); |
3282 | ||
3283 | reg_rename_p->crosses_call |= sparams.crosses_call; | |
3284 | ||
3285 | gcc_assert (res == 1); | |
3286 | gcc_assert (original_insns && *original_insns); | |
3287 | ||
3288 | /* ??? We calculate whether an expression needs a check when computing | |
3289 | av sets. This information is not as precise as it could be due to | |
3290 | merging this bit in merge_expr. We can do better in find_used_regs, | |
48e1416a | 3291 | but we want to avoid multiple traversals of the same code motion |
e1ab7874 | 3292 | paths. */ |
3293 | FOR_EACH_EXPR (expr, expr_iter, orig_ops) | |
3294 | needs_spec_check_p |= EXPR_NEEDS_SPEC_CHECK_P (expr); | |
3295 | ||
48e1416a | 3296 | /* Mark hardware regs in REG_RENAME_P that are not suitable |
e1ab7874 | 3297 | for renaming expr in INSN due to hardware restrictions (register class, |
3298 | modes compatibility etc). */ | |
3299 | FOR_EACH_DEF (def, i, *original_insns) | |
3300 | { | |
3301 | vinsn_t vinsn = INSN_VINSN (def->orig_insn); | |
3302 | ||
3303 | if (VINSN_SEPARABLE_P (vinsn)) | |
3304 | mark_unavailable_hard_regs (def, reg_rename_p, used_regs); | |
3305 | ||
48e1416a | 3306 | /* Do not allow clobbering of ld.[sa] address in case some of the |
e1ab7874 | 3307 | original operations need a check. */ |
3308 | if (needs_spec_check_p) | |
3309 | IOR_REG_SET (used_regs, VINSN_REG_USES (vinsn)); | |
3310 | } | |
3311 | ||
3312 | return true; | |
3313 | } | |
3314 | \f | |
3315 | ||
3316 | /* Functions to choose the best insn from available ones. */ | |
3317 | ||
3318 | /* Adjusts the priority for EXPR using the backend *_adjust_priority hook. */ | |
3319 | static int | |
3320 | sel_target_adjust_priority (expr_t expr) | |
3321 | { | |
3322 | int priority = EXPR_PRIORITY (expr); | |
3323 | int new_priority; | |
3324 | ||
3325 | if (targetm.sched.adjust_priority) | |
3326 | new_priority = targetm.sched.adjust_priority (EXPR_INSN_RTX (expr), priority); | |
3327 | else | |
3328 | new_priority = priority; | |
3329 | ||
3330 | /* If the priority has changed, adjust EXPR_PRIORITY_ADJ accordingly. */ | |
3331 | EXPR_PRIORITY_ADJ (expr) = new_priority - EXPR_PRIORITY (expr); | |
3332 | ||
3333 | gcc_assert (EXPR_PRIORITY_ADJ (expr) >= 0); | |
3334 | ||
abb9c563 | 3335 | if (sched_verbose >= 4) |
3336 | sel_print ("sel_target_adjust_priority: insn %d, %d+%d = %d.\n", | |
48e1416a | 3337 | INSN_UID (EXPR_INSN_RTX (expr)), EXPR_PRIORITY (expr), |
e1ab7874 | 3338 | EXPR_PRIORITY_ADJ (expr), new_priority); |
3339 | ||
3340 | return new_priority; | |
3341 | } | |
3342 | ||
3343 | /* Rank two available exprs for scheduling. Never return 0 here. */ | |
48e1416a | 3344 | static int |
e1ab7874 | 3345 | sel_rank_for_schedule (const void *x, const void *y) |
3346 | { | |
3347 | expr_t tmp = *(const expr_t *) y; | |
3348 | expr_t tmp2 = *(const expr_t *) x; | |
3349 | insn_t tmp_insn, tmp2_insn; | |
3350 | vinsn_t tmp_vinsn, tmp2_vinsn; | |
3351 | int val; | |
3352 | ||
3353 | tmp_vinsn = EXPR_VINSN (tmp); | |
3354 | tmp2_vinsn = EXPR_VINSN (tmp2); | |
3355 | tmp_insn = EXPR_INSN_RTX (tmp); | |
3356 | tmp2_insn = EXPR_INSN_RTX (tmp2); | |
48e1416a | 3357 | |
9845d120 | 3358 | /* Schedule debug insns as early as possible. */ |
3359 | if (DEBUG_INSN_P (tmp_insn) && !DEBUG_INSN_P (tmp2_insn)) | |
3360 | return -1; | |
3361 | else if (DEBUG_INSN_P (tmp2_insn)) | |
3362 | return 1; | |
3363 | ||
e1ab7874 | 3364 | /* Prefer SCHED_GROUP_P insns to any others. */ |
3365 | if (SCHED_GROUP_P (tmp_insn) != SCHED_GROUP_P (tmp2_insn)) | |
3366 | { | |
48e1416a | 3367 | if (VINSN_UNIQUE_P (tmp_vinsn) && VINSN_UNIQUE_P (tmp2_vinsn)) |
e1ab7874 | 3368 | return SCHED_GROUP_P (tmp2_insn) ? 1 : -1; |
3369 | ||
3370 | /* Now uniqueness means SCHED_GROUP_P is set, because schedule groups | |
3371 | cannot be cloned. */ | |
3372 | if (VINSN_UNIQUE_P (tmp2_vinsn)) | |
3373 | return 1; | |
3374 | return -1; | |
3375 | } | |
3376 | ||
3377 | /* Discourage scheduling of speculative checks. */ | |
3378 | val = (sel_insn_is_speculation_check (tmp_insn) | |
3379 | - sel_insn_is_speculation_check (tmp2_insn)); | |
3380 | if (val) | |
3381 | return val; | |
3382 | ||
3383 | /* Prefer not scheduled insn over scheduled one. */ | |
3384 | if (EXPR_SCHED_TIMES (tmp) > 0 || EXPR_SCHED_TIMES (tmp2) > 0) | |
3385 | { | |
3386 | val = EXPR_SCHED_TIMES (tmp) - EXPR_SCHED_TIMES (tmp2); | |
3387 | if (val) | |
3388 | return val; | |
3389 | } | |
3390 | ||
3391 | /* Prefer jump over non-jump instruction. */ | |
3392 | if (control_flow_insn_p (tmp_insn) && !control_flow_insn_p (tmp2_insn)) | |
3393 | return -1; | |
3394 | else if (control_flow_insn_p (tmp2_insn) && !control_flow_insn_p (tmp_insn)) | |
3395 | return 1; | |
3396 | ||
3397 | /* Prefer an expr with greater priority. */ | |
3398 | if (EXPR_USEFULNESS (tmp) != 0 && EXPR_USEFULNESS (tmp2) != 0) | |
3399 | { | |
3400 | int p2 = EXPR_PRIORITY (tmp2) + EXPR_PRIORITY_ADJ (tmp2), | |
3401 | p1 = EXPR_PRIORITY (tmp) + EXPR_PRIORITY_ADJ (tmp); | |
3402 | ||
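/* Both usefulness values are known here, so weight each priority by the
   expr's usefulness (derived from the probabilities of the successor
   paths it was collected from) before comparing.  */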
3403 | val = p2 * EXPR_USEFULNESS (tmp2) - p1 * EXPR_USEFULNESS (tmp); | |
3404 | } | |
3405 | else | |
48e1416a | 3406 | val = EXPR_PRIORITY (tmp2) - EXPR_PRIORITY (tmp) |
e1ab7874 | 3407 | + EXPR_PRIORITY_ADJ (tmp2) - EXPR_PRIORITY_ADJ (tmp); |
3408 | if (val) | |
3409 | return val; | |
3410 | ||
3411 | if (spec_info != NULL && spec_info->mask != 0) | |
3412 | /* This code was taken from haifa-sched.c: rank_for_schedule (). */ | |
3413 | { | |
3414 | ds_t ds1, ds2; | |
3415 | dw_t dw1, dw2; | |
3416 | int dw; | |
3417 | ||
3418 | ds1 = EXPR_SPEC_DONE_DS (tmp); | |
3419 | if (ds1) | |
3420 | dw1 = ds_weak (ds1); | |
3421 | else | |
3422 | dw1 = NO_DEP_WEAK; | |
3423 | ||
3424 | ds2 = EXPR_SPEC_DONE_DS (tmp2); | |
3425 | if (ds2) | |
3426 | dw2 = ds_weak (ds2); | |
3427 | else | |
3428 | dw2 = NO_DEP_WEAK; | |
3429 | ||
3430 | dw = dw2 - dw1; | |
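/* Let the difference in speculation weakness decide only when it is
   significant, i.e. larger than 1/8 of the full NO_DEP_WEAK range.  */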
3431 | if (dw > (NO_DEP_WEAK / 8) || dw < -(NO_DEP_WEAK / 8)) | |
3432 | return dw; | |
3433 | } | |
3434 | ||
e1ab7874 | 3435 | /* Prefer an old insn to a bookkeeping insn. */ |
48e1416a | 3436 | if (INSN_UID (tmp_insn) < first_emitted_uid |
e1ab7874 | 3437 | && INSN_UID (tmp2_insn) >= first_emitted_uid) |
3438 | return -1; | |
48e1416a | 3439 | if (INSN_UID (tmp_insn) >= first_emitted_uid |
e1ab7874 | 3440 | && INSN_UID (tmp2_insn) < first_emitted_uid) |
3441 | return 1; | |
3442 | ||
48e1416a | 3443 | /* Prefer an insn with smaller UID, as a last resort. |
e1ab7874 | 3444 | We can't safely use INSN_LUID as it is defined only for those insns |
3445 | that are in the stream. */ | |
3446 | return INSN_UID (tmp_insn) - INSN_UID (tmp2_insn); | |
3447 | } | |
3448 | ||
48e1416a | 3449 | /* Filter out expressions from av set pointed to by AV_PTR |
e1ab7874 | 3450 | that are pipelined too many times. */ |
3451 | static void | |
3452 | process_pipelined_exprs (av_set_t *av_ptr) | |
3453 | { | |
3454 | expr_t expr; | |
3455 | av_set_iterator si; | |
3456 | ||
3457 | /* Don't pipeline already pipelined code as that would increase | |
48e1416a | 3458 | the number of unnecessary register moves. */
e1ab7874 | 3459 | FOR_EACH_EXPR_1 (expr, si, av_ptr) |
3460 | { | |
3461 | if (EXPR_SCHED_TIMES (expr) | |
3462 | >= PARAM_VALUE (PARAM_SELSCHED_MAX_SCHED_TIMES)) | |
3463 | av_set_iter_remove (&si); | |
3464 | } | |
3465 | } | |
3466 | ||
3467 | /* Filter speculative insns from AV_PTR if we don't want them. */ | |
3468 | static void | |
3469 | process_spec_exprs (av_set_t *av_ptr) | |
3470 | { | |
e1ab7874 | 3471 | expr_t expr; |
3472 | av_set_iterator si; | |
3473 | ||
3474 | if (spec_info == NULL) | |
3475 | return; | |
3476 | ||
3477 | /* Scan *AV_PTR to find out if we want to consider speculative | |
3478 | instructions for scheduling. */ | |
3479 | FOR_EACH_EXPR_1 (expr, si, av_ptr) | |
3480 | { | |
3481 | ds_t ds; | |
3482 | ||
3483 | ds = EXPR_SPEC_DONE_DS (expr); | |
3484 | ||
3485 | /* The probability of success is too low - don't speculate. */ | |
3486 | if ((ds & SPECULATIVE) | |
3487 | && (ds_weak (ds) < spec_info->data_weakness_cutoff | |
3488 | || EXPR_USEFULNESS (expr) < spec_info->control_weakness_cutoff | |
3489 | || (pipelining_p && false | |
3490 | && (ds & DATA_SPEC) | |
3491 | && (ds & CONTROL_SPEC)))) | |
3492 | { | |
3493 | av_set_iter_remove (&si); | |
3494 | continue; | |
3495 | } | |
e1ab7874 | 3496 | } |
3497 | } | |
3498 | ||
48e1416a | 3499 | /* Search for any use-like insns in AV_PTR and decide on scheduling |
3500 | them. Return one when found, and NULL otherwise. | |
e1ab7874 | 3501 | Note that we check here whether a USE could be scheduled to avoid |
3502 | an infinite loop later. */ | |
3503 | static expr_t | |
3504 | process_use_exprs (av_set_t *av_ptr) | |
3505 | { | |
3506 | expr_t expr; | |
3507 | av_set_iterator si; | |
3508 | bool uses_present_p = false; | |
3509 | bool try_uses_p = true; | |
3510 | ||
3511 | FOR_EACH_EXPR_1 (expr, si, av_ptr) | |
3512 | { | |
3513 | /* This will also initialize INSN_CODE for later use. */ | |
3514 | if (recog_memoized (EXPR_INSN_RTX (expr)) < 0) | |
3515 | { | |
3516 | /* If we have a USE in *AV_PTR that was not scheduled yet, | |
3517 | schedule it now, as that can only do good. */ | |
3518 | if (EXPR_SCHED_TIMES (expr) <= 0) | |
3519 | { | |
3520 | if (EXPR_TARGET_AVAILABLE (expr) == 1) | |
3521 | return expr; | |
3522 | ||
3523 | av_set_iter_remove (&si); | |
3524 | } | |
3525 | else | |
3526 | { | |
3527 | gcc_assert (pipelining_p); | |
3528 | ||
3529 | uses_present_p = true; | |
3530 | } | |
3531 | } | |
3532 | else | |
3533 | try_uses_p = false; | |
3534 | } | |
3535 | ||
3536 | if (uses_present_p) | |
3537 | { | |
3538 | /* If we don't want to schedule any USEs right now and we have some | |
3539 | in *AV_PTR, remove them, else just return the first one found. */ | |
3540 | if (!try_uses_p) | |
3541 | { | |
3542 | FOR_EACH_EXPR_1 (expr, si, av_ptr) | |
3543 | if (INSN_CODE (EXPR_INSN_RTX (expr)) < 0) | |
3544 | av_set_iter_remove (&si); | |
3545 | } | |
3546 | else | |
3547 | { | |
3548 | FOR_EACH_EXPR_1 (expr, si, av_ptr) | |
3549 | { | |
3550 | gcc_assert (INSN_CODE (EXPR_INSN_RTX (expr)) < 0); | |
3551 | ||
3552 | if (EXPR_TARGET_AVAILABLE (expr) == 1) | |
3553 | return expr; | |
3554 | ||
3555 | av_set_iter_remove (&si); | |
3556 | } | |
3557 | } | |
3558 | } | |
3559 | ||
3560 | return NULL; | |
3561 | } | |
3562 | ||
846800d7 | 3563 | /* Look up EXPR in VINSN_VEC and return TRUE if found. Also check patterns from
3564 | EXPR's history of changes. */ | |
e1ab7874 | 3565 | static bool |
3566 | vinsn_vec_has_expr_p (vinsn_vec_t vinsn_vec, expr_t expr) | |
3567 | { | |
846800d7 | 3568 | vinsn_t vinsn, expr_vinsn; |
e1ab7874 | 3569 | int n; |
846800d7 | 3570 | unsigned i; |
e1ab7874 | 3571 | |
846800d7 | 3572 | /* Start with checking expr itself and then proceed with all the old forms |
3573 | of expr taken from its history vector. */ | |
3574 | for (i = 0, expr_vinsn = EXPR_VINSN (expr); | |
3575 | expr_vinsn; | |
f1f41a6c | 3576 | expr_vinsn = (i < EXPR_HISTORY_OF_CHANGES (expr).length () |
3577 | ? EXPR_HISTORY_OF_CHANGES (expr)[i++].old_expr_vinsn | |
846800d7 | 3578 | : NULL)) |
f1f41a6c | 3579 | FOR_EACH_VEC_ELT (vinsn_vec, n, vinsn) |
846800d7 | 3580 | if (VINSN_SEPARABLE_P (vinsn)) |
3581 | { | |
3582 | if (vinsn_equal_p (vinsn, expr_vinsn)) | |
3583 | return true; | |
3584 | } | |
3585 | else | |
3586 | { | |
3587 | /* For non-separable instructions, the blocking insn can have | |
3588 | another pattern due to substitution, and we can't choose | |
3589 | different register as in the above case. Check all registers | |
3590 | being written instead. */ | |
3591 | if (bitmap_intersect_p (VINSN_REG_SETS (vinsn), | |
3592 | VINSN_REG_SETS (expr_vinsn))) | |
3593 | return true; | |
3594 | } | |
e1ab7874 | 3595 | |
3596 | return false; | |
3597 | } | |
3598 | ||
3599 | #ifdef ENABLE_CHECKING | |
3600 | /* Return true if either of expressions from ORIG_OPS can be blocked | |
3601 | by previously created bookkeeping code. STATIC_PARAMS points to static | |
3602 | parameters of move_op. */ | |
3603 | static bool | |
3604 | av_set_could_be_blocked_by_bookkeeping_p (av_set_t orig_ops, void *static_params) | |
3605 | { | |
3606 | expr_t expr; | |
3607 | av_set_iterator iter; | |
3608 | moveop_static_params_p sparams; | |
3609 | ||
3610 | /* This checks that expressions in ORIG_OPS are not blocked by bookkeeping | |
3611 | created while scheduling on another fence. */ | |
3612 | FOR_EACH_EXPR (expr, iter, orig_ops) | |
3613 | if (vinsn_vec_has_expr_p (vec_bookkeeping_blocked_vinsns, expr)) | |
3614 | return true; | |
3615 | ||
3616 | gcc_assert (code_motion_path_driver_info == &move_op_hooks); | |
3617 | sparams = (moveop_static_params_p) static_params; | |
3618 | ||
3619 | /* Expressions can be also blocked by bookkeeping created during current | |
3620 | move_op. */ | |
3621 | if (bitmap_bit_p (current_copies, INSN_UID (sparams->failed_insn))) | |
3622 | FOR_EACH_EXPR (expr, iter, orig_ops) | |
3623 | if (moveup_expr_cached (expr, sparams->failed_insn, false) != MOVEUP_EXPR_NULL) | |
3624 | return true; | |
3625 | ||
3626 | /* Expressions in ORIG_OPS may have wrong destination register due to | |
3627 | renaming. Check with the right register instead. */ | |
3628 | if (sparams->dest && REG_P (sparams->dest)) | |
3629 | { | |
1f53e226 | 3630 | rtx reg = sparams->dest; |
e1ab7874 | 3631 | vinsn_t failed_vinsn = INSN_VINSN (sparams->failed_insn); |
3632 | ||
1f53e226 | 3633 | if (register_unavailable_p (VINSN_REG_SETS (failed_vinsn), reg) |
3634 | || register_unavailable_p (VINSN_REG_USES (failed_vinsn), reg) | |
3635 | || register_unavailable_p (VINSN_REG_CLOBBERS (failed_vinsn), reg)) | |
e1ab7874 | 3636 | return true; |
3637 | } | |
3638 | ||
3639 | return false; | |
3640 | } | |
3641 | #endif | |
3642 | ||
3643 | /* Clear VINSN_VEC and detach vinsns. */ | |
3644 | static void | |
3645 | vinsn_vec_clear (vinsn_vec_t *vinsn_vec) | |
3646 | { | |
f1f41a6c | 3647 | unsigned len = vinsn_vec->length (); |
e1ab7874 | 3648 | if (len > 0) |
3649 | { | |
3650 | vinsn_t vinsn; | |
3651 | int n; | |
48e1416a | 3652 | |
f1f41a6c | 3653 | FOR_EACH_VEC_ELT (*vinsn_vec, n, vinsn) |
e1ab7874 | 3654 | vinsn_detach (vinsn); |
f1f41a6c | 3655 | vinsn_vec->block_remove (0, len); |
e1ab7874 | 3656 | } |
3657 | } | |
3658 | ||
3659 | /* Add the vinsn of EXPR to the VINSN_VEC. */ | |
3660 | static void | |
3661 | vinsn_vec_add (vinsn_vec_t *vinsn_vec, expr_t expr) | |
3662 | { | |
3663 | vinsn_attach (EXPR_VINSN (expr)); | |
f1f41a6c | 3664 | vinsn_vec->safe_push (EXPR_VINSN (expr)); |
e1ab7874 | 3665 | } |
3666 | ||
48e1416a | 3667 | /* Free the vector representing blocked expressions. */ |
e1ab7874 | 3668 | static void |
f1f41a6c | 3669 | vinsn_vec_free (vinsn_vec_t &vinsn_vec) |
e1ab7874 | 3670 | { |
f1f41a6c | 3671 | vinsn_vec.release (); |
e1ab7874 | 3672 | } |
3673 | ||
3674 | /* Increase EXPR_PRIORITY_ADJ for INSN by AMOUNT. */ | |
3675 | ||
3676 | void sel_add_to_insn_priority (rtx insn, int amount) | |
3677 | { | |
3678 | EXPR_PRIORITY_ADJ (INSN_EXPR (insn)) += amount; | |
3679 | ||
3680 | if (sched_verbose >= 2) | |
48e1416a | 3681 | sel_print ("sel_add_to_insn_priority: insn %d, by %d (now %d+%d).\n", |
e1ab7874 | 3682 | INSN_UID (insn), amount, EXPR_PRIORITY (INSN_EXPR (insn)), |
3683 | EXPR_PRIORITY_ADJ (INSN_EXPR (insn))); | |
3684 | } | |
3685 | ||
48e1416a | 3686 | /* Turn AV into a vector, filter inappropriate insns and sort it. Return |
e1ab7874 | 3687 | true if there is something to schedule. BNDS and FENCE are current |
3688 | boundaries and fence, respectively. If we need to stall for some cycles | |
48e1416a | 3689 | before an expr from AV would become available, write this number to |
e1ab7874 | 3690 | *PNEED_STALL. */ |
3691 | static bool | |
3692 | fill_vec_av_set (av_set_t av, blist_t bnds, fence_t fence, | |
3693 | int *pneed_stall) | |
3694 | { | |
3695 | av_set_iterator si; | |
3696 | expr_t expr; | |
3697 | int sched_next_worked = 0, stalled, n; | |
3698 | static int av_max_prio, est_ticks_till_branch; | |
3699 | int min_need_stall = -1; | |
3700 | deps_t dc = BND_DC (BLIST_BND (bnds)); | |
3701 | ||
3702 | /* Bail out early when the ready list contained only USEs/CLOBBERs that are | |
3703 | already scheduled. */ | |
3704 | if (av == NULL) | |
3705 | return false; | |
3706 | ||
3707 | /* Empty the vector left over from the previous call. */ | |
f1f41a6c | 3708 | if (vec_av_set.length () > 0) |
3709 | vec_av_set.block_remove (0, vec_av_set.length ()); | |
e1ab7874 | 3710 | |
3711 | /* Turn the set into a vector for sorting and call sel_target_adjust_priority | |
3712 | for each insn. */ | |
f1f41a6c | 3713 | gcc_assert (vec_av_set.is_empty ()); |
e1ab7874 | 3714 | FOR_EACH_EXPR (expr, si, av) |
48e1416a | 3715 | { |
f1f41a6c | 3716 | vec_av_set.safe_push (expr); |
e1ab7874 | 3717 | |
3718 | gcc_assert (EXPR_PRIORITY_ADJ (expr) == 0 || *pneed_stall); | |
3719 | ||
3720 | /* Adjust priority using target backend hook. */ | |
3721 | sel_target_adjust_priority (expr); | |
3722 | } | |
3723 | ||
3724 | /* Sort the vector. */ | |
f1f41a6c | 3725 | vec_av_set.qsort (sel_rank_for_schedule); |
e1ab7874 | 3726 | |
3727 | /* We record maximal priority of insns in av set for current instruction | |
3728 | group. */ | |
3729 | if (FENCE_STARTS_CYCLE_P (fence)) | |
3730 | av_max_prio = est_ticks_till_branch = INT_MIN; | |
3731 | ||
3732 | /* Filter out inappropriate expressions. Loop's direction is reversed to | |
f1f41a6c | 3733 | visit "best" instructions first. We assume that vec::unordered_remove |
e1ab7874 | 3734 | moves last element in place of one being deleted. */ |
f1f41a6c | 3735 | for (n = vec_av_set.length () - 1, stalled = 0; n >= 0; n--) |
e1ab7874 | 3736 | { |
f1f41a6c | 3737 | expr_t expr = vec_av_set[n]; |
e1ab7874 | 3738 | insn_t insn = EXPR_INSN_RTX (expr); |
17435f96 | 3739 | signed char target_available; |
e1ab7874 | 3740 | bool is_orig_reg_p = true; |
3741 | int need_cycles, new_prio; | |
c4326fd2 | 3742 | bool fence_insn_p = INSN_UID (insn) == INSN_UID (FENCE_INSN (fence)); |
e1ab7874 | 3743 | |
3744 | /* Don't allow any insns other than from SCHED_GROUP if we have one. */ | |
3745 | if (FENCE_SCHED_NEXT (fence) && insn != FENCE_SCHED_NEXT (fence)) | |
3746 | { | |
f1f41a6c | 3747 | vec_av_set.unordered_remove (n); |
e1ab7874 | 3748 | continue; |
3749 | } | |
3750 | ||
48e1416a | 3751 | /* Set number of sched_next insns (just in case there |
e1ab7874 | 3752 | could be several). */ |
3753 | if (FENCE_SCHED_NEXT (fence)) | |
3754 | sched_next_worked++; | |
48e1416a | 3755 | |
3756 | /* Check all liveness requirements and try renaming. | |
e1ab7874 | 3757 | FIXME: try to minimize calls to this. */ |
3758 | target_available = EXPR_TARGET_AVAILABLE (expr); | |
3759 | ||
3760 | /* If insn was already scheduled on the current fence, | |
3761 | set TARGET_AVAILABLE to -1 no matter what expr's attribute says. */ | |
a0d15f90 | 3762 | if (vinsn_vec_has_expr_p (vec_target_unavailable_vinsns, expr) |
3763 | && !fence_insn_p) | |
e1ab7874 | 3764 | target_available = -1; |
3765 | ||
3766 | /* If the availability of the EXPR is invalidated by the insertion of | |
3767 | bookkeeping earlier, make sure that we won't choose this expr for | |
3768 | scheduling if it's not separable, and if it is separable, then | |
3769 | we have to recompute the set of available registers for it. */ | |
3770 | if (vinsn_vec_has_expr_p (vec_bookkeeping_blocked_vinsns, expr)) | |
3771 | { | |
f1f41a6c | 3772 | vec_av_set.unordered_remove (n); |
e1ab7874 | 3773 | if (sched_verbose >= 4) |
3774 | sel_print ("Expr %d is blocked by bookkeeping inserted earlier\n", | |
3775 | INSN_UID (insn)); | |
3776 | continue; | |
3777 | } | |
48e1416a | 3778 | |
e1ab7874 | 3779 | if (target_available == true) |
3780 | { | |
3781 | /* Do nothing -- we can use an existing register. */ | |
3782 | is_orig_reg_p = EXPR_SEPARABLE_P (expr); | |
3783 | } | |
48e1416a | 3784 | else if (/* Non-separable instruction will never |
e1ab7874 | 3785 | get another register. */ |
3786 | (target_available == false | |
3787 | && !EXPR_SEPARABLE_P (expr)) | |
3788 | /* Don't try to find a register for low-priority expression. */ | |
f1f41a6c | 3789 | || (int) vec_av_set.length () - 1 - n >= max_insns_to_rename |
e1ab7874 | 3790 | /* ??? FIXME: Don't try to rename data speculation. */ |
3791 | || (EXPR_SPEC_DONE_DS (expr) & BEGIN_DATA) | |
3792 | || ! find_best_reg_for_expr (expr, bnds, &is_orig_reg_p)) | |
3793 | { | |
f1f41a6c | 3794 | vec_av_set.unordered_remove (n); |
e1ab7874 | 3795 | if (sched_verbose >= 4) |
48e1416a | 3796 | sel_print ("Expr %d has no suitable target register\n", |
e1ab7874 | 3797 | INSN_UID (insn)); |
c4326fd2 | 3798 | |
3799 | /* A fence insn should not get here. */ | |
3800 | gcc_assert (!fence_insn_p); | |
3801 | continue; | |
e1ab7874 | 3802 | } |
3803 | ||
c4326fd2 | 3804 | /* At this point a fence insn should always be available. */ |
3805 | gcc_assert (!fence_insn_p | |
3806 | || INSN_UID (FENCE_INSN (fence)) == INSN_UID (EXPR_INSN_RTX (expr))); | |
3807 | ||
e1ab7874 | 3808 | /* Filter expressions that need to be renamed or speculated when |
3809 | pipelining, because compensating register copies or speculation | |
3810 | checks are likely to be placed near the beginning of the loop, | |
3811 | causing a stall. */ | |
3812 | if (pipelining_p && EXPR_ORIG_SCHED_CYCLE (expr) > 0 | |
3813 | && (!is_orig_reg_p || EXPR_SPEC_DONE_DS (expr) != 0)) | |
3814 | { | |
3815 | /* Estimation of number of cycles until loop branch for | |
3816 | renaming/speculation to be successful. */ | |
3817 | int need_n_ticks_till_branch = sel_vinsn_cost (EXPR_VINSN (expr)); | |
3818 | ||
3819 | if ((int) current_loop_nest->ninsns < 9) | |
3820 | { | |
f1f41a6c | 3821 | vec_av_set.unordered_remove (n); |
e1ab7874 | 3822 | if (sched_verbose >= 4) |
3823 | sel_print ("Pipelining expr %d will likely cause stall\n", | |
3824 | INSN_UID (insn)); | |
3825 | continue; | |
3826 | } | |
3827 | ||
3828 | if ((int) current_loop_nest->ninsns - num_insns_scheduled | |
3829 | < need_n_ticks_till_branch * issue_rate / 2 | |
3830 | && est_ticks_till_branch < need_n_ticks_till_branch) | |
3831 | { | |
f1f41a6c | 3832 | vec_av_set.unordered_remove (n); |
e1ab7874 | 3833 | if (sched_verbose >= 4) |
3834 | sel_print ("Pipelining expr %d will likely cause stall\n", | |
3835 | INSN_UID (insn)); | |
3836 | continue; | |
3837 | } | |
3838 | } | |
3839 | ||
3840 | /* We want to schedule speculation checks as late as possible. Discard | |
3841 | them from av set if there are instructions with higher priority. */ | |
3842 | if (sel_insn_is_speculation_check (insn) | |
3843 | && EXPR_PRIORITY (expr) < av_max_prio) | |
3844 | { | |
3845 | stalled++; | |
3846 | min_need_stall = min_need_stall < 0 ? 1 : MIN (min_need_stall, 1); | |
f1f41a6c | 3847 | vec_av_set.unordered_remove (n); |
e1ab7874 | 3848 | if (sched_verbose >= 4) |
3849 | sel_print ("Delaying speculation check %d until its first use\n", | |
3850 | INSN_UID (insn)); | |
3851 | continue; | |
3852 | } | |
3853 | ||
3854 | /* Ignore EXPRs available from pipelining to update AV_MAX_PRIO. */ | |
3855 | if (EXPR_ORIG_SCHED_CYCLE (expr) <= 0) | |
3856 | av_max_prio = MAX (av_max_prio, EXPR_PRIORITY (expr)); | |
3857 | ||
3858 | /* Don't allow any insns whose data is not yet ready. | |
3859 | Check first whether we've already tried them and failed. */ | |
3860 | if (INSN_UID (insn) < FENCE_READY_TICKS_SIZE (fence)) | |
3861 | { | |
3862 | need_cycles = (FENCE_READY_TICKS (fence)[INSN_UID (insn)] | |
3863 | - FENCE_CYCLE (fence)); | |
3864 | if (EXPR_ORIG_SCHED_CYCLE (expr) <= 0) | |
3865 | est_ticks_till_branch = MAX (est_ticks_till_branch, | |
3866 | EXPR_PRIORITY (expr) + need_cycles); | |
3867 | ||
3868 | if (need_cycles > 0) | |
3869 | { | |
3870 | stalled++; | |
48e1416a | 3871 | min_need_stall = (min_need_stall < 0 |
e1ab7874 | 3872 | ? need_cycles |
3873 | : MIN (min_need_stall, need_cycles)); | |
f1f41a6c | 3874 | vec_av_set.unordered_remove (n); |
e1ab7874 | 3875 | |
3876 | if (sched_verbose >= 4) | |
48e1416a | 3877 | sel_print ("Expr %d is not ready until cycle %d (cached)\n", |
e1ab7874 | 3878 | INSN_UID (insn), |
3879 | FENCE_READY_TICKS (fence)[INSN_UID (insn)]); | |
3880 | continue; | |
3881 | } | |
3882 | } | |
3883 | ||
48e1416a | 3884 | /* Now resort to dependence analysis to find whether EXPR might be |
e1ab7874 | 3885 | stalled due to dependencies from FENCE's context. */ |
3886 | need_cycles = tick_check_p (expr, dc, fence); | |
3887 | new_prio = EXPR_PRIORITY (expr) + EXPR_PRIORITY_ADJ (expr) + need_cycles; | |
3888 | ||
3889 | if (EXPR_ORIG_SCHED_CYCLE (expr) <= 0) | |
3890 | est_ticks_till_branch = MAX (est_ticks_till_branch, | |
3891 | new_prio); | |
3892 | ||
3893 | if (need_cycles > 0) | |
3894 | { | |
3895 | if (INSN_UID (insn) >= FENCE_READY_TICKS_SIZE (fence)) | |
3896 | { | |
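/* Grow the ready ticks array to 1.5 times the insn UID so that
   repeated reallocations stay infrequent.  */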
3897 | int new_size = INSN_UID (insn) * 3 / 2; | |
48e1416a | 3898 | |
3899 | FENCE_READY_TICKS (fence) | |
e1ab7874 | 3900 | = (int *) xrecalloc (FENCE_READY_TICKS (fence), |
3901 | new_size, FENCE_READY_TICKS_SIZE (fence), | |
3902 | sizeof (int)); | |
3903 | } | |
48e1416a | 3904 | FENCE_READY_TICKS (fence)[INSN_UID (insn)] |
3905 | = FENCE_CYCLE (fence) + need_cycles; | |
3906 | ||
e1ab7874 | 3907 | stalled++; |
48e1416a | 3908 | min_need_stall = (min_need_stall < 0 |
e1ab7874 | 3909 | ? need_cycles |
3910 | : MIN (min_need_stall, need_cycles)); | |
3911 | ||
f1f41a6c | 3912 | vec_av_set.unordered_remove (n); |
48e1416a | 3913 | |
e1ab7874 | 3914 | if (sched_verbose >= 4) |
48e1416a | 3915 | sel_print ("Expr %d is not ready yet until cycle %d\n", |
e1ab7874 | 3916 | INSN_UID (insn), |
3917 | FENCE_READY_TICKS (fence)[INSN_UID (insn)]); | |
3918 | continue; | |
3919 | } | |
3920 | ||
3921 | if (sched_verbose >= 4) | |
3922 | sel_print ("Expr %d is ok\n", INSN_UID (insn)); | |
3923 | min_need_stall = 0; | |
3924 | } | |
3925 | ||
3926 | /* Clear SCHED_NEXT. */ | |
3927 | if (FENCE_SCHED_NEXT (fence)) | |
3928 | { | |
3929 | gcc_assert (sched_next_worked == 1); | |
2f3c9801 | 3930 | FENCE_SCHED_NEXT (fence) = NULL; |
e1ab7874 | 3931 | } |
3932 | ||
3933 | /* No need to stall if this variable was not initialized. */ | |
3934 | if (min_need_stall < 0) | |
3935 | min_need_stall = 0; | |
3936 | ||
f1f41a6c | 3937 | if (vec_av_set.is_empty ()) |
e1ab7874 | 3938 | { |
3939 | /* We need to set *pneed_stall here, because later we skip this code | |
3940 | when ready list is empty. */ | |
3941 | *pneed_stall = min_need_stall; | |
3942 | return false; | |
3943 | } | |
3944 | else | |
3945 | gcc_assert (min_need_stall == 0); | |
3946 | ||
3947 | /* Sort the vector. */ | |
f1f41a6c | 3948 | vec_av_set.qsort (sel_rank_for_schedule); |
48e1416a | 3949 | |
e1ab7874 | 3950 | if (sched_verbose >= 4) |
3951 | { | |
48e1416a | 3952 | sel_print ("Total ready exprs: %d, stalled: %d\n", |
f1f41a6c | 3953 | vec_av_set.length (), stalled); |
3954 | sel_print ("Sorted av set (%d): ", vec_av_set.length ()); | |
3955 | FOR_EACH_VEC_ELT (vec_av_set, n, expr) | |
e1ab7874 | 3956 | dump_expr (expr); |
3957 | sel_print ("\n"); | |
3958 | } | |
3959 | ||
3960 | *pneed_stall = 0; | |
3961 | return true; | |
3962 | } | |
3963 | ||
3964 | /* Convert a vectored and sorted av set to the ready list that | |
3965 | the rest of the backend wants to see. */ | |
3966 | static void | |
3967 | convert_vec_av_set_to_ready (void) | |
3968 | { | |
3969 | int n; | |
3970 | expr_t expr; | |
3971 | ||
3972 | /* Allocate and fill the ready list from the sorted vector. */ | |
f1f41a6c | 3973 | ready.n_ready = vec_av_set.length (); |
e1ab7874 | 3974 | ready.first = ready.n_ready - 1; |
48e1416a | 3975 | |
e1ab7874 | 3976 | gcc_assert (ready.n_ready > 0); |
3977 | ||
3978 | if (ready.n_ready > max_issue_size) | |
3979 | { | |
3980 | max_issue_size = ready.n_ready; | |
3981 | sched_extend_ready_list (ready.n_ready); | |
3982 | } | |
48e1416a | 3983 | |
f1f41a6c | 3984 | FOR_EACH_VEC_ELT (vec_av_set, n, expr) |
e1ab7874 | 3985 | { |
3986 | vinsn_t vi = EXPR_VINSN (expr); | |
3987 | insn_t insn = VINSN_INSN_RTX (vi); | |
3988 | ||
3989 | ready_try[n] = 0; | |
2f3c9801 | 3990 | ready.vec[n] = insn; |
e1ab7874 | 3991 | } |
3992 | } | |
3993 | ||
3994 | /* Initialize ready list from *AV_PTR for the max_issue () call. | |
3995 | If any unrecognizable insn found in *AV_PTR, return it (and skip | |
48e1416a | 3996 | max_issue). BND and FENCE are current boundary and fence, |
3997 | respectively. If we need to stall for some cycles before an expr | |
e1ab7874 | 3998 | from *AV_PTR would become available, write this number to *PNEED_STALL. */ |
3999 | static expr_t | |
4000 | fill_ready_list (av_set_t *av_ptr, blist_t bnds, fence_t fence, | |
4001 | int *pneed_stall) | |
4002 | { | |
4003 | expr_t expr; | |
4004 | ||
4005 | /* We do not support multiple boundaries per fence. */ | |
4006 | gcc_assert (BLIST_NEXT (bnds) == NULL); | |
4007 | ||
48e1416a | 4008 | /* Process expressions requiring special handling, i.e. pipelined,
e1ab7874 | 4009 | speculative and recog() < 0 expressions first. */ |
4010 | process_pipelined_exprs (av_ptr); | |
4011 | process_spec_exprs (av_ptr); | |
4012 | ||
4013 | /* A USE could be scheduled immediately. */ | |
4014 | expr = process_use_exprs (av_ptr); | |
4015 | if (expr) | |
4016 | { | |
4017 | *pneed_stall = 0; | |
4018 | return expr; | |
4019 | } | |
4020 | ||
4021 | /* Turn the av set to a vector for sorting. */ | |
4022 | if (! fill_vec_av_set (*av_ptr, bnds, fence, pneed_stall)) | |
4023 | { | |
4024 | ready.n_ready = 0; | |
4025 | return NULL; | |
4026 | } | |
4027 | ||
4028 | /* Build the final ready list. */ | |
4029 | convert_vec_av_set_to_ready (); | |
4030 | return NULL; | |
4031 | } | |
4032 | ||
4033 | /* Wrapper for dfa_new_cycle (). Returns TRUE if cycle was advanced. */ | |
4034 | static bool | |
4035 | sel_dfa_new_cycle (insn_t insn, fence_t fence) | |
4036 | { | |
48e1416a | 4037 | int last_scheduled_cycle = FENCE_LAST_SCHEDULED_INSN (fence) |
4038 | ? INSN_SCHED_CYCLE (FENCE_LAST_SCHEDULED_INSN (fence)) | |
e1ab7874 | 4039 | : FENCE_CYCLE (fence) - 1; |
4040 | bool res = false; | |
4041 | int sort_p = 0; | |
4042 | ||
4043 | if (!targetm.sched.dfa_new_cycle) | |
4044 | return false; | |
4045 | ||
4046 | memcpy (curr_state, FENCE_STATE (fence), dfa_state_size); | |
4047 | ||
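/* Keep asking the target's dfa_new_cycle hook about INSN; each time it
   requests a new cycle, advance the fence by one cycle and retry.  */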
4048 | while (!sort_p && targetm.sched.dfa_new_cycle (sched_dump, sched_verbose, | |
4049 | insn, last_scheduled_cycle, | |
4050 | FENCE_CYCLE (fence), &sort_p)) | |
4051 | { | |
4052 | memcpy (FENCE_STATE (fence), curr_state, dfa_state_size); | |
4053 | advance_one_cycle (fence); | |
4054 | memcpy (curr_state, FENCE_STATE (fence), dfa_state_size); | |
4055 | res = true; | |
4056 | } | |
4057 | ||
4058 | return res; | |
4059 | } | |
4060 | ||
4061 | /* Invoke reorder* target hooks on the ready list. Return the number of insns | |
4062 | we can issue. FENCE is the current fence. */ | |
4063 | static int | |
4064 | invoke_reorder_hooks (fence_t fence) | |
4065 | { | |
4066 | int issue_more; | |
4067 | bool ran_hook = false; | |
4068 | ||
4069 | /* Call the reorder hook at the beginning of the cycle, and call | |
4070 | the reorder2 hook in the middle of the cycle. */ | |
4071 | if (FENCE_ISSUED_INSNS (fence) == 0) | |
4072 | { | |
4073 | if (targetm.sched.reorder | |
4074 | && !SCHED_GROUP_P (ready_element (&ready, 0)) | |
4075 | && ready.n_ready > 1) | |
4076 | { | |
4077 | /* Don't give reorder the most prioritized insn as it can break | |
4078 | pipelining. */ | |
4079 | if (pipelining_p) | |
4080 | --ready.n_ready; | |
4081 | ||
4082 | issue_more | |
4083 | = targetm.sched.reorder (sched_dump, sched_verbose, | |
4084 | ready_lastpos (&ready), | |
4085 | &ready.n_ready, FENCE_CYCLE (fence)); | |
4086 | ||
4087 | if (pipelining_p) | |
4088 | ++ready.n_ready; | |
4089 | ||
4090 | ran_hook = true; | |
4091 | } | |
4092 | else | |
4093 | /* Initialize can_issue_more for variable_issue. */ | |
4094 | issue_more = issue_rate; | |
4095 | } | |
4096 | else if (targetm.sched.reorder2 | |
4097 | && !SCHED_GROUP_P (ready_element (&ready, 0))) | |
4098 | { | |
4099 | if (ready.n_ready == 1) | |
48e1416a | 4100 | issue_more = |
e1ab7874 | 4101 | targetm.sched.reorder2 (sched_dump, sched_verbose, |
4102 | ready_lastpos (&ready), | |
4103 | &ready.n_ready, FENCE_CYCLE (fence)); | |
4104 | else | |
4105 | { | |
4106 | if (pipelining_p) | |
4107 | --ready.n_ready; | |
4108 | ||
4109 | issue_more = | |
4110 | targetm.sched.reorder2 (sched_dump, sched_verbose, | |
4111 | ready.n_ready | |
4112 | ? ready_lastpos (&ready) : NULL, | |
4113 | &ready.n_ready, FENCE_CYCLE (fence)); | |
4114 | ||
4115 | if (pipelining_p) | |
4116 | ++ready.n_ready; | |
4117 | } | |
4118 | ||
4119 | ran_hook = true; | |
4120 | } | |
48e1416a | 4121 | else |
abb9c563 | 4122 | issue_more = FENCE_ISSUE_MORE (fence); |
e1ab7874 | 4123 | |
4124 | /* Ensure that ready list and vec_av_set are in line with each other, | |
4125 | i.e. vec_av_set[i] == ready_element (&ready, i). */ | |
4126 | if (issue_more && ran_hook) | |
4127 | { | |
4128 | int i, j, n; | |
b24ef467 | 4129 | rtx_insn **arr = ready.vec; |
f1f41a6c | 4130 | expr_t *vec = vec_av_set.address (); |
e1ab7874 | 4131 | |
4132 | for (i = 0, n = ready.n_ready; i < n; i++) | |
4133 | if (EXPR_INSN_RTX (vec[i]) != arr[i]) | |
4134 | { | |
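/* The reorder hook moved ARR[I]; find the expr that now corresponds
   to it further down the vector and swap it into position I.  */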
e1ab7874 | 4135 | for (j = i; j < n; j++) |
4136 | if (EXPR_INSN_RTX (vec[j]) == arr[i]) | |
4137 | break; | |
4138 | gcc_assert (j < n); | |
4139 | ||
dfcf26a5 | 4140 | std::swap (vec[i], vec[j]); |
e1ab7874 | 4141 | } |
4142 | } | |
4143 | ||
4144 | return issue_more; | |
4145 | } | |
4146 | ||
9d75589a | 4147 | /* Return the EXPR corresponding to the INDEX element of the ready list if
48e1416a | 4148 | FOLLOW_READY_ELEMENT is true (i.e., an expr of |
4149 | ready_element (&ready, INDEX) will be returned), and to INDEX element of | |
e1ab7874 | 4150 | ready.vec otherwise. */ |
4151 | static inline expr_t | |
4152 | find_expr_for_ready (int index, bool follow_ready_element) | |
4153 | { | |
4154 | expr_t expr; | |
4155 | int real_index; | |
4156 | ||
4157 | real_index = follow_ready_element ? ready.first - index : index; | |
4158 | ||
f1f41a6c | 4159 | expr = vec_av_set[real_index]; |
e1ab7874 | 4160 | gcc_assert (ready.vec[real_index] == EXPR_INSN_RTX (expr)); |
4161 | ||
4162 | return expr; | |
4163 | } | |
4164 | ||
4165 | /* Calculate insns worth trying via lookahead_guard hook. Return a number | |
4166 | of such insns found. */ | |
4167 | static int | |
4168 | invoke_dfa_lookahead_guard (void) | |
4169 | { | |
4170 | int i, n; | |
48e1416a | 4171 | bool have_hook |
e1ab7874 | 4172 | = targetm.sched.first_cycle_multipass_dfa_lookahead_guard != NULL; |
4173 | ||
4174 | if (sched_verbose >= 2) | |
4175 | sel_print ("ready after reorder: "); | |
4176 | ||
4177 | for (i = 0, n = 0; i < ready.n_ready; i++) | |
4178 | { | |
4179 | expr_t expr; | |
4180 | insn_t insn; | |
4181 | int r; | |
4182 | ||
48e1416a | 4183 | /* In this loop insn is Ith element of the ready list given by |
e1ab7874 | 4184 | ready_element, not Ith element of ready.vec. */ |
4185 | insn = ready_element (&ready, i); | |
48e1416a | 4186 | |
e1ab7874 | 4187 | if (! have_hook || i == 0) |
4188 | r = 0; | |
4189 | else | |
d9d89d92 | 4190 | r = targetm.sched.first_cycle_multipass_dfa_lookahead_guard (insn, i); |
48e1416a | 4191 | |
e1ab7874 | 4192 | gcc_assert (INSN_CODE (insn) >= 0); |
48e1416a | 4193 | |
4194 | /* Only insns with ready_try = 0 can get here | |
e1ab7874 | 4195 | from fill_ready_list. */ |
4196 | gcc_assert (ready_try [i] == 0); | |
4197 | ready_try[i] = r; | |
4198 | if (!r) | |
4199 | n++; | |
4200 | ||
4201 | expr = find_expr_for_ready (i, true); | |
48e1416a | 4202 | |
e1ab7874 | 4203 | if (sched_verbose >= 2) |
4204 | { | |
4205 | dump_vinsn (EXPR_VINSN (expr)); | |
4206 | sel_print (":%d; ", ready_try[i]); | |
4207 | } | |
4208 | } | |
4209 | ||
4210 | if (sched_verbose >= 2) | |
4211 | sel_print ("\n"); | |
4212 | return n; | |
4213 | } | |
4214 | ||
4215 | /* Calculate the number of privileged insns and return it. */ | |
4216 | static int | |
4217 | calculate_privileged_insns (void) | |
4218 | { | |
4219 | expr_t cur_expr, min_spec_expr = NULL; | |
e1ab7874 | 4220 | int privileged_n = 0, i; |
4221 | ||
4222 | for (i = 0; i < ready.n_ready; i++) | |
4223 | { | |
4224 | if (ready_try[i]) | |
4225 | continue; | |
4226 | ||
4227 | if (! min_spec_expr) | |
57ab8ec3 | 4228 | min_spec_expr = find_expr_for_ready (i, true); |
48e1416a | 4229 | |
e1ab7874 | 4230 | cur_expr = find_expr_for_ready (i, true); |
4231 | ||
4232 | if (EXPR_SPEC (cur_expr) > EXPR_SPEC (min_spec_expr)) | |
4233 | break; | |
4234 | ||
4235 | ++privileged_n; | |
4236 | } | |
4237 | ||
4238 | if (i == ready.n_ready) | |
4239 | privileged_n = 0; | |
4240 | ||
4241 | if (sched_verbose >= 2) | |
4242 | sel_print ("privileged_n: %d insns with SPEC %d\n", | |
4243 | privileged_n, privileged_n ? EXPR_SPEC (min_spec_expr) : -1); | |
4244 | return privileged_n; | |
4245 | } | |
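calculate_privileged_insns above counts the leading run of ready insns whose speculation degree (EXPR_SPEC) does not exceed that of the first usable one, and discards the count when every entry qualifies. Here is a minimal standalone sketch of that counting rule, ignoring the ready_try filtering for brevity; privileged_prefix and spec are illustrative names, not part of the pass.

#include <stdio.h>

/* Standalone illustration (not scheduler code) of the counting rule above.  */
static int
privileged_prefix (const int *spec, int n)
{
  int i;
  for (i = 0; i < n; i++)
    if (spec[i] > spec[0])
      break;
  return i == n ? 0 : i;   /* all entries qualifying means no privileged prefix */
}

int
main (void)
{
  int spec[] = { 0, 0, 1, 2 };
  printf ("%d\n", privileged_prefix (spec, 4));   /* prints 2 */
  return 0;
}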
4246 | ||
48e1416a | 4247 | /* Call the rest of the hooks after the choice was made. Return |
e1ab7874 | 4248 | the number of insns that still can be issued given that the current |
4249 | number is ISSUE_MORE. FENCE and BEST_INSN are the current fence | |
4250 | and the insn chosen for scheduling, respectively. */ | |
4251 | static int | |
2f3c9801 | 4252 | invoke_aftermath_hooks (fence_t fence, rtx_insn *best_insn, int issue_more) |
e1ab7874 | 4253 | { |
4254 | gcc_assert (INSN_P (best_insn)); | |
4255 | ||
4256 | /* First, call dfa_new_cycle, and then variable_issue, if available. */ | |
4257 | sel_dfa_new_cycle (best_insn, fence); | |
48e1416a | 4258 | |
e1ab7874 | 4259 | if (targetm.sched.variable_issue) |
4260 | { | |
4261 | memcpy (curr_state, FENCE_STATE (fence), dfa_state_size); | |
48e1416a | 4262 | issue_more = |
e1ab7874 | 4263 | targetm.sched.variable_issue (sched_dump, sched_verbose, best_insn, |
4264 | issue_more); | |
4265 | memcpy (FENCE_STATE (fence), curr_state, dfa_state_size); | |
4266 | } | |
4267 | else if (GET_CODE (PATTERN (best_insn)) != USE | |
4268 | && GET_CODE (PATTERN (best_insn)) != CLOBBER) | |
4269 | issue_more--; | |
4270 | ||
4271 | return issue_more; | |
4272 | } | |
4273 | ||
30474b14 | 4274 | /* Estimate the cost of issuing INSN on DFA state STATE. */ |
e1ab7874 | 4275 | static int |
d3ffa7b4 | 4276 | estimate_insn_cost (rtx_insn *insn, state_t state) |
e1ab7874 | 4277 | { |
4278 | static state_t temp = NULL; | |
4279 | int cost; | |
4280 | ||
4281 | if (!temp) | |
4282 | temp = xmalloc (dfa_state_size); | |
4283 | ||
4284 | memcpy (temp, state, dfa_state_size); | |
4285 | cost = state_transition (temp, insn); | |
4286 | ||
4287 | if (cost < 0) | |
4288 | return 0; | |
4289 | else if (cost == 0) | |
4290 | return 1; | |
4291 | return cost; | |
4292 | } | |
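estimate_insn_cost above folds the raw DFA transition result into a number of cycles to wait before the insn can be issued: a negative result is treated as issuable now (cost 0), zero as needing the next cycle (cost 1), and a positive value is kept as-is. Below is a minimal standalone sketch of just that mapping; cycles_to_wait is an illustrative name, not a function of the pass.

#include <stdio.h>

/* Standalone illustration (not scheduler code) of the cost mapping above.  */
static int
cycles_to_wait (int transition_result)
{
  if (transition_result < 0)
    return 0;                  /* treated as issuable in the current cycle */
  if (transition_result == 0)
    return 1;                  /* treated as needing the next cycle */
  return transition_result;    /* explicit number of cycles to wait */
}

int
main (void)
{
  printf ("%d %d %d\n",
          cycles_to_wait (-1), cycles_to_wait (0), cycles_to_wait (3));
  /* prints: 0 1 3 */
  return 0;
}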
4293 | ||
48e1416a | 4294 | /* Return the cost of issuing EXPR on the FENCE as estimated by DFA. |
e1ab7874 | 4295 | This function properly handles ASMs, USEs etc. */ |
4296 | static int | |
4297 | get_expr_cost (expr_t expr, fence_t fence) | |
4298 | { | |
9c4c93d0 | 4299 | rtx_insn *insn = EXPR_INSN_RTX (expr); |
e1ab7874 | 4300 | |
4301 | if (recog_memoized (insn) < 0) | |
4302 | { | |
48e1416a | 4303 | if (!FENCE_STARTS_CYCLE_P (fence) |
e1ab7874 | 4304 | && INSN_ASM_P (insn)) |
4305 | /* This is an asm insn which we are trying to issue on a cycle | |
4306 | other than the first.  Issue it on the next cycle. */ | |
4307 | return 1; | |
4308 | else | |
4309 | /* A USE insn, or something else we don't need to | |
4310 | understand. We can't pass these directly to | |
4311 | state_transition because it will trigger a | |
4312 | fatal error for unrecognizable insns. */ | |
4313 | return 0; | |
4314 | } | |
4315 | else | |
30474b14 | 4316 | return estimate_insn_cost (insn, FENCE_STATE (fence)); |
e1ab7874 | 4317 | } |
4318 | ||
48e1416a | 4319 | /* Find the best insn for scheduling, either via max_issue or just take |
e1ab7874 | 4320 | the most prioritized available. */ |
4321 | static int | |
4322 | choose_best_insn (fence_t fence, int privileged_n, int *index) | |
4323 | { | |
4324 | int can_issue = 0; | |
4325 | ||
4326 | if (dfa_lookahead > 0) | |
4327 | { | |
4328 | cycle_issued_insns = FENCE_ISSUED_INSNS (fence); | |
44ad1e56 | 4329 | /* TODO: pass equivalent of first_cycle_insn_p to max_issue (). */ |
e1ab7874 | 4330 | can_issue = max_issue (&ready, privileged_n, |
44ad1e56 | 4331 | FENCE_STATE (fence), true, index); |
e1ab7874 | 4332 | if (sched_verbose >= 2) |
4333 | sel_print ("max_issue: we can issue %d insns, already did %d insns\n", | |
4334 | can_issue, FENCE_ISSUED_INSNS (fence)); | |
4335 | } | |
4336 | else | |
4337 | { | |
4338 | /* We can't use max_issue; just return the first available element. */ | |
4339 | int i; | |
4340 | ||
4341 | for (i = 0; i < ready.n_ready; i++) | |
4342 | { | |
4343 | expr_t expr = find_expr_for_ready (i, true); | |
4344 | ||
4345 | if (get_expr_cost (expr, fence) < 1) | |
4346 | { | |
4347 | can_issue = can_issue_more; | |
4348 | *index = i; | |
4349 | ||
4350 | if (sched_verbose >= 2) | |
4351 | sel_print ("using %dth insn from the ready list\n", i + 1); | |
4352 | ||
4353 | break; | |
4354 | } | |
4355 | } | |
4356 | ||
4357 | if (i == ready.n_ready) | |
4358 | { | |
4359 | can_issue = 0; | |
4360 | *index = -1; | |
4361 | } | |
4362 | } | |
4363 | ||
4364 | return can_issue; | |
4365 | } | |
4366 | ||
48e1416a | 4367 | /* Choose the best expr from *AV_VLIW_PTR and a suitable register for it. |
4368 | BNDS and FENCE are current boundaries and scheduling fence respectively. | |
4369 | Return the expr found, or NULL if nothing can be issued at the moment. | |
4370 | Write to PNEED_STALL the number of cycles to stall if no expr was found. */ | |
e1ab7874 | 4371 | static expr_t |
4372 | find_best_expr (av_set_t *av_vliw_ptr, blist_t bnds, fence_t fence, | |
4373 | int *pneed_stall) | |
4374 | { | |
4375 | expr_t best; | |
48e1416a | 4376 | |
e1ab7874 | 4377 | /* Choose the best insn for scheduling via: |
4378 | 1) sorting the ready list based on priority; | |
4379 | 2) calling the reorder hook; | |
4380 | 3) calling max_issue. */ | |
4381 | best = fill_ready_list (av_vliw_ptr, bnds, fence, pneed_stall); | |
4382 | if (best == NULL && ready.n_ready > 0) | |
4383 | { | |
57ab8ec3 | 4384 | int privileged_n, index; |
e1ab7874 | 4385 | |
4386 | can_issue_more = invoke_reorder_hooks (fence); | |
4387 | if (can_issue_more > 0) | |
4388 | { | |
48e1416a | 4389 | /* Try choosing the best insn until we find one that can be scheduled; |
e1ab7874 | 4390 | some cannot be, due to liveness restrictions on their destination register. |
4391 | In the future, we'd like to choose once and then just probe insns | |
4392 | in the order of their priority. */ | |
57ab8ec3 | 4393 | invoke_dfa_lookahead_guard (); |
e1ab7874 | 4394 | privileged_n = calculate_privileged_insns (); |
4395 | can_issue_more = choose_best_insn (fence, privileged_n, &index); | |
4396 | if (can_issue_more) | |
4397 | best = find_expr_for_ready (index, true); | |
4398 | } | |
48e1416a | 4399 | /* We had some available insns, so if we can't issue them, |
e1ab7874 | 4400 | we have a stall. */ |
4401 | if (can_issue_more == 0) | |
4402 | { | |
4403 | best = NULL; | |
4404 | *pneed_stall = 1; | |
4405 | } | |
4406 | } | |
4407 | ||
4408 | if (best != NULL) | |
4409 | { | |
4410 | can_issue_more = invoke_aftermath_hooks (fence, EXPR_INSN_RTX (best), | |
4411 | can_issue_more); | |
08b41748 | 4412 | if (targetm.sched.variable_issue |
4413 | && can_issue_more == 0) | |
e1ab7874 | 4414 | *pneed_stall = 1; |
4415 | } | |
48e1416a | 4416 | |
e1ab7874 | 4417 | if (sched_verbose >= 2) |
4418 | { | |
4419 | if (best != NULL) | |
4420 | { | |
4421 | sel_print ("Best expression (vliw form): "); | |
4422 | dump_expr (best); | |
4423 | sel_print ("; cycle %d\n", FENCE_CYCLE (fence)); | |
4424 | } | |
4425 | else | |
4426 | sel_print ("No best expr found!\n"); | |
4427 | } | |
4428 | ||
4429 | return best; | |
4430 | } | |
4431 | \f | |
4432 | ||
4433 | /* Functions that implement the core of the scheduler. */ | |
4434 | ||
4435 | ||
48e1416a | 4436 | /* Emit an instruction from EXPR with SEQNO and VINSN after |
e1ab7874 | 4437 | PLACE_TO_INSERT. */ |
4438 | static insn_t | |
48e1416a | 4439 | emit_insn_from_expr_after (expr_t expr, vinsn_t vinsn, int seqno, |
e1ab7874 | 4440 | insn_t place_to_insert) |
4441 | { | |
4442 | /* This assert fails when we have identical instructions | |
4443 | one of which dominates the other. In this case move_op () | |
4444 | finds the first instruction and doesn't search for the second one. | |
4445 | The solution would be to compute av_set after the first found | |
4446 | insn and, if the insn is present in that set, continue searching. | |
4447 | For now we work around this issue in move_op. */ | |
4448 | gcc_assert (!INSN_IN_STREAM_P (EXPR_INSN_RTX (expr))); | |
4449 | ||
4450 | if (EXPR_WAS_RENAMED (expr)) | |
4451 | { | |
4452 | unsigned regno = expr_dest_regno (expr); | |
48e1416a | 4453 | |
e1ab7874 | 4454 | if (HARD_REGISTER_NUM_P (regno)) |
4455 | { | |
4456 | df_set_regs_ever_live (regno, true); | |
4457 | reg_rename_tick[regno] = ++reg_rename_this_tick; | |
4458 | } | |
4459 | } | |
48e1416a | 4460 | |
4461 | return sel_gen_insn_from_expr_after (expr, vinsn, seqno, | |
e1ab7874 | 4462 | place_to_insert); |
4463 | } | |
4464 | ||
4465 | /* Return TRUE if BB can hold bookkeeping code. */ | |
4466 | static bool | |
4467 | block_valid_for_bookkeeping_p (basic_block bb) | |
4468 | { | |
4469 | insn_t bb_end = BB_END (bb); | |
4470 | ||
4471 | if (!in_current_region_p (bb) || EDGE_COUNT (bb->succs) > 1) | |
4472 | return false; | |
4473 | ||
4474 | if (INSN_P (bb_end)) | |
4475 | { | |
4476 | if (INSN_SCHED_TIMES (bb_end) > 0) | |
4477 | return false; | |
4478 | } | |
4479 | else | |
4480 | gcc_assert (NOTE_INSN_BASIC_BLOCK_P (bb_end)); | |
4481 | ||
4482 | return true; | |
4483 | } | |
4484 | ||
4485 | /* Attempt to find a block that can hold bookkeeping code for path(s) incoming | |
4486 | into E2->dest, except from E1->src (there may be a sequence of empty basic | |
4487 | blocks between E1->src and E2->dest). Return found block, or NULL if new | |
9845d120 | 4488 | one must be created. If LAX holds, don't assume there is a simple path |
4489 | from E1->src to E2->dest. */ | |
e1ab7874 | 4490 | static basic_block |
9845d120 | 4491 | find_block_for_bookkeeping (edge e1, edge e2, bool lax) |
e1ab7874 | 4492 | { |
4493 | basic_block candidate_block = NULL; | |
4494 | edge e; | |
4495 | ||
4496 | /* Loop over edges from E1 to E2, inclusive. */ | |
34154e27 | 4497 | for (e = e1; !lax || e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun); e = |
4498 | EDGE_SUCC (e->dest, 0)) | |
e1ab7874 | 4499 | { |
4500 | if (EDGE_COUNT (e->dest->preds) == 2) | |
4501 | { | |
4502 | if (candidate_block == NULL) | |
4503 | candidate_block = (EDGE_PRED (e->dest, 0) == e | |
4504 | ? EDGE_PRED (e->dest, 1)->src | |
4505 | : EDGE_PRED (e->dest, 0)->src); | |
4506 | else | |
4507 | /* Found an additional edge leading into the path from e1 to e2 | |
4508 | from the outside. */ | |
4509 | return NULL; | |
4510 | } | |
4511 | else if (EDGE_COUNT (e->dest->preds) > 2) | |
4512 | /* Several edges lead into the path from e1 to e2 from the outside. */ | |
4513 | return NULL; | |
4514 | ||
4515 | if (e == e2) | |
9845d120 | 4516 | return ((!lax || candidate_block) |
4517 | && block_valid_for_bookkeeping_p (candidate_block) | |
e1ab7874 | 4518 | ? candidate_block |
4519 | : NULL); | |
9845d120 | 4520 | |
4521 | if (lax && EDGE_COUNT (e->dest->succs) != 1) | |
4522 | return NULL; | |
e1ab7874 | 4523 | } |
9845d120 | 4524 | |
4525 | if (lax) | |
4526 | return NULL; | |
4527 | ||
e1ab7874 | 4528 | gcc_unreachable (); |
4529 | } | |
4530 | ||
4531 | /* Create new basic block for bookkeeping code for path(s) incoming into | |
4532 | E2->dest, except from E1->src. Return created block. */ | |
4533 | static basic_block | |
4534 | create_block_for_bookkeeping (edge e1, edge e2) | |
4535 | { | |
4536 | basic_block new_bb, bb = e2->dest; | |
4537 | ||
4538 | /* Check that we don't spoil the loop structure. */ | |
4539 | if (current_loop_nest) | |
4540 | { | |
4541 | basic_block latch = current_loop_nest->latch; | |
4542 | ||
4543 | /* We do not split header. */ | |
4544 | gcc_assert (e2->dest != current_loop_nest->header); | |
4545 | ||
4546 | /* We do not redirect the only edge to the latch block. */ | |
4547 | gcc_assert (e1->dest != latch | |
4548 | || !single_pred_p (latch) | |
4549 | || e1 != single_pred_edge (latch)); | |
4550 | } | |
4551 | ||
4552 | /* Split BB to insert BOOK_INSN there. */ | |
4553 | new_bb = sched_split_block (bb, NULL); | |
4554 | ||
4555 | /* Move note_list from the upper bb. */ | |
4556 | gcc_assert (BB_NOTE_LIST (new_bb) == NULL_RTX); | |
e97a173d | 4557 | BB_NOTE_LIST (new_bb) = BB_NOTE_LIST (bb); |
4558 | BB_NOTE_LIST (bb) = NULL; | |
e1ab7874 | 4559 | |
4560 | gcc_assert (e2->dest == bb); | |
4561 | ||
4562 | /* Skip block for bookkeeping copy when leaving E1->src. */ | |
4563 | if (e1->flags & EDGE_FALLTHRU) | |
4564 | sel_redirect_edge_and_branch_force (e1, new_bb); | |
4565 | else | |
4566 | sel_redirect_edge_and_branch (e1, new_bb); | |
4567 | ||
4568 | gcc_assert (e1->dest == new_bb); | |
4569 | gcc_assert (sel_bb_empty_p (bb)); | |
4570 | ||
9845d120 | 4571 | /* To keep basic block numbers in sync between debug and non-debug |
4572 | compilations, we have to rotate blocks here. Consider that we | |
4573 | started from (a,b)->d, (c,d)->e, and d contained only debug | |
4574 | insns. It would have been removed before if the debug insns | |
4575 | weren't there, so we'd have split e rather than d. So what we do | |
4576 | now is to swap the block numbers of new_bb and | |
4577 | single_succ(new_bb) == e, so that the insns that were in e before | |
4578 | get the new block number. */ | |
4579 | ||
4580 | if (MAY_HAVE_DEBUG_INSNS) | |
4581 | { | |
4582 | basic_block succ; | |
4583 | insn_t insn = sel_bb_head (new_bb); | |
4584 | insn_t last; | |
4585 | ||
4586 | if (DEBUG_INSN_P (insn) | |
4587 | && single_succ_p (new_bb) | |
4588 | && (succ = single_succ (new_bb)) | |
34154e27 | 4589 | && succ != EXIT_BLOCK_PTR_FOR_FN (cfun) |
9845d120 | 4590 | && DEBUG_INSN_P ((last = sel_bb_end (new_bb)))) |
4591 | { | |
4592 | while (insn != last && (DEBUG_INSN_P (insn) || NOTE_P (insn))) | |
4593 | insn = NEXT_INSN (insn); | |
4594 | ||
4595 | if (insn == last) | |
4596 | { | |
4597 | sel_global_bb_info_def gbi; | |
4598 | sel_region_bb_info_def rbi; | |
9845d120 | 4599 | |
4600 | if (sched_verbose >= 2) | |
4601 | sel_print ("Swapping block ids %i and %i\n", | |
4602 | new_bb->index, succ->index); | |
4603 | ||
dfcf26a5 | 4604 | std::swap (new_bb->index, succ->index); |
9845d120 | 4605 | |
f64d2ca4 | 4606 | SET_BASIC_BLOCK_FOR_FN (cfun, new_bb->index, new_bb); |
4607 | SET_BASIC_BLOCK_FOR_FN (cfun, succ->index, succ); | |
9845d120 | 4608 | |
4609 | memcpy (&gbi, SEL_GLOBAL_BB_INFO (new_bb), sizeof (gbi)); | |
4610 | memcpy (SEL_GLOBAL_BB_INFO (new_bb), SEL_GLOBAL_BB_INFO (succ), | |
4611 | sizeof (gbi)); | |
4612 | memcpy (SEL_GLOBAL_BB_INFO (succ), &gbi, sizeof (gbi)); | |
4613 | ||
4614 | memcpy (&rbi, SEL_REGION_BB_INFO (new_bb), sizeof (rbi)); | |
4615 | memcpy (SEL_REGION_BB_INFO (new_bb), SEL_REGION_BB_INFO (succ), | |
4616 | sizeof (rbi)); | |
4617 | memcpy (SEL_REGION_BB_INFO (succ), &rbi, sizeof (rbi)); | |
4618 | ||
dfcf26a5 | 4619 | std::swap (BLOCK_TO_BB (new_bb->index), |
4620 | BLOCK_TO_BB (succ->index)); | |
9845d120 | 4621 | |
dfcf26a5 | 4622 | std::swap (CONTAINING_RGN (new_bb->index), |
4623 | CONTAINING_RGN (succ->index)); | |
9845d120 | 4624 | |
dfcf26a5 | 4625 | for (int i = 0; i < current_nr_blocks; i++) |
9845d120 | 4626 | if (BB_TO_BLOCK (i) == succ->index) |
4627 | BB_TO_BLOCK (i) = new_bb->index; | |
4628 | else if (BB_TO_BLOCK (i) == new_bb->index) | |
4629 | BB_TO_BLOCK (i) = succ->index; | |
4630 | ||
4631 | FOR_BB_INSNS (new_bb, insn) | |
4632 | if (INSN_P (insn)) | |
4633 | EXPR_ORIG_BB_INDEX (INSN_EXPR (insn)) = new_bb->index; | |
4634 | ||
4635 | FOR_BB_INSNS (succ, insn) | |
4636 | if (INSN_P (insn)) | |
4637 | EXPR_ORIG_BB_INDEX (INSN_EXPR (insn)) = succ->index; | |
4638 | ||
6ef9bbe0 | 4639 | if (bitmap_clear_bit (code_motion_visited_blocks, new_bb->index)) |
4640 | bitmap_set_bit (code_motion_visited_blocks, succ->index); | |
9845d120 | 4641 | |
4642 | gcc_assert (LABEL_P (BB_HEAD (new_bb)) | |
4643 | && LABEL_P (BB_HEAD (succ))); | |
4644 | ||
4645 | if (sched_verbose >= 4) | |
4646 | sel_print ("Swapping code labels %i and %i\n", | |
4647 | CODE_LABEL_NUMBER (BB_HEAD (new_bb)), | |
4648 | CODE_LABEL_NUMBER (BB_HEAD (succ))); | |
4649 | ||
dfcf26a5 | 4650 | std::swap (CODE_LABEL_NUMBER (BB_HEAD (new_bb)), |
4651 | CODE_LABEL_NUMBER (BB_HEAD (succ))); | |
9845d120 | 4652 | } |
4653 | } | |
4654 | } | |
4655 | ||
e1ab7874 | 4656 | return bb; |
4657 | } | |
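The debug/non-debug rotation above boils down to swapping the numeric indices of two blocks and then patching every table keyed by those indices (the basic-block array, BLOCK_TO_BB/BB_TO_BLOCK, region info, code labels). Here is a minimal standalone sketch of that pattern on a toy record type; struct blk, swap_ids and by_index are illustrative names only, not part of the pass.

#include <stdio.h>

/* Standalone illustration (not scheduler code): swap ids and keep the
   id -> record table consistent.  */
struct blk { int index; };

static void
swap_ids (struct blk *a, struct blk *b, struct blk **by_index)
{
  int tmp = a->index;
  a->index = b->index;
  b->index = tmp;
  by_index[a->index] = a;
  by_index[b->index] = b;
}

int
main (void)
{
  struct blk x = { 3 }, y = { 4 };
  struct blk *by_index[8] = { 0 };
  by_index[3] = &x;
  by_index[4] = &y;
  swap_ids (&x, &y, by_index);
  printf ("%d %d\n", x.index, y.index);   /* prints: 4 3 */
  return 0;
}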
4658 | ||
4659 | /* Return insn after which we must insert bookkeeping code for path(s) incoming | |
f550c9b3 | 4660 | into E2->dest, except from E1->src. If the returned insn immediately |
4661 | precedes a fence, assign that fence to *FENCE_TO_REWIND. */ | |
e1ab7874 | 4662 | static insn_t |
f550c9b3 | 4663 | find_place_for_bookkeeping (edge e1, edge e2, fence_t *fence_to_rewind) |
e1ab7874 | 4664 | { |
4665 | insn_t place_to_insert; | |
4666 | /* Find a basic block that can hold bookkeeping. If it can be found, do not | |
4667 | create new basic block, but insert bookkeeping there. */ | |
9845d120 | 4668 | basic_block book_block = find_block_for_bookkeeping (e1, e2, FALSE); |
e1ab7874 | 4669 | |
9845d120 | 4670 | if (book_block) |
4671 | { | |
4672 | place_to_insert = BB_END (book_block); | |
4673 | ||
4674 | /* Don't use a block containing only debug insns for | |
4675 | bookkeeping, as this causes scheduling differences between debug | |
4676 | and non-debug compilations, for the block would have been | |
4677 | removed already. */ | |
4678 | if (DEBUG_INSN_P (place_to_insert)) | |
4679 | { | |
ff88d074 | 4680 | rtx_insn *insn = sel_bb_head (book_block); |
e1ab7874 | 4681 | |
9845d120 | 4682 | while (insn != place_to_insert && |
4683 | (DEBUG_INSN_P (insn) || NOTE_P (insn))) | |
4684 | insn = NEXT_INSN (insn); | |
4685 | ||
4686 | if (insn == place_to_insert) | |
4687 | book_block = NULL; | |
4688 | } | |
4689 | } | |
4690 | ||
4691 | if (!book_block) | |
4692 | { | |
4693 | book_block = create_block_for_bookkeeping (e1, e2); | |
4694 | place_to_insert = BB_END (book_block); | |
4695 | if (sched_verbose >= 9) | |
4696 | sel_print ("New block is %i, split from bookkeeping block %i\n", | |
4697 | EDGE_SUCC (book_block, 0)->dest->index, book_block->index); | |
4698 | } | |
4699 | else | |
4700 | { | |
4701 | if (sched_verbose >= 9) | |
4702 | sel_print ("Pre-existing bookkeeping block is %i\n", book_block->index); | |
4703 | } | |
e1ab7874 | 4704 | |
f550c9b3 | 4705 | *fence_to_rewind = NULL; |
4706 | /* If basic block ends with a jump, insert bookkeeping code right before it. | |
4707 | Notice if we are crossing a fence when taking PREV_INSN. */ | |
e1ab7874 | 4708 | if (INSN_P (place_to_insert) && control_flow_insn_p (place_to_insert)) |
f550c9b3 | 4709 | { |
4710 | *fence_to_rewind = flist_lookup (fences, place_to_insert); | |
4711 | place_to_insert = PREV_INSN (place_to_insert); | |
4712 | } | |
e1ab7874 | 4713 | |
4714 | return place_to_insert; | |
4715 | } | |
4716 | ||
4717 | /* Find a proper seqno for the bookkeeping insn inserted at PLACE_TO_INSERT | |
4718 | for JOIN_POINT. */ | |
4719 | static int | |
4720 | find_seqno_for_bookkeeping (insn_t place_to_insert, insn_t join_point) | |
4721 | { | |
4722 | int seqno; | |
e1ab7874 | 4723 | |
4724 | /* Check if we are about to insert bookkeeping copy before a jump, and use | |
4725 | jump's seqno for the copy; otherwise, use JOIN_POINT's seqno. */ | |
9ed997be | 4726 | rtx_insn *next = NEXT_INSN (place_to_insert); |
48e1416a | 4727 | if (INSN_P (next) |
e1ab7874 | 4728 | && JUMP_P (next) |
4729 | && BLOCK_FOR_INSN (next) == BLOCK_FOR_INSN (place_to_insert)) | |
961d3eb8 | 4730 | { |
4731 | gcc_assert (INSN_SCHED_TIMES (next) == 0); | |
4732 | seqno = INSN_SEQNO (next); | |
4733 | } | |
e1ab7874 | 4734 | else if (INSN_SEQNO (join_point) > 0) |
4735 | seqno = INSN_SEQNO (join_point); | |
4736 | else | |
961d3eb8 | 4737 | { |
4738 | seqno = get_seqno_by_preds (place_to_insert); | |
4739 | ||
48e1416a | 4740 | /* Sometimes the fences can move in such a way that there will be |
4741 | no instructions with positive seqno around this bookkeeping. | |
961d3eb8 | 4742 | This means that there will be no way to get to it by a regular |
4743 | fence movement.  That is fine, because we pick up such pieces for | |
4744 | rescheduling anyway, so any positive value will do for now. */ | |
4745 | if (seqno < 0) | |
4746 | { | |
4747 | gcc_assert (pipelining_p); | |
4748 | seqno = 1; | |
4749 | } | |
4750 | } | |
48e1416a | 4751 | |
e1ab7874 | 4752 | gcc_assert (seqno > 0); |
4753 | return seqno; | |
4754 | } | |
4755 | ||
4756 | /* Insert bookkeeping copy of C_EXPS's insn after PLACE_TO_INSERT, assigning | |
4757 | NEW_SEQNO to it. Return created insn. */ | |
4758 | static insn_t | |
4759 | emit_bookkeeping_insn (insn_t place_to_insert, expr_t c_expr, int new_seqno) | |
4760 | { | |
9c4c93d0 | 4761 | rtx_insn *new_insn_rtx = create_copy_of_insn_rtx (EXPR_INSN_RTX (c_expr)); |
e1ab7874 | 4762 | |
4763 | vinsn_t new_vinsn | |
4764 | = create_vinsn_from_insn_rtx (new_insn_rtx, | |
4765 | VINSN_UNIQUE_P (EXPR_VINSN (c_expr))); | |
4766 | ||
4767 | insn_t new_insn = emit_insn_from_expr_after (c_expr, new_vinsn, new_seqno, | |
4768 | place_to_insert); | |
4769 | ||
4770 | INSN_SCHED_TIMES (new_insn) = 0; | |
4771 | bitmap_set_bit (current_copies, INSN_UID (new_insn)); | |
4772 | ||
4773 | return new_insn; | |
4774 | } | |
4775 | ||
4776 | /* Generate a bookkeeping copy of C_EXPR's insn for path(s) incoming into | |
4777 | E2->dest, except from E1->src (there may be a sequence of empty blocks | |
4778 | between E1->src and E2->dest). Return block containing the copy. | |
4779 | All scheduler data is initialized for the newly created insn. */ | |
4780 | static basic_block | |
4781 | generate_bookkeeping_insn (expr_t c_expr, edge e1, edge e2) | |
4782 | { | |
4783 | insn_t join_point, place_to_insert, new_insn; | |
4784 | int new_seqno; | |
4785 | bool need_to_exchange_data_sets; | |
f550c9b3 | 4786 | fence_t fence_to_rewind; |
e1ab7874 | 4787 | |
4788 | if (sched_verbose >= 4) | |
4789 | sel_print ("Generating bookkeeping insn (%d->%d)\n", e1->src->index, | |
4790 | e2->dest->index); | |
4791 | ||
4792 | join_point = sel_bb_head (e2->dest); | |
f550c9b3 | 4793 | place_to_insert = find_place_for_bookkeeping (e1, e2, &fence_to_rewind); |
e1ab7874 | 4794 | new_seqno = find_seqno_for_bookkeeping (place_to_insert, join_point); |
4795 | need_to_exchange_data_sets | |
4796 | = sel_bb_empty_p (BLOCK_FOR_INSN (place_to_insert)); | |
4797 | ||
4798 | new_insn = emit_bookkeeping_insn (place_to_insert, c_expr, new_seqno); | |
4799 | ||
f550c9b3 | 4800 | if (fence_to_rewind) |
4801 | FENCE_INSN (fence_to_rewind) = new_insn; | |
4802 | ||
e1ab7874 | 4803 | /* When inserting bookkeeping insn in new block, av sets should be |
4804 | following: old basic block (that now holds bookkeeping) data sets are | |
4805 | the same as was before generation of bookkeeping, and new basic block | |
4806 | (that now hold all other insns of old basic block) data sets are | |
4807 | invalid. So exchange data sets for these basic blocks as sel_split_block | |
4808 | mistakenly exchanges them in this case. Cannot do it earlier because | |
4809 | when single instruction is added to new basic block it should hold NULL | |
4810 | lv_set. */ | |
4811 | if (need_to_exchange_data_sets) | |
4812 | exchange_data_sets (BLOCK_FOR_INSN (new_insn), | |
4813 | BLOCK_FOR_INSN (join_point)); | |
4814 | ||
4815 | stat_bookkeeping_copies++; | |
4816 | return BLOCK_FOR_INSN (new_insn); | |
4817 | } | |
4818 | ||
48e1416a | 4819 | /* Remove from AV_PTR all insns that may need bookkeeping when scheduling |
e1ab7874 | 4820 | on FENCE, but we are unable to copy them. */ |
4821 | static void | |
4822 | remove_insns_that_need_bookkeeping (fence_t fence, av_set_t *av_ptr) | |
4823 | { | |
4824 | expr_t expr; | |
4825 | av_set_iterator i; | |
4826 | ||
48e1416a | 4827 | /* An expression does not need bookkeeping if it is available on all paths |
4828 | from current block to original block and current block dominates | |
4829 | original block. We check availability on all paths by examining | |
4830 | EXPR_SPEC; this is not equivalent, because it may be positive even | |
4831 | if expr is available on all paths (but if expr is not available on | |
e1ab7874 | 4832 | any path, EXPR_SPEC will be positive). */ |
4833 | ||
4834 | FOR_EACH_EXPR_1 (expr, i, av_ptr) | |
4835 | { | |
4836 | if (!control_flow_insn_p (EXPR_INSN_RTX (expr)) | |
4837 | && (!bookkeeping_p || VINSN_UNIQUE_P (EXPR_VINSN (expr))) | |
4838 | && (EXPR_SPEC (expr) | |
4839 | || !EXPR_ORIG_BB_INDEX (expr) | |
4840 | || !dominated_by_p (CDI_DOMINATORS, | |
f5a6b05f | 4841 | BASIC_BLOCK_FOR_FN (cfun, |
4842 | EXPR_ORIG_BB_INDEX (expr)), | |
e1ab7874 | 4843 | BLOCK_FOR_INSN (FENCE_INSN (fence))))) |
4844 | { | |
4845 | if (sched_verbose >= 4) | |
4846 | sel_print ("Expr %d removed because it would need bookkeeping, which " | |
4847 | "cannot be created\n", INSN_UID (EXPR_INSN_RTX (expr))); | |
4848 | av_set_iter_remove (&i); | |
4849 | } | |
4850 | } | |
4851 | } | |
4852 | ||
4853 | /* Moving conditional jump through some instructions. | |
4854 | ||
4855 | Consider example: | |
4856 | ||
4857 | ... <- current scheduling point | |
4858 | NOTE BASIC BLOCK: <- bb header | |
4859 | (p8) add r14=r14+0x9;; | |
4860 | (p8) mov [r14]=r23 | |
4861 | (!p8) jump L1;; | |
4862 | NOTE BASIC BLOCK: | |
4863 | ... | |
4864 | ||
48e1416a | 4865 | We can schedule the jump one cycle earlier than the mov, because they cannot |
e1ab7874 | 4866 | be executed together, as their predicates are mutually exclusive. |
4867 | ||
48e1416a | 4868 | This is done as follows: first, a new fallthrough basic block is created |
4869 | after the jump (this can always be done, because there should already be a | |
e1ab7874 | 4870 | fallthrough block where control flow goes in case the predicate is true - |
48e1416a | 4871 | in our example; otherwise there would be a dependence between those |
4872 | instructions and the jump, and we could not schedule the jump right now); | |
4873 | next, all instructions between the jump and the current scheduling point | |
e1ab7874 | 4874 | are moved to this new block. And the result is this: |
4875 | ||
4876 | NOTE BASIC BLOCK: | |
4877 | (!p8) jump L1 <- current scheduling point | |
4878 | NOTE BASIC BLOCK: <- bb header | |
4879 | (p8) add r14=r14+0x9;; | |
4880 | (p8) mov [r14]=r23 | |
4881 | NOTE BASIC BLOCK: | |
4882 | ... | |
4883 | */ | |
4884 | static void | |
2f3c9801 | 4885 | move_cond_jump (rtx_insn *insn, bnd_t bnd) |
e1ab7874 | 4886 | { |
4887 | edge ft_edge; | |
c6cff213 | 4888 | basic_block block_from, block_next, block_new, block_bnd, bb; |
9c4c93d0 | 4889 | rtx_insn *next, *prev, *link, *head; |
e1ab7874 | 4890 | |
e1ab7874 | 4891 | block_from = BLOCK_FOR_INSN (insn); |
c6cff213 | 4892 | block_bnd = BLOCK_FOR_INSN (BND_TO (bnd)); |
4893 | prev = BND_TO (bnd); | |
e1ab7874 | 4894 | |
c6cff213 | 4895 | #ifdef ENABLE_CHECKING |
4896 | /* Moving of jump should not cross any other jumps or beginnings of new | |
4897 | basic blocks. The only exception is when we move a jump through | |
4898 | mutually exclusive insns along fallthru edges. */ | |
4899 | if (block_from != block_bnd) | |
4900 | { | |
4901 | bb = block_from; | |
4902 | for (link = PREV_INSN (insn); link != PREV_INSN (prev); | |
4903 | link = PREV_INSN (link)) | |
4904 | { | |
4905 | if (INSN_P (link)) | |
4906 | gcc_assert (sched_insns_conditions_mutex_p (insn, link)); | |
4907 | if (BLOCK_FOR_INSN (link) && BLOCK_FOR_INSN (link) != bb) | |
4908 | { | |
4909 | gcc_assert (single_pred (bb) == BLOCK_FOR_INSN (link)); | |
4910 | bb = BLOCK_FOR_INSN (link); | |
4911 | } | |
4912 | } | |
4913 | } | |
4914 | #endif | |
e1ab7874 | 4915 | |
4916 | /* Jump is moved to the boundary. */ | |
e1ab7874 | 4917 | next = PREV_INSN (insn); |
2f3c9801 | 4918 | BND_TO (bnd) = insn; |
e1ab7874 | 4919 | |
7f58c05e | 4920 | ft_edge = find_fallthru_edge_from (block_from); |
e1ab7874 | 4921 | block_next = ft_edge->dest; |
4922 | /* There must be a fallthrough block (otherwise, where would | |
4923 | control flow go in case of a false jump predicate?). */ | |
4924 | gcc_assert (block_next); | |
4925 | ||
4926 | /* Create new empty basic block after source block. */ | |
4927 | block_new = sel_split_edge (ft_edge); | |
4928 | gcc_assert (block_new->next_bb == block_next | |
4929 | && block_from->next_bb == block_new); | |
4930 | ||
c6cff213 | 4931 | /* Move all instructions except INSN to BLOCK_NEW. */ |
4932 | bb = block_bnd; | |
4933 | head = BB_HEAD (block_new); | |
4934 | while (bb != block_from->next_bb) | |
e1ab7874 | 4935 | { |
9c4c93d0 | 4936 | rtx_insn *from, *to; |
c6cff213 | 4937 | from = bb == block_bnd ? prev : sel_bb_head (bb); |
4938 | to = bb == block_from ? next : sel_bb_end (bb); | |
e1ab7874 | 4939 | |
c6cff213 | 4940 | /* The jump being moved can be the first insn in the block. |
4941 | In this case we don't have to move anything in this block. */ | |
4942 | if (NEXT_INSN (to) != from) | |
4943 | { | |
4944 | reorder_insns (from, to, head); | |
4945 | ||
4946 | for (link = to; link != head; link = PREV_INSN (link)) | |
4947 | EXPR_ORIG_BB_INDEX (INSN_EXPR (link)) = block_new->index; | |
4948 | head = to; | |
4949 | } | |
e1ab7874 | 4950 | |
c6cff213 | 4951 | /* Cleanup possibly empty blocks left. */ |
4952 | block_next = bb->next_bb; | |
4953 | if (bb != block_from) | |
81d1ad0f | 4954 | tidy_control_flow (bb, false); |
c6cff213 | 4955 | bb = block_next; |
4956 | } | |
e1ab7874 | 4957 | |
4958 | /* Assert there is no jump to BLOCK_NEW, only fallthrough edge. */ | |
4959 | gcc_assert (NOTE_INSN_BASIC_BLOCK_P (BB_HEAD (block_new))); | |
e1ab7874 | 4960 | |
4961 | gcc_assert (!sel_bb_empty_p (block_from) | |
4962 | && !sel_bb_empty_p (block_new)); | |
4963 | ||
4964 | /* Update data sets for BLOCK_NEW to represent that INSN and | |
4965 | instructions from the other branch of INSN are no longer | |
4966 | available at BLOCK_NEW. */ | |
4967 | BB_AV_LEVEL (block_new) = global_level; | |
4968 | gcc_assert (BB_LV_SET (block_new) == NULL); | |
4969 | BB_LV_SET (block_new) = get_clear_regset_from_pool (); | |
4970 | update_data_sets (sel_bb_head (block_new)); | |
4971 | ||
4972 | /* INSN is a new basic block header - so prepare its data | |
4973 | structures and update availability and liveness sets. */ | |
4974 | update_data_sets (insn); | |
4975 | ||
4976 | if (sched_verbose >= 4) | |
4977 | sel_print ("Moving jump %d\n", INSN_UID (insn)); | |
4978 | } | |
4979 | ||
4980 | /* Remove nops generated during move_op for preventing removal of empty | |
4981 | basic blocks. */ | |
4982 | static void | |
9845d120 | 4983 | remove_temp_moveop_nops (bool full_tidying) |
e1ab7874 | 4984 | { |
4985 | int i; | |
4986 | insn_t insn; | |
48e1416a | 4987 | |
f1f41a6c | 4988 | FOR_EACH_VEC_ELT (vec_temp_moveop_nops, i, insn) |
e1ab7874 | 4989 | { |
4990 | gcc_assert (INSN_NOP_P (insn)); | |
9845d120 | 4991 | return_nop_to_pool (insn, full_tidying); |
e1ab7874 | 4992 | } |
4993 | ||
4994 | /* Empty the vector. */ | |
f1f41a6c | 4995 | if (vec_temp_moveop_nops.length () > 0) |
4996 | vec_temp_moveop_nops.block_remove (0, vec_temp_moveop_nops.length ()); | |
e1ab7874 | 4997 | } |
4998 | ||
4999 | /* Records the maximal UID before moving up an instruction. Used for | |
5000 | distinguishing between bookkeeping copies and original insns. */ | |
5001 | static int max_uid_before_move_op = 0; | |
5002 | ||
5003 | /* Remove from AV_VLIW_P all instructions but next when debug counter | |
5004 | tells us so. Next instruction is fetched from BNDS. */ | |
5005 | static void | |
5006 | remove_insns_for_debug (blist_t bnds, av_set_t *av_vliw_p) | |
5007 | { | |
5008 | if (! dbg_cnt (sel_sched_insn_cnt)) | |
5009 | /* Leave only the next insn in av_vliw. */ | |
5010 | { | |
5011 | av_set_iterator av_it; | |
5012 | expr_t expr; | |
5013 | bnd_t bnd = BLIST_BND (bnds); | |
5014 | insn_t next = BND_TO (bnd); | |
5015 | ||
5016 | gcc_assert (BLIST_NEXT (bnds) == NULL); | |
5017 | ||
5018 | FOR_EACH_EXPR_1 (expr, av_it, av_vliw_p) | |
5019 | if (EXPR_INSN_RTX (expr) != next) | |
5020 | av_set_iter_remove (&av_it); | |
5021 | } | |
5022 | } | |
5023 | ||
48e1416a | 5024 | /* Compute available instructions on BNDS. FENCE is the current fence. Write |
e1ab7874 | 5025 | the computed set to *AV_VLIW_P. */ |
5026 | static void | |
5027 | compute_av_set_on_boundaries (fence_t fence, blist_t bnds, av_set_t *av_vliw_p) | |
5028 | { | |
5029 | if (sched_verbose >= 2) | |
5030 | { | |
5031 | sel_print ("Boundaries: "); | |
5032 | dump_blist (bnds); | |
5033 | sel_print ("\n"); | |
5034 | } | |
5035 | ||
5036 | for (; bnds; bnds = BLIST_NEXT (bnds)) | |
5037 | { | |
5038 | bnd_t bnd = BLIST_BND (bnds); | |
5039 | av_set_t av1_copy; | |
5040 | insn_t bnd_to = BND_TO (bnd); | |
5041 | ||
5042 | /* Rewind BND->TO to the basic block header in case some bookkeeping | |
5043 | instructions were inserted before BND->TO and it needs to be | |
5044 | adjusted. */ | |
5045 | if (sel_bb_head_p (bnd_to)) | |
5046 | gcc_assert (INSN_SCHED_TIMES (bnd_to) == 0); | |
5047 | else | |
5048 | while (INSN_SCHED_TIMES (PREV_INSN (bnd_to)) == 0) | |
5049 | { | |
5050 | bnd_to = PREV_INSN (bnd_to); | |
5051 | if (sel_bb_head_p (bnd_to)) | |
5052 | break; | |
5053 | } | |
5054 | ||
5055 | if (BND_TO (bnd) != bnd_to) | |
5056 | { | |
5057 | gcc_assert (FENCE_INSN (fence) == BND_TO (bnd)); | |
5058 | FENCE_INSN (fence) = bnd_to; | |
2f3c9801 | 5059 | BND_TO (bnd) = bnd_to; |
e1ab7874 | 5060 | } |
5061 | ||
5062 | av_set_clear (&BND_AV (bnd)); | |
5063 | BND_AV (bnd) = compute_av_set (BND_TO (bnd), NULL, 0, true); | |
5064 | ||
5065 | av_set_clear (&BND_AV1 (bnd)); | |
5066 | BND_AV1 (bnd) = av_set_copy (BND_AV (bnd)); | |
5067 | ||
5068 | moveup_set_inside_insn_group (&BND_AV1 (bnd), NULL); | |
48e1416a | 5069 | |
e1ab7874 | 5070 | av1_copy = av_set_copy (BND_AV1 (bnd)); |
5071 | av_set_union_and_clear (av_vliw_p, &av1_copy, NULL); | |
5072 | } | |
5073 | ||
5074 | if (sched_verbose >= 2) | |
5075 | { | |
5076 | sel_print ("Available exprs (vliw form): "); | |
5077 | dump_av_set (*av_vliw_p); | |
5078 | sel_print ("\n"); | |
5079 | } | |
5080 | } | |
5081 | ||
48e1416a | 5082 | /* Calculate the sequential av set on BND corresponding to the EXPR_VLIW |
5083 | expression. When FOR_MOVEOP is true, also replace the register of | |
e1ab7874 | 5084 | expressions found with the register from EXPR_VLIW. */ |
5085 | static av_set_t | |
5086 | find_sequential_best_exprs (bnd_t bnd, expr_t expr_vliw, bool for_moveop) | |
5087 | { | |
5088 | av_set_t expr_seq = NULL; | |
5089 | expr_t expr; | |
5090 | av_set_iterator i; | |
48e1416a | 5091 | |
e1ab7874 | 5092 | FOR_EACH_EXPR (expr, i, BND_AV (bnd)) |
5093 | { | |
5094 | if (equal_after_moveup_path_p (expr, NULL, expr_vliw)) | |
5095 | { | |
5096 | if (for_moveop) | |
5097 | { | |
48e1416a | 5098 | /* The sequential expression has the right form to pass |
5099 | to move_op except when renaming happened. Put the | |
e1ab7874 | 5100 | correct register in EXPR then. */ |
5101 | if (EXPR_SEPARABLE_P (expr) && REG_P (EXPR_LHS (expr))) | |
5102 | { | |
5103 | if (expr_dest_regno (expr) != expr_dest_regno (expr_vliw)) | |
5104 | { | |
5105 | replace_dest_with_reg_in_expr (expr, EXPR_LHS (expr_vliw)); | |
5106 | stat_renamed_scheduled++; | |
5107 | } | |
48e1416a | 5108 | /* Also put the correct TARGET_AVAILABLE bit on the expr. |
5109 | This is needed when renaming came up with original | |
e1ab7874 | 5110 | register. */ |
48e1416a | 5111 | else if (EXPR_TARGET_AVAILABLE (expr) |
e1ab7874 | 5112 | != EXPR_TARGET_AVAILABLE (expr_vliw)) |
5113 | { | |
5114 | gcc_assert (EXPR_TARGET_AVAILABLE (expr_vliw) == 1); | |
5115 | EXPR_TARGET_AVAILABLE (expr) = 1; | |
5116 | } | |
5117 | } | |
5118 | if (EXPR_WAS_SUBSTITUTED (expr)) | |
5119 | stat_substitutions_total++; | |
5120 | } | |
5121 | ||
5122 | av_set_add (&expr_seq, expr); | |
48e1416a | 5123 | |
5124 | /* With substitution inside insn group, it is possible | |
5125 | that more than one expression in expr_seq will correspond | |
5126 | to expr_vliw. In this case, choose one as the attempt to | |
e1ab7874 | 5127 | move both leads to miscompiles. */ |
5128 | break; | |
5129 | } | |
5130 | } | |
5131 | ||
5132 | if (for_moveop && sched_verbose >= 2) | |
5133 | { | |
5134 | sel_print ("Best expression(s) (sequential form): "); | |
5135 | dump_av_set (expr_seq); | |
5136 | sel_print ("\n"); | |
5137 | } | |
48e1416a | 5138 | |
e1ab7874 | 5139 | return expr_seq; |
5140 | } | |
5141 | ||
5142 | ||
5143 | /* Move nop to previous block. */ | |
5144 | static void ATTRIBUTE_UNUSED | |
5145 | move_nop_to_previous_block (insn_t nop, basic_block prev_bb) | |
5146 | { | |
9ed997be | 5147 | insn_t prev_insn, next_insn; |
e1ab7874 | 5148 | |
48e1416a | 5149 | gcc_assert (sel_bb_head_p (nop) |
e1ab7874 | 5150 | && prev_bb == BLOCK_FOR_INSN (nop)->prev_bb); |
9ed997be | 5151 | rtx_note *note = bb_note (BLOCK_FOR_INSN (nop)); |
e1ab7874 | 5152 | prev_insn = sel_bb_end (prev_bb); |
5153 | next_insn = NEXT_INSN (nop); | |
5154 | gcc_assert (prev_insn != NULL_RTX | |
5155 | && PREV_INSN (note) == prev_insn); | |
5156 | ||
4a57a2e8 | 5157 | SET_NEXT_INSN (prev_insn) = nop; |
5158 | SET_PREV_INSN (nop) = prev_insn; | |
e1ab7874 | 5159 | |
4a57a2e8 | 5160 | SET_PREV_INSN (note) = nop; |
5161 | SET_NEXT_INSN (note) = next_insn; | |
e1ab7874 | 5162 | |
4a57a2e8 | 5163 | SET_NEXT_INSN (nop) = note; |
5164 | SET_PREV_INSN (next_insn) = note; | |
e1ab7874 | 5165 | |
26bb3cb2 | 5166 | BB_END (prev_bb) = nop; |
e1ab7874 | 5167 | BLOCK_FOR_INSN (nop) = prev_bb; |
5168 | } | |
5169 | ||
5170 | /* Prepare a place to insert the chosen expression on BND. */ | |
5171 | static insn_t | |
5172 | prepare_place_to_insert (bnd_t bnd) | |
5173 | { | |
5174 | insn_t place_to_insert; | |
5175 | ||
5176 | /* Init place_to_insert before calling move_op, as the latter | |
5177 | can possibly remove BND_TO (bnd). */ | |
5178 | if (/* If this is not the first insn scheduled. */ | |
5179 | BND_PTR (bnd)) | |
5180 | { | |
5181 | /* Add it after last scheduled. */ | |
5182 | place_to_insert = ILIST_INSN (BND_PTR (bnd)); | |
9845d120 | 5183 | if (DEBUG_INSN_P (place_to_insert)) |
5184 | { | |
5185 | ilist_t l = BND_PTR (bnd); | |
5186 | while ((l = ILIST_NEXT (l)) && | |
5187 | DEBUG_INSN_P (ILIST_INSN (l))) | |
5188 | ; | |
5189 | if (!l) | |
5190 | place_to_insert = NULL; | |
5191 | } | |
e1ab7874 | 5192 | } |
5193 | else | |
9845d120 | 5194 | place_to_insert = NULL; |
5195 | ||
5196 | if (!place_to_insert) | |
e1ab7874 | 5197 | { |
5198 | /* Add it before BND_TO. The difference is in the | |
5199 | basic block, where INSN will be added. */ | |
5200 | place_to_insert = get_nop_from_pool (BND_TO (bnd)); | |
5201 | gcc_assert (BLOCK_FOR_INSN (place_to_insert) | |
5202 | == BLOCK_FOR_INSN (BND_TO (bnd))); | |
5203 | } | |
5204 | ||
5205 | return place_to_insert; | |
5206 | } | |
5207 | ||
48e1416a | 5208 | /* Find original instructions for EXPR_SEQ and move them to the BND boundary. |
e1ab7874 | 5209 | Return the expression to emit in C_EXPR. */ |
de353418 | 5210 | static bool |
48e1416a | 5211 | move_exprs_to_boundary (bnd_t bnd, expr_t expr_vliw, |
e1ab7874 | 5212 | av_set_t expr_seq, expr_t c_expr) |
5213 | { | |
de353418 | 5214 | bool b, should_move; |
e1ab7874 | 5215 | unsigned book_uid; |
5216 | bitmap_iterator bi; | |
5217 | int n_bookkeeping_copies_before_moveop; | |
5218 | ||
5219 | /* Make a move. This call will remove the original operation, | |
5220 | insert all necessary bookkeeping instructions and update the | |
5221 | data sets. After that all we have to do is add the operation | |
5222 | before BND_TO (BND). */ | |
5223 | n_bookkeeping_copies_before_moveop = stat_bookkeeping_copies; | |
5224 | max_uid_before_move_op = get_max_uid (); | |
5225 | bitmap_clear (current_copies); | |
5226 | bitmap_clear (current_originators); | |
5227 | ||
48e1416a | 5228 | b = move_op (BND_TO (bnd), expr_seq, expr_vliw, |
de353418 | 5229 | get_dest_from_orig_ops (expr_seq), c_expr, &should_move); |
e1ab7874 | 5230 | |
48e1416a | 5231 | /* We should be able to find the expression we've chosen for |
e1ab7874 | 5232 | scheduling. */ |
de353418 | 5233 | gcc_assert (b); |
48e1416a | 5234 | |
e1ab7874 | 5235 | if (stat_bookkeeping_copies > n_bookkeeping_copies_before_moveop) |
5236 | stat_insns_needed_bookkeeping++; | |
48e1416a | 5237 | |
e1ab7874 | 5238 | EXECUTE_IF_SET_IN_BITMAP (current_copies, 0, book_uid, bi) |
5239 | { | |
dca13bd7 | 5240 | unsigned uid; |
5241 | bitmap_iterator bi; | |
5242 | ||
e1ab7874 | 5243 | /* We allocate these bitmaps lazily. */ |
5244 | if (! INSN_ORIGINATORS_BY_UID (book_uid)) | |
5245 | INSN_ORIGINATORS_BY_UID (book_uid) = BITMAP_ALLOC (NULL); | |
48e1416a | 5246 | |
5247 | bitmap_copy (INSN_ORIGINATORS_BY_UID (book_uid), | |
e1ab7874 | 5248 | current_originators); |
dca13bd7 | 5249 | |
5250 | /* Transitively add all originators' originators. */ | |
5251 | EXECUTE_IF_SET_IN_BITMAP (current_originators, 0, uid, bi) | |
5252 | if (INSN_ORIGINATORS_BY_UID (uid)) | |
5253 | bitmap_ior_into (INSN_ORIGINATORS_BY_UID (book_uid), | |
5254 | INSN_ORIGINATORS_BY_UID (uid)); | |
e1ab7874 | 5255 | } |
de353418 | 5256 | |
5257 | return should_move; | |
e1ab7874 | 5258 | } |
5259 | ||
5260 | ||
5261 | /* Debug a DFA state as an array of bytes. */ | |
5262 | static void | |
5263 | debug_state (state_t state) | |
5264 | { | |
5265 | unsigned char *p; | |
5266 | unsigned int i, size = dfa_state_size; | |
5267 | ||
5268 | sel_print ("state (%u):", size); | |
5269 | for (i = 0, p = (unsigned char *) state; i < size; i++) | |
5270 | sel_print (" %d", p[i]); | |
5271 | sel_print ("\n"); | |
5272 | } | |
5273 | ||
48e1416a | 5274 | /* Advance state on FENCE with INSN. Return true if INSN is |
e1ab7874 | 5275 | an ASM, and we should advance state once more. */ |
5276 | static bool | |
5277 | advance_state_on_fence (fence_t fence, insn_t insn) | |
5278 | { | |
5279 | bool asm_p; | |
5280 | ||
5281 | if (recog_memoized (insn) >= 0) | |
5282 | { | |
5283 | int res; | |
5284 | state_t temp_state = alloca (dfa_state_size); | |
48e1416a | 5285 | |
e1ab7874 | 5286 | gcc_assert (!INSN_ASM_P (insn)); |
5287 | asm_p = false; | |
5288 | ||
5289 | memcpy (temp_state, FENCE_STATE (fence), dfa_state_size); | |
5290 | res = state_transition (FENCE_STATE (fence), insn); | |
5291 | gcc_assert (res < 0); | |
5292 | ||
5293 | if (memcmp (temp_state, FENCE_STATE (fence), dfa_state_size)) | |
5294 | { | |
5295 | FENCE_ISSUED_INSNS (fence)++; | |
5296 | ||
5297 | /* We should never issue more than issue_rate insns. */ | |
5298 | if (FENCE_ISSUED_INSNS (fence) > issue_rate) | |
5299 | gcc_unreachable (); | |
5300 | } | |
48e1416a | 5301 | } |
e1ab7874 | 5302 | else |
5303 | { | |
48e1416a | 5304 | /* This could be an ASM insn which we'd like to schedule |
e1ab7874 | 5305 | on the next cycle. */ |
5306 | asm_p = INSN_ASM_P (insn); | |
5307 | if (!FENCE_STARTS_CYCLE_P (fence) && asm_p) | |
5308 | advance_one_cycle (fence); | |
5309 | } | |
5310 | ||
5311 | if (sched_verbose >= 2) | |
5312 | debug_state (FENCE_STATE (fence)); | |
9845d120 | 5313 | if (!DEBUG_INSN_P (insn)) |
5314 | FENCE_STARTS_CYCLE_P (fence) = 0; | |
abb9c563 | 5315 | FENCE_ISSUE_MORE (fence) = can_issue_more; |
e1ab7874 | 5316 | return asm_p; |
5317 | } | |
5318 | ||
5319 | /* Update FENCE on which INSN was scheduled and this INSN, too. NEED_STALL | |
5320 | is nonzero if we need to stall after issuing INSN. */ | |
5321 | static void | |
5322 | update_fence_and_insn (fence_t fence, insn_t insn, int need_stall) | |
5323 | { | |
5324 | bool asm_p; | |
48e1416a | 5325 | |
e1ab7874 | 5326 | /* First, reflect that something is scheduled on this fence. */ |
5327 | asm_p = advance_state_on_fence (fence, insn); | |
5328 | FENCE_LAST_SCHEDULED_INSN (fence) = insn; | |
f1f41a6c | 5329 | vec_safe_push (FENCE_EXECUTING_INSNS (fence), insn); |
e1ab7874 | 5330 | if (SCHED_GROUP_P (insn)) |
5331 | { | |
5332 | FENCE_SCHED_NEXT (fence) = INSN_SCHED_NEXT (insn); | |
5333 | SCHED_GROUP_P (insn) = 0; | |
5334 | } | |
5335 | else | |
2f3c9801 | 5336 | FENCE_SCHED_NEXT (fence) = NULL; |
e1ab7874 | 5337 | if (INSN_UID (insn) < FENCE_READY_TICKS_SIZE (fence)) |
5338 | FENCE_READY_TICKS (fence) [INSN_UID (insn)] = 0; | |
5339 | ||
5340 | /* Set instruction scheduling info. This will be used in bundling, | |
5341 | pipelining, tick computations etc. */ | |
5342 | ++INSN_SCHED_TIMES (insn); | |
5343 | EXPR_TARGET_AVAILABLE (INSN_EXPR (insn)) = true; | |
5344 | EXPR_ORIG_SCHED_CYCLE (INSN_EXPR (insn)) = FENCE_CYCLE (fence); | |
5345 | INSN_AFTER_STALL_P (insn) = FENCE_AFTER_STALL_P (fence); | |
5346 | INSN_SCHED_CYCLE (insn) = FENCE_CYCLE (fence); | |
5347 | ||
5348 | /* This does not account for adjust_cost hooks, so just add the biggest | |
48e1416a | 5349 | constant the hook may add to the latency. TODO: make this |
e1ab7874 | 5350 | a target dependent constant. */ |
48e1416a | 5351 | INSN_READY_CYCLE (insn) |
5352 | = INSN_SCHED_CYCLE (insn) + (INSN_CODE (insn) < 0 | |
e1ab7874 | 5353 | ? 1 |
5354 | : maximal_insn_latency (insn) + 1); | |
5355 | ||
5356 | /* Change these fields last, as they're used above. */ | |
5357 | FENCE_AFTER_STALL_P (fence) = 0; | |
5358 | if (asm_p || need_stall) | |
5359 | advance_one_cycle (fence); | |
48e1416a | 5360 | |
e1ab7874 | 5361 | /* Indicate that we've scheduled something on this fence. */ |
5362 | FENCE_SCHEDULED_P (fence) = true; | |
5363 | scheduled_something_on_previous_fence = true; | |
5364 | ||
5365 | /* Print debug information when insn's fields are updated. */ | |
5366 | if (sched_verbose >= 2) | |
5367 | { | |
5368 | sel_print ("Scheduling insn: "); | |
5369 | dump_insn_1 (insn, 1); | |
5370 | sel_print ("\n"); | |
5371 | } | |
5372 | } | |
5373 | ||
9845d120 | 5374 | /* Update boundary BND (and, if needed, FENCE) with INSN, remove the |
5375 | old boundary from BNDSP, add new boundaries to BNDS_TAIL_P and | |
5376 | return it. */ | |
e1ab7874 | 5377 | static blist_t * |
9845d120 | 5378 | update_boundaries (fence_t fence, bnd_t bnd, insn_t insn, blist_t *bndsp, |
e1ab7874 | 5379 | blist_t *bnds_tailp) |
5380 | { | |
5381 | succ_iterator si; | |
5382 | insn_t succ; | |
5383 | ||
5384 | advance_deps_context (BND_DC (bnd), insn); | |
48e1416a | 5385 | FOR_EACH_SUCC_1 (succ, si, insn, |
e1ab7874 | 5386 | SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS) |
5387 | { | |
5388 | ilist_t ptr = ilist_copy (BND_PTR (bnd)); | |
48e1416a | 5389 | |
e1ab7874 | 5390 | ilist_add (&ptr, insn); |
9845d120 | 5391 | |
5392 | if (DEBUG_INSN_P (insn) && sel_bb_end_p (insn) | |
5393 | && is_ineligible_successor (succ, ptr)) | |
5394 | { | |
5395 | ilist_clear (&ptr); | |
5396 | continue; | |
5397 | } | |
5398 | ||
5399 | if (FENCE_INSN (fence) == insn && !sel_bb_end_p (insn)) | |
5400 | { | |
5401 | if (sched_verbose >= 9) | |
5402 | sel_print ("Updating fence insn from %i to %i\n", | |
5403 | INSN_UID (insn), INSN_UID (succ)); | |
5404 | FENCE_INSN (fence) = succ; | |
5405 | } | |
e1ab7874 | 5406 | blist_add (bnds_tailp, succ, ptr, BND_DC (bnd)); |
5407 | bnds_tailp = &BLIST_NEXT (*bnds_tailp); | |
5408 | } | |
48e1416a | 5409 | |
e1ab7874 | 5410 | blist_remove (bndsp); |
5411 | return bnds_tailp; | |
5412 | } | |
5413 | ||
5414 | /* Schedule EXPR_VLIW on BND. Return the insn emitted. */ | |
5415 | static insn_t | |
5416 | schedule_expr_on_boundary (bnd_t bnd, expr_t expr_vliw, int seqno) | |
5417 | { | |
5418 | av_set_t expr_seq; | |
5419 | expr_t c_expr = XALLOCA (expr_def); | |
5420 | insn_t place_to_insert; | |
5421 | insn_t insn; | |
de353418 | 5422 | bool should_move; |
e1ab7874 | 5423 | |
5424 | expr_seq = find_sequential_best_exprs (bnd, expr_vliw, true); | |
5425 | ||
5426 | /* In case of scheduling a jump skipping some other instructions, | |
48e1416a | 5427 | prepare CFG. After this, jump is at the boundary and can be |
e1ab7874 | 5428 | scheduled as usual insn by MOVE_OP. */ |
5429 | if (vinsn_cond_branch_p (EXPR_VINSN (expr_vliw))) | |
5430 | { | |
5431 | insn = EXPR_INSN_RTX (expr_vliw); | |
48e1416a | 5432 | |
e1ab7874 | 5433 | /* Speculative jumps are not handled. */ |
48e1416a | 5434 | if (insn != BND_TO (bnd) |
e1ab7874 | 5435 | && !sel_insn_is_speculation_check (insn)) |
5436 | move_cond_jump (insn, bnd); | |
5437 | } | |
5438 | ||
e1ab7874 | 5439 | /* Find a place for C_EXPR to schedule. */ |
5440 | place_to_insert = prepare_place_to_insert (bnd); | |
de353418 | 5441 | should_move = move_exprs_to_boundary (bnd, expr_vliw, expr_seq, c_expr); |
e1ab7874 | 5442 | clear_expr (c_expr); |
48e1416a | 5443 | |
5444 | /* Add the instruction. The corner case to care about is when | |
5445 | the expr_seq set has more than one expr, and we chose the one that | |
5446 | is not equal to expr_vliw. Then expr_vliw may be insn in stream, and | |
e1ab7874 | 5447 | we can't use it. Generate the new vinsn. */ |
5448 | if (INSN_IN_STREAM_P (EXPR_INSN_RTX (expr_vliw))) | |
5449 | { | |
5450 | vinsn_t vinsn_new; | |
48e1416a | 5451 | |
e1ab7874 | 5452 | vinsn_new = vinsn_copy (EXPR_VINSN (expr_vliw), false); |
5453 | change_vinsn_in_expr (expr_vliw, vinsn_new); | |
de353418 | 5454 | should_move = false; |
e1ab7874 | 5455 | } |
de353418 | 5456 | if (should_move) |
5457 | insn = sel_move_insn (expr_vliw, seqno, place_to_insert); | |
5458 | else | |
48e1416a | 5459 | insn = emit_insn_from_expr_after (expr_vliw, NULL, seqno, |
e1ab7874 | 5460 | place_to_insert); |
e1ab7874 | 5461 | |
5462 | /* Return the nops generated for preserving the data sets back | |
5463 | into the pool. */ | |
5464 | if (INSN_NOP_P (place_to_insert)) | |
9845d120 | 5465 | return_nop_to_pool (place_to_insert, !DEBUG_INSN_P (insn)); |
5466 | remove_temp_moveop_nops (!DEBUG_INSN_P (insn)); | |
e1ab7874 | 5467 | |
5468 | av_set_clear (&expr_seq); | |
48e1416a | 5469 | |
5470 | /* Save the scheduled expression so as to reset target availability if we | |
e1ab7874 | 5471 | meet it later on the same fence. */ |
5472 | if (EXPR_WAS_RENAMED (expr_vliw)) | |
5473 | vinsn_vec_add (&vec_target_unavailable_vinsns, INSN_EXPR (insn)); | |
5474 | ||
5475 | /* Check that the recent movement didn't destroy the loop | |
5476 | structure. */ | |
5477 | gcc_assert (!pipelining_p | |
5478 | || current_loop_nest == NULL | |
5479 | || loop_latch_edge (current_loop_nest)); | |
5480 | return insn; | |
5481 | } | |
5482 | ||
5483 | /* Stall for N cycles on FENCE. */ | |
5484 | static void | |
5485 | stall_for_cycles (fence_t fence, int n) | |
5486 | { | |
5487 | int could_more; | |
48e1416a | 5488 | |
e1ab7874 | 5489 | could_more = n > 1 || FENCE_ISSUED_INSNS (fence) < issue_rate; |
5490 | while (n--) | |
5491 | advance_one_cycle (fence); | |
5492 | if (could_more) | |
5493 | FENCE_AFTER_STALL_P (fence) = 1; | |
5494 | } | |
5495 | ||
48e1416a | 5496 | /* Gather a parallel group of insns at FENCE and assign their seqno |
5497 | to SEQNO. All scheduled insns are gathered in SCHEDULED_INSNS_TAILPP | |
e1ab7874 | 5498 | list for later recalculation of seqnos. */ |
5499 | static void | |
5500 | fill_insns (fence_t fence, int seqno, ilist_t **scheduled_insns_tailpp) | |
5501 | { | |
5502 | blist_t bnds = NULL, *bnds_tailp; | |
5503 | av_set_t av_vliw = NULL; | |
5504 | insn_t insn = FENCE_INSN (fence); | |
5505 | ||
5506 | if (sched_verbose >= 2) | |
48e1416a | 5507 | sel_print ("Starting fill_insns for insn %d, cycle %d\n", |
e1ab7874 | 5508 | INSN_UID (insn), FENCE_CYCLE (fence)); |
5509 | ||
5510 | blist_add (&bnds, insn, NULL, FENCE_DC (fence)); | |
5511 | bnds_tailp = &BLIST_NEXT (bnds); | |
5512 | set_target_context (FENCE_TC (fence)); | |
abb9c563 | 5513 | can_issue_more = FENCE_ISSUE_MORE (fence); |
e1ab7874 | 5514 | target_bb = INSN_BB (insn); |
5515 | ||
5516 | /* Do while we can add any operation to the current group. */ | |
5517 | do | |
5518 | { | |
5519 | blist_t *bnds_tailp1, *bndsp; | |
5520 | expr_t expr_vliw; | |
4055a556 | 5521 | int need_stall = false; |
08b41748 | 5522 | int was_stall = 0, scheduled_insns = 0; |
e1ab7874 | 5523 | int max_insns = pipelining_p ? issue_rate : 2 * issue_rate; |
5524 | int max_stall = pipelining_p ? 1 : 3; | |
9845d120 | 5525 | bool last_insn_was_debug = false; |
5526 | bool was_debug_bb_end_p = false; | |
5527 | ||
e1ab7874 | 5528 | compute_av_set_on_boundaries (fence, bnds, &av_vliw); |
5529 | remove_insns_that_need_bookkeeping (fence, &av_vliw); | |
5530 | remove_insns_for_debug (bnds, &av_vliw); | |
5531 | ||
5532 | /* Return early if we have nothing to schedule. */ | |
5533 | if (av_vliw == NULL) | |
5534 | break; | |
5535 | ||
5536 | /* Choose the best expression and, if needed, destination register | |
5537 | for it. */ | |
5538 | do | |
5539 | { | |
5540 | expr_vliw = find_best_expr (&av_vliw, bnds, fence, &need_stall); | |
08b41748 | 5541 | if (! expr_vliw && need_stall) |
e1ab7874 | 5542 | { |
5543 | /* All expressions required a stall. Do not recompute av sets | |
5544 | as we'll get the same answer (modulo the insns between | |
5545 | the fence and its boundary, which will not be available for | |
08b41748 | 5546 | pipelining). |
5547 | If we are going to stall for too long, break to recompute av | |
e1ab7874 | 5548 | sets and bring more insns for pipelining. */ |
08b41748 | 5549 | was_stall++; |
e1ab7874 | 5550 | if (need_stall <= 3) |
5551 | stall_for_cycles (fence, need_stall); | |
5552 | else | |
5553 | { | |
5554 | stall_for_cycles (fence, 1); | |
5555 | break; | |
5556 | } | |
5557 | } | |
5558 | } | |
5559 | while (! expr_vliw && need_stall); | |
48e1416a | 5560 | |
e1ab7874 | 5561 | /* Now either we've selected expr_vliw or we have nothing to schedule. */ |
5562 | if (!expr_vliw) | |
5563 | { | |
5564 | av_set_clear (&av_vliw); | |
5565 | break; | |
5566 | } | |
5567 | ||
5568 | bndsp = &bnds; | |
5569 | bnds_tailp1 = bnds_tailp; | |
5570 | ||
5571 | do | |
48e1416a | 5572 | /* This code will be executed only once until we'd have several |
e1ab7874 | 5573 | boundaries per fence. */ |
5574 | { | |
5575 | bnd_t bnd = BLIST_BND (*bndsp); | |
5576 | ||
5577 | if (!av_set_is_in_p (BND_AV1 (bnd), EXPR_VINSN (expr_vliw))) | |
5578 | { | |
5579 | bndsp = &BLIST_NEXT (*bndsp); | |
5580 | continue; | |
5581 | } | |
48e1416a | 5582 | |
e1ab7874 | 5583 | insn = schedule_expr_on_boundary (bnd, expr_vliw, seqno); |
9845d120 | 5584 | last_insn_was_debug = DEBUG_INSN_P (insn); |
5585 | if (last_insn_was_debug) | |
5586 | was_debug_bb_end_p = (insn == BND_TO (bnd) && sel_bb_end_p (insn)); | |
e1ab7874 | 5587 | update_fence_and_insn (fence, insn, need_stall); |
9845d120 | 5588 | bnds_tailp = update_boundaries (fence, bnd, insn, bndsp, bnds_tailp); |
e1ab7874 | 5589 | |
5590 | /* Add insn to the list of scheduled on this cycle instructions. */ | |
5591 | ilist_add (*scheduled_insns_tailpp, insn); | |
5592 | *scheduled_insns_tailpp = &ILIST_NEXT (**scheduled_insns_tailpp); | |
5593 | } | |
5594 | while (*bndsp != *bnds_tailp1); | |
5595 | ||
5596 | av_set_clear (&av_vliw); | |
9845d120 | 5597 | if (!last_insn_was_debug) |
5598 | scheduled_insns++; | |
e1ab7874 | 5599 | |
5600 | /* We currently support information about candidate blocks only for
5601 | one 'target_bb' block. Hence we can't schedule after a jump insn,
5602 | as this would create two boundaries and, hence, the necessity to handle
5603 | information for two or more blocks concurrently. */
9845d120 | 5604 | if ((last_insn_was_debug ? was_debug_bb_end_p : sel_bb_end_p (insn)) |
48e1416a | 5605 | || (was_stall |
5606 | && (was_stall >= max_stall | |
e1ab7874 | 5607 | || scheduled_insns >= max_insns))) |
5608 | break; | |
5609 | } | |
5610 | while (bnds); | |
5611 | ||
5612 | gcc_assert (!FENCE_BNDS (fence)); | |
48e1416a | 5613 | |
e1ab7874 | 5614 | /* Update boundaries of the FENCE. */ |
5615 | while (bnds) | |
5616 | { | |
5617 | ilist_t ptr = BND_PTR (BLIST_BND (bnds)); | |
5618 | ||
5619 | if (ptr) | |
5620 | { | |
5621 | insn = ILIST_INSN (ptr); | |
5622 | ||
5623 | if (!ilist_is_in_p (FENCE_BNDS (fence), insn)) | |
5624 | ilist_add (&FENCE_BNDS (fence), insn); | |
5625 | } | |
48e1416a | 5626 | |
e1ab7874 | 5627 | blist_remove (&bnds); |
5628 | } | |
5629 | ||
5630 | /* Update target context on the fence. */ | |
5631 | reset_target_context (FENCE_TC (fence), false); | |
5632 | } | |
5633 | ||
5634 | /* All exprs in ORIG_OPS must have the same destination register or memory. | |
5635 | Return that destination. */ | |
5636 | static rtx | |
5637 | get_dest_from_orig_ops (av_set_t orig_ops) | |
5638 | { | |
5639 | rtx dest = NULL_RTX; | |
5640 | av_set_iterator av_it; | |
5641 | expr_t expr; | |
5642 | bool first_p = true; | |
5643 | ||
5644 | FOR_EACH_EXPR (expr, av_it, orig_ops) | |
5645 | { | |
5646 | rtx x = EXPR_LHS (expr); | |
5647 | ||
5648 | if (first_p) | |
5649 | { | |
5650 | first_p = false; | |
5651 | dest = x; | |
5652 | } | |
5653 | else | |
5654 | gcc_assert (dest == x | |
5655 | || (dest != NULL_RTX && x != NULL_RTX | |
5656 | && rtx_equal_p (dest, x))); | |
5657 | } | |
5658 | ||
5659 | return dest; | |
5660 | } | |
5661 | ||
5662 | /* Update data sets for the bookkeeping block and record those expressions
5663 | which are no longer available after inserting this bookkeeping. */
5664 | static void | |
5665 | update_and_record_unavailable_insns (basic_block book_block) | |
5666 | { | |
5667 | av_set_iterator i; | |
5668 | av_set_t old_av_set = NULL; | |
5669 | expr_t cur_expr; | |
2f3c9801 | 5670 | rtx_insn *bb_end = sel_bb_end (book_block); |
e1ab7874 | 5671 | |
48e1416a | 5672 | /* First, get correct liveness in the bookkeeping block. The problem is |
e1ab7874 | 5673 | the range between the bookkeeping insn and the end of the block. */
5674 | update_liveness_on_insn (bb_end); | |
5675 | if (control_flow_insn_p (bb_end)) | |
5676 | update_liveness_on_insn (PREV_INSN (bb_end)); | |
5677 | ||
5678 | /* If there's valid av_set on BOOK_BLOCK, then there might exist another | |
5679 | fence above, where we may choose to schedule an insn which is | |
5680 | actually blocked from moving up with the bookkeeping we create here. */ | |
5681 | if (AV_SET_VALID_P (sel_bb_head (book_block))) | |
5682 | { | |
5683 | old_av_set = av_set_copy (BB_AV_SET (book_block)); | |
5684 | update_data_sets (sel_bb_head (book_block)); | |
48e1416a | 5685 | |
e1ab7874 | 5686 | /* Traverse all the expressions in the old av_set and check whether |
5687 | CUR_EXPR is in new AV_SET. */ | |
5688 | FOR_EACH_EXPR (cur_expr, i, old_av_set) | |
5689 | { | |
48e1416a | 5690 | expr_t new_expr = av_set_lookup (BB_AV_SET (book_block), |
e1ab7874 | 5691 | EXPR_VINSN (cur_expr)); |
5692 | ||
48e1416a | 5693 | if (! new_expr |
5694 | /* In this case, we can just turn off the E_T_A bit, but we can't | |
e1ab7874 | 5695 | represent this information with the current vector. */ |
48e1416a | 5696 | || EXPR_TARGET_AVAILABLE (new_expr) |
e1ab7874 | 5697 | != EXPR_TARGET_AVAILABLE (cur_expr)) |
5698 | /* Unfortunately, the below code could be also fired up on | |
846800d7 | 5699 | separable insns, e.g. when moving insns through the new |
5700 | speculation check as in PR 53701. */ | |
e1ab7874 | 5701 | vinsn_vec_add (&vec_bookkeeping_blocked_vinsns, cur_expr); |
5702 | } | |
5703 | ||
5704 | av_set_clear (&old_av_set); | |
5705 | } | |
5706 | } | |
5707 | ||
48e1416a | 5708 | /* The main effect of this function is that sparams->c_expr is merged |
e1ab7874 | 5709 | with (or copied to) lparams->c_expr_merged. If there's only one successor, |
5710 | we avoid merging anything by copying sparams->c_expr to lparams->c_expr_merged. | |
48e1416a | 5711 | lparams->c_expr_merged is copied back to sparams->c_expr after all |
5712 | successors have been traversed. lparams->c_expr_local is an expr allocated
5713 | on the stack in the caller function, and is used if there is more than one
5714 | successor. | |
e1ab7874 | 5715 | |
5716 | SUCC is one of the SUCCS_NORMAL successors of INSN, | |
5717 | MOVEOP_DRV_CALL_RES is the result of calling code_motion_path_driver on succ,
5718 | LPARAMS and STATIC_PARAMS contain the parameters described above. */ | |
5719 | static void | |
48e1416a | 5720 | move_op_merge_succs (insn_t insn ATTRIBUTE_UNUSED, |
5721 | insn_t succ ATTRIBUTE_UNUSED, | |
5722 | int moveop_drv_call_res, | |
e1ab7874 | 5723 | cmpd_local_params_p lparams, void *static_params) |
5724 | { | |
5725 | moveop_static_params_p sparams = (moveop_static_params_p) static_params; | |
5726 | ||
5727 | /* Nothing to do, if original expr wasn't found below. */ | |
5728 | if (moveop_drv_call_res != 1) | |
5729 | return; | |
5730 | ||
5731 | /* If this is the first successor. */
5732 | if (!lparams->c_expr_merged) | |
5733 | { | |
5734 | lparams->c_expr_merged = sparams->c_expr; | |
5735 | sparams->c_expr = lparams->c_expr_local; | |
5736 | } | |
5737 | else | |
5738 | { | |
5739 | /* We must merge all found expressions to get reasonable | |
5740 | EXPR_SPEC_DONE_DS for the resulting insn. If we don't | |
5741 | do so, then we can first find the expr with epsilon
5742 | speculation success probability and only then the one with a
5743 | good probability. As a result the insn will get epsilon
5744 | probability and will never be scheduled because of | |
5745 | weakness_cutoff in find_best_expr. | |
5746 | ||
48e1416a | 5747 | We call merge_expr_data here instead of merge_expr |
e1ab7874 | 5748 | because due to speculation C_EXPR and X may have the |
5749 | same insns with different speculation types. And as of | |
48e1416a | 5750 | now such insns are considered non-equal. |
e1ab7874 | 5751 | |
48e1416a | 5752 | However, EXPR_SCHED_TIMES is different -- we must get |
5753 | SCHED_TIMES from a real insn, not a bookkeeping copy. | |
e1ab7874 | 5754 | We force this here. Instead, we may consider merging |
48e1416a | 5755 | SCHED_TIMES to the maximum instead of minimum in the |
e1ab7874 | 5756 | below function. */ |
5757 | int old_times = EXPR_SCHED_TIMES (lparams->c_expr_merged); | |
5758 | ||
5759 | merge_expr_data (lparams->c_expr_merged, sparams->c_expr, NULL); | |
5760 | if (EXPR_SCHED_TIMES (sparams->c_expr) == 0) | |
5761 | EXPR_SCHED_TIMES (lparams->c_expr_merged) = old_times; | |
5762 | ||
5763 | clear_expr (sparams->c_expr); | |
5764 | } | |
5765 | } | |
5766 | ||
5767 | /* Add used regs for the successor SUCC into SPARAMS->USED_REGS. | |
5768 | ||
5769 | SUCC is one of the SUCCS_NORMAL successors of INSN, | |
5770 | MOVEOP_DRV_CALL_RES is the result of calling code_motion_path_driver on succ, or 0
5771 | if SUCC is one of SUCCS_BACK or SUCCS_OUT. | |
5772 | STATIC_PARAMS contains the USED_REGS set. */
5773 | static void | |
48e1416a | 5774 | fur_merge_succs (insn_t insn ATTRIBUTE_UNUSED, insn_t succ, |
5775 | int moveop_drv_call_res, | |
5776 | cmpd_local_params_p lparams ATTRIBUTE_UNUSED, | |
e1ab7874 | 5777 | void *static_params) |
5778 | { | |
5779 | regset succ_live; | |
5780 | fur_static_params_p sparams = (fur_static_params_p) static_params; | |
5781 | ||
5782 | /* Here we compute live regsets only for branches that do not lie | |
48e1416a | 5783 | on the code motion paths. These branches correspond to value |
e1ab7874 | 5784 | MOVEOP_DRV_CALL_RES==0 and include SUCCS_BACK and SUCCS_OUT, though |
5785 | for such branches code_motion_path_driver is not called. */ | |
5786 | if (moveop_drv_call_res != 0) | |
5787 | return; | |
5788 | ||
5789 | /* Mark all registers that do not meet the following condition: | |
5790 | (3) not live on the other path of any conditional branch | |
5791 | that is passed by the operation, in case original | |
5792 | operations are not present on both paths of the | |
5793 | conditional branch. */ | |
5794 | succ_live = compute_live (succ); | |
5795 | IOR_REG_SET (sparams->used_regs, succ_live); | |
5796 | } | |
5797 | ||
5798 | /* This function is called after the last successor. Copies LP->C_EXPR_MERGED | |
5799 | into SP->C_EXPR. */
5800 | static void | |
5801 | move_op_after_merge_succs (cmpd_local_params_p lp, void *sparams) | |
48e1416a | 5802 | { |
e1ab7874 | 5803 | moveop_static_params_p sp = (moveop_static_params_p) sparams; |
5804 | ||
5805 | sp->c_expr = lp->c_expr_merged; | |
5806 | } | |
5807 | ||
5808 | /* Track bookkeeping copies created, insns scheduled, and blocks for | |
5809 | rescheduling when INSN is found by move_op. */ | |
5810 | static void | |
71ce7f59 | 5811 | track_scheduled_insns_and_blocks (rtx_insn *insn) |
e1ab7874 | 5812 | { |
5813 | /* Even if this insn can be a copy that will be removed during the current move_op,
5814 | we still need to count it as an originator. */ | |
5815 | bitmap_set_bit (current_originators, INSN_UID (insn)); | |
5816 | ||
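  /* bitmap_clear_bit returns true when the bit was set, i.e. when INSN was
     recorded as a bookkeeping copy; the block marking and insn counting
     below happen only for insns that were not such copies.  */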
6ef9bbe0 | 5817 | if (!bitmap_clear_bit (current_copies, INSN_UID (insn))) |
e1ab7874 | 5818 | { |
5819 | /* Note that original block needs to be rescheduled, as we pulled an | |
5820 | instruction out of it. */ | |
5821 | if (INSN_SCHED_TIMES (insn) > 0) | |
5822 | bitmap_set_bit (blocks_to_reschedule, BLOCK_FOR_INSN (insn)->index); | |
9845d120 | 5823 | else if (INSN_UID (insn) < first_emitted_uid && !DEBUG_INSN_P (insn)) |
e1ab7874 | 5824 | num_insns_scheduled++; |
5825 | } | |
e1ab7874 | 5826 | |
5827 | /* For instructions we must immediately remove insn from the | |
5828 | stream, so subsequent update_data_sets () won't include this | |
5829 | insn into av_set. | |
5830 | For expr we must make insn look like "INSN_REG (insn) := c_expr". */ | |
5831 | if (INSN_UID (insn) > max_uid_before_move_op) | |
5832 | stat_bookkeeping_copies--; | |
5833 | } | |
5834 | ||
48e1416a | 5835 | /* Emit a register-register copy for INSN if needed. Return true if |
e1ab7874 | 5836 | emitted one. PARAMS is the move_op static parameters. */ |
5837 | static bool | |
2f3c9801 | 5838 | maybe_emit_renaming_copy (rtx_insn *insn, |
e1ab7874 | 5839 | moveop_static_params_p params) |
5840 | { | |
5841 | bool insn_emitted = false; | |
f7d03b30 | 5842 | rtx cur_reg; |
e1ab7874 | 5843 | |
f7d03b30 | 5844 | /* Bail out early when the expression cannot be renamed at all. */
5845 | if (!EXPR_SEPARABLE_P (params->c_expr)) | |
5846 | return false; | |
5847 | ||
5848 | cur_reg = expr_dest_reg (params->c_expr); | |
5849 | gcc_assert (cur_reg && params->dest && REG_P (params->dest)); | |
e1ab7874 | 5850 | |
5851 | /* If original operation has expr and the register chosen for | |
5852 | that expr is not original operation's dest reg, substitute | |
5853 | operation's right hand side with the register chosen. */ | |
f7d03b30 | 5854 | if (REGNO (params->dest) != REGNO (cur_reg)) |
e1ab7874 | 5855 | { |
5856 | insn_t reg_move_insn, reg_move_insn_rtx; | |
48e1416a | 5857 | |
5858 | reg_move_insn_rtx = create_insn_rtx_with_rhs (INSN_VINSN (insn), | |
e1ab7874 | 5859 | params->dest); |
48e1416a | 5860 | reg_move_insn = sel_gen_insn_from_rtx_after (reg_move_insn_rtx, |
5861 | INSN_EXPR (insn), | |
5862 | INSN_SEQNO (insn), | |
e1ab7874 | 5863 | insn); |
5864 | EXPR_SPEC_DONE_DS (INSN_EXPR (reg_move_insn)) = 0; | |
5865 | replace_dest_with_reg_in_expr (params->c_expr, params->dest); | |
48e1416a | 5866 | |
e1ab7874 | 5867 | insn_emitted = true; |
5868 | params->was_renamed = true; | |
5869 | } | |
48e1416a | 5870 | |
e1ab7874 | 5871 | return insn_emitted; |
5872 | } | |
5873 | ||
48e1416a | 5874 | /* Emit a speculative check for INSN speculated as EXPR if needed. |
5875 | Return true if we've emitted one. PARAMS is the move_op static | |
e1ab7874 | 5876 | parameters. */ |
5877 | static bool | |
2f3c9801 | 5878 | maybe_emit_speculative_check (rtx_insn *insn, expr_t expr, |
e1ab7874 | 5879 | moveop_static_params_p params) |
5880 | { | |
5881 | bool insn_emitted = false; | |
5882 | insn_t x; | |
5883 | ds_t check_ds; | |
5884 | ||
5885 | check_ds = get_spec_check_type_for_insn (insn, expr); | |
5886 | if (check_ds != 0) | |
5887 | { | |
5888 | /* A speculation check should be inserted. */ | |
5889 | x = create_speculation_check (params->c_expr, check_ds, insn); | |
5890 | insn_emitted = true; | |
5891 | } | |
5892 | else | |
5893 | { | |
5894 | EXPR_SPEC_DONE_DS (INSN_EXPR (insn)) = 0; | |
5895 | x = insn; | |
5896 | } | |
48e1416a | 5897 | |
e1ab7874 | 5898 | gcc_assert (EXPR_SPEC_DONE_DS (INSN_EXPR (x)) == 0 |
5899 | && EXPR_SPEC_TO_CHECK_DS (INSN_EXPR (x)) == 0); | |
5900 | return insn_emitted; | |
5901 | } | |
5902 | ||
48e1416a | 5903 | /* Handle transformations that leave an insn in place of the original
5904 | insn, such as renaming/speculation. Return true if such a
e1ab7874 | 5905 | transformation actually happened, and we have emitted this insn. */
5906 | static bool | |
2f3c9801 | 5907 | handle_emitting_transformations (rtx_insn *insn, expr_t expr, |
e1ab7874 | 5908 | moveop_static_params_p params) |
5909 | { | |
5910 | bool insn_emitted = false; | |
5911 | ||
5912 | insn_emitted = maybe_emit_renaming_copy (insn, params); | |
5913 | insn_emitted |= maybe_emit_speculative_check (insn, expr, params); | |
5914 | ||
5915 | return insn_emitted; | |
48e1416a | 5916 | } |
e1ab7874 | 5917 | |
9845d120 | 5918 | /* If INSN is the only insn in the basic block (not counting JUMP, |
5919 | which may be a jump to the next insn, and DEBUG_INSNs), we want to
5920 | leave a NOP there till the return to fill_insns. */ | |
5921 | ||
5922 | static bool | |
ff88d074 | 5923 | need_nop_to_preserve_insn_bb (rtx_insn *insn) |
e1ab7874 | 5924 | { |
9845d120 | 5925 | insn_t bb_head, bb_end, bb_next, in_next; |
e1ab7874 | 5926 | basic_block bb = BLOCK_FOR_INSN (insn); |
5927 | ||
e1ab7874 | 5928 | bb_head = sel_bb_head (bb); |
5929 | bb_end = sel_bb_end (bb); | |
e1ab7874 | 5930 | |
9845d120 | 5931 | if (bb_head == bb_end) |
5932 | return true; | |
5933 | ||
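  /* Skip DEBUG_INSNs when looking for the first and last real insns
     of the block.  */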
5934 | while (bb_head != bb_end && DEBUG_INSN_P (bb_head)) | |
5935 | bb_head = NEXT_INSN (bb_head); | |
5936 | ||
5937 | if (bb_head == bb_end) | |
5938 | return true; | |
5939 | ||
5940 | while (bb_head != bb_end && DEBUG_INSN_P (bb_end)) | |
5941 | bb_end = PREV_INSN (bb_end); | |
5942 | ||
5943 | if (bb_head == bb_end) | |
5944 | return true; | |
5945 | ||
5946 | bb_next = NEXT_INSN (bb_head); | |
5947 | while (bb_next != bb_end && DEBUG_INSN_P (bb_next)) | |
5948 | bb_next = NEXT_INSN (bb_next); | |
5949 | ||
5950 | if (bb_next == bb_end && JUMP_P (bb_end)) | |
5951 | return true; | |
5952 | ||
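  /* A NOP is also needed when the next non-debug insn after INSN already
     belongs to the current fence.  */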
5953 | in_next = NEXT_INSN (insn); | |
5954 | while (DEBUG_INSN_P (in_next)) | |
5955 | in_next = NEXT_INSN (in_next); | |
5956 | ||
5957 | if (IN_CURRENT_FENCE_P (in_next)) | |
5958 | return true; | |
5959 | ||
5960 | return false; | |
5961 | } | |
5962 | ||
5963 | /* Remove INSN from the stream. When ONLY_DISCONNECT is true, its data
5964 | is not removed but reused when INSN is re-emitted. */ | |
5965 | static void | |
2f3c9801 | 5966 | remove_insn_from_stream (rtx_insn *insn, bool only_disconnect) |
9845d120 | 5967 | { |
e1ab7874 | 5968 | /* If there's only one insn in the BB, make sure that a nop is |
5969 | inserted into it, so the basic block won't disappear when we'll | |
5970 | delete INSN below with sel_remove_insn. It should also survive | |
48e1416a | 5971 | till the return to fill_insns. */ |
9845d120 | 5972 | if (need_nop_to_preserve_insn_bb (insn)) |
e1ab7874 | 5973 | { |
9845d120 | 5974 | insn_t nop = get_nop_from_pool (insn); |
e1ab7874 | 5975 | gcc_assert (INSN_NOP_P (nop)); |
f1f41a6c | 5976 | vec_temp_moveop_nops.safe_push (nop); |
e1ab7874 | 5977 | } |
5978 | ||
5979 | sel_remove_insn (insn, only_disconnect, false); | |
5980 | } | |
5981 | ||
5982 | /* This function is called when the original expr is found.
48e1416a | 5983 | INSN - current insn traversed, EXPR - the corresponding expr found.
e1ab7874 | 5984 | LPARAMS are the local parameters of the code motion driver, STATIC_PARAMS
5985 | are the static parameters of move_op. */
5986 | static void | |
48e1416a | 5987 | move_op_orig_expr_found (insn_t insn, expr_t expr, |
5988 | cmpd_local_params_p lparams ATTRIBUTE_UNUSED, | |
e1ab7874 | 5989 | void *static_params) |
5990 | { | |
d5897457 | 5991 | bool only_disconnect; |
e1ab7874 | 5992 | moveop_static_params_p params = (moveop_static_params_p) static_params; |
48e1416a | 5993 | |
e1ab7874 | 5994 | copy_expr_onside (params->c_expr, INSN_EXPR (insn)); |
5995 | track_scheduled_insns_and_blocks (insn); | |
d5897457 | 5996 | handle_emitting_transformations (insn, expr, params); |
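  /* The instance whose uid matches the expr being scheduled is only
     disconnected rather than deleted, so that its data can be reused when
     the insn is re-emitted (see remove_insn_from_stream).  */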
5997 | only_disconnect = params->uid == INSN_UID (insn); | |
de353418 | 5998 | |
5999 | /* Mark that we've disconnected an insn. */ | |
6000 | if (only_disconnect) | |
6001 | params->uid = -1; | |
e1ab7874 | 6002 | remove_insn_from_stream (insn, only_disconnect); |
6003 | } | |
6004 | ||
6005 | /* The function is called when the original expr is found.
6006 | INSN - current insn traversed, EXPR - the corresponding expr found, | |
6007 | crosses_call and original_insns in STATIC_PARAMS are updated. */ | |
6008 | static void | |
6009 | fur_orig_expr_found (insn_t insn, expr_t expr ATTRIBUTE_UNUSED, | |
6010 | cmpd_local_params_p lparams ATTRIBUTE_UNUSED, | |
6011 | void *static_params) | |
6012 | { | |
6013 | fur_static_params_p params = (fur_static_params_p) static_params; | |
6014 | regset tmp; | |
6015 | ||
6016 | if (CALL_P (insn)) | |
6017 | params->crosses_call = true; | |
6018 | ||
6019 | def_list_add (params->original_insns, insn, params->crosses_call); | |
6020 | ||
6021 | /* Mark the registers that do not meet the following condition: | |
48e1416a | 6022 | (2) not among the live registers of the point |
6023 | immediately following the first original operation on | |
e1ab7874 | 6024 | a given downward path, except for the original target |
6025 | register of the operation. */ | |
6026 | tmp = get_clear_regset_from_pool (); | |
6027 | compute_live_below_insn (insn, tmp); | |
6028 | AND_COMPL_REG_SET (tmp, INSN_REG_SETS (insn)); | |
6029 | AND_COMPL_REG_SET (tmp, INSN_REG_CLOBBERS (insn)); | |
6030 | IOR_REG_SET (params->used_regs, tmp); | |
6031 | return_regset_to_pool (tmp); | |
6032 | ||
6033 | /* (*1) We need to add to USED_REGS registers that are read by | |
6034 | INSN's lhs. This may lead to choosing wrong src register. | |
6035 | E.g. (scheduling const expr enabled): | |
6036 | ||
6037 | 429: ax=0x0 <- Can't use AX for this expr (0x0) | |
6038 | 433: dx=[bp-0x18] | |
6039 | 427: [ax+dx+0x1]=ax | |
6040 | REG_DEAD: ax | |
6041 | 168: di=dx | |
6042 | REG_DEAD: dx | |
6043 | */ | |
48e1416a | 6044 | /* FIXME: see comment above and enable MEM_P |
e1ab7874 | 6045 | in vinsn_separable_p. */ |
6046 | gcc_assert (!VINSN_SEPARABLE_P (INSN_VINSN (insn)) | |
6047 | || !MEM_P (INSN_LHS (insn))); | |
6048 | } | |
6049 | ||
6050 | /* This function is called on the ascending pass, before returning from | |
6051 | the current basic block. */
6052 | static void | |
48e1416a | 6053 | move_op_at_first_insn (insn_t insn, cmpd_local_params_p lparams, |
e1ab7874 | 6054 | void *static_params) |
6055 | { | |
6056 | moveop_static_params_p sparams = (moveop_static_params_p) static_params; | |
6057 | basic_block book_block = NULL; | |
6058 | ||
48e1416a | 6059 | /* When we have removed the boundary insn for scheduling, which also |
e1ab7874 | 6060 | happened to be the end insn in its bb, we don't need to update sets. */ |
48e1416a | 6061 | if (!lparams->removed_last_insn |
e1ab7874 | 6062 | && lparams->e1 |
6063 | && sel_bb_head_p (insn)) | |
6064 | { | |
6065 | /* We should generate bookkeeping code only if we are not at the | |
6066 | top level of the move_op. */ | |
6067 | if (sel_num_cfg_preds_gt_1 (insn)) | |
6068 | book_block = generate_bookkeeping_insn (sparams->c_expr, | |
6069 | lparams->e1, lparams->e2); | |
6070 | /* Update data sets for the current insn. */ | |
6071 | update_data_sets (insn); | |
6072 | } | |
48e1416a | 6073 | |
e1ab7874 | 6074 | /* If bookkeeping code was inserted, we need to update the av set of the basic
48e1416a | 6075 | block that received the bookkeeping. After generation of the bookkeeping insn,
e1ab7874 | 6076 | the bookkeeping block does not contain a valid av set because we are not following
48e1416a | 6077 | the original algorithm in every detail with regard to e.g. renaming
e1ab7874 | 6078 | simple reg-reg copies. Consider this example:
48e1416a | 6079 | |
e1ab7874 | 6080 | bookkeeping block scheduling fence |
6081 | \ / | |
6082 | \ join / | |
6083 | ---------- | |
6084 | | | | |
6085 | ---------- | |
6086 | / \ | |
6087 | / \ | |
6088 | r1 := r2 r1 := r3 | |
6089 | ||
48e1416a | 6090 | We try to schedule insn "r1 := r3" on the current |
e1ab7874 | 6091 | scheduling fence. Also, note that the av set of the bookkeeping block
6092 | contains both insns "r1 := r2" and "r1 := r3". When the insn has
6093 | been scheduled, the CFG is as follows: | |
6094 | ||
6095 | r1 := r3 r1 := r3 | |
6096 | bookkeeping block scheduling fence | |
6097 | \ / | |
6098 | \ join / | |
6099 | ---------- | |
6100 | | | | |
6101 | ---------- | |
6102 | / \ | |
6103 | / \ | |
6104 | r1 := r2 | |
6105 | ||
6106 | Here, insn "r1 := r3" was scheduled at the current scheduling point | |
6107 | and bookkeeping code was generated at the bookkeeping block. This
6108 | way insn "r1 := r2" is no longer available as a whole instruction | |
6109 | (but only as expr) ahead of insn "r1 := r3" in bookkeeping block. | |
48e1416a | 6110 | This situation is handled by calling update_data_sets. |
e1ab7874 | 6111 | |
6112 | Since update_data_sets is called only on the bookkeeping block, and | |
48e1416a | 6113 | it may also have predecessors with av_sets containing instructions that
e1ab7874 | 6114 | are no longer available, we save all such expressions that become
6115 | unavailable during the data sets update on the bookkeeping block in
48e1416a | 6116 | VEC_BOOKKEEPING_BLOCKED_VINSNS. Later we avoid selecting such |
6117 | expressions for scheduling. This allows us to avoid recomputation of | |
e1ab7874 | 6118 | av_sets outside the code motion path. */ |
48e1416a | 6119 | |
e1ab7874 | 6120 | if (book_block) |
6121 | update_and_record_unavailable_insns (book_block); | |
6122 | ||
6123 | /* If INSN was previously marked for deletion, it's time to do it. */ | |
6124 | if (lparams->removed_last_insn) | |
6125 | insn = PREV_INSN (insn); | |
48e1416a | 6126 | |
e1ab7874 | 6127 | /* Do not tidy control flow at the topmost moveop, as we can erroneously |
6128 | kill a block with a single nop in which the insn should be emitted. */ | |
6129 | if (lparams->e1) | |
6130 | tidy_control_flow (BLOCK_FOR_INSN (insn), true); | |
6131 | } | |
6132 | ||
6133 | /* This function is called on the ascending pass, before returning from the | |
6134 | current basic block. */ | |
6135 | static void | |
48e1416a | 6136 | fur_at_first_insn (insn_t insn, |
6137 | cmpd_local_params_p lparams ATTRIBUTE_UNUSED, | |
e1ab7874 | 6138 | void *static_params ATTRIBUTE_UNUSED) |
6139 | { | |
6140 | gcc_assert (!sel_bb_head_p (insn) || AV_SET_VALID_P (insn) | |
6141 | || AV_LEVEL (insn) == -1); | |
6142 | } | |
6143 | ||
6144 | /* Called on the backward stage of recursion to call moveup_expr for insn | |
6145 | and sparams->c_expr. */ | |
6146 | static void | |
6147 | move_op_ascend (insn_t insn, void *static_params) | |
6148 | { | |
6149 | enum MOVEUP_EXPR_CODE res; | |
6150 | moveop_static_params_p sparams = (moveop_static_params_p) static_params; | |
6151 | ||
6152 | if (! INSN_NOP_P (insn)) | |
6153 | { | |
6154 | res = moveup_expr_cached (sparams->c_expr, insn, false); | |
6155 | gcc_assert (res != MOVEUP_EXPR_NULL); | |
6156 | } | |
6157 | ||
6158 | /* Update liveness for this insn as it was invalidated. */ | |
6159 | update_liveness_on_insn (insn); | |
6160 | } | |
6161 | ||
48e1416a | 6162 | /* This function is called on entry to the basic block.
6163 | Returns TRUE if this block has already been visited and
e1ab7874 | 6164 | code_motion_path_driver should return 1, FALSE otherwise. */ |
6165 | static int | |
48e1416a | 6166 | fur_on_enter (insn_t insn ATTRIBUTE_UNUSED, cmpd_local_params_p local_params, |
e1ab7874 | 6167 | void *static_params, bool visited_p) |
6168 | { | |
6169 | fur_static_params_p sparams = (fur_static_params_p) static_params; | |
6170 | ||
6171 | if (visited_p) | |
6172 | { | |
6173 | /* If we have found something below this block, there should be at | |
6174 | least one insn in ORIGINAL_INSNS. */ | |
6175 | gcc_assert (*sparams->original_insns); | |
6176 | ||
6177 | /* Adjust CROSSES_CALL, since we may have come to this block along | |
6178 | a different path. */
6179 | DEF_LIST_DEF (*sparams->original_insns)->crosses_call | |
6180 | |= sparams->crosses_call; | |
6181 | } | |
6182 | else | |
6183 | local_params->old_original_insns = *sparams->original_insns; | |
6184 | ||
6185 | return 1; | |
6186 | } | |
6187 | ||
6188 | /* Same as above but for move_op. */ | |
6189 | static int | |
48e1416a | 6190 | move_op_on_enter (insn_t insn ATTRIBUTE_UNUSED, |
6191 | cmpd_local_params_p local_params ATTRIBUTE_UNUSED, | |
e1ab7874 | 6192 | void *static_params ATTRIBUTE_UNUSED, bool visited_p) |
6193 | { | |
6194 | if (visited_p) | |
6195 | return -1; | |
6196 | return 1; | |
6197 | } | |
6198 | ||
48e1416a | 6199 | /* This function is called while descending the current basic block if the current
e1ab7874 | 6200 | insn is not the original EXPR we're searching for.
6201 | ||
48e1416a | 6202 | Return value: FALSE, if code_motion_path_driver should perform a local |
e1ab7874 | 6203 | cleanup and return 0 itself; |
6204 | TRUE, if code_motion_path_driver should continue. */ | |
6205 | static bool | |
6206 | move_op_orig_expr_not_found (insn_t insn, av_set_t orig_ops ATTRIBUTE_UNUSED, | |
6207 | void *static_params) | |
6208 | { | |
6209 | moveop_static_params_p sparams = (moveop_static_params_p) static_params; | |
6210 | ||
6211 | #ifdef ENABLE_CHECKING | |
6212 | sparams->failed_insn = insn; | |
6213 | #endif | |
6214 | ||
6215 | /* If we're scheduling separate expr, in order to generate correct code | |
48e1416a | 6216 | we need to stop the search at bookkeeping code generated with the |
e1ab7874 | 6217 | same destination register or memory. */ |
6218 | if (lhs_of_insn_equals_to_dest_p (insn, sparams->dest)) | |
6219 | return false; | |
6220 | return true; | |
6221 | } | |
6222 | ||
48e1416a | 6223 | /* This function is called while descending the current basic block if the current
e1ab7874 | 6224 | insn is not the original EXPR we're searching for.
6225 | ||
6226 | Return value: TRUE (code_motion_path_driver should continue). */ | |
6227 | static bool | |
6228 | fur_orig_expr_not_found (insn_t insn, av_set_t orig_ops, void *static_params) | |
6229 | { | |
6230 | bool mutexed; | |
6231 | expr_t r; | |
6232 | av_set_iterator avi; | |
6233 | fur_static_params_p sparams = (fur_static_params_p) static_params; | |
6234 | ||
6235 | if (CALL_P (insn)) | |
6236 | sparams->crosses_call = true; | |
9845d120 | 6237 | else if (DEBUG_INSN_P (insn)) |
6238 | return true; | |
e1ab7874 | 6239 | |
6240 | /* If the current insn we are looking at cannot be executed together
6241 | with the original insn, then we can skip it safely.
6242 | ||
6243 | Example: ORIG_OPS = { (p6) r14 = sign_extend (r15); } | |
6244 | INSN = (!p6) r14 = r14 + 1; | |
6245 | ||
6246 | Here we can schedule ORIG_OP with lhs = r14, though only | |
6247 | looking at the set of used and set registers of INSN we must | |
6248 | forbid it. So, add set/used in INSN registers to the | |
6249 | untouchable set only if there is an insn in ORIG_OPS that can | |
6250 | affect INSN. */ | |
6251 | mutexed = true; | |
6252 | FOR_EACH_EXPR (r, avi, orig_ops) | |
6253 | if (!sched_insns_conditions_mutex_p (insn, EXPR_INSN_RTX (r))) | |
6254 | { | |
6255 | mutexed = false; | |
6256 | break; | |
6257 | } | |
6258 | ||
6259 | /* Mark all registers that do not meet the following condition: | |
6260 | (1) Not set or read on any path from xi to an instance of the | |
6261 | original operation. */ | |
6262 | if (!mutexed) | |
6263 | { | |
6264 | IOR_REG_SET (sparams->used_regs, INSN_REG_SETS (insn)); | |
6265 | IOR_REG_SET (sparams->used_regs, INSN_REG_USES (insn)); | |
6266 | IOR_REG_SET (sparams->used_regs, INSN_REG_CLOBBERS (insn)); | |
6267 | } | |
6268 | ||
6269 | return true; | |
6270 | } | |
6271 | ||
6272 | /* Hooks and data to perform move_op operations with code_motion_path_driver. */ | |
6273 | struct code_motion_path_driver_info_def move_op_hooks = { | |
6274 | move_op_on_enter, | |
6275 | move_op_orig_expr_found, | |
6276 | move_op_orig_expr_not_found, | |
6277 | move_op_merge_succs, | |
6278 | move_op_after_merge_succs, | |
6279 | move_op_ascend, | |
6280 | move_op_at_first_insn, | |
6281 | SUCCS_NORMAL, | |
6282 | "move_op" | |
6283 | }; | |
6284 | ||
48e1416a | 6285 | /* Hooks and data to perform find_used_regs operations |
e1ab7874 | 6286 | with code_motion_path_driver. */ |
6287 | struct code_motion_path_driver_info_def fur_hooks = { | |
6288 | fur_on_enter, | |
6289 | fur_orig_expr_found, | |
6290 | fur_orig_expr_not_found, | |
6291 | fur_merge_succs, | |
6292 | NULL, /* fur_after_merge_succs */ | |
6293 | NULL, /* fur_ascend */ | |
6294 | fur_at_first_insn, | |
6295 | SUCCS_ALL, | |
6296 | "find_used_regs" | |
6297 | }; | |
6298 | ||
6299 | /* Traverse all successors of INSN. For each successor that is SUCCS_NORMAL | |
48e1416a | 6300 | code_motion_path_driver is called recursively. Original operation |
6301 | was found on at least one path starting with one of INSN's
e1ab7874 | 6302 | successors (this fact is asserted). ORIG_OPS are the expressions we're looking
6303 | for, PATH is the path we've traversed, STATIC_PARAMS are the parameters
48e1416a | 6304 | of either move_op or find_used_regs depending on the caller. |
e1ab7874 | 6305 | |
6306 | Return 0 if we haven't found the expression, 1 if we found it, -1 if we don't
6307 | know for sure at this point. */ | |
6308 | static int | |
48e1416a | 6309 | code_motion_process_successors (insn_t insn, av_set_t orig_ops, |
e1ab7874 | 6310 | ilist_t path, void *static_params) |
6311 | { | |
6312 | int res = 0; | |
6313 | succ_iterator succ_i; | |
2f3c9801 | 6314 | insn_t succ; |
e1ab7874 | 6315 | basic_block bb; |
6316 | int old_index; | |
6317 | unsigned old_succs; | |
6318 | ||
6319 | struct cmpd_local_params lparams; | |
6320 | expr_def _x; | |
6321 | ||
6322 | lparams.c_expr_local = &_x; | |
6323 | lparams.c_expr_merged = NULL; | |
6324 | ||
6325 | /* We need to process only NORMAL succs for move_op, and collect live | |
48e1416a | 6326 | registers from ALL branches (including those leading out of the |
6327 | region) for find_used_regs. | |
e1ab7874 | 6328 | |
6329 | In move_op, there can be a case when insn's bb number has changed | |
48e1416a | 6330 | due to created bookkeeping. This happens very rarely, as we need to
6331 | move an expression from the beginning to the end of the same block.
6332 | Rescan successors in this case. */ | |
e1ab7874 | 6333 | |
6334 | rescan: | |
6335 | bb = BLOCK_FOR_INSN (insn); | |
48e1416a | 6336 | old_index = bb->index; |
e1ab7874 | 6337 | old_succs = EDGE_COUNT (bb->succs); |
48e1416a | 6338 | |
e1ab7874 | 6339 | FOR_EACH_SUCC_1 (succ, succ_i, insn, code_motion_path_driver_info->succ_flags) |
6340 | { | |
6341 | int b; | |
6342 | ||
6343 | lparams.e1 = succ_i.e1; | |
6344 | lparams.e2 = succ_i.e2; | |
6345 | ||
6346 | /* Go deep into recursion only for NORMAL edges (non-backedges within the | |
6347 | current region). */ | |
6348 | if (succ_i.current_flags == SUCCS_NORMAL) | |
48e1416a | 6349 | b = code_motion_path_driver (succ, orig_ops, path, &lparams, |
e1ab7874 | 6350 | static_params); |
6351 | else | |
6352 | b = 0; | |
6353 | ||
6354 | /* Merge c_expres found or unify live register sets from different | |
6355 | successors. */ | |
6356 | code_motion_path_driver_info->merge_succs (insn, succ, b, &lparams, | |
6357 | static_params); | |
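      /* Keep the strongest result seen so far: 1 (found) takes precedence
         over -1 (unknown), which takes precedence over 0 (not found).  */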
6358 | if (b == 1) | |
6359 | res = b; | |
6360 | else if (b == -1 && res != 1) | |
6361 | res = b; | |
6362 | ||
6363 | /* We have simplified the control flow below this point. In this case, | |
91b338ea | 6364 | the iterator becomes invalid. We need to try again. |
6365 | If we have removed the insn itself, it could be only an | |
6366 | unconditional jump. Thus, do not rescan but break immediately -- | |
6367 | we have already visited the only successor block. */ | |
6368 | if (!BLOCK_FOR_INSN (insn)) | |
6369 | { | |
6370 | if (sched_verbose >= 6) | |
6371 | sel_print ("Not doing rescan: already visited the only successor" | |
6372 | " of block %d\n", old_index); | |
6373 | break; | |
6374 | } | |
e1ab7874 | 6375 | if (BLOCK_FOR_INSN (insn)->index != old_index |
6376 | || EDGE_COUNT (bb->succs) != old_succs) | |
8ff642e9 | 6377 | { |
91b338ea | 6378 | if (sched_verbose >= 6) |
6379 | sel_print ("Rescan: CFG was simplified below insn %d, block %d\n", | |
6380 | INSN_UID (insn), BLOCK_FOR_INSN (insn)->index); | |
8ff642e9 | 6381 | insn = sel_bb_end (BLOCK_FOR_INSN (insn)); |
6382 | goto rescan; | |
6383 | } | |
e1ab7874 | 6384 | } |
6385 | ||
8d653dbe | 6386 | #ifdef ENABLE_CHECKING |
48e1416a | 6387 | /* Here, RES==1 if the original expr was found for at least one of the
e1ab7874 | 6388 | successors. After the loop, RES can be zero
48e1416a | 6389 | only if at some point the expr being searched for is present in the av_set, but is
6390 | not found below. In most cases, this situation is an error. | |
e1ab7874 | 6391 | The exception is when the original operation is blocked by |
6392 | bookkeeping generated for another fence or for another path in current | |
6393 | move_op. */ | |
8d653dbe | 6394 | gcc_assert (res == 1 |
6395 | || (res == 0 | |
6396 | && av_set_could_be_blocked_by_bookkeeping_p (orig_ops, | |
6397 | static_params)) | |
6398 | || res == -1); | |
6399 | #endif | |
48e1416a | 6400 | |
e1ab7874 | 6401 | /* Merge data, clean up, etc. */ |
de353418 | 6402 | if (res != -1 && code_motion_path_driver_info->after_merge_succs) |
e1ab7874 | 6403 | code_motion_path_driver_info->after_merge_succs (&lparams, static_params); |
6404 | ||
6405 | return res; | |
6406 | } | |
6407 | ||
6408 | ||
48e1416a | 6409 | /* Perform a cleanup when the driver is about to terminate. ORIG_OPS_P |
6410 | is the pointer to the av set with expressions we were looking for, | |
e1ab7874 | 6411 | PATH_P is the pointer to the traversed path. */ |
6412 | static inline void | |
6413 | code_motion_path_driver_cleanup (av_set_t *orig_ops_p, ilist_t *path_p) | |
6414 | { | |
6415 | ilist_remove (path_p); | |
6416 | av_set_clear (orig_ops_p); | |
6417 | } | |
6418 | ||
48e1416a | 6419 | /* The driver function that implements move_op or find_used_regs |
6420 | functionality depending on whether code_motion_path_driver_INFO is set to
6421 | &MOVE_OP_HOOKS or &FUR_HOOKS. This function implements the common parts | |
e1ab7874 | 6422 | of code (CFG traversal etc) that are shared among both functions. INSN |
6423 | is the insn we're starting the search from, ORIG_OPS are the expressions | |
6424 | we're searching for, PATH is traversed path, LOCAL_PARAMS_IN are local | |
6425 | parameters of the driver, and STATIC_PARAMS are static parameters of | |
48e1416a | 6426 | the caller. |
e1ab7874 | 6427 | |
6428 | Returns whether original instructions were found. Note that top-level | |
6429 | code_motion_path_driver always returns true. */ | |
de353418 | 6430 | static int |
48e1416a | 6431 | code_motion_path_driver (insn_t insn, av_set_t orig_ops, ilist_t path, |
6432 | cmpd_local_params_p local_params_in, | |
e1ab7874 | 6433 | void *static_params) |
6434 | { | |
6435 | expr_t expr = NULL; | |
6436 | basic_block bb = BLOCK_FOR_INSN (insn); | |
6437 | insn_t first_insn, bb_tail, before_first; | |
6438 | bool removed_last_insn = false; | |
6439 | ||
6440 | if (sched_verbose >= 6) | |
6441 | { | |
6442 | sel_print ("%s (", code_motion_path_driver_info->routine_name); | |
6443 | dump_insn (insn); | |
6444 | sel_print (","); | |
6445 | dump_av_set (orig_ops); | |
6446 | sel_print (")\n"); | |
6447 | } | |
6448 | ||
6449 | gcc_assert (orig_ops); | |
6450 | ||
6451 | /* If no original operations exist below this insn, return immediately. */ | |
6452 | if (is_ineligible_successor (insn, path)) | |
6453 | { | |
6454 | if (sched_verbose >= 6) | |
6455 | sel_print ("Insn %d is ineligible successor\n", INSN_UID (insn)); | |
6456 | return false; | |
6457 | } | |
48e1416a | 6458 | |
e1ab7874 | 6459 | /* The block can have an invalid av set, in which case it was created earlier
6460 | during move_op. Return immediately. */ | |
6461 | if (sel_bb_head_p (insn)) | |
6462 | { | |
6463 | if (! AV_SET_VALID_P (insn)) | |
6464 | { | |
6465 | if (sched_verbose >= 6) | |
6466 | sel_print ("Returned from block %d as it had invalid av set\n", | |
6467 | bb->index); | |
6468 | return false; | |
6469 | } | |
6470 | ||
6471 | if (bitmap_bit_p (code_motion_visited_blocks, bb->index)) | |
6472 | { | |
6473 | /* We have already found an original operation on this branch, do not | |
6474 | go any further and just return TRUE here. If we don't stop here, | |
48e1416a | 6475 | the function can have exponential behaviour even on small code
e1ab7874 | 6476 | with many different paths (e.g. with data speculation and |
6477 | recovery blocks). */ | |
6478 | if (sched_verbose >= 6) | |
6479 | sel_print ("Block %d already visited in this traversal\n", bb->index); | |
6480 | if (code_motion_path_driver_info->on_enter) | |
48e1416a | 6481 | return code_motion_path_driver_info->on_enter (insn, |
e1ab7874 | 6482 | local_params_in, |
48e1416a | 6483 | static_params, |
e1ab7874 | 6484 | true); |
6485 | } | |
6486 | } | |
48e1416a | 6487 | |
e1ab7874 | 6488 | if (code_motion_path_driver_info->on_enter) |
6489 | code_motion_path_driver_info->on_enter (insn, local_params_in, | |
6490 | static_params, false); | |
6491 | orig_ops = av_set_copy (orig_ops); | |
6492 | ||
6493 | /* Filter the orig_ops set. */ | |
6494 | if (AV_SET_VALID_P (insn)) | |
c53624fb | 6495 | av_set_code_motion_filter (&orig_ops, AV_SET (insn)); |
e1ab7874 | 6496 | |
6497 | /* If no more original ops, return immediately. */ | |
6498 | if (!orig_ops) | |
6499 | { | |
6500 | if (sched_verbose >= 6) | |
6501 | sel_print ("No intersection with av set of block %d\n", bb->index); | |
6502 | return false; | |
6503 | } | |
6504 | ||
6505 | /* For non-speculative insns we have to leave only one form of the | |
48e1416a | 6506 | original operation, because if we don't, we may end up with |
e1ab7874 | 6507 | different C_EXPRes and, consequently, with bookkeepings for different |
6508 | expression forms along the same code motion path. That may lead to | |
48e1416a | 6509 | generation of incorrect code. So for each code motion we stick to |
6510 | the single form of the instruction, except for speculative insns | |
6511 | which we need to keep in different forms with all speculation | |
e1ab7874 | 6512 | types. */ |
6513 | av_set_leave_one_nonspec (&orig_ops); | |
6514 | ||
6515 | /* It is not possible that all ORIG_OPS are filtered out. */ | |
6516 | gcc_assert (orig_ops); | |
6517 | ||
6518 | /* It is enough to place only heads and tails of visited basic blocks into | |
6519 | the PATH. */ | |
6520 | ilist_add (&path, insn); | |
6521 | first_insn = insn; | |
6522 | bb_tail = sel_bb_end (bb); | |
6523 | ||
6524 | /* Descend the basic block in search of the original expr; this part | |
48e1416a | 6525 | corresponds to the part of the original move_op procedure executed |
e1ab7874 | 6526 | before the recursive call. */ |
6527 | for (;;) | |
6528 | { | |
6529 | /* Look at the insn and decide if it could be an ancestor of currently | |
6530 | scheduling operation. If it is so, then the insn "dest = op" could | |
6531 | either be replaced with "dest = reg", because REG now holds the result | |
6532 | of OP, or just removed, if we've scheduled the insn as a whole. | |
6533 | ||
6534 | If this insn doesn't contain currently scheduling OP, then proceed | |
6535 | with searching and look at its successors. Operations we're searching | |
48e1416a | 6536 | for could have changed when moving up through this insn via |
e1ab7874 | 6537 | substituting. In this case, perform unsubstitution on them first. |
6538 | ||
6539 | When traversing the DAG below this insn is finished, insert | |
6540 | bookkeeping code, if the insn is a joint point, and remove | |
6541 | leftovers. */ | |
6542 | ||
6543 | expr = av_set_lookup (orig_ops, INSN_VINSN (insn)); | |
6544 | if (expr) | |
6545 | { | |
6546 | insn_t last_insn = PREV_INSN (insn); | |
6547 | ||
6548 | /* We have found the original operation. */ | |
6549 | if (sched_verbose >= 6) | |
6550 | sel_print ("Found original operation at insn %d\n", INSN_UID (insn)); | |
6551 | ||
48e1416a | 6552 | code_motion_path_driver_info->orig_expr_found |
e1ab7874 | 6553 | (insn, expr, local_params_in, static_params); |
6554 | ||
6555 | /* Step back, so on the way back we'll start traversing from the | |
48e1416a | 6556 | previous insn (or we'll see that it's a bb_note and skip that
e1ab7874 | 6557 | loop). */ |
6558 | if (insn == first_insn) | |
6559 | { | |
6560 | first_insn = NEXT_INSN (last_insn); | |
6561 | removed_last_insn = sel_bb_end_p (last_insn); | |
6562 | } | |
6563 | insn = last_insn; | |
6564 | break; | |
6565 | } | |
6566 | else | |
6567 | { | |
6568 | /* We haven't found the original expr, continue descending the basic | |
6569 | block. */ | |
48e1416a | 6570 | if (code_motion_path_driver_info->orig_expr_not_found |
e1ab7874 | 6571 | (insn, orig_ops, static_params)) |
6572 | { | |
48e1416a | 6573 | /* Av set ops could have been changed when moving through this |
e1ab7874 | 6574 | insn. To find them below it, we have to un-substitute them. */ |
6575 | undo_transformations (&orig_ops, insn); | |
6576 | } | |
6577 | else | |
6578 | { | |
6579 | /* Clean up and return, if the hook tells us to do so. It may | |
48e1416a | 6580 | happen if we've encountered the previously created |
e1ab7874 | 6581 | bookkeeping. */ |
6582 | code_motion_path_driver_cleanup (&orig_ops, &path); | |
6583 | return -1; | |
6584 | } | |
6585 | ||
6586 | gcc_assert (orig_ops); | |
6587 | } | |
6588 | ||
6589 | /* Stop at insn if we got to the end of BB. */ | |
6590 | if (insn == bb_tail) | |
6591 | break; | |
6592 | ||
6593 | insn = NEXT_INSN (insn); | |
6594 | } | |
6595 | ||
48e1416a | 6596 | /* Here INSN either points to the insn before the original insn (may be |
e1ab7874 | 6597 | a bb_note, if the original insn was a bb_head) or to the bb_end. */
6598 | if (!expr) | |
6599 | { | |
6600 | int res; | |
ff88d074 | 6601 | rtx_insn *last_insn = PREV_INSN (insn); |
8ff642e9 | 6602 | bool added_to_path; |
e1ab7874 | 6603 | |
6604 | gcc_assert (insn == sel_bb_end (bb)); | |
6605 | ||
6606 | /* Add bb tail to PATH (but it doesn't make any sense if it's a bb_head - | |
6607 | it's already in PATH then). */ | |
6608 | if (insn != first_insn) | |
8ff642e9 | 6609 | { |
6610 | ilist_add (&path, insn); | |
6611 | added_to_path = true; | |
6612 | } | |
6613 | else | |
6614 | added_to_path = false; | |
e1ab7874 | 6615 | |
48e1416a | 6616 | /* code_motion_process_successors should be able to find at least one
6617 | successor for which code_motion_path_driver returns TRUE. */ | |
6618 | res = code_motion_process_successors (insn, orig_ops, | |
e1ab7874 | 6619 | path, static_params); |
6620 | ||
8ff642e9 | 6621 | /* The jump at the end of the basic block could have been removed or replaced
6622 | during code_motion_process_successors, so recompute insn as the | |
6623 | last insn in bb. */ | |
6624 | if (NEXT_INSN (last_insn) != insn) | |
6625 | { | |
6626 | insn = sel_bb_end (bb); | |
6627 | first_insn = sel_bb_head (bb); | |
6628 | } | |
6629 | ||
e1ab7874 | 6630 | /* Remove bb tail from path. */ |
8ff642e9 | 6631 | if (added_to_path) |
e1ab7874 | 6632 | ilist_remove (&path); |
6633 | ||
6634 | if (res != 1) | |
6635 | { | |
6636 | /* This is the case when one of the original exprs is no longer available
48e1416a | 6637 | due to bookkeeping created on this branch with the same register. |
e1ab7874 | 6638 | In the original algorithm, which doesn't have update_data_sets call |
48e1416a | 6639 | on a bookkeeping block, it would simply result in returning |
6640 | FALSE when we've encountered a previously generated bookkeeping | |
e1ab7874 | 6641 | insn in moveop_orig_expr_not_found. */ |
6642 | code_motion_path_driver_cleanup (&orig_ops, &path); | |
6643 | return res; | |
6644 | } | |
6645 | } | |
6646 | ||
6647 | /* Don't need it any more. */ | |
6648 | av_set_clear (&orig_ops); | |
6649 | ||
48e1416a | 6650 | /* Backward pass: now, when we have C_EXPR computed, we'll drag it to |
e1ab7874 | 6651 | the beginning of the basic block. */ |
6652 | before_first = PREV_INSN (first_insn); | |
6653 | while (insn != before_first) | |
48e1416a | 6654 | { |
e1ab7874 | 6655 | if (code_motion_path_driver_info->ascend) |
6656 | code_motion_path_driver_info->ascend (insn, static_params); | |
6657 | ||
6658 | insn = PREV_INSN (insn); | |
6659 | } | |
48e1416a | 6660 | |
e1ab7874 | 6661 | /* Now we're at the bb head. */ |
6662 | insn = first_insn; | |
6663 | ilist_remove (&path); | |
6664 | local_params_in->removed_last_insn = removed_last_insn; | |
6665 | code_motion_path_driver_info->at_first_insn (insn, local_params_in, static_params); | |
48e1416a | 6666 | |
e1ab7874 | 6667 | /* This should be the very last operation as at bb head we could change |
6668 | the numbering by creating bookkeeping blocks. */ | |
6669 | if (removed_last_insn) | |
6670 | insn = PREV_INSN (insn); | |
f18c3345 | 6671 | |
6672 | /* If we have simplified the control flow and removed the first jump insn, | |
6673 | there's no point in marking this block in the visited blocks bitmap. */ | |
6674 | if (BLOCK_FOR_INSN (insn)) | |
6675 | bitmap_set_bit (code_motion_visited_blocks, BLOCK_FOR_INSN (insn)->index); | |
e1ab7874 | 6676 | return true; |
6677 | } | |
6678 | ||
48e1416a | 6679 | /* Move up the operations from the ORIG_OPS set, traversing the dag starting
e1ab7874 | 6680 | from INSN. PATH represents the edges traversed so far. |
6681 | DEST is the register chosen for scheduling the current expr. Insert | |
6682 | bookkeeping code at the join points. EXPR_VLIW is the chosen expression,
48e1416a | 6683 | C_EXPR is how it looks at the given cfg point.
de353418 | 6684 | Set *SHOULD_MOVE to indicate whether we have only disconnected |
6685 | one of the insns found. | |
e1ab7874 | 6686 | |
48e1416a | 6687 | Returns whether original instructions were found, which is asserted |
e1ab7874 | 6688 | to be true in the caller. */ |
6689 | static bool | |
6690 | move_op (insn_t insn, av_set_t orig_ops, expr_t expr_vliw, | |
de353418 | 6691 | rtx dest, expr_t c_expr, bool *should_move) |
e1ab7874 | 6692 | { |
6693 | struct moveop_static_params sparams; | |
6694 | struct cmpd_local_params lparams; | |
93457441 | 6695 | int res; |
e1ab7874 | 6696 | |
48e1416a | 6697 | /* Init params for code_motion_path_driver. */ |
e1ab7874 | 6698 | sparams.dest = dest; |
6699 | sparams.c_expr = c_expr; | |
6700 | sparams.uid = INSN_UID (EXPR_INSN_RTX (expr_vliw)); | |
6701 | #ifdef ENABLE_CHECKING | |
6702 | sparams.failed_insn = NULL; | |
6703 | #endif | |
6704 | sparams.was_renamed = false; | |
6705 | lparams.e1 = NULL; | |
6706 | ||
6707 | /* We haven't visited any blocks yet. */ | |
6708 | bitmap_clear (code_motion_visited_blocks); | |
48e1416a | 6709 | |
e1ab7874 | 6710 | /* Set appropriate hooks and data. */ |
6711 | code_motion_path_driver_info = &move_op_hooks; | |
6712 | res = code_motion_path_driver (insn, orig_ops, NULL, &lparams, &sparams); | |
6713 | ||
93457441 | 6714 | gcc_assert (res != -1); |
6715 | ||
e1ab7874 | 6716 | if (sparams.was_renamed) |
6717 | EXPR_WAS_RENAMED (expr_vliw) = true; | |
6718 | ||
de353418 | 6719 | *should_move = (sparams.uid == -1); |
6720 | ||
e1ab7874 | 6721 | return res; |
6722 | } | |
6723 | \f | |
6724 | ||
6725 | /* Functions that work with regions. */ | |
6726 | ||
6727 | /* Current seqno value used in init_seqno and init_seqno_1. */
6728 | static int cur_seqno; | |
6729 | ||
48e1416a | 6730 | /* A helper for init_seqno. Traverse the region starting from BB and |
6731 | compute seqnos for visited insns, marking visited bbs in VISITED_BBS. | |
e1ab7874 | 6732 | Clear visited blocks from BLOCKS_TO_RESCHEDULE. */ |
6733 | static void | |
6734 | init_seqno_1 (basic_block bb, sbitmap visited_bbs, bitmap blocks_to_reschedule) | |
6735 | { | |
6736 | int bbi = BLOCK_TO_BB (bb->index); | |
9ed997be | 6737 | insn_t insn; |
e1ab7874 | 6738 | insn_t succ_insn; |
6739 | succ_iterator si; | |
6740 | ||
9ed997be | 6741 | rtx_note *note = bb_note (bb); |
08b7917c | 6742 | bitmap_set_bit (visited_bbs, bbi); |
e1ab7874 | 6743 | if (blocks_to_reschedule) |
6744 | bitmap_clear_bit (blocks_to_reschedule, bb->index); | |
6745 | ||
48e1416a | 6746 | FOR_EACH_SUCC_1 (succ_insn, si, BB_END (bb), |
e1ab7874 | 6747 | SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS) |
6748 | { | |
6749 | basic_block succ = BLOCK_FOR_INSN (succ_insn); | |
6750 | int succ_bbi = BLOCK_TO_BB (succ->index); | |
6751 | ||
6752 | gcc_assert (in_current_region_p (succ)); | |
6753 | ||
08b7917c | 6754 | if (!bitmap_bit_p (visited_bbs, succ_bbi)) |
e1ab7874 | 6755 | { |
6756 | gcc_assert (succ_bbi > bbi); | |
6757 | ||
6758 | init_seqno_1 (succ, visited_bbs, blocks_to_reschedule); | |
6759 | } | |
08b41748 | 6760 | else if (blocks_to_reschedule) |
6761 | bitmap_set_bit (forced_ebb_heads, succ->index); | |
e1ab7874 | 6762 | } |
6763 | ||
6764 | for (insn = BB_END (bb); insn != note; insn = PREV_INSN (insn)) | |
6765 | INSN_SEQNO (insn) = cur_seqno--; | |
6766 | } | |
6767 | ||
def66588 | 6768 | /* Initialize seqnos for the current region. BLOCKS_TO_RESCHEDULE contains |
6769 | blocks on which we're rescheduling when pipelining, FROM is the block where | |
e1ab7874 | 6770 | traversing the region begins (it may not be the head of the region when
48e1416a | 6771 | pipelining, but the head of the loop instead). |
e1ab7874 | 6772 | |
6773 | Returns the maximal seqno found. */ | |
6774 | static int | |
def66588 | 6775 | init_seqno (bitmap blocks_to_reschedule, basic_block from) |
e1ab7874 | 6776 | { |
6777 | sbitmap visited_bbs; | |
6778 | bitmap_iterator bi; | |
6779 | unsigned bbi; | |
6780 | ||
6781 | visited_bbs = sbitmap_alloc (current_nr_blocks); | |
6782 | ||
6783 | if (blocks_to_reschedule) | |
6784 | { | |
53c5d9d4 | 6785 | bitmap_ones (visited_bbs); |
e1ab7874 | 6786 | EXECUTE_IF_SET_IN_BITMAP (blocks_to_reschedule, 0, bbi, bi) |
6787 | { | |
6788 | gcc_assert (BLOCK_TO_BB (bbi) < current_nr_blocks); | |
08b7917c | 6789 | bitmap_clear_bit (visited_bbs, BLOCK_TO_BB (bbi)); |
e1ab7874 | 6790 | } |
6791 | } | |
6792 | else | |
6793 | { | |
53c5d9d4 | 6794 | bitmap_clear (visited_bbs); |
e1ab7874 | 6795 | from = EBB_FIRST_BB (0); |
6796 | } | |
6797 | ||
def66588 | 6798 | cur_seqno = sched_max_luid - 1; |
e1ab7874 | 6799 | init_seqno_1 (from, visited_bbs, blocks_to_reschedule); |
def66588 | 6800 | |
6801 | /* cur_seqno may be positive if the number of instructions is less than | |
6802 | sched_max_luid - 1 (when rescheduling or if some instructions have been | |
6803 | removed by the call to purge_empty_blocks in sel_sched_region_1). */ | |
6804 | gcc_assert (cur_seqno >= 0); | |
e1ab7874 | 6805 | |
6806 | sbitmap_free (visited_bbs); | |
6807 | return sched_max_luid - 1; | |
6808 | } | |
6809 | ||
6810 | /* Initialize scheduling parameters for current region. */ | |
6811 | static void | |
6812 | sel_setup_region_sched_flags (void) | |
6813 | { | |
6814 | enable_schedule_as_rhs_p = 1; | |
6815 | bookkeeping_p = 1; | |
48e1416a | 6816 | pipelining_p = (bookkeeping_p |
e1ab7874 | 6817 | && (flag_sel_sched_pipelining != 0) |
a8d6ade3 | 6818 | && current_loop_nest != NULL |
6819 | && loop_has_exit_edges (current_loop_nest)); | |
e1ab7874 | 6820 | max_insns_to_rename = PARAM_VALUE (PARAM_SELSCHED_INSNS_TO_RENAME); |
6821 | max_ws = MAX_WS; | |
6822 | } | |
6823 | ||
6824 | /* Return true if all basic blocks of current region are empty. */ | |
6825 | static bool | |
6826 | current_region_empty_p (void) | |
6827 | { | |
6828 | int i; | |
6829 | for (i = 0; i < current_nr_blocks; i++) | |
f5a6b05f | 6830 | if (! sel_bb_empty_p (BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (i)))) |
e1ab7874 | 6831 | return false; |
6832 | ||
6833 | return true; | |
6834 | } | |
6835 | ||
6836 | /* Prepare and verify loop nest for pipelining. */ | |
6837 | static void | |
b73edd22 | 6838 | setup_current_loop_nest (int rgn, bb_vec_t *bbs) |
e1ab7874 | 6839 | { |
6840 | current_loop_nest = get_loop_nest_for_rgn (rgn); | |
6841 | ||
6842 | if (!current_loop_nest) | |
6843 | return; | |
6844 | ||
6845 | /* If this loop has any saved loop preheaders from nested loops, | |
6846 | add these basic blocks to the current region. */ | |
b73edd22 | 6847 | sel_add_loop_preheaders (bbs); |
e1ab7874 | 6848 | |
6849 | /* Check that we're starting with valid information. */
6850 | gcc_assert (loop_latch_edge (current_loop_nest)); | |
6851 | gcc_assert (LOOP_MARKED_FOR_PIPELINING_P (current_loop_nest)); | |
6852 | } | |
6853 | ||
e1ab7874 | 6854 | /* Compute instruction priorities for current region. */ |
6855 | static void | |
6856 | sel_compute_priorities (int rgn) | |
6857 | { | |
6858 | sched_rgn_compute_dependencies (rgn); | |
6859 | ||
6860 | /* Compute insn priorities in haifa style. Then free haifa style | |
6861 | dependencies that we've calculated for this. */ | |
6862 | compute_priorities (); | |
6863 | ||
6864 | if (sched_verbose >= 5) | |
6865 | debug_rgn_dependencies (0); | |
6866 | ||
6867 | free_rgn_deps (); | |
6868 | } | |
6869 | ||
6870 | /* Init scheduling data for RGN. Returns true when this region should not | |
6871 | be scheduled. */ | |
6872 | static bool | |
6873 | sel_region_init (int rgn) | |
6874 | { | |
6875 | int i; | |
6876 | bb_vec_t bbs; | |
6877 | ||
6878 | rgn_setup_region (rgn); | |
6879 | ||
48e1416a | 6880 | /* Even if sched_is_disabled_for_current_region_p() is true, we still |
e1ab7874 | 6881 | do region initialization here so the region can be bundled correctly, |
6882 | but we'll skip the scheduling in sel_sched_region (). */ | |
6883 | if (current_region_empty_p ()) | |
6884 | return true; | |
6885 | ||
f1f41a6c | 6886 | bbs.create (current_nr_blocks); |
e1ab7874 | 6887 | |
6888 | for (i = 0; i < current_nr_blocks; i++) | |
f5a6b05f | 6889 | bbs.quick_push (BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (i))); |
e1ab7874 | 6890 | |
52d7e28c | 6891 | sel_init_bbs (bbs); |
e1ab7874 | 6892 | |
b73edd22 | 6893 | if (flag_sel_sched_pipelining) |
6894 | setup_current_loop_nest (rgn, &bbs); | |
6895 | ||
a060ed03 | 6896 | sel_setup_region_sched_flags (); |
6897 | ||
e1ab7874 | 6898 | /* Initialize luids and dependence analysis which both sel-sched and haifa |
6899 | need. */ | |
52d7e28c | 6900 | sched_init_luids (bbs); |
e1ab7874 | 6901 | sched_deps_init (false); |
6902 | ||
6903 | /* Initialize haifa data. */ | |
6904 | rgn_setup_sched_infos (); | |
6905 | sel_set_sched_flags (); | |
52d7e28c | 6906 | haifa_init_h_i_d (bbs); |
e1ab7874 | 6907 | |
6908 | sel_compute_priorities (rgn); | |
6909 | init_deps_global (); | |
6910 | ||
6911 | /* Main initialization. */ | |
6912 | sel_setup_sched_infos (); | |
6913 | sel_init_global_and_expr (bbs); | |
6914 | ||
f1f41a6c | 6915 | bbs.release (); |
e1ab7874 | 6916 | |
6917 | blocks_to_reschedule = BITMAP_ALLOC (NULL); | |
6918 | ||
6919 | /* Init correct liveness sets on each instruction of a single-block loop. | |
6920 | This is the only situation when we can't update liveness when calling | |
6921 | compute_live for the first insn of the loop. */ | |
6922 | if (current_loop_nest) | |
6923 | { | |
f5a6b05f | 6924 | int header = |
6925 | (sel_is_loop_preheader_p (BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (0))) | |
6926 | ? 1 | |
6927 | : 0); | |
e1ab7874 | 6928 | |
6929 | if (current_nr_blocks == header + 1) | |
48e1416a | 6930 | update_liveness_on_insn |
f5a6b05f | 6931 | (sel_bb_head (BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (header)))); |
e1ab7874 | 6932 | } |
48e1416a | 6933 | |
e1ab7874 | 6934 | /* Set hooks so that no newly generated insn will go out unnoticed. */ |
6935 | sel_register_cfg_hooks (); | |
6936 | ||
202d6e5f | 6937 | /* !!! We call targetm.sched.init () for the whole region, but we invoke |
6938 | targetm.sched.finish () for every ebb. */ |
6939 | if (targetm.sched.init) | |
e1ab7874 | 6940 | /* None of the arguments are actually used in any target. */ |
202d6e5f | 6941 | targetm.sched.init (sched_dump, sched_verbose, -1); |
e1ab7874 | 6942 | |
6943 | first_emitted_uid = get_max_uid () + 1; | |
6944 | preheader_removed = false; | |
6945 | ||
6946 | /* Reset register allocation ticks array. */ | |
6947 | memset (reg_rename_tick, 0, sizeof reg_rename_tick); | |
6948 | reg_rename_this_tick = 0; | |
6949 | ||
6950 | bitmap_initialize (forced_ebb_heads, 0); | |
6951 | bitmap_clear (forced_ebb_heads); | |
6952 | ||
6953 | setup_nop_vinsn (); | |
6954 | current_copies = BITMAP_ALLOC (NULL); | |
6955 | current_originators = BITMAP_ALLOC (NULL); | |
6956 | code_motion_visited_blocks = BITMAP_ALLOC (NULL); | |
6957 | ||
6958 | return false; | |
6959 | } | |
6960 | ||
6961 | /* Simplify insns after the scheduling. */ | |
6962 | static void | |
6963 | simplify_changed_insns (void) | |
6964 | { | |
6965 | int i; | |
6966 | ||
6967 | for (i = 0; i < current_nr_blocks; i++) | |
6968 | { | |
f5a6b05f | 6969 | basic_block bb = BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (i)); |
ff88d074 | 6970 | rtx_insn *insn; |
e1ab7874 | 6971 | |
6972 | FOR_BB_INSNS (bb, insn) | |
6973 | if (INSN_P (insn)) | |
6974 | { | |
6975 | expr_t expr = INSN_EXPR (insn); | |
6976 | ||
48e1416a | 6977 | if (EXPR_WAS_SUBSTITUTED (expr)) |
e1ab7874 | 6978 | validate_simplify_insn (insn); |
6979 | } | |
6980 | } | |
6981 | } | |
6982 | ||
6983 | /* Find boundaries of the EBB starting from basic block BB, marking blocks of | |
6984 | this EBB in SCHEDULED_BLOCKS and appropriately filling in HEAD, TAIL, | |
6985 | PREV_HEAD, and NEXT_TAIL fields of CURRENT_SCHED_INFO structure. */ | |
6986 | static void | |
6987 | find_ebb_boundaries (basic_block bb, bitmap scheduled_blocks) | |
6988 | { | |
6fe7b8c2 | 6989 | rtx_insn *head, *tail; |
e1ab7874 | 6990 | basic_block bb1 = bb; |
6991 | if (sched_verbose >= 2) | |
6992 | sel_print ("Finishing schedule in bbs: "); | |
6993 | ||
6994 | do | |
6995 | { | |
6996 | bitmap_set_bit (scheduled_blocks, BLOCK_TO_BB (bb1->index)); | |
6997 | ||
6998 | if (sched_verbose >= 2) | |
6999 | sel_print ("%d; ", bb1->index); | |
7000 | } | |
7001 | while (!bb_ends_ebb_p (bb1) && (bb1 = bb_next_bb (bb1))); | |
7002 | ||
7003 | if (sched_verbose >= 2) | |
7004 | sel_print ("\n"); | |
7005 | ||
7006 | get_ebb_head_tail (bb, bb1, &head, &tail); | |
7007 | ||
7008 | current_sched_info->head = head; | |
7009 | current_sched_info->tail = tail; | |
7010 | current_sched_info->prev_head = PREV_INSN (head); | |
7011 | current_sched_info->next_tail = NEXT_INSN (tail); | |
7012 | } | |
7013 | ||
7014 | /* Regenerate INSN_SCHED_CYCLEs for insns of current EBB. */ | |
7015 | static void | |
7016 | reset_sched_cycles_in_current_ebb (void) | |
7017 | { | |
7018 | int last_clock = 0; | |
7019 | int haifa_last_clock = -1; | |
7020 | int haifa_clock = 0; | |
08b41748 | 7021 | int issued_insns = 0; |
e1ab7874 | 7022 | insn_t insn; |
7023 | ||
202d6e5f | 7024 | if (targetm.sched.init) |
e1ab7874 | 7025 | { |
7026 | /* None of the arguments are actually used in any target. | |
7027 | NB: We should have an md_reset () hook for cases like this. */ |
202d6e5f | 7028 | targetm.sched.init (sched_dump, sched_verbose, -1); |
e1ab7874 | 7029 | } |
7030 | ||
7031 | state_reset (curr_state); | |
7032 | advance_state (curr_state); | |
48e1416a | 7033 | |
e1ab7874 | 7034 | for (insn = current_sched_info->head; |
7035 | insn != current_sched_info->next_tail; | |
7036 | insn = NEXT_INSN (insn)) | |
7037 | { | |
7038 | int cost, haifa_cost; | |
7039 | int sort_p; | |
30474b14 | 7040 | bool asm_p, real_insn, after_stall, all_issued; |
e1ab7874 | 7041 | int clock; |
7042 | ||
7043 | if (!INSN_P (insn)) | |
7044 | continue; | |
7045 | ||
7046 | asm_p = false; | |
7047 | real_insn = recog_memoized (insn) >= 0; | |
7048 | clock = INSN_SCHED_CYCLE (insn); | |
7049 | ||
7050 | cost = clock - last_clock; | |
7051 | ||
7052 | /* Initialize HAIFA_COST. */ | |
7053 | if (! real_insn) | |
7054 | { | |
7055 | asm_p = INSN_ASM_P (insn); | |
7056 | ||
7057 | if (asm_p) | |
7058 | /* This is an asm insn which *had* to be scheduled first |
7059 | on the cycle. */ | |
7060 | haifa_cost = 1; | |
7061 | else | |
48e1416a | 7062 | /* This is a use/clobber insn. It should not change |
e1ab7874 | 7063 | cost. */ |
7064 | haifa_cost = 0; | |
7065 | } | |
7066 | else | |
30474b14 | 7067 | haifa_cost = estimate_insn_cost (insn, curr_state); |
e1ab7874 | 7068 | |
7069 | /* Stall for whatever cycles we've stalled before. */ | |
7070 | after_stall = 0; | |
7071 | if (INSN_AFTER_STALL_P (insn) && cost > haifa_cost) | |
7072 | { | |
7073 | haifa_cost = cost; | |
7074 | after_stall = 1; | |
7075 | } | |
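      /* Worked example (hypothetical numbers): if the previous insn was
	 placed on cycle 3 and this one on cycle 5, COST is 2.  If the DFA
	 alone would demand only one cycle (HAIFA_COST == 1) but the insn
	 is marked INSN_AFTER_STALL_P, HAIFA_COST is bumped to 2 above, so
	 the emulated clock reproduces the stall recorded by the
	 selective scheduler.  */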
946d6c2b | 7076 | all_issued = issued_insns == issue_rate; |
7077 | if (haifa_cost == 0 && all_issued) | |
08b41748 | 7078 | haifa_cost = 1; |
e1ab7874 | 7079 | if (haifa_cost > 0) |
7080 | { | |
7081 | int i = 0; | |
7082 | ||
7083 | while (haifa_cost--) | |
7084 | { | |
7085 | advance_state (curr_state); | |
08b41748 | 7086 | issued_insns = 0; |
e1ab7874 | 7087 | i++; |
7088 | ||
7089 | if (sched_verbose >= 2) | |
7090 | { | |
7091 | sel_print ("advance_state (state_transition)\n"); | |
7092 | debug_state (curr_state); | |
7093 | } | |
7094 | ||
48e1416a | 7095 | /* The DFA may report that e.g. an insn requires 2 cycles to be |
7096 | issued, but on the next cycle it says that the insn is ready |
e1ab7874 | 7097 | to go. Check this here. */ |
7098 | if (!after_stall | |
48e1416a | 7099 | && real_insn |
e1ab7874 | 7100 | && haifa_cost > 0 |
30474b14 | 7101 | && estimate_insn_cost (insn, curr_state) == 0) |
e1ab7874 | 7102 | break; |
e7ea26b5 | 7103 | |
7104 | /* When the data dependency stall is longer than the DFA stall, | |
946d6c2b | 7105 | and when we have issued exactly issue_rate insns and stalled, |
7106 | it could be that after this longer stall the insn will again | |
e7ea26b5 | 7107 | become unavailable due to the DFA restrictions. This looks strange |
7108 | but happens e.g. on x86-64. So recheck the DFA on the last |
7109 | iteration. */ | |
946d6c2b | 7110 | if ((after_stall || all_issued) |
e7ea26b5 | 7111 | && real_insn |
7112 | && haifa_cost == 0) | |
30474b14 | 7113 | haifa_cost = estimate_insn_cost (insn, curr_state); |
e7ea26b5 | 7114 | } |
e1ab7874 | 7115 | |
7116 | haifa_clock += i; | |
08b41748 | 7117 | if (sched_verbose >= 2) |
7118 | sel_print ("haifa clock: %d\n", haifa_clock); | |
e1ab7874 | 7119 | } |
7120 | else | |
7121 | gcc_assert (haifa_cost == 0); | |
7122 | ||
7123 | if (sched_verbose >= 2) | |
7124 | sel_print ("Haifa cost for insn %d: %d\n", INSN_UID (insn), haifa_cost); | |
7125 | ||
7126 | if (targetm.sched.dfa_new_cycle) | |
7127 | while (targetm.sched.dfa_new_cycle (sched_dump, sched_verbose, insn, | |
7128 | haifa_last_clock, haifa_clock, | |
7129 | &sort_p)) | |
7130 | { | |
7131 | advance_state (curr_state); | |
08b41748 | 7132 | issued_insns = 0; |
e1ab7874 | 7133 | haifa_clock++; |
7134 | if (sched_verbose >= 2) | |
7135 | { | |
7136 | sel_print ("advance_state (dfa_new_cycle)\n"); | |
7137 | debug_state (curr_state); | |
08b41748 | 7138 | sel_print ("haifa clock: %d\n", haifa_clock + 1); |
e1ab7874 | 7139 | } |
7140 | } | |
7141 | ||
7142 | if (real_insn) | |
7143 | { | |
30474b14 | 7144 | static state_t temp = NULL; |
7145 | ||
7146 | if (!temp) | |
7147 | temp = xmalloc (dfa_state_size); | |
7148 | memcpy (temp, curr_state, dfa_state_size); | |
7149 | ||
e1ab7874 | 7150 | cost = state_transition (curr_state, insn); |
30474b14 | 7151 | if (memcmp (temp, curr_state, dfa_state_size)) |
ed726cbf | 7152 | issued_insns++; |
e1ab7874 | 7153 | |
7154 | if (sched_verbose >= 2) | |
08b41748 | 7155 | { |
7156 | sel_print ("scheduled insn %d, clock %d\n", INSN_UID (insn), | |
7157 | haifa_clock + 1); | |
7158 | debug_state (curr_state); | |
7159 | } | |
e1ab7874 | 7160 | gcc_assert (cost < 0); |
7161 | } | |
7162 | ||
7163 | if (targetm.sched.variable_issue) | |
7164 | targetm.sched.variable_issue (sched_dump, sched_verbose, insn, 0); | |
7165 | ||
7166 | INSN_SCHED_CYCLE (insn) = haifa_clock; | |
7167 | ||
7168 | last_clock = clock; | |
7169 | haifa_last_clock = haifa_clock; | |
7170 | } | |
7171 | } | |
7172 | ||
7173 | /* Put TImode markers on insns starting a new issue group. */ | |
7174 | static void | |
7175 | put_TImodes (void) | |
7176 | { | |
7177 | int last_clock = -1; | |
7178 | insn_t insn; | |
7179 | ||
7180 | for (insn = current_sched_info->head; insn != current_sched_info->next_tail; | |
7181 | insn = NEXT_INSN (insn)) | |
7182 | { | |
7183 | int cost, clock; | |
7184 | ||
7185 | if (!INSN_P (insn)) | |
7186 | continue; | |
7187 | ||
7188 | clock = INSN_SCHED_CYCLE (insn); | |
7189 | cost = (last_clock == -1) ? 1 : clock - last_clock; | |
7190 | ||
7191 | gcc_assert (cost >= 0); | |
7192 | ||
7193 | if (issue_rate > 1 | |
7194 | && GET_CODE (PATTERN (insn)) != USE | |
7195 | && GET_CODE (PATTERN (insn)) != CLOBBER) | |
7196 | { | |
7197 | if (reload_completed && cost > 0) | |
7198 | PUT_MODE (insn, TImode); | |
7199 | ||
7200 | last_clock = clock; | |
7201 | } | |
7202 | ||
7203 | if (sched_verbose >= 2) | |
7204 | sel_print ("Cost for insn %d is %d\n", INSN_UID (insn), cost); | |
7205 | } | |
7206 | } | |
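
/* Illustration of the above with hypothetical cycle values: assuming
   reload_completed and issue_rate == 2, real insns whose INSN_SCHED_CYCLE
   values are 1, 1, 2, 3 get TImode on the first, third and fourth insns
   (each has cost > 0 and thus opens a new issue group), while the second
   insn shares cycle 1 with the first and is left unmarked.  */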
7207 | ||
48e1416a | 7208 | /* Perform MD_FINISH on EBBs comprising the current region. When |
e1ab7874 | 7209 | RESET_SCHED_CYCLES_P is true, run a pass emulating the scheduler |
7210 | to produce correct sched cycles on insns. */ | |
7211 | static void | |
7212 | sel_region_target_finish (bool reset_sched_cycles_p) | |
7213 | { | |
7214 | int i; | |
7215 | bitmap scheduled_blocks = BITMAP_ALLOC (NULL); | |
7216 | ||
7217 | for (i = 0; i < current_nr_blocks; i++) | |
7218 | { | |
7219 | if (bitmap_bit_p (scheduled_blocks, i)) | |
7220 | continue; | |
7221 | ||
7222 | /* While pipelining outer loops, skip bundling for loop | |
7223 | preheaders. Those will be rescheduled in the outer loop. */ | |
7224 | if (sel_is_loop_preheader_p (EBB_FIRST_BB (i))) | |
7225 | continue; | |
7226 | ||
7227 | find_ebb_boundaries (EBB_FIRST_BB (i), scheduled_blocks); | |
7228 | ||
7229 | if (no_real_insns_p (current_sched_info->head, current_sched_info->tail)) | |
7230 | continue; | |
7231 | ||
7232 | if (reset_sched_cycles_p) | |
7233 | reset_sched_cycles_in_current_ebb (); | |
7234 | ||
202d6e5f | 7235 | if (targetm.sched.init) |
7236 | targetm.sched.init (sched_dump, sched_verbose, -1); | |
e1ab7874 | 7237 | |
7238 | put_TImodes (); | |
7239 | ||
202d6e5f | 7240 | if (targetm.sched.finish) |
e1ab7874 | 7241 | { |
202d6e5f | 7242 | targetm.sched.finish (sched_dump, sched_verbose); |
e1ab7874 | 7243 | |
7244 | /* Extend luids so that insns generated by the target will | |
7245 | get zero luid. */ | |
52d7e28c | 7246 | sched_extend_luids (); |
e1ab7874 | 7247 | } |
7248 | } | |
7249 | ||
7250 | BITMAP_FREE (scheduled_blocks); | |
7251 | } | |
7252 | ||
7253 | /* Free the scheduling data for the current region. When RESET_SCHED_CYCLES_P | |
48e1416a | 7254 | is true, make an additional pass emulating the scheduler to get correct insn |
e1ab7874 | 7255 | cycles for md_finish calls. */ |
7256 | static void | |
7257 | sel_region_finish (bool reset_sched_cycles_p) | |
7258 | { | |
7259 | simplify_changed_insns (); | |
7260 | sched_finish_ready_list (); | |
7261 | free_nop_pool (); | |
7262 | ||
7263 | /* Free the vectors. */ | |
f1f41a6c | 7264 | vec_av_set.release (); |
e1ab7874 | 7265 | BITMAP_FREE (current_copies); |
7266 | BITMAP_FREE (current_originators); | |
7267 | BITMAP_FREE (code_motion_visited_blocks); | |
f1f41a6c | 7268 | vinsn_vec_free (vec_bookkeeping_blocked_vinsns); |
7269 | vinsn_vec_free (vec_target_unavailable_vinsns); | |
e1ab7874 | 7270 | |
7271 | /* If LV_SET of the region head should be updated, do it now because | |
7272 | there will be no other chance. */ | |
7273 | { | |
7274 | succ_iterator si; | |
7275 | insn_t insn; | |
7276 | ||
7277 | FOR_EACH_SUCC_1 (insn, si, bb_note (EBB_FIRST_BB (0)), | |
7278 | SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS) | |
7279 | { | |
7280 | basic_block bb = BLOCK_FOR_INSN (insn); | |
7281 | ||
7282 | if (!BB_LV_SET_VALID_P (bb)) | |
7283 | compute_live (insn); | |
7284 | } | |
7285 | } | |
7286 | ||
7287 | /* Emulate the Haifa scheduler for bundling. */ | |
7288 | if (reload_completed) | |
7289 | sel_region_target_finish (reset_sched_cycles_p); | |
7290 | ||
7291 | sel_finish_global_and_expr (); | |
7292 | ||
7293 | bitmap_clear (forced_ebb_heads); | |
7294 | ||
7295 | free_nop_vinsn (); | |
7296 | ||
7297 | finish_deps_global (); | |
7298 | sched_finish_luids (); | |
f1f41a6c | 7299 | h_d_i_d.release (); |
e1ab7874 | 7300 | |
7301 | sel_finish_bbs (); | |
7302 | BITMAP_FREE (blocks_to_reschedule); | |
7303 | ||
7304 | sel_unregister_cfg_hooks (); | |
7305 | ||
7306 | max_issue_size = 0; | |
7307 | } | |
7308 | \f | |
7309 | ||
7310 | /* Functions that implement the scheduler driver. */ | |
7311 | ||
7312 | /* Schedule a parallel instruction group on each of FENCES. MAX_SEQNO | |
7313 | is the current maximum seqno. SCHEDULED_INSNS_TAILPP is the list | |
7314 | of insns scheduled -- these would be postprocessed later. */ | |
7315 | static void | |
7316 | schedule_on_fences (flist_t fences, int max_seqno, | |
7317 | ilist_t **scheduled_insns_tailpp) | |
7318 | { | |
7319 | flist_t old_fences = fences; | |
7320 | ||
7321 | if (sched_verbose >= 1) | |
7322 | { | |
7323 | sel_print ("\nScheduling on fences: "); | |
7324 | dump_flist (fences); | |
7325 | sel_print ("\n"); | |
7326 | } | |
7327 | ||
7328 | scheduled_something_on_previous_fence = false; | |
7329 | for (; fences; fences = FLIST_NEXT (fences)) | |
7330 | { | |
7331 | fence_t fence = NULL; | |
7332 | int seqno = 0; | |
7333 | flist_t fences2; | |
7334 | bool first_p = true; | |
48e1416a | 7335 | |
e1ab7874 | 7336 | /* Choose the next fence group to schedule. |
7337 | The fact that an insn can be scheduled only once |
7338 | on a cycle is guaranteed by two properties: |
7339 | 1. Seqnos of parallel groups decrease with each iteration. |
7340 | 2. If is_ineligible_successor () sees a larger seqno, it |
7341 | checks whether the candidate insn is_in_current_fence_p (). */ |
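      /* For example (hypothetical seqnos): if the unprocessed fences carry
	 seqnos 5, 9 and 7, the loop below picks 9 first, then 7, then 5,
	 so the parallel groups are filled in decreasing seqno order.  */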
7342 | for (fences2 = old_fences; fences2; fences2 = FLIST_NEXT (fences2)) | |
7343 | { | |
7344 | fence_t f = FLIST_FENCE (fences2); | |
7345 | ||
7346 | if (!FENCE_PROCESSED_P (f)) | |
7347 | { | |
7348 | int i = INSN_SEQNO (FENCE_INSN (f)); | |
7349 | ||
7350 | if (first_p || i > seqno) | |
7351 | { | |
7352 | seqno = i; | |
7353 | fence = f; | |
7354 | first_p = false; | |
7355 | } | |
7356 | else | |
7357 | /* ??? Seqnos of different groups should be different. */ | |
7358 | gcc_assert (1 || i != seqno); | |
7359 | } | |
7360 | } | |
7361 | ||
7362 | gcc_assert (fence); | |
7363 | ||
7364 | /* As FENCE is nonnull, SEQNO is initialized. */ | |
7365 | seqno -= max_seqno + 1; | |
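      /* For instance (hypothetical values): with MAX_SEQNO == 9 and this
	 fence's seqno 7, the group is filled using temporary seqno -3;
	 update_seqnos_and_stage () later shifts it back into positive
	 range.  */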
7366 | fill_insns (fence, seqno, scheduled_insns_tailpp); | |
7367 | FENCE_PROCESSED_P (fence) = true; | |
7368 | } | |
7369 | ||
7370 | /* All av_sets are invalidated by GLOBAL_LEVEL increase, thus we | |
48e1416a | 7371 | don't need to keep bookkeeping-invalidated and target-unavailable |
e1ab7874 | 7372 | vinsns any more. */ |
7373 | vinsn_vec_clear (&vec_bookkeeping_blocked_vinsns); | |
7374 | vinsn_vec_clear (&vec_target_unavailable_vinsns); | |
7375 | } | |
7376 | ||
7377 | /* Calculate MIN_SEQNO and MAX_SEQNO. */ | |
7378 | static void | |
7379 | find_min_max_seqno (flist_t fences, int *min_seqno, int *max_seqno) | |
7380 | { | |
7381 | *min_seqno = *max_seqno = INSN_SEQNO (FENCE_INSN (FLIST_FENCE (fences))); | |
7382 | ||
7383 | /* The first element is already processed. */ | |
7384 | while ((fences = FLIST_NEXT (fences))) | |
7385 | { | |
7386 | int seqno = INSN_SEQNO (FENCE_INSN (FLIST_FENCE (fences))); | |
48e1416a | 7387 | |
e1ab7874 | 7388 | if (*min_seqno > seqno) |
7389 | *min_seqno = seqno; | |
7390 | else if (*max_seqno < seqno) | |
7391 | *max_seqno = seqno; | |
7392 | } | |
7393 | } | |
7394 | ||
dce9387e | 7395 | /* Calculate new fences from FENCES. Write the current time to PTIME. */ |
48e1416a | 7396 | static flist_t |
dce9387e | 7397 | calculate_new_fences (flist_t fences, int orig_max_seqno, int *ptime) |
e1ab7874 | 7398 | { |
7399 | flist_t old_fences = fences; | |
7400 | struct flist_tail_def _new_fences, *new_fences = &_new_fences; | |
dce9387e | 7401 | int max_time = 0; |
e1ab7874 | 7402 | |
7403 | flist_tail_init (new_fences); | |
7404 | for (; fences; fences = FLIST_NEXT (fences)) | |
7405 | { | |
7406 | fence_t fence = FLIST_FENCE (fences); | |
7407 | insn_t insn; | |
48e1416a | 7408 | |
e1ab7874 | 7409 | if (!FENCE_BNDS (fence)) |
7410 | { | |
7411 | /* This fence doesn't have any successors. */ | |
7412 | if (!FENCE_SCHEDULED_P (fence)) | |
7413 | { | |
7414 | /* Nothing was scheduled on this fence. */ | |
7415 | int seqno; | |
7416 | ||
7417 | insn = FENCE_INSN (fence); | |
7418 | seqno = INSN_SEQNO (insn); | |
7419 | gcc_assert (seqno > 0 && seqno <= orig_max_seqno); | |
7420 | ||
7421 | if (sched_verbose >= 1) | |
48e1416a | 7422 | sel_print ("Fence %d[%d] has not changed\n", |
e1ab7874 | 7423 | INSN_UID (insn), |
7424 | BLOCK_NUM (insn)); | |
7425 | move_fence_to_fences (fences, new_fences); | |
7426 | } | |
7427 | } | |
7428 | else | |
7429 | extract_new_fences_from (fences, new_fences, orig_max_seqno); | |
dce9387e | 7430 | max_time = MAX (max_time, FENCE_CYCLE (fence)); |
e1ab7874 | 7431 | } |
7432 | ||
7433 | flist_clear (&old_fences); | |
dce9387e | 7434 | *ptime = max_time; |
e1ab7874 | 7435 | return FLIST_TAIL_HEAD (new_fences); |
7436 | } | |
7437 | ||
7438 | /* Update seqnos of insns given by PSCHEDULED_INSNS. MIN_SEQNO and MAX_SEQNO | |
7439 | are the minimum and maximum seqnos of the group, HIGHEST_SEQNO_IN_USE is |
7440 | the highest seqno used in a region. Return the updated highest seqno. */ | |
7441 | static int | |
48e1416a | 7442 | update_seqnos_and_stage (int min_seqno, int max_seqno, |
7443 | int highest_seqno_in_use, | |
e1ab7874 | 7444 | ilist_t *pscheduled_insns) |
7445 | { | |
7446 | int new_hs; | |
7447 | ilist_iterator ii; | |
7448 | insn_t insn; | |
48e1416a | 7449 | |
e1ab7874 | 7450 | /* Actually, new_hs is the seqno of the instruction that was |
7451 | scheduled first (i.e. it is the first one in SCHEDULED_INSNS). */ | |
7452 | if (*pscheduled_insns) | |
7453 | { | |
7454 | new_hs = (INSN_SEQNO (ILIST_INSN (*pscheduled_insns)) | |
7455 | + highest_seqno_in_use + max_seqno - min_seqno + 2); | |
7456 | gcc_assert (new_hs > highest_seqno_in_use); | |
7457 | } | |
7458 | else | |
7459 | new_hs = highest_seqno_in_use; | |
7460 | ||
7461 | FOR_EACH_INSN (insn, ii, *pscheduled_insns) | |
7462 | { | |
7463 | gcc_assert (INSN_SEQNO (insn) < 0); | |
7464 | INSN_SEQNO (insn) += highest_seqno_in_use + max_seqno - min_seqno + 2; | |
7465 | gcc_assert (INSN_SEQNO (insn) <= new_hs); | |
d9ab2038 | 7466 | |
7467 | /* When not pipelining, purge unneeded insn info on the scheduled insns. | |
7468 | For example, having reg_last array of INSN_DEPS_CONTEXT in memory may | |
7469 | require > 1GB of memory e.g. on limit-fnargs.c. */ | |
7470 | if (! pipelining_p) | |
7471 | free_data_for_scheduled_insn (insn); | |
e1ab7874 | 7472 | } |
7473 | ||
7474 | ilist_clear (pscheduled_insns); | |
7475 | global_level++; | |
7476 | ||
7477 | return new_hs; | |
7478 | } | |
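
/* Worked example for the offset used above (hypothetical numbers): with
   HIGHEST_SEQNO_IN_USE == 10, MIN_SEQNO == 3 and MAX_SEQNO == 7, every
   scheduled insn's temporary seqno is shifted by 10 + 7 - 3 + 2 == 16,
   so -3 becomes 13 and -1 becomes 15.  NEW_HS is the shifted seqno of the
   head of *PSCHEDULED_INSNS, which the assert checks is greater than the
   old HIGHEST_SEQNO_IN_USE.  */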
7479 | ||
48e1416a | 7480 | /* The main driver for scheduling a region. This function is responsible |
7481 | for correct propagation of fences (i.e. scheduling points) and creating | |
7482 | a group of parallel insns at each of them. It also supports | |
e1ab7874 | 7483 | pipelining. ORIG_MAX_SEQNO is the maximal seqno before this |
7484 | scheduling pass. */ |
7485 | static void | |
7486 | sel_sched_region_2 (int orig_max_seqno) | |
7487 | { | |
7488 | int highest_seqno_in_use = orig_max_seqno; | |
dce9387e | 7489 | int max_time = 0; |
e1ab7874 | 7490 | |
7491 | stat_bookkeeping_copies = 0; | |
7492 | stat_insns_needed_bookkeeping = 0; | |
7493 | stat_renamed_scheduled = 0; | |
7494 | stat_substitutions_total = 0; | |
7495 | num_insns_scheduled = 0; | |
7496 | ||
7497 | while (fences) | |
7498 | { | |
7499 | int min_seqno, max_seqno; | |
7500 | ilist_t scheduled_insns = NULL; | |
7501 | ilist_t *scheduled_insns_tailp = &scheduled_insns; | |
7502 | ||
7503 | find_min_max_seqno (fences, &min_seqno, &max_seqno); | |
7504 | schedule_on_fences (fences, max_seqno, &scheduled_insns_tailp); | |
dce9387e | 7505 | fences = calculate_new_fences (fences, orig_max_seqno, &max_time); |
e1ab7874 | 7506 | highest_seqno_in_use = update_seqnos_and_stage (min_seqno, max_seqno, |
7507 | highest_seqno_in_use, | |
7508 | &scheduled_insns); | |
7509 | } | |
7510 | ||
7511 | if (sched_verbose >= 1) | |
dce9387e | 7512 | { |
7513 | sel_print ("Total scheduling time: %d cycles\n", max_time); | |
7514 | sel_print ("Scheduled %d bookkeeping copies, %d insns needed " | |
7515 | "bookkeeping, %d insns renamed, %d insns substituted\n", | |
7516 | stat_bookkeeping_copies, | |
7517 | stat_insns_needed_bookkeeping, | |
7518 | stat_renamed_scheduled, | |
7519 | stat_substitutions_total); | |
7520 | } | |
e1ab7874 | 7521 | } |
7522 | ||
48e1416a | 7523 | /* Schedule a region. When pipelining, search for possibly never scheduled |
7524 | bookkeeping code and schedule it. Afterwards, reschedule pipelined |
e1ab7874 | 7525 | code without pipelining. */ |
7526 | static void | |
7527 | sel_sched_region_1 (void) | |
7528 | { | |
e1ab7874 | 7529 | int orig_max_seqno; |
7530 | ||
def66588 | 7531 | /* Remove empty blocks that might have been in the region from the beginning. */ |
e1ab7874 | 7532 | purge_empty_blocks (); |
7533 | ||
def66588 | 7534 | orig_max_seqno = init_seqno (NULL, NULL); |
e1ab7874 | 7535 | gcc_assert (orig_max_seqno >= 1); |
7536 | ||
7537 | /* When pipelining outer loops, create fences on the loop header, | |
7538 | not preheader. */ | |
7539 | fences = NULL; | |
7540 | if (current_loop_nest) | |
7541 | init_fences (BB_END (EBB_FIRST_BB (0))); | |
7542 | else | |
7543 | init_fences (bb_note (EBB_FIRST_BB (0))); | |
7544 | global_level = 1; | |
7545 | ||
7546 | sel_sched_region_2 (orig_max_seqno); | |
7547 | ||
7548 | gcc_assert (fences == NULL); | |
7549 | ||
7550 | if (pipelining_p) | |
7551 | { | |
7552 | int i; | |
7553 | basic_block bb; | |
7554 | struct flist_tail_def _new_fences; | |
7555 | flist_tail_t new_fences = &_new_fences; | |
7556 | bool do_p = true; | |
7557 | ||
7558 | pipelining_p = false; | |
7559 | max_ws = MIN (max_ws, issue_rate * 3 / 2); | |
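      /* E.g. with a hypothetical issue_rate of 4, this caps max_ws at 6
	 for the following non-pipelining rescheduling.  */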
7560 | bookkeeping_p = false; | |
7561 | enable_schedule_as_rhs_p = false; | |
7562 | ||
7563 | /* Schedule newly created code, that has not been scheduled yet. */ | |
7564 | do_p = true; | |
7565 | ||
7566 | while (do_p) | |
7567 | { | |
7568 | do_p = false; | |
7569 | ||
7570 | for (i = 0; i < current_nr_blocks; i++) | |
7571 | { | |
7572 | basic_block bb = EBB_FIRST_BB (i); | |
7573 | ||
e1ab7874 | 7574 | if (bitmap_bit_p (blocks_to_reschedule, bb->index)) |
7575 | { | |
e7ea26b5 | 7576 | if (! bb_ends_ebb_p (bb)) |
7577 | bitmap_set_bit (blocks_to_reschedule, bb_next_bb (bb)->index); | |
7578 | if (sel_bb_empty_p (bb)) | |
7579 | { | |
7580 | bitmap_clear_bit (blocks_to_reschedule, bb->index); | |
7581 | continue; | |
7582 | } | |
e1ab7874 | 7583 | clear_outdated_rtx_info (bb); |
7584 | if (sel_insn_is_speculation_check (BB_END (bb)) | |
7585 | && JUMP_P (BB_END (bb))) | |
7586 | bitmap_set_bit (blocks_to_reschedule, | |
7587 | BRANCH_EDGE (bb)->dest->index); | |
7588 | } | |
e7ea26b5 | 7589 | else if (! sel_bb_empty_p (bb) |
7590 | && INSN_SCHED_TIMES (sel_bb_head (bb)) <= 0) | |
e1ab7874 | 7591 | bitmap_set_bit (blocks_to_reschedule, bb->index); |
7592 | } | |
7593 | ||
7594 | for (i = 0; i < current_nr_blocks; i++) | |
7595 | { | |
7596 | bb = EBB_FIRST_BB (i); | |
7597 | ||
48e1416a | 7598 | /* While pipelining outer loops, skip bundling for loop |
e1ab7874 | 7599 | preheaders. Those will be rescheduled in the outer |
7600 | loop. */ | |
7601 | if (sel_is_loop_preheader_p (bb)) | |
7602 | { | |
7603 | clear_outdated_rtx_info (bb); | |
7604 | continue; | |
7605 | } | |
48e1416a | 7606 | |
08b41748 | 7607 | if (bitmap_bit_p (blocks_to_reschedule, bb->index)) |
e1ab7874 | 7608 | { |
7609 | flist_tail_init (new_fences); | |
7610 | ||
def66588 | 7611 | orig_max_seqno = init_seqno (blocks_to_reschedule, bb); |
e1ab7874 | 7612 | |
7613 | /* Mark BB as head of the new ebb. */ | |
7614 | bitmap_set_bit (forced_ebb_heads, bb->index); | |
7615 | ||
e1ab7874 | 7616 | gcc_assert (fences == NULL); |
7617 | ||
7618 | init_fences (bb_note (bb)); | |
48e1416a | 7619 | |
e1ab7874 | 7620 | sel_sched_region_2 (orig_max_seqno); |
48e1416a | 7621 | |
e1ab7874 | 7622 | do_p = true; |
7623 | break; | |
7624 | } | |
7625 | } | |
7626 | } | |
7627 | } | |
7628 | } | |
7629 | ||
7630 | /* Schedule the RGN region. */ | |
7631 | void | |
7632 | sel_sched_region (int rgn) | |
7633 | { | |
7634 | bool schedule_p; | |
7635 | bool reset_sched_cycles_p; | |
7636 | ||
7637 | if (sel_region_init (rgn)) | |
7638 | return; | |
7639 | ||
7640 | if (sched_verbose >= 1) | |
7641 | sel_print ("Scheduling region %d\n", rgn); | |
7642 | ||
7643 | schedule_p = (!sched_is_disabled_for_current_region_p () | |
7644 | && dbg_cnt (sel_sched_region_cnt)); | |
7645 | reset_sched_cycles_p = pipelining_p; | |
7646 | if (schedule_p) | |
7647 | sel_sched_region_1 (); | |
7648 | else | |
7649 | /* Force initialization of INSN_SCHED_CYCLEs for correct bundling. */ | |
7650 | reset_sched_cycles_p = true; | |
48e1416a | 7651 | |
e1ab7874 | 7652 | sel_region_finish (reset_sched_cycles_p); |
7653 | } | |
7654 | ||
7655 | /* Perform global init for the scheduler. */ | |
7656 | static void | |
7657 | sel_global_init (void) | |
7658 | { | |
7659 | calculate_dominance_info (CDI_DOMINATORS); | |
7660 | alloc_sched_pools (); | |
7661 | ||
7662 | /* Setup the infos for sched_init. */ | |
7663 | sel_setup_sched_infos (); | |
7664 | setup_sched_dump (); | |
7665 | ||
c486a06e | 7666 | sched_rgn_init (false); |
2bc1ac5a | 7667 | sched_init (); |
e1ab7874 | 7668 | |
7669 | sched_init_bbs (); | |
7670 | /* Reset AFTER_RECOVERY if it has been set by the 1st scheduler pass. */ | |
7671 | after_recovery = 0; | |
48e1416a | 7672 | can_issue_more = issue_rate; |
e1ab7874 | 7673 | |
7674 | sched_extend_target (); | |
7675 | sched_deps_init (true); | |
7676 | setup_nop_and_exit_insns (); | |
7677 | sel_extend_global_bb_info (); | |
7678 | init_lv_sets (); | |
7679 | init_hard_regs_data (); | |
7680 | } | |
7681 | ||
7682 | /* Free the global data of the scheduler. */ | |
7683 | static void | |
7684 | sel_global_finish (void) | |
7685 | { | |
7686 | free_bb_note_pool (); | |
7687 | free_lv_sets (); | |
7688 | sel_finish_global_bb_info (); | |
7689 | ||
7690 | free_regset_pool (); | |
7691 | free_nop_and_exit_insns (); | |
7692 | ||
7693 | sched_rgn_finish (); | |
7694 | sched_deps_finish (); | |
7695 | sched_finish (); | |
7696 | ||
7697 | if (current_loops) | |
7698 | sel_finish_pipelining (); | |
7699 | ||
7700 | free_sched_pools (); | |
7701 | free_dominance_info (CDI_DOMINATORS); | |
7702 | } | |
7703 | ||
7704 | /* Return true when we need to skip selective scheduling. Used for debugging. */ | |
7705 | bool | |
7706 | maybe_skip_selective_scheduling (void) | |
7707 | { | |
7708 | return ! dbg_cnt (sel_sched_cnt); | |
7709 | } | |
7710 | ||
7711 | /* The entry point. */ | |
7712 | void | |
7713 | run_selective_scheduling (void) | |
7714 | { | |
7715 | int rgn; | |
7716 | ||
a28770e1 | 7717 | if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS) |
e1ab7874 | 7718 | return; |
7719 | ||
7720 | sel_global_init (); | |
7721 | ||
7722 | for (rgn = 0; rgn < nr_regions; rgn++) | |
7723 | sel_sched_region (rgn); | |
7724 | ||
7725 | sel_global_finish (); | |
7726 | } | |
7727 | ||
7728 | #endif |