1 /* Lower GIMPLE_SWITCH expressions to something more efficient than
2 a jump table.
3 Copyright (C) 2006-2023 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by the
9 Free Software Foundation; either version 3, or (at your option) any
10 later version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not, write to the Free
19 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
20 02110-1301, USA. */
21
22 /* This file handles the lowering of GIMPLE_SWITCH to an indexed
23 load, or a series of bit-test-and-branch expressions. */
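/* As a rough illustration (the variable and array names below are only
   for the sake of the example), a switch like

       switch (argc)
         {
         case 1:  a = 10;  b = 3;  break;
         case 2:  a = 20;  b = 3;  break;
         case 3:  a = 30;  b = 4;  break;
         default: a = 0;   b = 0;  break;
         }

   in which every arm only assigns constants and all arms meet in a
   single join block may be converted into something like

       if ((unsigned) (argc - 1) <= 2)
         {
           a = CSWTCH01[argc - 1];
           b = CSWTCH02[argc - 1];
         }
       else
         {
           a = 0;
           b = 0;
         }

   where CSWTCH01 = {10, 20, 30} and CSWTCH02 = {3, 3, 4} are static
   read-only arrays built from the per-case values of the PHI nodes in
   the join block.  */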
24
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "backend.h"
29 #include "insn-codes.h"
30 #include "rtl.h"
31 #include "tree.h"
32 #include "gimple.h"
33 #include "cfghooks.h"
34 #include "tree-pass.h"
35 #include "ssa.h"
36 #include "optabs-tree.h"
37 #include "cgraph.h"
38 #include "gimple-pretty-print.h"
39 #include "fold-const.h"
40 #include "varasm.h"
41 #include "stor-layout.h"
42 #include "cfganal.h"
43 #include "gimplify.h"
44 #include "gimple-iterator.h"
45 #include "gimplify-me.h"
46 #include "gimple-fold.h"
47 #include "tree-cfg.h"
48 #include "cfgloop.h"
49 #include "alloc-pool.h"
50 #include "target.h"
51 #include "tree-into-ssa.h"
52 #include "omp-general.h"
53 #include "gimple-range.h"
54 #include "tree-cfgcleanup.h"
55
56 /* ??? For lang_hooks.types.type_for_mode, but is there a word_mode
57 type in the GIMPLE type system that is language-independent? */
58 #include "langhooks.h"
59
60 #include "tree-switch-conversion.h"
61 \f
62 using namespace tree_switch_conversion;
63
64 /* Constructor. */
65
66 switch_conversion::switch_conversion (): m_final_bb (NULL),
67 m_constructors (NULL), m_default_values (NULL),
68 m_arr_ref_first (NULL), m_arr_ref_last (NULL),
69 m_reason (NULL), m_default_case_nonstandard (false), m_cfg_altered (false)
70 {
71 }
72
73 /* Collect information about the SWTCH statement. */
74
75 void
76 switch_conversion::collect (gswitch *swtch)
77 {
78 unsigned int branch_num = gimple_switch_num_labels (swtch);
79 tree min_case, max_case;
80 unsigned int i;
81 edge e, e_default, e_first;
82 edge_iterator ei;
83
84 m_switch = swtch;
85
86 /* The gimplifier has already sorted the cases by CASE_LOW and ensured there
87 is a default label which is the first in the vector.
88 Collect the bits we can deduce from the CFG. */
89 m_index_expr = gimple_switch_index (swtch);
90 m_switch_bb = gimple_bb (swtch);
91 e_default = gimple_switch_default_edge (cfun, swtch);
92 m_default_bb = e_default->dest;
93 m_default_prob = e_default->probability;
94
95 /* Get upper and lower bounds of case values, and the covered range. */
96 min_case = gimple_switch_label (swtch, 1);
97 max_case = gimple_switch_label (swtch, branch_num - 1);
98
99 m_range_min = CASE_LOW (min_case);
100 if (CASE_HIGH (max_case) != NULL_TREE)
101 m_range_max = CASE_HIGH (max_case);
102 else
103 m_range_max = CASE_LOW (max_case);
104
105 m_contiguous_range = true;
106 tree last = CASE_HIGH (min_case) ? CASE_HIGH (min_case) : m_range_min;
107 for (i = 2; i < branch_num; i++)
108 {
109 tree elt = gimple_switch_label (swtch, i);
110 if (wi::to_wide (last) + 1 != wi::to_wide (CASE_LOW (elt)))
111 {
112 m_contiguous_range = false;
113 break;
114 }
115 last = CASE_HIGH (elt) ? CASE_HIGH (elt) : CASE_LOW (elt);
116 }
117
118 if (m_contiguous_range)
119 e_first = gimple_switch_edge (cfun, swtch, 1);
120 else
121 e_first = e_default;
122
123 /* See if there is one common successor block for all branch
124 targets. If it exists, record it in FINAL_BB.
125 As a guess, start with the destination of the first non-default
126 case if the range is contiguous, or of the default case otherwise;
127 if that block is a forwarder, use its destination instead. */
128 if (! single_pred_p (e_first->dest))
129 m_final_bb = e_first->dest;
130 else if (single_succ_p (e_first->dest)
131 && ! single_pred_p (single_succ (e_first->dest)))
132 m_final_bb = single_succ (e_first->dest);
133 /* Require that all switch destinations are either that common
134 FINAL_BB or a forwarder to it, except for the default
135 case if contiguous range. */
136 auto_vec<edge, 10> fw_edges;
137 m_uniq = 0;
138 if (m_final_bb)
139 FOR_EACH_EDGE (e, ei, m_switch_bb->succs)
140 {
141 edge phi_e = nullptr;
142 if (e->dest == m_final_bb)
143 phi_e = e;
144 else if (single_pred_p (e->dest)
145 && single_succ_p (e->dest)
146 && single_succ (e->dest) == m_final_bb)
147 phi_e = single_succ_edge (e->dest);
148 if (phi_e)
149 {
150 if (e == e_default)
151 ;
152 else if (phi_e == e || empty_block_p (e->dest))
153 {
154 /* For empty blocks consider forwarders with equal
155 PHI arguments in m_final_bb as unique. */
156 unsigned i;
157 for (i = 0; i < fw_edges.length (); ++i)
158 if (phi_alternatives_equal (m_final_bb, fw_edges[i], phi_e))
159 break;
160 if (i == fw_edges.length ())
161 {
162 /* But limit the above possibly quadratic search. */
163 if (fw_edges.length () < 10)
164 fw_edges.quick_push (phi_e);
165 m_uniq++;
166 }
167 }
168 else
169 m_uniq++;
170 continue;
171 }
172
173 if (e == e_default && m_contiguous_range)
174 {
175 m_default_case_nonstandard = true;
176 continue;
177 }
178
179 m_final_bb = NULL;
180 break;
181 }
182
183 /* When there's not a single common successor block conservatively
184 approximate the number of unique non-default targets. */
185 if (!m_final_bb)
186 m_uniq = EDGE_COUNT (gimple_bb (swtch)->succs) - 1;
187
188 m_range_size
189 = int_const_binop (MINUS_EXPR, m_range_max, m_range_min);
190
191 /* Get a count of the number of case labels. Single-valued case labels
192 simply count as one, but a case range counts double, since it may
193 require two compares if it gets lowered as a branching tree. */
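/* For example, "case 3:" contributes one to this count, while a range
   such as "case 5 ... 9:" contributes two.  */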
194 m_count = 0;
195 for (i = 1; i < branch_num; i++)
196 {
197 tree elt = gimple_switch_label (swtch, i);
198 m_count++;
199 if (CASE_HIGH (elt)
200 && ! tree_int_cst_equal (CASE_LOW (elt), CASE_HIGH (elt)))
201 m_count++;
202 }
203 }
204
205 /* Checks whether the range given by the individual case statements of the
206 switch statement isn't too big and whether the number of branches actually
207 satisfies the size of the new array. */
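/* A worked example, assuming param_switch_conversion_branch_ratio has
   its usual default of 8: a switch with cases 0, 1, 2 and 100 has
   m_count == 4 and a range size of 100, and since 100 > 4 * 8 the
   switch is rejected as too sparse for an array lookup.  */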
208
209 bool
210 switch_conversion::check_range ()
211 {
212 gcc_assert (m_range_size);
213 if (!tree_fits_uhwi_p (m_range_size))
214 {
215 m_reason = "index range way too large or otherwise unusable";
216 return false;
217 }
218
219 if (tree_to_uhwi (m_range_size)
220 > ((unsigned) m_count * param_switch_conversion_branch_ratio))
221 {
222 m_reason = "the maximum range-branch ratio exceeded";
223 return false;
224 }
225
226 return true;
227 }
228
229 /* Checks whether all basic blocks except the final BB are empty. */
230
231 bool
232 switch_conversion::check_all_empty_except_final ()
233 {
234 edge e, e_default = find_edge (m_switch_bb, m_default_bb);
235 edge_iterator ei;
236
237 FOR_EACH_EDGE (e, ei, m_switch_bb->succs)
238 {
239 if (e->dest == m_final_bb)
240 continue;
241
242 if (!empty_block_p (e->dest))
243 {
244 if (m_contiguous_range && e == e_default)
245 {
246 m_default_case_nonstandard = true;
247 continue;
248 }
249
250 m_reason = "bad case - a non-final BB not empty";
251 return false;
252 }
253 }
254
255 return true;
256 }
257
258 /* This function checks whether all required values in phi nodes in final_bb
259 are constants. Required values are those that correspond to a basic block
260 which is a part of the examined switch statement. It returns true if the
261 phi nodes are OK, otherwise false. */
262
263 bool
264 switch_conversion::check_final_bb ()
265 {
266 gphi_iterator gsi;
267
268 m_phi_count = 0;
269 for (gsi = gsi_start_phis (m_final_bb); !gsi_end_p (gsi); gsi_next (&gsi))
270 {
271 gphi *phi = gsi.phi ();
272 unsigned int i;
273
274 if (virtual_operand_p (gimple_phi_result (phi)))
275 continue;
276
277 m_phi_count++;
278
279 for (i = 0; i < gimple_phi_num_args (phi); i++)
280 {
281 basic_block bb = gimple_phi_arg_edge (phi, i)->src;
282
283 if (bb == m_switch_bb
284 || (single_pred_p (bb)
285 && single_pred (bb) == m_switch_bb
286 && (!m_default_case_nonstandard
287 || empty_block_p (bb))))
288 {
289 tree reloc, val;
290 const char *reason = NULL;
291
292 val = gimple_phi_arg_def (phi, i);
293 if (!is_gimple_ip_invariant (val))
294 reason = "non-invariant value from a case";
295 else
296 {
297 reloc = initializer_constant_valid_p (val, TREE_TYPE (val));
298 if ((flag_pic && reloc != null_pointer_node)
299 || (!flag_pic && reloc == NULL_TREE))
300 {
301 if (reloc)
302 reason
303 = "value from a case would need runtime relocations";
304 else
305 reason
306 = "value from a case is not a valid initializer";
307 }
308 }
309 if (reason)
310 {
311 /* For contiguous range, we can allow non-constant
312 or one that needs relocation, as long as it is
313 only reachable from the default case. */
314 if (bb == m_switch_bb)
315 bb = m_final_bb;
316 if (!m_contiguous_range || bb != m_default_bb)
317 {
318 m_reason = reason;
319 return false;
320 }
321
322 unsigned int branch_num = gimple_switch_num_labels (m_switch);
323 for (unsigned int i = 1; i < branch_num; i++)
324 {
325 if (gimple_switch_label_bb (cfun, m_switch, i) == bb)
326 {
327 m_reason = reason;
328 return false;
329 }
330 }
331 m_default_case_nonstandard = true;
332 }
333 }
334 }
335 }
336
337 return true;
338 }
339
340 /* The following function allocates default_values, target_{in,out}_names and
341 constructors arrays. The last one is also populated with pointers to
342 vectors that will become constructors of new arrays. */
343
344 void
345 switch_conversion::create_temp_arrays ()
346 {
347 int i;
348
349 m_default_values = XCNEWVEC (tree, m_phi_count * 3);
350 /* ??? Macros do not support multi argument templates in their
351 argument list. We create a typedef to work around that problem. */
352 typedef vec<constructor_elt, va_gc> *vec_constructor_elt_gc;
353 m_constructors = XCNEWVEC (vec_constructor_elt_gc, m_phi_count);
354 m_target_inbound_names = m_default_values + m_phi_count;
355 m_target_outbound_names = m_target_inbound_names + m_phi_count;
356 for (i = 0; i < m_phi_count; i++)
357 vec_alloc (m_constructors[i], tree_to_uhwi (m_range_size) + 1);
358 }
359
360 /* Populate the array of default values in the order of phi nodes.
361 DEFAULT_CASE is the CASE_LABEL_EXPR for the default switch branch
362 if the range is non-contiguous or the default case has standard
363 structure, otherwise it is the first non-default case instead. */
364
365 void
366 switch_conversion::gather_default_values (tree default_case)
367 {
368 gphi_iterator gsi;
369 basic_block bb = label_to_block (cfun, CASE_LABEL (default_case));
370 edge e;
371 int i = 0;
372
373 gcc_assert (CASE_LOW (default_case) == NULL_TREE
374 || m_default_case_nonstandard);
375
376 if (bb == m_final_bb)
377 e = find_edge (m_switch_bb, bb);
378 else
379 e = single_succ_edge (bb);
380
381 for (gsi = gsi_start_phis (m_final_bb); !gsi_end_p (gsi); gsi_next (&gsi))
382 {
383 gphi *phi = gsi.phi ();
384 if (virtual_operand_p (gimple_phi_result (phi)))
385 continue;
386 tree val = PHI_ARG_DEF_FROM_EDGE (phi, e);
387 gcc_assert (val);
388 m_default_values[i++] = val;
389 }
390 }
391
392 /* The following function populates the vectors in the constructors array with
393 future contents of the static arrays. The vectors are populated in the
394 order of phi nodes. */
395
396 void
397 switch_conversion::build_constructors ()
398 {
399 unsigned i, branch_num = gimple_switch_num_labels (m_switch);
400 tree pos = m_range_min;
401 tree pos_one = build_int_cst (TREE_TYPE (pos), 1);
402
403 for (i = 1; i < branch_num; i++)
404 {
405 tree cs = gimple_switch_label (m_switch, i);
406 basic_block bb = label_to_block (cfun, CASE_LABEL (cs));
407 edge e;
408 tree high;
409 gphi_iterator gsi;
410 int j;
411
412 if (bb == m_final_bb)
413 e = find_edge (m_switch_bb, bb);
414 else
415 e = single_succ_edge (bb);
416 gcc_assert (e);
417
418 while (tree_int_cst_lt (pos, CASE_LOW (cs)))
419 {
420 int k;
421 for (k = 0; k < m_phi_count; k++)
422 {
423 constructor_elt elt;
424
425 elt.index = int_const_binop (MINUS_EXPR, pos, m_range_min);
426 elt.value
427 = unshare_expr_without_location (m_default_values[k]);
428 m_constructors[k]->quick_push (elt);
429 }
430
431 pos = int_const_binop (PLUS_EXPR, pos, pos_one);
432 }
433 gcc_assert (tree_int_cst_equal (pos, CASE_LOW (cs)));
434
435 j = 0;
436 if (CASE_HIGH (cs))
437 high = CASE_HIGH (cs);
438 else
439 high = CASE_LOW (cs);
440 for (gsi = gsi_start_phis (m_final_bb);
441 !gsi_end_p (gsi); gsi_next (&gsi))
442 {
443 gphi *phi = gsi.phi ();
444 if (virtual_operand_p (gimple_phi_result (phi)))
445 continue;
446 tree val = PHI_ARG_DEF_FROM_EDGE (phi, e);
447 tree low = CASE_LOW (cs);
448 pos = CASE_LOW (cs);
449
450 do
451 {
452 constructor_elt elt;
453
454 elt.index = int_const_binop (MINUS_EXPR, pos, m_range_min);
455 elt.value = unshare_expr_without_location (val);
456 m_constructors[j]->quick_push (elt);
457
458 pos = int_const_binop (PLUS_EXPR, pos, pos_one);
459 } while (!tree_int_cst_lt (high, pos)
460 && tree_int_cst_lt (low, pos));
461 j++;
462 }
463 }
464 }
465
466 /* If all values in the constructor vector are values of a linear function
467 a * x + b, then return true. When true, COEFF_A and COEFF_B are the
468 coefficients of the linear function. Note that equal values are a special
469 case of a linear function with a and b equal to zero. */
470
471 bool
472 switch_conversion::contains_linear_function_p (vec<constructor_elt, va_gc> *vec,
473 wide_int *coeff_a,
474 wide_int *coeff_b)
475 {
476 unsigned int i;
477 constructor_elt *elt;
478
479 gcc_assert (vec->length () >= 2);
480
481 /* Let's try to find a linear function a * x + b that fits the
482 given values. 'a' can be calculated as follows:
483
484 a = (y2 - y1) / (x2 - x1) where x2 - x1 = 1 (consecutive case indices)
485 a = y2 - y1
486
487 and
488
489 b = y2 - a * x2
490
491 */
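/* As a small illustration: for constructor values {10, 12, 14, 16}
   with m_range_min == 5 we get a = 12 - 10 = 2 and
   b = 12 - 2 * (5 + 1) = 0, and indeed 2 * x + 0 reproduces every
   value for x = 5, 6, 7 and 8.  */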
492
493 tree elt0 = (*vec)[0].value;
494 tree elt1 = (*vec)[1].value;
495
496 if (TREE_CODE (elt0) != INTEGER_CST || TREE_CODE (elt1) != INTEGER_CST)
497 return false;
498
499 wide_int range_min
500 = wide_int::from (wi::to_wide (m_range_min),
501 TYPE_PRECISION (TREE_TYPE (elt0)),
502 TYPE_SIGN (TREE_TYPE (m_range_min)));
503 wide_int y1 = wi::to_wide (elt0);
504 wide_int y2 = wi::to_wide (elt1);
505 wide_int a = y2 - y1;
506 wide_int b = y2 - a * (range_min + 1);
507
508 /* Verify that all values fulfill the linear function. */
509 FOR_EACH_VEC_SAFE_ELT (vec, i, elt)
510 {
511 if (TREE_CODE (elt->value) != INTEGER_CST)
512 return false;
513
514 wide_int value = wi::to_wide (elt->value);
515 if (a * range_min + b != value)
516 return false;
517
518 ++range_min;
519 }
520
521 *coeff_a = a;
522 *coeff_b = b;
523
524 return true;
525 }
526
527 /* Return type which should be used for array elements, either TYPE's
528 main variant or, for integral types, some smaller integral type
529 that can still hold all the constants. */
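/* For example, if all values stored for one PHI fit in [0, 255], the
   array can be built with an 8-bit element type (provided the target
   has such a mode) even when the PHI result itself is a 32-bit int.  */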
530
531 tree
532 switch_conversion::array_value_type (tree type, int num)
533 {
534 unsigned int i, len = vec_safe_length (m_constructors[num]);
535 constructor_elt *elt;
536 int sign = 0;
537 tree smaller_type;
538
539 /* Types with alignments greater than their size can reach here, e.g. out of
540 SRA. We couldn't use these as an array component type so get back to the
541 main variant first, which, for our purposes, is fine for other types as
542 well. */
543
544 type = TYPE_MAIN_VARIANT (type);
545
546 if (!INTEGRAL_TYPE_P (type))
547 return type;
548
549 scalar_int_mode type_mode = SCALAR_INT_TYPE_MODE (type);
550 scalar_int_mode mode = get_narrowest_mode (type_mode);
551 if (GET_MODE_SIZE (type_mode) <= GET_MODE_SIZE (mode))
552 return type;
553
554 if (len < (optimize_bb_for_size_p (gimple_bb (m_switch)) ? 2 : 32))
555 return type;
556
557 FOR_EACH_VEC_SAFE_ELT (m_constructors[num], i, elt)
558 {
559 wide_int cst;
560
561 if (TREE_CODE (elt->value) != INTEGER_CST)
562 return type;
563
564 cst = wi::to_wide (elt->value);
565 while (1)
566 {
567 unsigned int prec = GET_MODE_BITSIZE (mode);
568 if (prec > HOST_BITS_PER_WIDE_INT)
569 return type;
570
571 if (sign >= 0 && cst == wi::zext (cst, prec))
572 {
573 if (sign == 0 && cst == wi::sext (cst, prec))
574 break;
575 sign = 1;
576 break;
577 }
578 if (sign <= 0 && cst == wi::sext (cst, prec))
579 {
580 sign = -1;
581 break;
582 }
583
584 if (sign == 1)
585 sign = 0;
586
587 if (!GET_MODE_WIDER_MODE (mode).exists (&mode)
588 || GET_MODE_SIZE (mode) >= GET_MODE_SIZE (type_mode))
589 return type;
590 }
591 }
592
593 if (sign == 0)
594 sign = TYPE_UNSIGNED (type) ? 1 : -1;
595 smaller_type = lang_hooks.types.type_for_mode (mode, sign >= 0);
596 if (GET_MODE_SIZE (type_mode)
597 <= GET_MODE_SIZE (SCALAR_INT_TYPE_MODE (smaller_type)))
598 return type;
599
600 return smaller_type;
601 }
602
603 /* Create an appropriate array type and declaration and assemble a static
604 array variable. Also create a load statement that initializes
605 the variable in question with a value from the static array. SWTCH is
606 the switch statement being converted, NUM is the index to
607 arrays of constructors, default values and target SSA names
608 for this particular array. ARR_INDEX_TYPE is the type of the index
609 of the new array, PHI is the phi node of the final BB that corresponds
610 to the value that will be loaded from the created array. TIDX
611 is an ssa name of a temporary variable holding the index for loads from the
612 new array. */
613
614 void
615 switch_conversion::build_one_array (int num, tree arr_index_type,
616 gphi *phi, tree tidx)
617 {
618 tree name;
619 gimple *load;
620 gimple_stmt_iterator gsi = gsi_for_stmt (m_switch);
621 location_t loc = gimple_location (m_switch);
622
623 gcc_assert (m_default_values[num]);
624
625 name = copy_ssa_name (PHI_RESULT (phi));
626 m_target_inbound_names[num] = name;
627
628 vec<constructor_elt, va_gc> *constructor = m_constructors[num];
629 wide_int coeff_a, coeff_b;
630 bool linear_p = contains_linear_function_p (constructor, &coeff_a, &coeff_b);
631 tree type;
632 if (linear_p
633 && (type = range_check_type (TREE_TYPE ((*constructor)[0].value))))
634 {
635 if (dump_file && coeff_a.to_uhwi () > 0)
636 fprintf (dump_file, "Linear transformation with A = %" PRId64
637 " and B = %" PRId64 "\n", coeff_a.to_shwi (),
638 coeff_b.to_shwi ());
639
640 /* We must use type of constructor values. */
641 gimple_seq seq = NULL;
642 tree tmp = gimple_convert (&seq, type, m_index_expr);
643 tree tmp2 = gimple_build (&seq, MULT_EXPR, type,
644 wide_int_to_tree (type, coeff_a), tmp);
645 tree tmp3 = gimple_build (&seq, PLUS_EXPR, type, tmp2,
646 wide_int_to_tree (type, coeff_b));
647 tree tmp4 = gimple_convert (&seq, TREE_TYPE (name), tmp3);
648 gsi_insert_seq_before (&gsi, seq, GSI_SAME_STMT);
649 load = gimple_build_assign (name, tmp4);
650 }
651 else
652 {
653 tree array_type, ctor, decl, value_type, fetch, default_type;
654
655 default_type = TREE_TYPE (m_default_values[num]);
656 value_type = array_value_type (default_type, num);
657 array_type = build_array_type (value_type, arr_index_type);
658 if (default_type != value_type)
659 {
660 unsigned int i;
661 constructor_elt *elt;
662
663 FOR_EACH_VEC_SAFE_ELT (constructor, i, elt)
664 elt->value = fold_convert (value_type, elt->value);
665 }
666 ctor = build_constructor (array_type, constructor);
667 TREE_CONSTANT (ctor) = true;
668 TREE_STATIC (ctor) = true;
669
670 decl = build_decl (loc, VAR_DECL, NULL_TREE, array_type);
671 TREE_STATIC (decl) = 1;
672 DECL_INITIAL (decl) = ctor;
673
674 DECL_NAME (decl) = create_tmp_var_name ("CSWTCH");
675 DECL_ARTIFICIAL (decl) = 1;
676 DECL_IGNORED_P (decl) = 1;
677 TREE_CONSTANT (decl) = 1;
678 TREE_READONLY (decl) = 1;
679 DECL_IGNORED_P (decl) = 1;
680 if (offloading_function_p (cfun->decl))
681 DECL_ATTRIBUTES (decl)
682 = tree_cons (get_identifier ("omp declare target"), NULL_TREE,
683 NULL_TREE);
684 varpool_node::finalize_decl (decl);
685
686 fetch = build4 (ARRAY_REF, value_type, decl, tidx, NULL_TREE,
687 NULL_TREE);
688 if (default_type != value_type)
689 {
690 fetch = fold_convert (default_type, fetch);
691 fetch = force_gimple_operand_gsi (&gsi, fetch, true, NULL_TREE,
692 true, GSI_SAME_STMT);
693 }
694 load = gimple_build_assign (name, fetch);
695 }
696
697 gsi_insert_before (&gsi, load, GSI_SAME_STMT);
698 update_stmt (load);
699 m_arr_ref_last = load;
700 }
701
702 /* Builds and initializes static arrays initialized with values gathered from
703 the switch statement. Also creates statements that load values from
704 them. */
705
706 void
707 switch_conversion::build_arrays ()
708 {
709 tree arr_index_type;
710 tree tidx, sub, utype;
711 gimple *stmt;
712 gimple_stmt_iterator gsi;
713 gphi_iterator gpi;
714 int i;
715 location_t loc = gimple_location (m_switch);
716
717 gsi = gsi_for_stmt (m_switch);
718
719 /* Make sure we do not generate arithmetic in a subrange. */
720 utype = TREE_TYPE (m_index_expr);
721 if (TREE_TYPE (utype))
722 utype = lang_hooks.types.type_for_mode (TYPE_MODE (TREE_TYPE (utype)), 1);
723 else
724 utype = lang_hooks.types.type_for_mode (TYPE_MODE (utype), 1);
725
726 arr_index_type = build_index_type (m_range_size);
727 tidx = make_ssa_name (utype);
728 sub = fold_build2_loc (loc, MINUS_EXPR, utype,
729 fold_convert_loc (loc, utype, m_index_expr),
730 fold_convert_loc (loc, utype, m_range_min));
731 sub = force_gimple_operand_gsi (&gsi, sub,
732 false, NULL, true, GSI_SAME_STMT);
733 stmt = gimple_build_assign (tidx, sub);
734
735 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
736 update_stmt (stmt);
737 m_arr_ref_first = stmt;
738
739 for (gpi = gsi_start_phis (m_final_bb), i = 0;
740 !gsi_end_p (gpi); gsi_next (&gpi))
741 {
742 gphi *phi = gpi.phi ();
743 if (!virtual_operand_p (gimple_phi_result (phi)))
744 build_one_array (i++, arr_index_type, phi, tidx);
745 else
746 {
747 edge e;
748 edge_iterator ei;
749 FOR_EACH_EDGE (e, ei, m_switch_bb->succs)
750 {
751 if (e->dest == m_final_bb)
752 break;
753 if (!m_default_case_nonstandard
754 || e->dest != m_default_bb)
755 {
756 e = single_succ_edge (e->dest);
757 break;
758 }
759 }
760 gcc_assert (e && e->dest == m_final_bb);
761 m_target_vop = PHI_ARG_DEF_FROM_EDGE (phi, e);
762 }
763 }
764 }
765
766 /* Generates and appropriately inserts loads of default values at the position
767 given by GSI. Returns the last inserted statement. */
768
769 gassign *
770 switch_conversion::gen_def_assigns (gimple_stmt_iterator *gsi)
771 {
772 int i;
773 gassign *assign = NULL;
774
775 for (i = 0; i < m_phi_count; i++)
776 {
777 tree name = copy_ssa_name (m_target_inbound_names[i]);
778 m_target_outbound_names[i] = name;
779 assign = gimple_build_assign (name, m_default_values[i]);
780 gsi_insert_before (gsi, assign, GSI_SAME_STMT);
781 update_stmt (assign);
782 }
783 return assign;
784 }
785
786 /* Deletes the unused bbs and edges that now contain the switch statement and
787 its empty branch bbs. BBD is the now dead BB containing
788 the original switch statement, FINAL is the last BB of the converted
789 switch statement (in terms of succession). */
790
791 void
792 switch_conversion::prune_bbs (basic_block bbd, basic_block final,
793 basic_block default_bb)
794 {
795 edge_iterator ei;
796 edge e;
797
798 for (ei = ei_start (bbd->succs); (e = ei_safe_edge (ei)); )
799 {
800 basic_block bb;
801 bb = e->dest;
802 remove_edge (e);
803 if (bb != final && bb != default_bb)
804 delete_basic_block (bb);
805 }
806 delete_basic_block (bbd);
807 }
808
809 /* Add values to phi nodes in final_bb for the two new edges. E1F is the edge
810 from the basic block loading values from an array and E2F from the basic
811 block loading default values. BBF is the last switch basic block (see the
812 bbf description in the comment below). */
813
814 void
815 switch_conversion::fix_phi_nodes (edge e1f, edge e2f, basic_block bbf)
816 {
817 gphi_iterator gsi;
818 int i;
819
820 for (gsi = gsi_start_phis (bbf), i = 0;
821 !gsi_end_p (gsi); gsi_next (&gsi))
822 {
823 gphi *phi = gsi.phi ();
824 tree inbound, outbound;
825 if (virtual_operand_p (gimple_phi_result (phi)))
826 inbound = outbound = m_target_vop;
827 else
828 {
829 inbound = m_target_inbound_names[i];
830 outbound = m_target_outbound_names[i++];
831 }
832 add_phi_arg (phi, inbound, e1f, UNKNOWN_LOCATION);
833 if (!m_default_case_nonstandard)
834 add_phi_arg (phi, outbound, e2f, UNKNOWN_LOCATION);
835 }
836 }
837
838 /* Creates a check whether the switch expression value actually falls into the
839 range given by all the cases. If it does not, the temporaries are loaded
840 with default values instead. */
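/* Roughly, for a standard default case the generated shape is

       bb0:  tidx = index - range_min;
             if (tidx <= range_size) goto bb1; else goto bb2;
       bb1:  loads from the static arrays;       goto bbF;
       bb2:  copies of the default values;       goto bbF;
       bbF:  PHI nodes merging the two sets of values;

   while with a non-standard default case bb2 is the original default
   block and only the in-range path goes through the array loads.  */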
841
842 void
843 switch_conversion::gen_inbound_check ()
844 {
845 tree label_decl1 = create_artificial_label (UNKNOWN_LOCATION);
846 tree label_decl2 = create_artificial_label (UNKNOWN_LOCATION);
847 tree label_decl3 = create_artificial_label (UNKNOWN_LOCATION);
848 glabel *label1, *label2, *label3;
849 tree utype, tidx;
850 tree bound;
851
852 gcond *cond_stmt;
853
854 gassign *last_assign = NULL;
855 gimple_stmt_iterator gsi;
856 basic_block bb0, bb1, bb2, bbf, bbd;
857 edge e01 = NULL, e02, e21, e1d, e1f, e2f;
858 location_t loc = gimple_location (m_switch);
859
860 gcc_assert (m_default_values);
861
862 bb0 = gimple_bb (m_switch);
863
864 tidx = gimple_assign_lhs (m_arr_ref_first);
865 utype = TREE_TYPE (tidx);
866
867 /* (end of) block 0 */
868 gsi = gsi_for_stmt (m_arr_ref_first);
869 gsi_next (&gsi);
870
871 bound = fold_convert_loc (loc, utype, m_range_size);
872 cond_stmt = gimple_build_cond (LE_EXPR, tidx, bound, NULL_TREE, NULL_TREE);
873 gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT);
874 update_stmt (cond_stmt);
875
876 /* block 2 */
877 if (!m_default_case_nonstandard)
878 {
879 label2 = gimple_build_label (label_decl2);
880 gsi_insert_before (&gsi, label2, GSI_SAME_STMT);
881 last_assign = gen_def_assigns (&gsi);
882 }
883
884 /* block 1 */
885 label1 = gimple_build_label (label_decl1);
886 gsi_insert_before (&gsi, label1, GSI_SAME_STMT);
887
888 /* block F */
889 gsi = gsi_start_bb (m_final_bb);
890 label3 = gimple_build_label (label_decl3);
891 gsi_insert_before (&gsi, label3, GSI_SAME_STMT);
892
893 /* cfg fix */
894 e02 = split_block (bb0, cond_stmt);
895 bb2 = e02->dest;
896
897 if (m_default_case_nonstandard)
898 {
899 bb1 = bb2;
900 bb2 = m_default_bb;
901 e01 = e02;
902 e01->flags = EDGE_TRUE_VALUE;
903 e02 = make_edge (bb0, bb2, EDGE_FALSE_VALUE);
904 edge e_default = find_edge (bb1, bb2);
905 for (gphi_iterator gsi = gsi_start_phis (bb2);
906 !gsi_end_p (gsi); gsi_next (&gsi))
907 {
908 gphi *phi = gsi.phi ();
909 tree arg = PHI_ARG_DEF_FROM_EDGE (phi, e_default);
910 add_phi_arg (phi, arg, e02,
911 gimple_phi_arg_location_from_edge (phi, e_default));
912 }
913 /* Partially fix the dominator tree, if it is available. */
914 if (dom_info_available_p (CDI_DOMINATORS))
915 redirect_immediate_dominators (CDI_DOMINATORS, bb1, bb0);
916 }
917 else
918 {
919 e21 = split_block (bb2, last_assign);
920 bb1 = e21->dest;
921 remove_edge (e21);
922 }
923
924 e1d = split_block (bb1, m_arr_ref_last);
925 bbd = e1d->dest;
926 remove_edge (e1d);
927
928 /* Flags and profiles of the edge for in-range values. */
929 if (!m_default_case_nonstandard)
930 e01 = make_edge (bb0, bb1, EDGE_TRUE_VALUE);
931 e01->probability = m_default_prob.invert ();
932
933 /* Flags and profiles of the edge taking care of out-of-range values. */
934 e02->flags &= ~EDGE_FALLTHRU;
935 e02->flags |= EDGE_FALSE_VALUE;
936 e02->probability = m_default_prob;
937
938 bbf = m_final_bb;
939
940 e1f = make_edge (bb1, bbf, EDGE_FALLTHRU);
941 e1f->probability = profile_probability::always ();
942
943 if (m_default_case_nonstandard)
944 e2f = NULL;
945 else
946 {
947 e2f = make_edge (bb2, bbf, EDGE_FALLTHRU);
948 e2f->probability = profile_probability::always ();
949 }
950
951 /* frequencies of the new BBs */
952 bb1->count = e01->count ();
953 bb2->count = e02->count ();
954 if (!m_default_case_nonstandard)
955 bbf->count = e1f->count () + e2f->count ();
956
957 /* Tidy blocks that have become unreachable. */
958 prune_bbs (bbd, m_final_bb,
959 m_default_case_nonstandard ? m_default_bb : NULL);
960
961 /* Fixup the PHI nodes in bbF. */
962 fix_phi_nodes (e1f, e2f, bbf);
963
964 /* Fix the dominator tree, if it is available. */
965 if (dom_info_available_p (CDI_DOMINATORS))
966 {
967 vec<basic_block> bbs_to_fix_dom;
968
969 set_immediate_dominator (CDI_DOMINATORS, bb1, bb0);
970 if (!m_default_case_nonstandard)
971 set_immediate_dominator (CDI_DOMINATORS, bb2, bb0);
972 if (! get_immediate_dominator (CDI_DOMINATORS, bbf))
973 /* If bbD was the immediate dominator ... */
974 set_immediate_dominator (CDI_DOMINATORS, bbf, bb0);
975
976 bbs_to_fix_dom.create (3 + (bb2 != bbf));
977 bbs_to_fix_dom.quick_push (bb0);
978 bbs_to_fix_dom.quick_push (bb1);
979 if (bb2 != bbf)
980 bbs_to_fix_dom.quick_push (bb2);
981 bbs_to_fix_dom.quick_push (bbf);
982
983 iterate_fix_dominators (CDI_DOMINATORS, bbs_to_fix_dom, true);
984 bbs_to_fix_dom.release ();
985 }
986 }
987
988 /* The following function is invoked on every switch statement (the current
989 one is given in SWTCH) and runs the individual phases of switch
990 conversion on it one after another until one fails or the conversion
991 is completed. On success, m_reason is NULL; otherwise it points
992 to a string with the reason why the conversion failed. */
993
994 void
995 switch_conversion::expand (gswitch *swtch)
996 {
997 /* Group case labels so that we get the right results from the heuristics
998 that decide on the code generation approach for this switch. */
999 m_cfg_altered |= group_case_labels_stmt (swtch);
1000
1001 /* If this switch is now a degenerate case with only a default label,
1002 there is nothing left for us to do. */
1003 if (gimple_switch_num_labels (swtch) < 2)
1004 {
1005 m_reason = "switch is a degenerate case";
1006 return;
1007 }
1008
1009 collect (swtch);
1010
1011 /* No error markers should reach here (they should be filtered out
1012 during gimplification). */
1013 gcc_checking_assert (TREE_TYPE (m_index_expr) != error_mark_node);
1014
1015 /* Prefer bit test if possible. */
1016 if (tree_fits_uhwi_p (m_range_size)
1017 && bit_test_cluster::can_be_handled (tree_to_uhwi (m_range_size), m_uniq)
1018 && bit_test_cluster::is_beneficial (m_count, m_uniq))
1019 {
1020 m_reason = "expanding as bit test is preferable";
1021 return;
1022 }
1023
1024 if (m_uniq <= 2)
1025 {
1026 /* This will be expanded as a decision tree. */
1027 m_reason = "expanding as jumps is preferable";
1028 return;
1029 }
1030
1031 /* If there is no common successor, we cannot do the transformation. */
1032 if (!m_final_bb)
1033 {
1034 m_reason = "no common successor to all case label target blocks found";
1035 return;
1036 }
1037
1038 /* Check the case label values are within reasonable range: */
1039 if (!check_range ())
1040 {
1041 gcc_assert (m_reason);
1042 return;
1043 }
1044
1045 /* For all the cases, see whether they are empty, whether the assignments
1046 they represent are constant and so on... */
1047 if (!check_all_empty_except_final ())
1048 {
1049 gcc_assert (m_reason);
1050 return;
1051 }
1052 if (!check_final_bb ())
1053 {
1054 gcc_assert (m_reason);
1055 return;
1056 }
1057
1058 /* At this point all checks have passed and we can proceed with the
1059 transformation. */
1060
1061 create_temp_arrays ();
1062 gather_default_values (m_default_case_nonstandard
1063 ? gimple_switch_label (swtch, 1)
1064 : gimple_switch_default_label (swtch));
1065 build_constructors ();
1066
1067 build_arrays (); /* Build the static arrays and assignments. */
1068 gen_inbound_check (); /* Build the bounds check. */
1069
1070 m_cfg_altered = true;
1071 }
1072
1073 /* Destructor. */
1074
1075 switch_conversion::~switch_conversion ()
1076 {
1077 XDELETEVEC (m_constructors);
1078 XDELETEVEC (m_default_values);
1079 }
1080
1081 /* Constructor. */
1082
1083 group_cluster::group_cluster (vec<cluster *> &clusters,
1084 unsigned start, unsigned end)
1085 {
1086 gcc_checking_assert (end - start + 1 >= 1);
1087 m_prob = profile_probability::never ();
1088 m_cases.create (end - start + 1);
1089 for (unsigned i = start; i <= end; i++)
1090 {
1091 m_cases.quick_push (static_cast<simple_cluster *> (clusters[i]));
1092 m_prob += clusters[i]->m_prob;
1093 }
1094 m_subtree_prob = m_prob;
1095 }
1096
1097 /* Destructor. */
1098
1099 group_cluster::~group_cluster ()
1100 {
1101 for (unsigned i = 0; i < m_cases.length (); i++)
1102 delete m_cases[i];
1103
1104 m_cases.release ();
1105 }
1106
1107 /* Dump content of a cluster. */
1108
1109 void
1110 group_cluster::dump (FILE *f, bool details)
1111 {
1112 unsigned total_values = 0;
1113 for (unsigned i = 0; i < m_cases.length (); i++)
1114 total_values += m_cases[i]->get_range (m_cases[i]->get_low (),
1115 m_cases[i]->get_high ());
1116
1117 unsigned comparison_count = 0;
1118 for (unsigned i = 0; i < m_cases.length (); i++)
1119 {
1120 simple_cluster *sc = static_cast<simple_cluster *> (m_cases[i]);
1121 comparison_count += sc->get_comparison_count ();
1122 }
1123
1124 unsigned HOST_WIDE_INT range = get_range (get_low (), get_high ());
1125 fprintf (f, "%s", get_type () == JUMP_TABLE ? "JT" : "BT");
1126
1127 if (details)
1128 fprintf (f, "(values:%d comparisons:%d range:" HOST_WIDE_INT_PRINT_DEC
1129 " density: %.2f%%)", total_values, comparison_count, range,
1130 100.0f * comparison_count / range);
1131
1132 fprintf (f, ":");
1133 PRINT_CASE (f, get_low ());
1134 fprintf (f, "-");
1135 PRINT_CASE (f, get_high ());
1136 fprintf (f, " ");
1137 }
1138
1139 /* Emit GIMPLE code to handle the cluster. */
1140
1141 void
1142 jump_table_cluster::emit (tree index_expr, tree,
1143 tree default_label_expr, basic_block default_bb,
1144 location_t loc)
1145 {
1146 unsigned HOST_WIDE_INT range = get_range (get_low (), get_high ());
1147 unsigned HOST_WIDE_INT nondefault_range = 0;
1148
1149 /* For a jump table we just emit a new gswitch statement that will
1150 later be lowered to a jump table. */
1151 auto_vec <tree> labels;
1152 labels.create (m_cases.length ());
1153
1154 make_edge (m_case_bb, default_bb, 0);
1155 for (unsigned i = 0; i < m_cases.length (); i++)
1156 {
1157 labels.quick_push (unshare_expr (m_cases[i]->m_case_label_expr));
1158 make_edge (m_case_bb, m_cases[i]->m_case_bb, 0);
1159 }
1160
1161 gswitch *s = gimple_build_switch (index_expr,
1162 unshare_expr (default_label_expr), labels);
1163 gimple_set_location (s, loc);
1164 gimple_stmt_iterator gsi = gsi_start_bb (m_case_bb);
1165 gsi_insert_after (&gsi, s, GSI_NEW_STMT);
1166
1167 /* Set up even probabilities for all cases. */
1168 for (unsigned i = 0; i < m_cases.length (); i++)
1169 {
1170 simple_cluster *sc = static_cast<simple_cluster *> (m_cases[i]);
1171 edge case_edge = find_edge (m_case_bb, sc->m_case_bb);
1172 unsigned HOST_WIDE_INT case_range
1173 = sc->get_range (sc->get_low (), sc->get_high ());
1174 nondefault_range += case_range;
1175
1176 /* case_edge->aux is number of values in a jump-table that are covered
1177 by the case_edge. */
1178 case_edge->aux = (void *) ((intptr_t) (case_edge->aux) + case_range);
1179 }
1180
1181 edge default_edge = gimple_switch_default_edge (cfun, s);
1182 default_edge->probability = profile_probability::never ();
1183
1184 for (unsigned i = 0; i < m_cases.length (); i++)
1185 {
1186 simple_cluster *sc = static_cast<simple_cluster *> (m_cases[i]);
1187 edge case_edge = find_edge (m_case_bb, sc->m_case_bb);
1188 case_edge->probability
1189 = profile_probability::always ().apply_scale ((intptr_t)case_edge->aux,
1190 range);
1191 }
1192
1193 /* The default edge's probability is the complement of the non-default values' probability. */
1194 default_edge->probability
1195 += profile_probability::always ().apply_scale (nondefault_range,
1196 range).invert ();
1197
1198 switch_decision_tree::reset_out_edges_aux (s);
1199 }
1200
1201 /* Find jump tables of given CLUSTERS, where all members of the vector
1202 are of type simple_cluster. New clusters are returned. */
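/* The search below is a small dynamic program over cluster prefixes:
   min[i] describes the best partitioning found for the first i
   clusters, and for every j < i the candidate that closes the
   partition with clusters j .. i-1 forming one jump table is kept only
   if it can be handled and improves on the current best for i.  As a
   rough sketch of the recurrence:

       min[0] = empty partitioning
       min[i] = best over j < i of (min[j] plus one more cluster made
                of clusters j .. i-1, either as a jump table or, when
                too small, left as separate simple clusters).  */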
1203
1204 vec<cluster *>
1205 jump_table_cluster::find_jump_tables (vec<cluster *> &clusters)
1206 {
1207 if (!is_enabled ())
1208 return clusters.copy ();
1209
1210 unsigned l = clusters.length ();
1211 auto_vec<min_cluster_item> min;
1212 min.reserve (l + 1);
1213
1214 min.quick_push (min_cluster_item (0, 0, 0));
1215
1216 unsigned HOST_WIDE_INT max_ratio
1217 = (optimize_insn_for_size_p ()
1218 ? param_jump_table_max_growth_ratio_for_size
1219 : param_jump_table_max_growth_ratio_for_speed);
1220
1221 for (unsigned i = 1; i <= l; i++)
1222 {
1223 /* Set minimal # of clusters with i-th item to infinite. */
1224 min.quick_push (min_cluster_item (INT_MAX, INT_MAX, INT_MAX));
1225
1226 /* Pre-calculate number of comparisons for the clusters. */
1227 HOST_WIDE_INT comparison_count = 0;
1228 for (unsigned k = 0; k <= i - 1; k++)
1229 {
1230 simple_cluster *sc = static_cast<simple_cluster *> (clusters[k]);
1231 comparison_count += sc->get_comparison_count ();
1232 }
1233
1234 for (unsigned j = 0; j < i; j++)
1235 {
1236 unsigned HOST_WIDE_INT s = min[j].m_non_jt_cases;
1237 if (i - j < case_values_threshold ())
1238 s += i - j;
1239
1240 /* Prefer a partitioning with fewer clusters; break ties by the number of cases left outside jump tables. */
1241 if ((min[j].m_count + 1 < min[i].m_count
1242 || (min[j].m_count + 1 == min[i].m_count
1243 && s < min[i].m_non_jt_cases))
1244 && can_be_handled (clusters, j, i - 1, max_ratio,
1245 comparison_count))
1246 min[i] = min_cluster_item (min[j].m_count + 1, j, s);
1247
1248 simple_cluster *sc = static_cast<simple_cluster *> (clusters[j]);
1249 comparison_count -= sc->get_comparison_count ();
1250 }
1251
1252 gcc_checking_assert (comparison_count == 0);
1253 gcc_checking_assert (min[i].m_count != INT_MAX);
1254 }
1255
1256 /* No result. */
1257 if (min[l].m_count == l)
1258 return clusters.copy ();
1259
1260 vec<cluster *> output;
1261 output.create (4);
1262
1263 /* Find and build the clusters. */
1264 for (unsigned int end = l;;)
1265 {
1266 int start = min[end].m_start;
1267
1268 /* Do not allow clusters with small number of cases. */
1269 if (is_beneficial (clusters, start, end - 1))
1270 output.safe_push (new jump_table_cluster (clusters, start, end - 1));
1271 else
1272 for (int i = end - 1; i >= start; i--)
1273 output.safe_push (clusters[i]);
1274
1275 end = start;
1276
1277 if (start <= 0)
1278 break;
1279 }
1280
1281 output.reverse ();
1282 return output;
1283 }
1284
1285 /* Return true when cluster starting at START and ending at END (inclusive)
1286 can build a jump-table. */
1287
1288 bool
1289 jump_table_cluster::can_be_handled (const vec<cluster *> &clusters,
1290 unsigned start, unsigned end,
1291 unsigned HOST_WIDE_INT max_ratio,
1292 unsigned HOST_WIDE_INT comparison_count)
1293 {
1294 /* If the switch is relatively small such that the cost of one
1295 indirect jump on the target is higher than the cost of a
1296 decision tree, go with the decision tree.
1297
1298 If range of values is much bigger than number of values,
1299 or if it is too large to represent in a HOST_WIDE_INT,
1300 make a sequence of conditional branches instead of a dispatch.
1301
1302 The definition of "much bigger" depends on whether we are
1303 optimizing for size or for speed.
1304
1305 For algorithm correctness, jump table for a single case must return
1306 true. We bail out in is_beneficial if it's called just for
1307 a single case. */
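/* A worked example, assuming param_jump_table_max_growth_ratio_for_speed
   has its usual default of 800: cases 1, 2, 3 and 10 give range == 10
   and comparison_count == 4, so the test below is 100 * 10 <= 800 * 4,
   which holds and allows a jump table, whereas adding one more case at
   1000 would make 100 * 1000 exceed 800 * 5 and reject it.  */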
1308 if (start == end)
1309 return true;
1310
1311 unsigned HOST_WIDE_INT range = get_range (clusters[start]->get_low (),
1312 clusters[end]->get_high ());
1313 /* Check overflow. */
1314 if (range == 0)
1315 return false;
1316
1317 if (range > HOST_WIDE_INT_M1U / 100)
1318 return false;
1319
1320 unsigned HOST_WIDE_INT lhs = 100 * range;
1321 if (lhs < range)
1322 return false;
1323
1324 return lhs <= max_ratio * comparison_count;
1325 }
1326
1327 /* Return true if the cluster starting at START and ending at END (inclusive)
1328 is a profitable transformation. */
1329
1330 bool
1331 jump_table_cluster::is_beneficial (const vec<cluster *> &,
1332 unsigned start, unsigned end)
1333 {
1334 /* Single case bail out. */
1335 if (start == end)
1336 return false;
1337
1338 return end - start + 1 >= case_values_threshold ();
1339 }
1340
1341 /* Find bit tests of given CLUSTERS, where all members of the vector
1342 are of type simple_cluster. New clusters are returned. */
1343
1344 vec<cluster *>
1345 bit_test_cluster::find_bit_tests (vec<cluster *> &clusters)
1346 {
1347 if (!is_enabled ())
1348 return clusters.copy ();
1349
1350 unsigned l = clusters.length ();
1351 auto_vec<min_cluster_item> min;
1352 min.reserve (l + 1);
1353
1354 min.quick_push (min_cluster_item (0, 0, 0));
1355
1356 for (unsigned i = 1; i <= l; i++)
1357 {
1358 /* Set minimal # of clusters with i-th item to infinite. */
1359 min.quick_push (min_cluster_item (INT_MAX, INT_MAX, INT_MAX));
1360
1361 for (unsigned j = 0; j < i; j++)
1362 {
1363 if (min[j].m_count + 1 < min[i].m_count
1364 && can_be_handled (clusters, j, i - 1))
1365 min[i] = min_cluster_item (min[j].m_count + 1, j, INT_MAX);
1366 }
1367
1368 gcc_checking_assert (min[i].m_count != INT_MAX);
1369 }
1370
1371 /* No result. */
1372 if (min[l].m_count == l)
1373 return clusters.copy ();
1374
1375 vec<cluster *> output;
1376 output.create (4);
1377
1378 /* Find and build the clusters. */
1379 for (unsigned end = l;;)
1380 {
1381 int start = min[end].m_start;
1382
1383 if (is_beneficial (clusters, start, end - 1))
1384 {
1385 bool entire = start == 0 && end == clusters.length ();
1386 output.safe_push (new bit_test_cluster (clusters, start, end - 1,
1387 entire));
1388 }
1389 else
1390 for (int i = end - 1; i >= start; i--)
1391 output.safe_push (clusters[i]);
1392
1393 end = start;
1394
1395 if (start <= 0)
1396 break;
1397 }
1398
1399 output.reverse ();
1400 return output;
1401 }
1402
1403 /* Return true when RANGE of case values with UNIQ labels
1404 can build a bit test. */
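/* For example, on a target with a 64-bit word_mode a cluster whose
   values span fewer than 64 consecutive integers and that branches to
   at most m_max_case_bit_tests distinct targets qualifies; a span of
   64 or more values is always rejected.  */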
1405
1406 bool
1407 bit_test_cluster::can_be_handled (unsigned HOST_WIDE_INT range,
1408 unsigned int uniq)
1409 {
1410 /* Check overflow. */
1411 if (range == 0)
1412 return false;
1413
1414 if (range >= GET_MODE_BITSIZE (word_mode))
1415 return false;
1416
1417 return uniq <= m_max_case_bit_tests;
1418 }
1419
1420 /* Return true when cluster starting at START and ending at END (inclusive)
1421 can build a bit test. */
1422
1423 bool
1424 bit_test_cluster::can_be_handled (const vec<cluster *> &clusters,
1425 unsigned start, unsigned end)
1426 {
1427 auto_vec<int, m_max_case_bit_tests> dest_bbs;
1428 /* For algorithm correctness, bit test for a single case must return
1429 true. We bail out in is_beneficial if it's called just for
1430 a single case. */
1431 if (start == end)
1432 return true;
1433
1434 unsigned HOST_WIDE_INT range = get_range (clusters[start]->get_low (),
1435 clusters[end]->get_high ());
1436
1437 /* Make a guess first. */
1438 if (!can_be_handled (range, m_max_case_bit_tests))
1439 return false;
1440
1441 for (unsigned i = start; i <= end; i++)
1442 {
1443 simple_cluster *sc = static_cast<simple_cluster *> (clusters[i]);
1444 /* m_max_case_bit_tests is a very small integer, thus the operation
1445 is constant time. */
1446 if (!dest_bbs.contains (sc->m_case_bb->index))
1447 {
1448 if (dest_bbs.length () >= m_max_case_bit_tests)
1449 return false;
1450 dest_bbs.quick_push (sc->m_case_bb->index);
1451 }
1452 }
1453
1454 return true;
1455 }
1456
1457 /* Return true when COUNT of cases of UNIQ labels is beneficial for bit test
1458 transformation. */
1459
1460 bool
1461 bit_test_cluster::is_beneficial (unsigned count, unsigned uniq)
1462 {
1463 return (((uniq == 1 && count >= 3)
1464 || (uniq == 2 && count >= 5)
1465 || (uniq == 3 && count >= 6)));
1466 }
1467
1468 /* Return true if the cluster starting at START and ending at END (inclusive)
1469 is a profitable transformation. */
1470
1471 bool
1472 bit_test_cluster::is_beneficial (const vec<cluster *> &clusters,
1473 unsigned start, unsigned end)
1474 {
1475 /* Single case bail out. */
1476 if (start == end)
1477 return false;
1478
1479 auto_bitmap dest_bbs;
1480
1481 for (unsigned i = start; i <= end; i++)
1482 {
1483 simple_cluster *sc = static_cast<simple_cluster *> (clusters[i]);
1484 bitmap_set_bit (dest_bbs, sc->m_case_bb->index);
1485 }
1486
1487 unsigned uniq = bitmap_count_bits (dest_bbs);
1488 unsigned count = end - start + 1;
1489 return is_beneficial (count, uniq);
1490 }
1491
1492 /* Comparison function for qsort to order bit tests by decreasing
1493 probability of execution. */
1494
1495 int
1496 case_bit_test::cmp (const void *p1, const void *p2)
1497 {
1498 const case_bit_test *const d1 = (const case_bit_test *) p1;
1499 const case_bit_test *const d2 = (const case_bit_test *) p2;
1500
1501 if (d2->bits != d1->bits)
1502 return d2->bits - d1->bits;
1503
1504 /* Stabilize the sort. */
1505 return (LABEL_DECL_UID (CASE_LABEL (d2->label))
1506 - LABEL_DECL_UID (CASE_LABEL (d1->label)));
1507 }
1508
1509 /* Expand a switch statement by a short sequence of bit-wise
1510 comparisons. "switch(x)" is effectively converted into
1511 "if ((1 << (x-MINVAL)) & CST)" where CST and MINVAL are
1512 integer constants.
1513
1514 INDEX_EXPR is the value being switched on.
1515
1516 MINVAL is the lowest case value in the case nodes,
1517 and RANGE is the highest value minus MINVAL. MINVAL and RANGE
1518 are not guaranteed to be of the same type as INDEX_EXPR
1519 (the gimplifier doesn't change the type of case label values,
1520 and MINVAL and RANGE are derived from those values).
1521 MAXVAL is MINVAL + RANGE.
1522
1523 There *MUST* be max_case_bit_tests or less unique case
1524 node targets. */
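/* A small illustrative example: for

       switch (x) { case 1: case 3: case 4: case 6: foo (); break; }

   MINVAL is 1, the relative values are 0, 2, 3 and 5, so CST is
   0b101101 == 0x2d and the emitted test is roughly

       if ((1 << (x - 1)) & 0x2d) goto foo_label;

   guarded by a range check on x - 1 unless the known value range of x
   already proves that every shift amount is valid.  */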
1525
1526 void
1527 bit_test_cluster::emit (tree index_expr, tree index_type,
1528 tree, basic_block default_bb, location_t loc)
1529 {
1530 case_bit_test test[m_max_case_bit_tests] = { {} };
1531 unsigned int i, j, k;
1532 unsigned int count;
1533
1534 tree unsigned_index_type = range_check_type (index_type);
1535
1536 gimple_stmt_iterator gsi;
1537 gassign *shift_stmt;
1538
1539 tree idx, tmp, csui;
1540 tree word_type_node = lang_hooks.types.type_for_mode (word_mode, 1);
1541 tree word_mode_zero = fold_convert (word_type_node, integer_zero_node);
1542 tree word_mode_one = fold_convert (word_type_node, integer_one_node);
1543 int prec = TYPE_PRECISION (word_type_node);
1544 wide_int wone = wi::one (prec);
1545
1546 tree minval = get_low ();
1547 tree maxval = get_high ();
1548
1549 /* Go through all case labels, and collect the case labels, profile
1550 counts, and other information we need to build the branch tests. */
1551 count = 0;
1552 for (i = 0; i < m_cases.length (); i++)
1553 {
1554 unsigned int lo, hi;
1555 simple_cluster *n = static_cast<simple_cluster *> (m_cases[i]);
1556 for (k = 0; k < count; k++)
1557 if (n->m_case_bb == test[k].target_bb)
1558 break;
1559
1560 if (k == count)
1561 {
1562 gcc_checking_assert (count < m_max_case_bit_tests);
1563 test[k].mask = wi::zero (prec);
1564 test[k].target_bb = n->m_case_bb;
1565 test[k].label = n->m_case_label_expr;
1566 test[k].bits = 0;
1567 test[k].prob = profile_probability::never ();
1568 count++;
1569 }
1570
1571 test[k].bits += n->get_range (n->get_low (), n->get_high ());
1572 test[k].prob += n->m_prob;
1573
1574 lo = tree_to_uhwi (int_const_binop (MINUS_EXPR, n->get_low (), minval));
1575 if (n->get_high () == NULL_TREE)
1576 hi = lo;
1577 else
1578 hi = tree_to_uhwi (int_const_binop (MINUS_EXPR, n->get_high (),
1579 minval));
1580
1581 for (j = lo; j <= hi; j++)
1582 test[k].mask |= wi::lshift (wone, j);
1583 }
1584
1585 qsort (test, count, sizeof (*test), case_bit_test::cmp);
1586
1587 /* If every possible relative value of the index expression is a valid shift
1588 amount, then we can merge the entry test into the bit test. */
1589 bool entry_test_needed;
1590 value_range r;
1591 if (TREE_CODE (index_expr) == SSA_NAME
1592 && get_range_query (cfun)->range_of_expr (r, index_expr)
1593 && !r.undefined_p ()
1594 && !r.varying_p ()
1595 && wi::leu_p (r.upper_bound () - r.lower_bound (), prec - 1))
1596 {
1597 wide_int min = r.lower_bound ();
1598 wide_int max = r.upper_bound ();
1599 tree index_type = TREE_TYPE (index_expr);
1600 minval = fold_convert (index_type, minval);
1601 wide_int iminval = wi::to_wide (minval);
1602 if (wi::lt_p (min, iminval, TYPE_SIGN (index_type)))
1603 {
1604 minval = wide_int_to_tree (index_type, min);
1605 for (i = 0; i < count; i++)
1606 test[i].mask = wi::lshift (test[i].mask, iminval - min);
1607 }
1608 else if (wi::gt_p (min, iminval, TYPE_SIGN (index_type)))
1609 {
1610 minval = wide_int_to_tree (index_type, min);
1611 for (i = 0; i < count; i++)
1612 test[i].mask = wi::lrshift (test[i].mask, min - iminval);
1613 }
1614 maxval = wide_int_to_tree (index_type, max);
1615 entry_test_needed = false;
1616 }
1617 else
1618 entry_test_needed = true;
1619
1620 /* If all values are in the 0 .. BITS_PER_WORD-1 range, we can get rid of
1621 the minval subtractions, but it might make the mask constants more
1622 expensive. So, compare the costs. */
1623 if (compare_tree_int (minval, 0) > 0 && compare_tree_int (maxval, prec) < 0)
1624 {
1625 int cost_diff;
1626 HOST_WIDE_INT m = tree_to_uhwi (minval);
1627 rtx reg = gen_raw_REG (word_mode, 10000);
1628 bool speed_p = optimize_insn_for_speed_p ();
1629 cost_diff = set_src_cost (gen_rtx_PLUS (word_mode, reg,
1630 GEN_INT (-m)),
1631 word_mode, speed_p);
1632 for (i = 0; i < count; i++)
1633 {
1634 rtx r = immed_wide_int_const (test[i].mask, word_mode);
1635 cost_diff += set_src_cost (gen_rtx_AND (word_mode, reg, r),
1636 word_mode, speed_p);
1637 r = immed_wide_int_const (wi::lshift (test[i].mask, m), word_mode);
1638 cost_diff -= set_src_cost (gen_rtx_AND (word_mode, reg, r),
1639 word_mode, speed_p);
1640 }
1641 if (cost_diff > 0)
1642 {
1643 for (i = 0; i < count; i++)
1644 test[i].mask = wi::lshift (test[i].mask, m);
1645 minval = build_zero_cst (TREE_TYPE (minval));
1646 }
1647 }
1648
1649 /* Now build the test-and-branch code. */
1650
1651 gsi = gsi_last_bb (m_case_bb);
1652
1653 /* idx = (unsigned)x - minval. */
1654 idx = fold_convert_loc (loc, unsigned_index_type, index_expr);
1655 idx = fold_build2_loc (loc, MINUS_EXPR, unsigned_index_type, idx,
1656 fold_convert_loc (loc, unsigned_index_type, minval));
1657 idx = force_gimple_operand_gsi (&gsi, idx,
1658 /*simple=*/true, NULL_TREE,
1659 /*before=*/true, GSI_SAME_STMT);
1660
1661 profile_probability subtree_prob = m_subtree_prob;
1662 profile_probability default_prob = m_default_prob;
1663 if (!default_prob.initialized_p ())
1664 default_prob = m_subtree_prob.invert ();
1665
1666 if (m_handles_entire_switch && entry_test_needed)
1667 {
1668 tree range = int_const_binop (MINUS_EXPR, maxval, minval);
1669 /* if (idx > range) goto default */
1670 range
1671 = force_gimple_operand_gsi (&gsi,
1672 fold_convert (unsigned_index_type, range),
1673 /*simple=*/true, NULL_TREE,
1674 /*before=*/true, GSI_SAME_STMT);
1675 tmp = fold_build2 (GT_EXPR, boolean_type_node, idx, range);
1676 default_prob = default_prob / 2;
1677 basic_block new_bb
1678 = hoist_edge_and_branch_if_true (&gsi, tmp, default_bb,
1679 default_prob, loc);
1680 gsi = gsi_last_bb (new_bb);
1681 }
1682
1683 tmp = fold_build2_loc (loc, LSHIFT_EXPR, word_type_node, word_mode_one,
1684 fold_convert_loc (loc, word_type_node, idx));
1685
1686 /* csui = (1 << (word_mode) idx) */
1687 if (count > 1)
1688 {
1689 csui = make_ssa_name (word_type_node);
1690 tmp = force_gimple_operand_gsi (&gsi, tmp,
1691 /*simple=*/false, NULL_TREE,
1692 /*before=*/true, GSI_SAME_STMT);
1693 shift_stmt = gimple_build_assign (csui, tmp);
1694 gsi_insert_before (&gsi, shift_stmt, GSI_SAME_STMT);
1695 update_stmt (shift_stmt);
1696 }
1697 else
1698 csui = tmp;
1699
1700 /* for each unique set of cases:
1701 if (const & csui) goto target */
1702 for (k = 0; k < count; k++)
1703 {
1704 profile_probability prob = test[k].prob / (subtree_prob + default_prob);
1705 subtree_prob -= test[k].prob;
1706 tmp = wide_int_to_tree (word_type_node, test[k].mask);
1707 tmp = fold_build2_loc (loc, BIT_AND_EXPR, word_type_node, csui, tmp);
1708 tmp = fold_build2_loc (loc, NE_EXPR, boolean_type_node,
1709 tmp, word_mode_zero);
1710 tmp = force_gimple_operand_gsi (&gsi, tmp,
1711 /*simple=*/true, NULL_TREE,
1712 /*before=*/true, GSI_SAME_STMT);
1713 basic_block new_bb
1714 = hoist_edge_and_branch_if_true (&gsi, tmp, test[k].target_bb,
1715 prob, loc);
1716 gsi = gsi_last_bb (new_bb);
1717 }
1718
1719 /* We should have removed all edges now. */
1720 gcc_assert (EDGE_COUNT (gsi_bb (gsi)->succs) == 0);
1721
1722 /* If nothing matched, go to the default label. */
1723 edge e = make_edge (gsi_bb (gsi), default_bb, EDGE_FALLTHRU);
1724 e->probability = profile_probability::always ();
1725 }
1726
1727 /* Split the basic block at the statement pointed to by GSIP, and insert
1728 a branch to the target basic block CASE_BB conditional on tree
1729 expression COND.
1730 
1731 A new edge from the to-be-split basic block to CASE_BB is created
1732 with probability PROB, and the fall-through edge to the block
1733 created by the split receives the complementary probability and
1734 count.
1735 
1736 The CFG is updated. The dominator tree will not be valid after
1737 this transformation and is not updated by this function, so
1738 callers must repair it themselves if they need it.
1739 
1740 Returns the newly created basic block. */
1741
1742 basic_block
1743 bit_test_cluster::hoist_edge_and_branch_if_true (gimple_stmt_iterator *gsip,
1744 tree cond, basic_block case_bb,
1745 profile_probability prob,
1746 location_t loc)
1747 {
1748 tree tmp;
1749 gcond *cond_stmt;
1750 edge e_false;
1751 basic_block new_bb, split_bb = gsi_bb (*gsip);
1752
1753 edge e_true = make_edge (split_bb, case_bb, EDGE_TRUE_VALUE);
1754 e_true->probability = prob;
1755 gcc_assert (e_true->src == split_bb);
1756
1757 tmp = force_gimple_operand_gsi (gsip, cond, /*simple=*/true, NULL,
1758 /*before=*/true, GSI_SAME_STMT);
1759 cond_stmt = gimple_build_cond_from_tree (tmp, NULL_TREE, NULL_TREE);
1760 gimple_set_location (cond_stmt, loc);
1761 gsi_insert_before (gsip, cond_stmt, GSI_SAME_STMT);
1762
1763 e_false = split_block (split_bb, cond_stmt);
1764 new_bb = e_false->dest;
1765 redirect_edge_pred (e_true, split_bb);
1766
1767 e_false->flags &= ~EDGE_FALLTHRU;
1768 e_false->flags |= EDGE_FALSE_VALUE;
1769 e_false->probability = e_true->probability.invert ();
1770 new_bb->count = e_false->count ();
1771
1772 return new_bb;
1773 }
1774
1775 /* Compute the number of case labels that correspond to each outgoing edge of
1776 switch statement. Record this information in the aux field of the edge. */
1777
1778 void
1779 switch_decision_tree::compute_cases_per_edge ()
1780 {
1781 reset_out_edges_aux (m_switch);
1782 int ncases = gimple_switch_num_labels (m_switch);
1783 for (int i = ncases - 1; i >= 1; --i)
1784 {
1785 edge case_edge = gimple_switch_edge (cfun, m_switch, i);
1786 case_edge->aux = (void *) ((intptr_t) (case_edge->aux) + 1);
1787 }
1788 }
1789
1790 /* Analyze the switch statement and return true when the statement is expanded
1791 as a decision tree. */
1792
1793 bool
1794 switch_decision_tree::analyze_switch_statement ()
1795 {
1796 unsigned l = gimple_switch_num_labels (m_switch);
1797 basic_block bb = gimple_bb (m_switch);
1798 auto_vec<cluster *> clusters;
1799 clusters.create (l - 1);
1800
1801 basic_block default_bb = gimple_switch_default_bb (cfun, m_switch);
1802 m_case_bbs.reserve (l);
1803 m_case_bbs.quick_push (default_bb);
1804
1805 compute_cases_per_edge ();
1806
1807 for (unsigned i = 1; i < l; i++)
1808 {
1809 tree elt = gimple_switch_label (m_switch, i);
1810 tree lab = CASE_LABEL (elt);
1811 basic_block case_bb = label_to_block (cfun, lab);
1812 edge case_edge = find_edge (bb, case_bb);
1813 tree low = CASE_LOW (elt);
1814 tree high = CASE_HIGH (elt);
1815
1816 profile_probability p
1817 = case_edge->probability / ((intptr_t) (case_edge->aux));
1818 clusters.quick_push (new simple_cluster (low, high, elt, case_edge->dest,
1819 p));
1820 m_case_bbs.quick_push (case_edge->dest);
1821 }
1822
1823 reset_out_edges_aux (m_switch);
1824
1825 /* Find bit-test clusters. */
1826 vec<cluster *> output = bit_test_cluster::find_bit_tests (clusters);
1827
1828 /* Find jump table clusters. */
1829 vec<cluster *> output2;
1830 auto_vec<cluster *> tmp;
1831 output2.create (1);
1832 tmp.create (1);
1833
1834 for (unsigned i = 0; i < output.length (); i++)
1835 {
1836 cluster *c = output[i];
1837 if (c->get_type () != SIMPLE_CASE)
1838 {
1839 if (!tmp.is_empty ())
1840 {
1841 vec<cluster *> n = jump_table_cluster::find_jump_tables (tmp);
1842 output2.safe_splice (n);
1843 n.release ();
1844 tmp.truncate (0);
1845 }
1846 output2.safe_push (c);
1847 }
1848 else
1849 tmp.safe_push (c);
1850 }
1851
/* The temporary vector may still contain simple clusters that have not
been considered for a jump table yet.  */
1853 if (!tmp.is_empty ())
1854 {
1855 vec<cluster *> n = jump_table_cluster::find_jump_tables (tmp);
1856 output2.safe_splice (n);
1857 n.release ();
1858 }
1859
1860 if (dump_file)
1861 {
1862 fprintf (dump_file, ";; GIMPLE switch case clusters: ");
1863 for (unsigned i = 0; i < output2.length (); i++)
1864 output2[i]->dump (dump_file, dump_flags & TDF_DETAILS);
1865 fprintf (dump_file, "\n");
1866 }
1867
1868 output.release ();
1869
1870 bool expanded = try_switch_expansion (output2);
1871 release_clusters (output2);
1872 return expanded;
1873 }
1874
1875 /* Attempt to expand CLUSTERS as a decision tree. Return true when
1876 expanded. */
1877
1878 bool
1879 switch_decision_tree::try_switch_expansion (vec<cluster *> &clusters)
1880 {
1881 tree index_expr = gimple_switch_index (m_switch);
1882 tree index_type = TREE_TYPE (index_expr);
1883 basic_block bb = gimple_bb (m_switch);
1884
1885 if (gimple_switch_num_labels (m_switch) == 1
1886 || range_check_type (index_type) == NULL_TREE)
1887 return false;
1888
1889 /* Find the default case target label. */
1890 edge default_edge = gimple_switch_default_edge (cfun, m_switch);
1891 m_default_bb = default_edge->dest;
1892
/* Build m_case_list by inserting each cluster at the head of the list.
The clusters arrive in ascending order in the vector; we walk the
vector backwards and push at the head, so the list we construct is
again sorted in ascending order.  */
1897
1898 for (int i = clusters.length () - 1; i >= 0; i--)
1899 {
1900 case_tree_node *r = m_case_list;
1901 m_case_list = m_case_node_pool.allocate ();
1902 m_case_list->m_right = r;
1903 m_case_list->m_c = clusters[i];
1904 }
1905
1906 record_phi_operand_mapping ();
1907
1908 /* Split basic block that contains the gswitch statement. */
1909 gimple_stmt_iterator gsi = gsi_last_bb (bb);
1910 edge e;
1911 if (gsi_end_p (gsi))
1912 e = split_block_after_labels (bb);
1913 else
1914 {
1915 gsi_prev (&gsi);
1916 e = split_block (bb, gsi_stmt (gsi));
1917 }
1918 bb = split_edge (e);
1919
/* Create new basic blocks for the non-simple clusters (jump tables and
bit tests), where their specific expansion will happen.  */
1922 for (unsigned i = 0; i < clusters.length (); i++)
1923 if (clusters[i]->get_type () != SIMPLE_CASE)
1924 {
1925 clusters[i]->m_case_bb = create_empty_bb (bb);
1926 clusters[i]->m_case_bb->count = bb->count;
1927 clusters[i]->m_case_bb->loop_father = bb->loop_father;
1928 }
1929
/* Do not do extra work for a single cluster.  */
1931 if (clusters.length () == 1
1932 && clusters[0]->get_type () != SIMPLE_CASE)
1933 {
1934 cluster *c = clusters[0];
1935 c->emit (index_expr, index_type,
1936 gimple_switch_default_label (m_switch), m_default_bb,
1937 gimple_location (m_switch));
1938 redirect_edge_succ (single_succ_edge (bb), c->m_case_bb);
1939 }
1940 else
1941 {
1942 emit (bb, index_expr, default_edge->probability, index_type);
1943
1944 /* Emit cluster-specific switch handling. */
1945 for (unsigned i = 0; i < clusters.length (); i++)
1946 if (clusters[i]->get_type () != SIMPLE_CASE)
1947 {
1948 edge e = single_pred_edge (clusters[i]->m_case_bb);
1949 e->dest->count = e->src->count.apply_probability (e->probability);
1950 clusters[i]->emit (index_expr, index_type,
1951 gimple_switch_default_label (m_switch),
1952 m_default_bb, gimple_location (m_switch));
1953 }
1954 }
1955
1956 fix_phi_operands_for_edges ();
1957
1958 return true;
1959 }
1960
/* Before the switch transformation, record for each PHI node in a case
   basic block the argument that flows in from the switch basic block,
   keyed by the PHI result.  */
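
/* A hypothetical example: if a case block contains

     x_3 = PHI <a_1 (switch_bb), b_2 (other_bb)>

   we record the mapping x_3 -> a_1.  When the decision tree later adds
   a new edge into that block, the PHI argument for that edge is missing
   and fix_phi_operands_for_edges fills it in with a_1.  */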
1963
1964 void
1965 switch_decision_tree::record_phi_operand_mapping ()
1966 {
1967 basic_block switch_bb = gimple_bb (m_switch);
1968 /* Record all PHI nodes that have to be fixed after conversion. */
1969 for (unsigned i = 0; i < m_case_bbs.length (); i++)
1970 {
1971 gphi_iterator gsi;
1972 basic_block bb = m_case_bbs[i];
1973 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1974 {
1975 gphi *phi = gsi.phi ();
1976
1977 for (unsigned i = 0; i < gimple_phi_num_args (phi); i++)
1978 {
1979 basic_block phi_src_bb = gimple_phi_arg_edge (phi, i)->src;
1980 if (phi_src_bb == switch_bb)
1981 {
1982 tree def = gimple_phi_arg_def (phi, i);
1983 tree result = gimple_phi_result (phi);
1984 m_phi_mapping.put (result, def);
1985 break;
1986 }
1987 }
1988 }
1989 }
1990 }
1991
/* Fill in PHI operands for the new edges into case label blocks that
   were introduced by the transformation.  */
1994
1995 void
1996 switch_decision_tree::fix_phi_operands_for_edges ()
1997 {
1998 gphi_iterator gsi;
1999
2000 for (unsigned i = 0; i < m_case_bbs.length (); i++)
2001 {
2002 basic_block bb = m_case_bbs[i];
2003 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2004 {
2005 gphi *phi = gsi.phi ();
2006 for (unsigned j = 0; j < gimple_phi_num_args (phi); j++)
2007 {
2008 tree def = gimple_phi_arg_def (phi, j);
2009 if (def == NULL_TREE)
2010 {
2011 edge e = gimple_phi_arg_edge (phi, j);
2012 tree *definition
2013 = m_phi_mapping.get (gimple_phi_result (phi));
2014 gcc_assert (definition);
2015 add_phi_arg (phi, *definition, e, UNKNOWN_LOCATION);
2016 }
2017 }
2018 }
2019 }
2020 }
2021
2022 /* Generate a decision tree, switching on INDEX_EXPR and jumping to
2023 one of the labels in CASE_LIST or to the DEFAULT_LABEL.
2024
2025 We generate a binary decision tree to select the appropriate target
2026 code. */
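
/* A hand-written sketch (not actual compiler output) of the shape of the
   emitted tree for five equally likely cases 0, 1, 3, 7 and 9, balanced
   around the pivot 3:

     if (i == 3) goto L3;
     if (i > 3) goto T;
     if (i == 0) goto L0;
     if (i == 1) goto L1;
     goto Ldefault;
   T:
     if (i == 7) goto L7;
     if (i == 9) goto L9;
     goto Ldefault;  */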
2027
2028 void
2029 switch_decision_tree::emit (basic_block bb, tree index_expr,
2030 profile_probability default_prob, tree index_type)
2031 {
2032 balance_case_nodes (&m_case_list, NULL);
2033
2034 if (dump_file)
2035 dump_function_to_file (current_function_decl, dump_file, dump_flags);
2036 if (dump_file && (dump_flags & TDF_DETAILS))
2037 {
2038 int indent_step = ceil_log2 (TYPE_PRECISION (index_type)) + 2;
2039 fprintf (dump_file, ";; Expanding GIMPLE switch as decision tree:\n");
2040 gcc_assert (m_case_list != NULL);
2041 dump_case_nodes (dump_file, m_case_list, indent_step, 0);
2042 }
2043
2044 bb = emit_case_nodes (bb, index_expr, m_case_list, default_prob, index_type,
2045 gimple_location (m_switch));
2046
2047 if (bb)
2048 emit_jump (bb, m_default_bb);
2049
/* Remove the GIMPLE_SWITCH statement and delete the basic block that
contained it; the decision tree emitted above now selects the cases.  */
2051 bb = gimple_bb (m_switch);
2052 gimple_stmt_iterator gsi = gsi_last_bb (bb);
2053 gsi_remove (&gsi, true);
2054
2055 delete_basic_block (bb);
2056 }
2057
2058 /* Take an ordered list of case nodes
2059 and transform them into a near optimal binary tree,
2060 on the assumption that any target code selection value is as
2061 likely as any other.
2062
2063 The transformation is performed by splitting the ordered
2064 list into two equal sections plus a pivot. The parts are
2065 then attached to the pivot as left and right branches. Each
2066 branch is then transformed recursively. */
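
/* A small worked example, assuming equal probabilities: for the ordered
   list A -> B -> C -> D -> E the probability midpoint is reached at C,
   so C becomes the root, A -> B the left part and D -> E the right part.
   Each two-node part is short enough to be left as a chain, giving

          C
         / \
        A   D
         \   \
          B   E

   where B and E hang off the m_right links of A and D.  */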
2067
2068 void
2069 switch_decision_tree::balance_case_nodes (case_tree_node **head,
2070 case_tree_node *parent)
2071 {
2072 case_tree_node *np;
2073
2074 np = *head;
2075 if (np)
2076 {
2077 int i = 0;
2078 case_tree_node **npp;
2079 case_tree_node *left;
2080 profile_probability prob = profile_probability::never ();
2081
/* Count the number of entries on this branch and sum their probabilities.  */
2083
2084 while (np)
2085 {
2086 i++;
2087 prob += np->m_c->m_prob;
2088 np = np->m_right;
2089 }
2090
2091 if (i > 2)
2092 {
2093 /* Split this list if it is long enough for that to help. */
2094 npp = head;
2095 left = *npp;
2096 profile_probability pivot_prob = prob / 2;
2097
2098 /* Find the place in the list that bisects the list's total cost
2099 by probability. */
2100 while (1)
2101 {
2102 /* Skip nodes while their probability does not reach
2103 that amount. */
2104 prob -= (*npp)->m_c->m_prob;
2105 if ((prob.initialized_p () && prob < pivot_prob)
2106 || ! (*npp)->m_right)
2107 break;
2108 npp = &(*npp)->m_right;
2109 }
2110
2111 np = *npp;
2112 *npp = 0;
2113 *head = np;
2114 np->m_parent = parent;
2115 np->m_left = left == np ? NULL : left;
2116
2117 /* Optimize each of the two split parts. */
2118 balance_case_nodes (&np->m_left, np);
2119 balance_case_nodes (&np->m_right, np);
2120 np->m_c->m_subtree_prob = np->m_c->m_prob;
2121 if (np->m_left)
2122 np->m_c->m_subtree_prob += np->m_left->m_c->m_subtree_prob;
2123 if (np->m_right)
2124 np->m_c->m_subtree_prob += np->m_right->m_c->m_subtree_prob;
2125 }
2126 else
2127 {
2128 /* Else leave this branch as one level,
2129 but fill in `parent' fields. */
2130 np = *head;
2131 np->m_parent = parent;
2132 np->m_c->m_subtree_prob = np->m_c->m_prob;
2133 for (; np->m_right; np = np->m_right)
2134 {
2135 np->m_right->m_parent = np;
2136 (*head)->m_c->m_subtree_prob += np->m_right->m_c->m_subtree_prob;
2137 }
2138 }
2139 }
2140 }
2141
2142 /* Dump ROOT, a list or tree of case nodes, to file. */
2143
2144 void
2145 switch_decision_tree::dump_case_nodes (FILE *f, case_tree_node *root,
2146 int indent_step, int indent_level)
2147 {
2148 if (root == 0)
2149 return;
2150 indent_level++;
2151
2152 dump_case_nodes (f, root->m_left, indent_step, indent_level);
2153
2154 fputs (";; ", f);
2155 fprintf (f, "%*s", indent_step * indent_level, "");
2156 root->m_c->dump (f);
2157 root->m_c->m_prob.dump (f);
2158 fputs (" subtree: ", f);
2159 root->m_c->m_subtree_prob.dump (f);
2160 fputs (")\n", f);
2161
2162 dump_case_nodes (f, root->m_right, indent_step, indent_level);
2163 }
2164
2165
2166 /* Add an unconditional jump to CASE_BB that happens in basic block BB. */
2167
2168 void
2169 switch_decision_tree::emit_jump (basic_block bb, basic_block case_bb)
2170 {
2171 edge e = single_succ_edge (bb);
2172 redirect_edge_succ (e, case_bb);
2173 }
2174
/* Generate code to jump to LABEL_BB when the comparison OP0 COMPARISON OP1
   is true.  COMPARISON is the GIMPLE comparison code (EQ_EXPR, NE_EXPR,
   GT_EXPR, etc.).  PROB is the probability of jumping to LABEL_BB.
   Returns the basic block reached when the comparison is false.  */
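
/* An illustrative sketch of what this helper emits for a call like
   emit_cmp_and_jump_insns (bb, idx, val, GT_EXPR, target_bb, p, loc):

     <bb>:    ...
              if (idx > val) goto <target_bb>;  [p]
              else goto <new_bb>;               [p.invert ()]

   where <new_bb> is the block returned to the caller.  */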
2179
2180 basic_block
2181 switch_decision_tree::emit_cmp_and_jump_insns (basic_block bb, tree op0,
2182 tree op1, tree_code comparison,
2183 basic_block label_bb,
2184 profile_probability prob,
2185 location_t loc)
2186 {
// TODO: This is called in one place with OP0 different from the switch index.
2188 op1 = fold_convert (TREE_TYPE (op0), op1);
2189
2190 gcond *cond = gimple_build_cond (comparison, op0, op1, NULL_TREE, NULL_TREE);
2191 gimple_set_location (cond, loc);
2192 gimple_stmt_iterator gsi = gsi_last_bb (bb);
2193 gsi_insert_after (&gsi, cond, GSI_NEW_STMT);
2194
2195 gcc_assert (single_succ_p (bb));
2196
/* Make a new basic block where the false branch will take place.  */
2198 edge false_edge = split_block (bb, cond);
2199 false_edge->flags = EDGE_FALSE_VALUE;
2200 false_edge->probability = prob.invert ();
2201 false_edge->dest->count = bb->count.apply_probability (prob.invert ());
2202
2203 edge true_edge = make_edge (bb, label_bb, EDGE_TRUE_VALUE);
2204 true_edge->probability = prob;
2205
2206 return false_edge->dest;
2207 }
2208
/* Generate code to jump to LABEL_BB if OP0 and OP1 are equal.
   PROB is the probability of jumping to LABEL_BB.
   BB is the basic block where the new condition will be placed.
   Returns the basic block reached when the operands are not equal.  */
2212
2213 basic_block
2214 switch_decision_tree::do_jump_if_equal (basic_block bb, tree op0, tree op1,
2215 basic_block label_bb,
2216 profile_probability prob,
2217 location_t loc)
2218 {
2219 op1 = fold_convert (TREE_TYPE (op0), op1);
2220
2221 gcond *cond = gimple_build_cond (EQ_EXPR, op0, op1, NULL_TREE, NULL_TREE);
2222 gimple_set_location (cond, loc);
2223 gimple_stmt_iterator gsi = gsi_last_bb (bb);
2224 gsi_insert_before (&gsi, cond, GSI_SAME_STMT);
2225
2226 gcc_assert (single_succ_p (bb));
2227
/* Make a new basic block where the false branch will take place.  */
2229 edge false_edge = split_block (bb, cond);
2230 false_edge->flags = EDGE_FALSE_VALUE;
2231 false_edge->probability = prob.invert ();
2232 false_edge->dest->count = bb->count.apply_probability (prob.invert ());
2233
2234 edge true_edge = make_edge (bb, label_bb, EDGE_TRUE_VALUE);
2235 true_edge->probability = prob;
2236
2237 return false_edge->dest;
2238 }
2239
/* Emit step-by-step code to select a case for the value of INDEX.
   The generated decision tree follows the form of the case-node binary
   tree NODE, whose nodes represent test conditions.
   DEFAULT_PROB is the probability of cases leading to the default BB.
   INDEX_TYPE is the type of the switch index.  */
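
/* A worked example of the probability rescaling used throughout this
   function: if a single-value node has probability 20%, its whole
   subtree (including the node itself) accounts for 50% and DEFAULT_PROB
   is 30%, then the equality jump for the node gets

     p = 0.20 / (0.50 + 0.30) = 0.25

   i.e. roughly the probability of taking that case given that execution
   has reached this test.  */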
2245
2246 basic_block
2247 switch_decision_tree::emit_case_nodes (basic_block bb, tree index,
2248 case_tree_node *node,
2249 profile_probability default_prob,
2250 tree index_type, location_t loc)
2251 {
2252 profile_probability p;
2253
2254 /* If node is null, we are done. */
2255 if (node == NULL)
2256 return bb;
2257
2258 /* Single value case. */
2259 if (node->m_c->is_single_value_p ())
2260 {
2261 /* Node is single valued. First see if the index expression matches
2262 this node and then check our children, if any. */
2263 p = node->m_c->m_prob / (node->m_c->m_subtree_prob + default_prob);
2264 bb = do_jump_if_equal (bb, index, node->m_c->get_low (),
2265 node->m_c->m_case_bb, p, loc);
/* This case has been handled at this point, so remove its probability
from the remaining subtree probability.  */
2268 node->m_c->m_subtree_prob -= node->m_c->m_prob;
2269
2270 if (node->m_left != NULL && node->m_right != NULL)
2271 {
2272 /* 1) the node has both children
2273
2274 If both children are single-valued cases with no
2275 children, finish up all the work. This way, we can save
2276 one ordered comparison. */
2277
2278 if (!node->m_left->has_child ()
2279 && node->m_left->m_c->is_single_value_p ()
2280 && !node->m_right->has_child ()
2281 && node->m_right->m_c->is_single_value_p ())
2282 {
2283 p = (node->m_right->m_c->m_prob
2284 / (node->m_c->m_subtree_prob + default_prob));
2285 bb = do_jump_if_equal (bb, index, node->m_right->m_c->get_low (),
2286 node->m_right->m_c->m_case_bb, p, loc);
2287 node->m_c->m_subtree_prob -= node->m_right->m_c->m_prob;
2288
2289 p = (node->m_left->m_c->m_prob
2290 / (node->m_c->m_subtree_prob + default_prob));
2291 bb = do_jump_if_equal (bb, index, node->m_left->m_c->get_low (),
2292 node->m_left->m_c->m_case_bb, p, loc);
2293 }
2294 else
2295 {
/* Branch to a new block where the right-hand subtree will be handled later.  */
2297 basic_block test_bb = split_edge (single_succ_edge (bb));
2298 redirect_edge_succ (single_pred_edge (test_bb),
2299 single_succ_edge (bb)->dest);
2300
2301 p = ((node->m_right->m_c->m_subtree_prob + default_prob / 2)
2302 / (node->m_c->m_subtree_prob + default_prob));
2303 test_bb->count = bb->count.apply_probability (p);
2304 bb = emit_cmp_and_jump_insns (bb, index, node->m_c->get_high (),
2305 GT_EXPR, test_bb, p, loc);
2306 default_prob /= 2;
2307
2308 /* Handle the left-hand subtree. */
2309 bb = emit_case_nodes (bb, index, node->m_left,
2310 default_prob, index_type, loc);
2311
2312 /* If the left-hand subtree fell through,
2313 don't let it fall into the right-hand subtree. */
2314 if (bb && m_default_bb)
2315 emit_jump (bb, m_default_bb);
2316
2317 bb = emit_case_nodes (test_bb, index, node->m_right,
2318 default_prob, index_type, loc);
2319 }
2320 }
2321 else if (node->m_left == NULL && node->m_right != NULL)
2322 {
2323 /* 2) the node has only right child. */
2324
2325 /* Here we have a right child but no left so we issue a conditional
2326 branch to default and process the right child.
2327
2328 Omit the conditional branch to default if the right child
2329 does not have any children and is single valued; it would
2330 cost too much space to save so little time. */
2331
2332 if (node->m_right->has_child ()
2333 || !node->m_right->m_c->is_single_value_p ())
2334 {
2335 p = ((default_prob / 2)
2336 / (node->m_c->m_subtree_prob + default_prob));
2337 bb = emit_cmp_and_jump_insns (bb, index, node->m_c->get_low (),
2338 LT_EXPR, m_default_bb, p, loc);
2339 default_prob /= 2;
2340
2341 bb = emit_case_nodes (bb, index, node->m_right, default_prob,
2342 index_type, loc);
2343 }
2344 else
2345 {
2346 /* We cannot process node->right normally
2347 since we haven't ruled out the numbers less than
2348 this node's value. So handle node->right explicitly. */
2349 p = (node->m_right->m_c->m_subtree_prob
2350 / (node->m_c->m_subtree_prob + default_prob));
2351 bb = do_jump_if_equal (bb, index, node->m_right->m_c->get_low (),
2352 node->m_right->m_c->m_case_bb, p, loc);
2353 }
2354 }
2355 else if (node->m_left != NULL && node->m_right == NULL)
2356 {
2357 /* 3) just one subtree, on the left. Similar case as previous. */
2358
2359 if (node->m_left->has_child ()
2360 || !node->m_left->m_c->is_single_value_p ())
2361 {
2362 p = ((default_prob / 2)
2363 / (node->m_c->m_subtree_prob + default_prob));
2364 bb = emit_cmp_and_jump_insns (bb, index, node->m_c->get_high (),
2365 GT_EXPR, m_default_bb, p, loc);
2366 default_prob /= 2;
2367
2368 bb = emit_case_nodes (bb, index, node->m_left, default_prob,
2369 index_type, loc);
2370 }
2371 else
2372 {
2373 /* We cannot process node->left normally
2374 since we haven't ruled out the numbers less than
2375 this node's value. So handle node->left explicitly. */
2376 p = (node->m_left->m_c->m_subtree_prob
2377 / (node->m_c->m_subtree_prob + default_prob));
2378 bb = do_jump_if_equal (bb, index, node->m_left->m_c->get_low (),
2379 node->m_left->m_c->m_case_bb, p, loc);
2380 }
2381 }
2382 }
2383 else
2384 {
2385 /* Node is a range. These cases are very similar to those for a single
2386 value, except that we do not start by testing whether this node
2387 is the one to branch to. */
2388 if (node->has_child () || node->m_c->get_type () != SIMPLE_CASE)
2389 {
2390 bool is_bt = node->m_c->get_type () == BIT_TEST;
2391 int parts = is_bt ? 3 : 2;
2392
/* Branch to a new block where the right-hand subtree will be handled later.  */
2394 basic_block test_bb = split_edge (single_succ_edge (bb));
2395 redirect_edge_succ (single_pred_edge (test_bb),
2396 single_succ_edge (bb)->dest);
2397
2398 profile_probability right_prob = profile_probability::never ();
2399 if (node->m_right)
2400 right_prob = node->m_right->m_c->m_subtree_prob;
2401 p = ((right_prob + default_prob / parts)
2402 / (node->m_c->m_subtree_prob + default_prob));
2403 test_bb->count = bb->count.apply_probability (p);
2404
2405 bb = emit_cmp_and_jump_insns (bb, index, node->m_c->get_high (),
2406 GT_EXPR, test_bb, p, loc);
2407
2408 default_prob /= parts;
2409 node->m_c->m_subtree_prob -= right_prob;
2410 if (is_bt)
2411 node->m_c->m_default_prob = default_prob;
2412
2413 /* Value belongs to this node or to the left-hand subtree. */
2414 p = node->m_c->m_prob / (node->m_c->m_subtree_prob + default_prob);
2415 bb = emit_cmp_and_jump_insns (bb, index, node->m_c->get_low (),
2416 GE_EXPR, node->m_c->m_case_bb, p, loc);
2417
2418 /* Handle the left-hand subtree. */
2419 bb = emit_case_nodes (bb, index, node->m_left, default_prob,
2420 index_type, loc);
2421
2422 /* If the left-hand subtree fell through,
2423 don't let it fall into the right-hand subtree. */
2424 if (bb && m_default_bb)
2425 emit_jump (bb, m_default_bb);
2426
2427 bb = emit_case_nodes (test_bb, index, node->m_right, default_prob,
2428 index_type, loc);
2429 }
2430 else
2431 {
2432 /* Node has no children so we check low and high bounds to remove
2433 redundant tests. Only one of the bounds can exist,
2434 since otherwise this node is bounded--a case tested already. */
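      /* A sketch of the emitted range check, assuming generate_range_test
         uses the usual unsigned-subtraction trick: for a range cluster
         5 ... 10 it yields LHS and RHS such that LHS <= RHS iff INDEX is
         in range, roughly

           (unsigned) (INDEX - 5) <= 5

         so the single GT_EXPR comparison below suffices to branch to the
         default block.  */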
2435 tree lhs, rhs;
2436 generate_range_test (bb, index, node->m_c->get_low (),
2437 node->m_c->get_high (), &lhs, &rhs);
2438 p = default_prob / (node->m_c->m_subtree_prob + default_prob);
2439
2440 bb = emit_cmp_and_jump_insns (bb, lhs, rhs, GT_EXPR,
2441 m_default_bb, p, loc);
2442
2443 emit_jump (bb, node->m_c->m_case_bb);
2444 return NULL;
2445 }
2446 }
2447
2448 return bb;
2449 }
2450
/* The main function of the pass scans statements for switches and invokes
   switch_conversion::expand on them.  */
2453
2454 namespace {
2455
2456 const pass_data pass_data_convert_switch =
2457 {
2458 GIMPLE_PASS, /* type */
2459 "switchconv", /* name */
2460 OPTGROUP_NONE, /* optinfo_flags */
2461 TV_TREE_SWITCH_CONVERSION, /* tv_id */
2462 ( PROP_cfg | PROP_ssa ), /* properties_required */
2463 0, /* properties_provided */
2464 0, /* properties_destroyed */
2465 0, /* todo_flags_start */
2466 TODO_update_ssa, /* todo_flags_finish */
2467 };
2468
2469 class pass_convert_switch : public gimple_opt_pass
2470 {
2471 public:
2472 pass_convert_switch (gcc::context *ctxt)
2473 : gimple_opt_pass (pass_data_convert_switch, ctxt)
2474 {}
2475
2476 /* opt_pass methods: */
2477 bool gate (function *) final override
2478 {
2479 return flag_tree_switch_conversion != 0;
2480 }
2481 unsigned int execute (function *) final override;
2482
2483 }; // class pass_convert_switch
2484
2485 unsigned int
2486 pass_convert_switch::execute (function *fun)
2487 {
2488 basic_block bb;
2489 bool cfg_altered = false;
2490
2491 FOR_EACH_BB_FN (bb, fun)
2492 {
2493 if (gswitch *stmt = safe_dyn_cast <gswitch *> (*gsi_last_bb (bb)))
2494 {
2495 if (dump_file)
2496 {
2497 expanded_location loc = expand_location (gimple_location (stmt));
2498
2499 fprintf (dump_file, "beginning to process the following "
2500 "SWITCH statement (%s:%d) : ------- \n",
2501 loc.file, loc.line);
2502 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2503 putc ('\n', dump_file);
2504 }
2505
2506 switch_conversion sconv;
2507 sconv.expand (stmt);
2508 cfg_altered |= sconv.m_cfg_altered;
2509 if (!sconv.m_reason)
2510 {
2511 if (dump_file)
2512 {
2513 fputs ("Switch converted\n", dump_file);
2514 fputs ("--------------------------------\n", dump_file);
2515 }
2516
2517 /* Make no effort to update the post-dominator tree.
2518 It is actually not that hard for the transformations
2519 we have performed, but it is not supported
2520 by iterate_fix_dominators. */
2521 free_dominance_info (CDI_POST_DOMINATORS);
2522 }
2523 else
2524 {
2525 if (dump_file)
2526 {
2527 fputs ("Bailing out - ", dump_file);
2528 fputs (sconv.m_reason, dump_file);
2529 fputs ("\n--------------------------------\n", dump_file);
2530 }
2531 }
2532 }
2533 }
2534
return cfg_altered ? TODO_cleanup_cfg : 0;
2536 }
2537
2538 } // anon namespace
2539
2540 gimple_opt_pass *
2541 make_pass_convert_switch (gcc::context *ctxt)
2542 {
2543 return new pass_convert_switch (ctxt);
2544 }
2545
/* The main function of the pass scans statements for switches and expands
   the remaining ones into a decision tree (with embedded jump tables and
   bit tests) via switch_decision_tree.  */
2548
2549 namespace {
2550
2551 template <bool O0> class pass_lower_switch: public gimple_opt_pass
2552 {
2553 public:
2554 pass_lower_switch (gcc::context *ctxt) : gimple_opt_pass (data, ctxt) {}
2555
2556 static const pass_data data;
2557 opt_pass *
2558 clone () final override
2559 {
2560 return new pass_lower_switch<O0> (m_ctxt);
2561 }
2562
2563 bool
2564 gate (function *) final override
2565 {
2566 return !O0 || !optimize;
2567 }
2568
2569 unsigned int execute (function *fun) final override;
2570 }; // class pass_lower_switch
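
/* This template is instantiated twice below: pass_lower_switch<true> is
   the "switchlower_O0" variant, whose gate only fires when not
   optimizing, while pass_lower_switch<false> ("switchlower") runs in the
   optimizing pipeline and additionally groups case labels before
   lowering.  */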
2571
2572 template <bool O0>
2573 const pass_data pass_lower_switch<O0>::data = {
2574 GIMPLE_PASS, /* type */
2575 O0 ? "switchlower_O0" : "switchlower", /* name */
2576 OPTGROUP_NONE, /* optinfo_flags */
2577 TV_TREE_SWITCH_LOWERING, /* tv_id */
2578 ( PROP_cfg | PROP_ssa ), /* properties_required */
2579 0, /* properties_provided */
2580 0, /* properties_destroyed */
2581 0, /* todo_flags_start */
2582 TODO_update_ssa | TODO_cleanup_cfg, /* todo_flags_finish */
2583 };
2584
2585 template <bool O0>
2586 unsigned int
2587 pass_lower_switch<O0>::execute (function *fun)
2588 {
2589 basic_block bb;
2590 bool expanded = false;
2591
2592 auto_vec<gimple *> switch_statements;
2593 switch_statements.create (1);
2594
2595 FOR_EACH_BB_FN (bb, fun)
2596 {
2597 if (gswitch *swtch = safe_dyn_cast <gswitch *> (*gsi_last_bb (bb)))
2598 {
2599 if (!O0)
2600 group_case_labels_stmt (swtch);
2601 switch_statements.safe_push (swtch);
2602 }
2603 }
2604
2605 for (unsigned i = 0; i < switch_statements.length (); i++)
2606 {
2607 gimple *stmt = switch_statements[i];
2608 if (dump_file)
2609 {
2610 expanded_location loc = expand_location (gimple_location (stmt));
2611
2612 fprintf (dump_file, "beginning to process the following "
2613 "SWITCH statement (%s:%d) : ------- \n",
2614 loc.file, loc.line);
2615 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2616 putc ('\n', dump_file);
2617 }
2618
2619 gswitch *swtch = dyn_cast<gswitch *> (stmt);
2620 if (swtch)
2621 {
2622 switch_decision_tree dt (swtch);
2623 expanded |= dt.analyze_switch_statement ();
2624 }
2625 }
2626
2627 if (expanded)
2628 {
2629 free_dominance_info (CDI_DOMINATORS);
2630 free_dominance_info (CDI_POST_DOMINATORS);
2631 mark_virtual_operands_for_renaming (cfun);
2632 }
2633
2634 return 0;
2635 }
2636
2637 } // anon namespace
2638
2639 gimple_opt_pass *
2640 make_pass_lower_switch_O0 (gcc::context *ctxt)
2641 {
2642 return new pass_lower_switch<true> (ctxt);
2643 }
2644 gimple_opt_pass *
2645 make_pass_lower_switch (gcc::context *ctxt)
2646 {
2647 return new pass_lower_switch<false> (ctxt);
2648 }