[thirdparty/gcc.git] / gcc / tree-switch-conversion.c
1 /* Lower GIMPLE_SWITCH expressions to something more efficient than
2 a jump table.
3 Copyright (C) 2006-2020 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by the
9 Free Software Foundation; either version 3, or (at your option) any
10 later version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not, write to the Free
19 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
20 02110-1301, USA. */
21
22 /* This file handles the lowering of GIMPLE_SWITCH to an indexed
23 load, or a series of bit-test-and-branch expressions. */
24
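/* As a simplified illustration of the indexed-load form, a switch such as

     switch (x) {
       case 0: a = 10; break;
       case 1: a = 20; break;
       case 2: a = 30; break;
       default: a = 0; break;
     }

   is turned into a load from a generated static array (named CSWTCH below),
   guarded by a range check that falls back to the default value:

     static const int CSWTCH[3] = { 10, 20, 30 };
     a = (x <= 2) ? CSWTCH[x] : 0;

   In general the array index is the switch argument minus the lowest case
   value.  The names and shapes above are only illustrative.  */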
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "backend.h"
29 #include "insn-codes.h"
30 #include "rtl.h"
31 #include "tree.h"
32 #include "gimple.h"
33 #include "cfghooks.h"
34 #include "tree-pass.h"
35 #include "ssa.h"
36 #include "optabs-tree.h"
37 #include "cgraph.h"
38 #include "gimple-pretty-print.h"
39 #include "fold-const.h"
40 #include "varasm.h"
41 #include "stor-layout.h"
42 #include "cfganal.h"
43 #include "gimplify.h"
44 #include "gimple-iterator.h"
45 #include "gimplify-me.h"
46 #include "gimple-fold.h"
47 #include "tree-cfg.h"
48 #include "cfgloop.h"
49 #include "alloc-pool.h"
50 #include "target.h"
51 #include "tree-into-ssa.h"
52 #include "omp-general.h"
53
54 /* ??? For lang_hooks.types.type_for_mode, but is there a word_mode
55 type in the GIMPLE type system that is language-independent? */
56 #include "langhooks.h"
57
58 #include "tree-switch-conversion.h"
59 \f
60 using namespace tree_switch_conversion;
61
62 /* Constructor. */
63
64 switch_conversion::switch_conversion (): m_final_bb (NULL),
65 m_constructors (NULL), m_default_values (NULL),
66 m_arr_ref_first (NULL), m_arr_ref_last (NULL),
67 m_reason (NULL), m_default_case_nonstandard (false), m_cfg_altered (false)
68 {
69 }
70
71 /* Collect information about the SWTCH statement. */
72
73 void
74 switch_conversion::collect (gswitch *swtch)
75 {
76 unsigned int branch_num = gimple_switch_num_labels (swtch);
77 tree min_case, max_case;
78 unsigned int i;
79 edge e, e_default, e_first;
80 edge_iterator ei;
81
82 m_switch = swtch;
83
84 /* The gimplifier has already sorted the cases by CASE_LOW and ensured there
85 is a default label which is the first in the vector.
86 Collect the bits we can deduce from the CFG. */
87 m_index_expr = gimple_switch_index (swtch);
88 m_switch_bb = gimple_bb (swtch);
89 e_default = gimple_switch_default_edge (cfun, swtch);
90 m_default_bb = e_default->dest;
91 m_default_prob = e_default->probability;
92
93 /* Get upper and lower bounds of case values, and the covered range. */
94 min_case = gimple_switch_label (swtch, 1);
95 max_case = gimple_switch_label (swtch, branch_num - 1);
96
97 m_range_min = CASE_LOW (min_case);
98 if (CASE_HIGH (max_case) != NULL_TREE)
99 m_range_max = CASE_HIGH (max_case);
100 else
101 m_range_max = CASE_LOW (max_case);
102
103 m_contiguous_range = true;
104 tree last = CASE_HIGH (min_case) ? CASE_HIGH (min_case) : m_range_min;
105 for (i = 2; i < branch_num; i++)
106 {
107 tree elt = gimple_switch_label (swtch, i);
108 if (wi::to_wide (last) + 1 != wi::to_wide (CASE_LOW (elt)))
109 {
110 m_contiguous_range = false;
111 break;
112 }
113 last = CASE_HIGH (elt) ? CASE_HIGH (elt) : CASE_LOW (elt);
114 }
115
116 if (m_contiguous_range)
117 e_first = gimple_switch_edge (cfun, swtch, 1);
118 else
119 e_first = e_default;
120
121 /* See if there is one common successor block for all branch
122 targets. If it exists, record it in FINAL_BB.
123 As a guess, start with the destination of the first non-default case
124 if the range is contiguous, or with the default case otherwise; if the
125 guessed block is a forwarder, use its destination instead. */
126 if (! single_pred_p (e_first->dest))
127 m_final_bb = e_first->dest;
128 else if (single_succ_p (e_first->dest)
129 && ! single_pred_p (single_succ (e_first->dest)))
130 m_final_bb = single_succ (e_first->dest);
131 /* Require that all switch destinations are either that common
132 FINAL_BB or a forwarder to it, except for the default
133 case if the range is contiguous. */
134 if (m_final_bb)
135 FOR_EACH_EDGE (e, ei, m_switch_bb->succs)
136 {
137 if (e->dest == m_final_bb)
138 continue;
139
140 if (single_pred_p (e->dest)
141 && single_succ_p (e->dest)
142 && single_succ (e->dest) == m_final_bb)
143 continue;
144
145 if (e == e_default && m_contiguous_range)
146 {
147 m_default_case_nonstandard = true;
148 continue;
149 }
150
151 m_final_bb = NULL;
152 break;
153 }
154
155 m_range_size
156 = int_const_binop (MINUS_EXPR, m_range_max, m_range_min);
157
158 /* Get a count of the number of case labels. Single-valued case labels
159 simply count as one, but a case range counts double, since it may
160 require two compares if it gets lowered as a branching tree. */
161 m_count = 0;
162 for (i = 1; i < branch_num; i++)
163 {
164 tree elt = gimple_switch_label (swtch, i);
165 m_count++;
166 if (CASE_HIGH (elt)
167 && ! tree_int_cst_equal (CASE_LOW (elt), CASE_HIGH (elt)))
168 m_count++;
169 }
170
171 /* Get the number of unique non-default targets out of the GIMPLE_SWITCH
172 block. Assume a CFG cleanup would have already removed degenerate
173 switch statements; this allows us to just use EDGE_COUNT. */
174 m_uniq = EDGE_COUNT (gimple_bb (swtch)->succs) - 1;
175 }
176
177 /* Checks whether the range covered by the case statements of the switch
178 statement is not too big and whether the number of branches actually
179 justifies the size of the new array. */
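/* As a rough example, assuming a branch-ratio parameter value of 8, a
   switch with 10 case labels whose values span 0 .. 999 is rejected here
   because the range size 999 exceeds 10 * 8, whereas the same 10 labels
   packed into 0 .. 79 would be accepted. */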
180
181 bool
182 switch_conversion::check_range ()
183 {
184 gcc_assert (m_range_size);
185 if (!tree_fits_uhwi_p (m_range_size))
186 {
187 m_reason = "index range way too large or otherwise unusable";
188 return false;
189 }
190
191 if (tree_to_uhwi (m_range_size)
192 > ((unsigned) m_count * param_switch_conversion_branch_ratio))
193 {
194 m_reason = "the maximum range-branch ratio exceeded";
195 return false;
196 }
197
198 return true;
199 }
200
201 /* Checks whether all basic blocks except the final BB are empty. */
202
203 bool
204 switch_conversion::check_all_empty_except_final ()
205 {
206 edge e, e_default = find_edge (m_switch_bb, m_default_bb);
207 edge_iterator ei;
208
209 FOR_EACH_EDGE (e, ei, m_switch_bb->succs)
210 {
211 if (e->dest == m_final_bb)
212 continue;
213
214 if (!empty_block_p (e->dest))
215 {
216 if (m_contiguous_range && e == e_default)
217 {
218 m_default_case_nonstandard = true;
219 continue;
220 }
221
222 m_reason = "bad case - a non-final BB not empty";
223 return false;
224 }
225 }
226
227 return true;
228 }
229
230 /* This function checks whether all required values in phi nodes in final_bb
231 are constants. Required values are those that correspond to a basic block
232 which is a part of the examined switch statement. It returns true if the
233 phi nodes are OK, otherwise false. */
234
235 bool
236 switch_conversion::check_final_bb ()
237 {
238 gphi_iterator gsi;
239
240 m_phi_count = 0;
241 for (gsi = gsi_start_phis (m_final_bb); !gsi_end_p (gsi); gsi_next (&gsi))
242 {
243 gphi *phi = gsi.phi ();
244 unsigned int i;
245
246 if (virtual_operand_p (gimple_phi_result (phi)))
247 continue;
248
249 m_phi_count++;
250
251 for (i = 0; i < gimple_phi_num_args (phi); i++)
252 {
253 basic_block bb = gimple_phi_arg_edge (phi, i)->src;
254
255 if (bb == m_switch_bb
256 || (single_pred_p (bb)
257 && single_pred (bb) == m_switch_bb
258 && (!m_default_case_nonstandard
259 || empty_block_p (bb))))
260 {
261 tree reloc, val;
262 const char *reason = NULL;
263
264 val = gimple_phi_arg_def (phi, i);
265 if (!is_gimple_ip_invariant (val))
266 reason = "non-invariant value from a case";
267 else
268 {
269 reloc = initializer_constant_valid_p (val, TREE_TYPE (val));
270 if ((flag_pic && reloc != null_pointer_node)
271 || (!flag_pic && reloc == NULL_TREE))
272 {
273 if (reloc)
274 reason
275 = "value from a case would need runtime relocations";
276 else
277 reason
278 = "value from a case is not a valid initializer";
279 }
280 }
281 if (reason)
282 {
283 /* For a contiguous range, we can allow a non-constant value
284 or one that needs relocation, as long as it is
285 only reachable from the default case. */
286 if (bb == m_switch_bb)
287 bb = m_final_bb;
288 if (!m_contiguous_range || bb != m_default_bb)
289 {
290 m_reason = reason;
291 return false;
292 }
293
294 unsigned int branch_num = gimple_switch_num_labels (m_switch);
295 for (unsigned int i = 1; i < branch_num; i++)
296 {
297 if (gimple_switch_label_bb (cfun, m_switch, i) == bb)
298 {
299 m_reason = reason;
300 return false;
301 }
302 }
303 m_default_case_nonstandard = true;
304 }
305 }
306 }
307 }
308
309 return true;
310 }
311
312 /* The following function allocates default_values, target_{in,out}_names and
313 constructors arrays. The last one is also populated with pointers to
314 vectors that will become constructors of new arrays. */
315
316 void
317 switch_conversion::create_temp_arrays ()
318 {
319 int i;
320
321 m_default_values = XCNEWVEC (tree, m_phi_count * 3);
322 /* ??? Macros do not support multi argument templates in their
323 argument list. We create a typedef to work around that problem. */
324 typedef vec<constructor_elt, va_gc> *vec_constructor_elt_gc;
325 m_constructors = XCNEWVEC (vec_constructor_elt_gc, m_phi_count);
326 m_target_inbound_names = m_default_values + m_phi_count;
327 m_target_outbound_names = m_target_inbound_names + m_phi_count;
328 for (i = 0; i < m_phi_count; i++)
329 vec_alloc (m_constructors[i], tree_to_uhwi (m_range_size) + 1);
330 }
331
332 /* Populate the array of default values in the order of phi nodes.
333 DEFAULT_CASE is the CASE_LABEL_EXPR for the default switch branch
334 if the range is non-contiguous or the default case has standard
335 structure, otherwise it is the first non-default case instead. */
336
337 void
338 switch_conversion::gather_default_values (tree default_case)
339 {
340 gphi_iterator gsi;
341 basic_block bb = label_to_block (cfun, CASE_LABEL (default_case));
342 edge e;
343 int i = 0;
344
345 gcc_assert (CASE_LOW (default_case) == NULL_TREE
346 || m_default_case_nonstandard);
347
348 if (bb == m_final_bb)
349 e = find_edge (m_switch_bb, bb);
350 else
351 e = single_succ_edge (bb);
352
353 for (gsi = gsi_start_phis (m_final_bb); !gsi_end_p (gsi); gsi_next (&gsi))
354 {
355 gphi *phi = gsi.phi ();
356 if (virtual_operand_p (gimple_phi_result (phi)))
357 continue;
358 tree val = PHI_ARG_DEF_FROM_EDGE (phi, e);
359 gcc_assert (val);
360 m_default_values[i++] = val;
361 }
362 }
363
364 /* The following function populates the vectors in the constructors array with
365 future contents of the static arrays. The vectors are populated in the
366 order of phi nodes. */
367
368 void
369 switch_conversion::build_constructors ()
370 {
371 unsigned i, branch_num = gimple_switch_num_labels (m_switch);
372 tree pos = m_range_min;
373 tree pos_one = build_int_cst (TREE_TYPE (pos), 1);
374
375 for (i = 1; i < branch_num; i++)
376 {
377 tree cs = gimple_switch_label (m_switch, i);
378 basic_block bb = label_to_block (cfun, CASE_LABEL (cs));
379 edge e;
380 tree high;
381 gphi_iterator gsi;
382 int j;
383
384 if (bb == m_final_bb)
385 e = find_edge (m_switch_bb, bb);
386 else
387 e = single_succ_edge (bb);
388 gcc_assert (e);
389
390 while (tree_int_cst_lt (pos, CASE_LOW (cs)))
391 {
392 int k;
393 for (k = 0; k < m_phi_count; k++)
394 {
395 constructor_elt elt;
396
397 elt.index = int_const_binop (MINUS_EXPR, pos, m_range_min);
398 elt.value
399 = unshare_expr_without_location (m_default_values[k]);
400 m_constructors[k]->quick_push (elt);
401 }
402
403 pos = int_const_binop (PLUS_EXPR, pos, pos_one);
404 }
405 gcc_assert (tree_int_cst_equal (pos, CASE_LOW (cs)));
406
407 j = 0;
408 if (CASE_HIGH (cs))
409 high = CASE_HIGH (cs);
410 else
411 high = CASE_LOW (cs);
412 for (gsi = gsi_start_phis (m_final_bb);
413 !gsi_end_p (gsi); gsi_next (&gsi))
414 {
415 gphi *phi = gsi.phi ();
416 if (virtual_operand_p (gimple_phi_result (phi)))
417 continue;
418 tree val = PHI_ARG_DEF_FROM_EDGE (phi, e);
419 tree low = CASE_LOW (cs);
420 pos = CASE_LOW (cs);
421
422 do
423 {
424 constructor_elt elt;
425
426 elt.index = int_const_binop (MINUS_EXPR, pos, m_range_min);
427 elt.value = unshare_expr_without_location (val);
428 m_constructors[j]->quick_push (elt);
429
430 pos = int_const_binop (PLUS_EXPR, pos, pos_one);
431 } while (!tree_int_cst_lt (high, pos)
432 && tree_int_cst_lt (low, pos));
433 j++;
434 }
435 }
436 }
437
438 /* If all values in the constructor vector are values of a linear function
439 a * x + b, then return true. When true, COEFF_A and COEFF_B are the
440 coefficients of the linear function. Note that equal values are a special
441 case of a linear function with a equal to zero. */
442
443 bool
444 switch_conversion::contains_linear_function_p (vec<constructor_elt, va_gc> *vec,
445 wide_int *coeff_a,
446 wide_int *coeff_b)
447 {
448 unsigned int i;
449 constructor_elt *elt;
450
451 gcc_assert (vec->length () >= 2);
452
453 /* Let's try to find a linear function a * x + b that fits the
454 given values. 'a' can be calculated as follows:
455
456 a = (y2 - y1) / (x2 - x1) where x2 - x1 = 1 (consecutive case indices)
457 a = y2 - y1
458
459 and
460
461 b = y2 - a * x2
462
463 */
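/* Worked example: if the first two constructor values are y1 = 7 and
   y2 = 10, then a = 3 and b = 10 - 3 * (range_min + 1); the loop below
   verifies that the remaining values continue the progression 13, 16, ...
   and returns false as soon as one element breaks it. */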
464
465 tree elt0 = (*vec)[0].value;
466 tree elt1 = (*vec)[1].value;
467
468 if (TREE_CODE (elt0) != INTEGER_CST || TREE_CODE (elt1) != INTEGER_CST)
469 return false;
470
471 wide_int range_min
472 = wide_int::from (wi::to_wide (m_range_min),
473 TYPE_PRECISION (TREE_TYPE (elt0)),
474 TYPE_SIGN (TREE_TYPE (m_range_min)));
475 wide_int y1 = wi::to_wide (elt0);
476 wide_int y2 = wi::to_wide (elt1);
477 wide_int a = y2 - y1;
478 wide_int b = y2 - a * (range_min + 1);
479
480 /* Verify that all values fulfill the linear function. */
481 FOR_EACH_VEC_SAFE_ELT (vec, i, elt)
482 {
483 if (TREE_CODE (elt->value) != INTEGER_CST)
484 return false;
485
486 wide_int value = wi::to_wide (elt->value);
487 if (a * range_min + b != value)
488 return false;
489
490 ++range_min;
491 }
492
493 *coeff_a = a;
494 *coeff_b = b;
495
496 return true;
497 }
498
499 /* Return type which should be used for array elements, either TYPE's
500 main variant or, for integral types, some smaller integral type
501 that can still hold all the constants. */
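/* For example, an array of 'int' values that all fit in 0 .. 255 can be
   emitted as an 'unsigned char' array instead; the loop over the
   constructor elements below picks the narrowest mode that still holds
   every constant. */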
502
503 tree
504 switch_conversion::array_value_type (tree type, int num)
505 {
506 unsigned int i, len = vec_safe_length (m_constructors[num]);
507 constructor_elt *elt;
508 int sign = 0;
509 tree smaller_type;
510
511 /* Types with alignments greater than their size can reach here, e.g. out of
512 SRA. We couldn't use these as an array component type so get back to the
513 main variant first, which, for our purposes, is fine for other types as
514 well. */
515
516 type = TYPE_MAIN_VARIANT (type);
517
518 if (!INTEGRAL_TYPE_P (type))
519 return type;
520
521 scalar_int_mode type_mode = SCALAR_INT_TYPE_MODE (type);
522 scalar_int_mode mode = get_narrowest_mode (type_mode);
523 if (GET_MODE_SIZE (type_mode) <= GET_MODE_SIZE (mode))
524 return type;
525
526 if (len < (optimize_bb_for_size_p (gimple_bb (m_switch)) ? 2 : 32))
527 return type;
528
529 FOR_EACH_VEC_SAFE_ELT (m_constructors[num], i, elt)
530 {
531 wide_int cst;
532
533 if (TREE_CODE (elt->value) != INTEGER_CST)
534 return type;
535
536 cst = wi::to_wide (elt->value);
537 while (1)
538 {
539 unsigned int prec = GET_MODE_BITSIZE (mode);
540 if (prec > HOST_BITS_PER_WIDE_INT)
541 return type;
542
543 if (sign >= 0 && cst == wi::zext (cst, prec))
544 {
545 if (sign == 0 && cst == wi::sext (cst, prec))
546 break;
547 sign = 1;
548 break;
549 }
550 if (sign <= 0 && cst == wi::sext (cst, prec))
551 {
552 sign = -1;
553 break;
554 }
555
556 if (sign == 1)
557 sign = 0;
558
559 if (!GET_MODE_WIDER_MODE (mode).exists (&mode)
560 || GET_MODE_SIZE (mode) >= GET_MODE_SIZE (type_mode))
561 return type;
562 }
563 }
564
565 if (sign == 0)
566 sign = TYPE_UNSIGNED (type) ? 1 : -1;
567 smaller_type = lang_hooks.types.type_for_mode (mode, sign >= 0);
568 if (GET_MODE_SIZE (type_mode)
569 <= GET_MODE_SIZE (SCALAR_INT_TYPE_MODE (smaller_type)))
570 return type;
571
572 return smaller_type;
573 }
574
575 /* Create an appropriate array type and declaration and assemble a static
576 array variable. Also create a load statement that initializes
577 the variable in question with a value from the static array. SWTCH is
578 the switch statement being converted, NUM is the index to
579 arrays of constructors, default values and target SSA names
580 for this particular array. ARR_INDEX_TYPE is the type of the index
581 of the new array, PHI is the phi node of the final BB that corresponds
582 to the value that will be loaded from the created array. TIDX
583 is an ssa name of a temporary variable holding the index for loads from the
584 new array. */
585
586 void
587 switch_conversion::build_one_array (int num, tree arr_index_type,
588 gphi *phi, tree tidx)
589 {
590 tree name;
591 gimple *load;
592 gimple_stmt_iterator gsi = gsi_for_stmt (m_switch);
593 location_t loc = gimple_location (m_switch);
594
595 gcc_assert (m_default_values[num]);
596
597 name = copy_ssa_name (PHI_RESULT (phi));
598 m_target_inbound_names[num] = name;
599
600 vec<constructor_elt, va_gc> *constructor = m_constructors[num];
601 wide_int coeff_a, coeff_b;
602 bool linear_p = contains_linear_function_p (constructor, &coeff_a, &coeff_b);
603 tree type;
604 if (linear_p
605 && (type = range_check_type (TREE_TYPE ((*constructor)[0].value))))
606 {
607 if (dump_file && coeff_a.to_uhwi () > 0)
608 fprintf (dump_file, "Linear transformation with A = %" PRId64
609 " and B = %" PRId64 "\n", coeff_a.to_shwi (),
610 coeff_b.to_shwi ());
611
612 /* We must use type of constructor values. */
613 gimple_seq seq = NULL;
614 tree tmp = gimple_convert (&seq, type, m_index_expr);
615 tree tmp2 = gimple_build (&seq, MULT_EXPR, type,
616 wide_int_to_tree (type, coeff_a), tmp);
617 tree tmp3 = gimple_build (&seq, PLUS_EXPR, type, tmp2,
618 wide_int_to_tree (type, coeff_b));
619 tree tmp4 = gimple_convert (&seq, TREE_TYPE (name), tmp3);
620 gsi_insert_seq_before (&gsi, seq, GSI_SAME_STMT);
621 load = gimple_build_assign (name, tmp4);
622 }
623 else
624 {
625 tree array_type, ctor, decl, value_type, fetch, default_type;
626
627 default_type = TREE_TYPE (m_default_values[num]);
628 value_type = array_value_type (default_type, num);
629 array_type = build_array_type (value_type, arr_index_type);
630 if (default_type != value_type)
631 {
632 unsigned int i;
633 constructor_elt *elt;
634
635 FOR_EACH_VEC_SAFE_ELT (constructor, i, elt)
636 elt->value = fold_convert (value_type, elt->value);
637 }
638 ctor = build_constructor (array_type, constructor);
639 TREE_CONSTANT (ctor) = true;
640 TREE_STATIC (ctor) = true;
641
642 decl = build_decl (loc, VAR_DECL, NULL_TREE, array_type);
643 TREE_STATIC (decl) = 1;
644 DECL_INITIAL (decl) = ctor;
645
646 DECL_NAME (decl) = create_tmp_var_name ("CSWTCH");
647 DECL_ARTIFICIAL (decl) = 1;
648 DECL_IGNORED_P (decl) = 1;
649 TREE_CONSTANT (decl) = 1;
650 TREE_READONLY (decl) = 1;
651 DECL_IGNORED_P (decl) = 1;
652 if (offloading_function_p (cfun->decl))
653 DECL_ATTRIBUTES (decl)
654 = tree_cons (get_identifier ("omp declare target"), NULL_TREE,
655 NULL_TREE);
656 varpool_node::finalize_decl (decl);
657
658 fetch = build4 (ARRAY_REF, value_type, decl, tidx, NULL_TREE,
659 NULL_TREE);
660 if (default_type != value_type)
661 {
662 fetch = fold_convert (default_type, fetch);
663 fetch = force_gimple_operand_gsi (&gsi, fetch, true, NULL_TREE,
664 true, GSI_SAME_STMT);
665 }
666 load = gimple_build_assign (name, fetch);
667 }
668
669 gsi_insert_before (&gsi, load, GSI_SAME_STMT);
670 update_stmt (load);
671 m_arr_ref_last = load;
672 }
673
674 /* Builds and initializes static arrays initialized with values gathered from
675 the switch statement. Also creates statements that load values from
676 them. */
677
678 void
679 switch_conversion::build_arrays ()
680 {
681 tree arr_index_type;
682 tree tidx, sub, utype;
683 gimple *stmt;
684 gimple_stmt_iterator gsi;
685 gphi_iterator gpi;
686 int i;
687 location_t loc = gimple_location (m_switch);
688
689 gsi = gsi_for_stmt (m_switch);
690
691 /* Make sure we do not generate arithmetic in a subrange. */
692 utype = TREE_TYPE (m_index_expr);
693 if (TREE_TYPE (utype))
694 utype = lang_hooks.types.type_for_mode (TYPE_MODE (TREE_TYPE (utype)), 1);
695 else
696 utype = lang_hooks.types.type_for_mode (TYPE_MODE (utype), 1);
697
698 arr_index_type = build_index_type (m_range_size);
699 tidx = make_ssa_name (utype);
700 sub = fold_build2_loc (loc, MINUS_EXPR, utype,
701 fold_convert_loc (loc, utype, m_index_expr),
702 fold_convert_loc (loc, utype, m_range_min));
703 sub = force_gimple_operand_gsi (&gsi, sub,
704 false, NULL, true, GSI_SAME_STMT);
705 stmt = gimple_build_assign (tidx, sub);
706
707 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
708 update_stmt (stmt);
709 m_arr_ref_first = stmt;
710
711 for (gpi = gsi_start_phis (m_final_bb), i = 0;
712 !gsi_end_p (gpi); gsi_next (&gpi))
713 {
714 gphi *phi = gpi.phi ();
715 if (!virtual_operand_p (gimple_phi_result (phi)))
716 build_one_array (i++, arr_index_type, phi, tidx);
717 else
718 {
719 edge e;
720 edge_iterator ei;
721 FOR_EACH_EDGE (e, ei, m_switch_bb->succs)
722 {
723 if (e->dest == m_final_bb)
724 break;
725 if (!m_default_case_nonstandard
726 || e->dest != m_default_bb)
727 {
728 e = single_succ_edge (e->dest);
729 break;
730 }
731 }
732 gcc_assert (e && e->dest == m_final_bb);
733 m_target_vop = PHI_ARG_DEF_FROM_EDGE (phi, e);
734 }
735 }
736 }
737
738 /* Generates and appropriately inserts loads of default values at the position
739 given by GSI. Returns the last inserted statement. */
740
741 gassign *
742 switch_conversion::gen_def_assigns (gimple_stmt_iterator *gsi)
743 {
744 int i;
745 gassign *assign = NULL;
746
747 for (i = 0; i < m_phi_count; i++)
748 {
749 tree name = copy_ssa_name (m_target_inbound_names[i]);
750 m_target_outbound_names[i] = name;
751 assign = gimple_build_assign (name, m_default_values[i]);
752 gsi_insert_before (gsi, assign, GSI_SAME_STMT);
753 update_stmt (assign);
754 }
755 return assign;
756 }
757
758 /* Deletes the unused bbs and edges that now contain the switch statement and
759 its empty branch bbs. BBD is the now dead BB containing
760 the original switch statement, FINAL is the last BB of the converted
761 switch statement (in terms of succession). */
762
763 void
764 switch_conversion::prune_bbs (basic_block bbd, basic_block final,
765 basic_block default_bb)
766 {
767 edge_iterator ei;
768 edge e;
769
770 for (ei = ei_start (bbd->succs); (e = ei_safe_edge (ei)); )
771 {
772 basic_block bb;
773 bb = e->dest;
774 remove_edge (e);
775 if (bb != final && bb != default_bb)
776 delete_basic_block (bb);
777 }
778 delete_basic_block (bbd);
779 }
780
781 /* Add values to phi nodes in final_bb for the two new edges. E1F is the edge
782 from the basic block loading values from an array and E2F from the basic
783 block loading default values. BBF is the last switch basic block (see the
784 bbf description in the comment below). */
785
786 void
787 switch_conversion::fix_phi_nodes (edge e1f, edge e2f, basic_block bbf)
788 {
789 gphi_iterator gsi;
790 int i;
791
792 for (gsi = gsi_start_phis (bbf), i = 0;
793 !gsi_end_p (gsi); gsi_next (&gsi))
794 {
795 gphi *phi = gsi.phi ();
796 tree inbound, outbound;
797 if (virtual_operand_p (gimple_phi_result (phi)))
798 inbound = outbound = m_target_vop;
799 else
800 {
801 inbound = m_target_inbound_names[i];
802 outbound = m_target_outbound_names[i++];
803 }
804 add_phi_arg (phi, inbound, e1f, UNKNOWN_LOCATION);
805 if (!m_default_case_nonstandard)
806 add_phi_arg (phi, outbound, e2f, UNKNOWN_LOCATION);
807 }
808 }
809
810 /* Creates a check whether the switch expression value actually falls into the
811 range given by all the cases. If it does not, the temporaries are loaded
812 with default values instead. */
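/* Schematically, the code generated below has the shape

     bb0:  tidx = x - range_min;
           if (tidx <= range_size) goto bb1; else goto bb2;
     bb1:  v_1 = CSWTCH[tidx]; ...        loads from the static arrays
     bb2:  v_2 = default_value; ...       loads of the default values
     bbF:  v = PHI <v_1 (bb1), v_2 (bb2)>

   except that when m_default_case_nonstandard is set, bb2 is the original
   default block and no default assignments are emitted.  The names above
   are illustrative only. */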
813
814 void
815 switch_conversion::gen_inbound_check ()
816 {
817 tree label_decl1 = create_artificial_label (UNKNOWN_LOCATION);
818 tree label_decl2 = create_artificial_label (UNKNOWN_LOCATION);
819 tree label_decl3 = create_artificial_label (UNKNOWN_LOCATION);
820 glabel *label1, *label2, *label3;
821 tree utype, tidx;
822 tree bound;
823
824 gcond *cond_stmt;
825
826 gassign *last_assign = NULL;
827 gimple_stmt_iterator gsi;
828 basic_block bb0, bb1, bb2, bbf, bbd;
829 edge e01 = NULL, e02, e21, e1d, e1f, e2f;
830 location_t loc = gimple_location (m_switch);
831
832 gcc_assert (m_default_values);
833
834 bb0 = gimple_bb (m_switch);
835
836 tidx = gimple_assign_lhs (m_arr_ref_first);
837 utype = TREE_TYPE (tidx);
838
839 /* (end of) block 0 */
840 gsi = gsi_for_stmt (m_arr_ref_first);
841 gsi_next (&gsi);
842
843 bound = fold_convert_loc (loc, utype, m_range_size);
844 cond_stmt = gimple_build_cond (LE_EXPR, tidx, bound, NULL_TREE, NULL_TREE);
845 gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT);
846 update_stmt (cond_stmt);
847
848 /* block 2 */
849 if (!m_default_case_nonstandard)
850 {
851 label2 = gimple_build_label (label_decl2);
852 gsi_insert_before (&gsi, label2, GSI_SAME_STMT);
853 last_assign = gen_def_assigns (&gsi);
854 }
855
856 /* block 1 */
857 label1 = gimple_build_label (label_decl1);
858 gsi_insert_before (&gsi, label1, GSI_SAME_STMT);
859
860 /* block F */
861 gsi = gsi_start_bb (m_final_bb);
862 label3 = gimple_build_label (label_decl3);
863 gsi_insert_before (&gsi, label3, GSI_SAME_STMT);
864
865 /* cfg fix */
866 e02 = split_block (bb0, cond_stmt);
867 bb2 = e02->dest;
868
869 if (m_default_case_nonstandard)
870 {
871 bb1 = bb2;
872 bb2 = m_default_bb;
873 e01 = e02;
874 e01->flags = EDGE_TRUE_VALUE;
875 e02 = make_edge (bb0, bb2, EDGE_FALSE_VALUE);
876 edge e_default = find_edge (bb1, bb2);
877 for (gphi_iterator gsi = gsi_start_phis (bb2);
878 !gsi_end_p (gsi); gsi_next (&gsi))
879 {
880 gphi *phi = gsi.phi ();
881 tree arg = PHI_ARG_DEF_FROM_EDGE (phi, e_default);
882 add_phi_arg (phi, arg, e02,
883 gimple_phi_arg_location_from_edge (phi, e_default));
884 }
885 /* Partially fix the dominator tree, if it is available. */
886 if (dom_info_available_p (CDI_DOMINATORS))
887 redirect_immediate_dominators (CDI_DOMINATORS, bb1, bb0);
888 }
889 else
890 {
891 e21 = split_block (bb2, last_assign);
892 bb1 = e21->dest;
893 remove_edge (e21);
894 }
895
896 e1d = split_block (bb1, m_arr_ref_last);
897 bbd = e1d->dest;
898 remove_edge (e1d);
899
900 /* Flags and profiles of the edge for in-range values. */
901 if (!m_default_case_nonstandard)
902 e01 = make_edge (bb0, bb1, EDGE_TRUE_VALUE);
903 e01->probability = m_default_prob.invert ();
904
905 /* Flags and profiles of the edge taking care of out-of-range values. */
906 e02->flags &= ~EDGE_FALLTHRU;
907 e02->flags |= EDGE_FALSE_VALUE;
908 e02->probability = m_default_prob;
909
910 bbf = m_final_bb;
911
912 e1f = make_edge (bb1, bbf, EDGE_FALLTHRU);
913 e1f->probability = profile_probability::always ();
914
915 if (m_default_case_nonstandard)
916 e2f = NULL;
917 else
918 {
919 e2f = make_edge (bb2, bbf, EDGE_FALLTHRU);
920 e2f->probability = profile_probability::always ();
921 }
922
923 /* frequencies of the new BBs */
924 bb1->count = e01->count ();
925 bb2->count = e02->count ();
926 if (!m_default_case_nonstandard)
927 bbf->count = e1f->count () + e2f->count ();
928
929 /* Tidy blocks that have become unreachable. */
930 prune_bbs (bbd, m_final_bb,
931 m_default_case_nonstandard ? m_default_bb : NULL);
932
933 /* Fixup the PHI nodes in bbF. */
934 fix_phi_nodes (e1f, e2f, bbf);
935
936 /* Fix the dominator tree, if it is available. */
937 if (dom_info_available_p (CDI_DOMINATORS))
938 {
939 vec<basic_block> bbs_to_fix_dom;
940
941 set_immediate_dominator (CDI_DOMINATORS, bb1, bb0);
942 if (!m_default_case_nonstandard)
943 set_immediate_dominator (CDI_DOMINATORS, bb2, bb0);
944 if (! get_immediate_dominator (CDI_DOMINATORS, bbf))
945 /* If bbD was the immediate dominator ... */
946 set_immediate_dominator (CDI_DOMINATORS, bbf, bb0);
947
948 bbs_to_fix_dom.create (3 + (bb2 != bbf));
949 bbs_to_fix_dom.quick_push (bb0);
950 bbs_to_fix_dom.quick_push (bb1);
951 if (bb2 != bbf)
952 bbs_to_fix_dom.quick_push (bb2);
953 bbs_to_fix_dom.quick_push (bbf);
954
955 iterate_fix_dominators (CDI_DOMINATORS, bbs_to_fix_dom, true);
956 bbs_to_fix_dom.release ();
957 }
958 }
959
960 /* The following function is invoked on every switch statement (the current
961 one is given in SWTCH) and runs the individual phases of switch
962 conversion on it one after another until one fails or the conversion
963 is completed. On success, m_reason is NULL; otherwise it points
964 to a string with the reason why the conversion failed. */
965
966 void
967 switch_conversion::expand (gswitch *swtch)
968 {
969 /* Group case labels so that we get the right results from the heuristics
970 that decide on the code generation approach for this switch. */
971 m_cfg_altered |= group_case_labels_stmt (swtch);
972
973 /* If this switch is now a degenerate case with only a default label,
974 there is nothing left for us to do. */
975 if (gimple_switch_num_labels (swtch) < 2)
976 {
977 m_reason = "switch is a degenerate case";
978 return;
979 }
980
981 collect (swtch);
982
983 /* No error markers should reach here (they should be filtered out
984 during gimplification). */
985 gcc_checking_assert (TREE_TYPE (m_index_expr) != error_mark_node);
986
987 /* A switch on a constant should have been optimized in tree-cfg-cleanup. */
988 gcc_checking_assert (!TREE_CONSTANT (m_index_expr));
989
990 /* Prefer bit test if possible. */
991 if (tree_fits_uhwi_p (m_range_size)
992 && bit_test_cluster::can_be_handled (tree_to_uhwi (m_range_size), m_uniq)
993 && bit_test_cluster::is_beneficial (m_count, m_uniq))
994 {
995 m_reason = "expanding as bit test is preferable";
996 return;
997 }
998
999 if (m_uniq <= 2)
1000 {
1001 /* This will be expanded as a decision tree. */
1002 m_reason = "expanding as jumps is preferable";
1003 return;
1004 }
1005
1006 /* If there is no common successor, we cannot do the transformation. */
1007 if (!m_final_bb)
1008 {
1009 m_reason = "no common successor to all case label target blocks found";
1010 return;
1011 }
1012
1013 /* Check the case label values are within reasonable range: */
1014 if (!check_range ())
1015 {
1016 gcc_assert (m_reason);
1017 return;
1018 }
1019
1020 /* For all the cases, see whether they are empty, whether the assignments
1021 they represent are constant, and so on... */
1022 if (!check_all_empty_except_final ())
1023 {
1024 gcc_assert (m_reason);
1025 return;
1026 }
1027 if (!check_final_bb ())
1028 {
1029 gcc_assert (m_reason);
1030 return;
1031 }
1032
1033 /* At this point all checks have passed and we can proceed with the
1034 transformation. */
1035
1036 create_temp_arrays ();
1037 gather_default_values (m_default_case_nonstandard
1038 ? gimple_switch_label (swtch, 1)
1039 : gimple_switch_default_label (swtch));
1040 build_constructors ();
1041
1042 build_arrays (); /* Build the static arrays and assignments. */
1043 gen_inbound_check (); /* Build the bounds check. */
1044
1045 m_cfg_altered = true;
1046 }
1047
1048 /* Destructor. */
1049
1050 switch_conversion::~switch_conversion ()
1051 {
1052 XDELETEVEC (m_constructors);
1053 XDELETEVEC (m_default_values);
1054 }
1055
1056 /* Constructor. */
1057
1058 group_cluster::group_cluster (vec<cluster *> &clusters,
1059 unsigned start, unsigned end)
1060 {
1061 gcc_checking_assert (end - start + 1 >= 1);
1062 m_prob = profile_probability::never ();
1063 m_cases.create (end - start + 1);
1064 for (unsigned i = start; i <= end; i++)
1065 {
1066 m_cases.quick_push (static_cast<simple_cluster *> (clusters[i]));
1067 m_prob += clusters[i]->m_prob;
1068 }
1069 m_subtree_prob = m_prob;
1070 }
1071
1072 /* Destructor. */
1073
1074 group_cluster::~group_cluster ()
1075 {
1076 for (unsigned i = 0; i < m_cases.length (); i++)
1077 delete m_cases[i];
1078
1079 m_cases.release ();
1080 }
1081
1082 /* Dump content of a cluster. */
1083
1084 void
1085 group_cluster::dump (FILE *f, bool details)
1086 {
1087 unsigned total_values = 0;
1088 for (unsigned i = 0; i < m_cases.length (); i++)
1089 total_values += m_cases[i]->get_range (m_cases[i]->get_low (),
1090 m_cases[i]->get_high ());
1091
1092 unsigned comparison_count = 0;
1093 for (unsigned i = 0; i < m_cases.length (); i++)
1094 {
1095 simple_cluster *sc = static_cast<simple_cluster *> (m_cases[i]);
1096 comparison_count += sc->m_range_p ? 2 : 1;
1097 }
1098
1099 unsigned HOST_WIDE_INT range = get_range (get_low (), get_high ());
1100 fprintf (f, "%s", get_type () == JUMP_TABLE ? "JT" : "BT");
1101
1102 if (details)
1103 fprintf (f, "(values:%d comparisons:%d range:" HOST_WIDE_INT_PRINT_DEC
1104 " density: %.2f%%)", total_values, comparison_count, range,
1105 100.0f * comparison_count / range);
1106
1107 fprintf (f, ":");
1108 PRINT_CASE (f, get_low ());
1109 fprintf (f, "-");
1110 PRINT_CASE (f, get_high ());
1111 fprintf (f, " ");
1112 }
1113
1114 /* Emit GIMPLE code to handle the cluster. */
1115
1116 void
1117 jump_table_cluster::emit (tree index_expr, tree,
1118 tree default_label_expr, basic_block default_bb)
1119 {
1120 unsigned HOST_WIDE_INT range = get_range (get_low (), get_high ());
1121 unsigned HOST_WIDE_INT nondefault_range = 0;
1122
1123 /* For a jump table we just emit a new gswitch statement that will
1124 be later lowered to a jump table. */
1125 auto_vec <tree> labels;
1126 labels.create (m_cases.length ());
1127
1128 make_edge (m_case_bb, default_bb, 0);
1129 for (unsigned i = 0; i < m_cases.length (); i++)
1130 {
1131 labels.quick_push (unshare_expr (m_cases[i]->m_case_label_expr));
1132 make_edge (m_case_bb, m_cases[i]->m_case_bb, 0);
1133 }
1134
1135 gswitch *s = gimple_build_switch (index_expr,
1136 unshare_expr (default_label_expr), labels);
1137 gimple_stmt_iterator gsi = gsi_start_bb (m_case_bb);
1138 gsi_insert_after (&gsi, s, GSI_NEW_STMT);
1139
1140 /* Distribute probabilities evenly across the values each case covers. */
1141 for (unsigned i = 0; i < m_cases.length (); i++)
1142 {
1143 simple_cluster *sc = static_cast<simple_cluster *> (m_cases[i]);
1144 edge case_edge = find_edge (m_case_bb, sc->m_case_bb);
1145 unsigned HOST_WIDE_INT case_range
1146 = sc->get_range (sc->get_low (), sc->get_high ());
1147 nondefault_range += case_range;
1148
1149 /* case_edge->aux is number of values in a jump-table that are covered
1150 by the case_edge. */
1151 case_edge->aux = (void *) ((intptr_t) (case_edge->aux) + case_range);
1152 }
1153
1154 edge default_edge = gimple_switch_default_edge (cfun, s);
1155 default_edge->probability = profile_probability::never ();
1156
1157 for (unsigned i = 0; i < m_cases.length (); i++)
1158 {
1159 simple_cluster *sc = static_cast<simple_cluster *> (m_cases[i]);
1160 edge case_edge = find_edge (m_case_bb, sc->m_case_bb);
1161 case_edge->probability
1162 = profile_probability::always ().apply_scale ((intptr_t)case_edge->aux,
1163 range);
1164 }
1165
1166 /* The default edge gets the probability of the range not covered by the cases. */
1167 default_edge->probability
1168 += profile_probability::always ().apply_scale (nondefault_range,
1169 range).invert ();
1170
1171 switch_decision_tree::reset_out_edges_aux (s);
1172 }
1173
1174 /* Find jump tables of given CLUSTERS, where all members of the vector
1175 are of type simple_cluster. New clusters are returned. */
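/* The loop below is a small dynamic program over prefixes of CLUSTERS:
   min[i] holds the smallest number of clusters the first i simple
   clusters can be partitioned into (m_count), the start index of the
   last cluster in that partition (m_start), and the number of cases not
   covered by jump tables (m_non_jt_cases) used as a tie-breaker.  The
   optimal partition is then rebuilt by following m_start backwards. */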
1176
1177 vec<cluster *>
1178 jump_table_cluster::find_jump_tables (vec<cluster *> &clusters)
1179 {
1180 if (!is_enabled ())
1181 return clusters.copy ();
1182
1183 unsigned l = clusters.length ();
1184 auto_vec<min_cluster_item> min;
1185 min.reserve (l + 1);
1186
1187 min.quick_push (min_cluster_item (0, 0, 0));
1188
1189 for (unsigned i = 1; i <= l; i++)
1190 {
1191 /* Set minimal # of clusters with i-th item to infinity. */
1192 min.quick_push (min_cluster_item (INT_MAX, INT_MAX, INT_MAX));
1193
1194 for (unsigned j = 0; j < i; j++)
1195 {
1196 unsigned HOST_WIDE_INT s = min[j].m_non_jt_cases;
1197 if (i - j < case_values_threshold ())
1198 s += i - j;
1199
1200 /* Prefer clusters covering fewer values. */
1201 if ((min[j].m_count + 1 < min[i].m_count
1202 || (min[j].m_count + 1 == min[i].m_count
1203 && s < min[i].m_non_jt_cases))
1204 && can_be_handled (clusters, j, i - 1))
1205 min[i] = min_cluster_item (min[j].m_count + 1, j, s);
1206 }
1207
1208 gcc_checking_assert (min[i].m_count != INT_MAX);
1209 }
1210
1211 /* No result. */
1212 if (min[l].m_count == l)
1213 return clusters.copy ();
1214
1215 vec<cluster *> output;
1216 output.create (4);
1217
1218 /* Find and build the clusters. */
1219 for (unsigned int end = l;;)
1220 {
1221 int start = min[end].m_start;
1222
1223 /* Do not allow clusters with a small number of cases. */
1224 if (is_beneficial (clusters, start, end - 1))
1225 output.safe_push (new jump_table_cluster (clusters, start, end - 1));
1226 else
1227 for (int i = end - 1; i >= start; i--)
1228 output.safe_push (clusters[i]);
1229
1230 end = start;
1231
1232 if (start <= 0)
1233 break;
1234 }
1235
1236 output.reverse ();
1237 return output;
1238 }
1239
1240 /* Return true when cluster starting at START and ending at END (inclusive)
1241 can build a jump-table. */
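/* A rough numeric illustration, assuming a growth-ratio parameter value
   of 800 (the comparison below scales the range by 100): ten single-value
   cases spanning a range of 64 values pass the test
   (100 * 64 <= 800 * 10), while the same ten cases spread over a range of
   100 values do not (100 * 100 > 800 * 10). */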
1242
1243 bool
1244 jump_table_cluster::can_be_handled (const vec<cluster *> &clusters,
1245 unsigned start, unsigned end)
1246 {
1247 /* If the switch is relatively small such that the cost of one
1248 indirect jump on the target is higher than the cost of a
1249 decision tree, go with the decision tree.
1250
1251 If range of values is much bigger than number of values,
1252 or if it is too large to represent in a HOST_WIDE_INT,
1253 make a sequence of conditional branches instead of a dispatch.
1254
1255 The definition of "much bigger" depends on whether we are
1256 optimizing for size or for speed.
1257
1258 For algorithm correctness, jump table for a single case must return
1259 true. We bail out in is_beneficial if it's called just for
1260 a single case. */
1261 if (start == end)
1262 return true;
1263
1264 unsigned HOST_WIDE_INT max_ratio
1265 = (optimize_insn_for_size_p ()
1266 ? param_jump_table_max_growth_ratio_for_size
1267 : param_jump_table_max_growth_ratio_for_speed);
1268 unsigned HOST_WIDE_INT range = get_range (clusters[start]->get_low (),
1269 clusters[end]->get_high ());
1270 /* Check overflow. */
1271 if (range == 0)
1272 return false;
1273
1274 unsigned HOST_WIDE_INT comparison_count = 0;
1275 for (unsigned i = start; i <= end; i++)
1276 {
1277 simple_cluster *sc = static_cast<simple_cluster *> (clusters[i]);
1278 comparison_count += sc->m_range_p ? 2 : 1;
1279 }
1280
1281 unsigned HOST_WIDE_INT lhs = 100 * range;
1282 if (lhs < range)
1283 return false;
1284
1285 return lhs <= max_ratio * comparison_count;
1286 }
1287
1288 /* Return true if cluster starting at START and ending at END (inclusive)
1289 is a profitable transformation. */
1290
1291 bool
1292 jump_table_cluster::is_beneficial (const vec<cluster *> &,
1293 unsigned start, unsigned end)
1294 {
1295 /* Single case bail out. */
1296 if (start == end)
1297 return false;
1298
1299 return end - start + 1 >= case_values_threshold ();
1300 }
1301
1302 /* Find bit tests of given CLUSTERS, where all members of the vector
1303 are of type simple_cluster. New clusters are returned. */
1304
1305 vec<cluster *>
1306 bit_test_cluster::find_bit_tests (vec<cluster *> &clusters)
1307 {
1308 unsigned l = clusters.length ();
1309 auto_vec<min_cluster_item> min;
1310 min.reserve (l + 1);
1311
1312 min.quick_push (min_cluster_item (0, 0, 0));
1313
1314 for (unsigned i = 1; i <= l; i++)
1315 {
1316 /* Set minimal # of clusters with i-th item to infinity. */
1317 min.quick_push (min_cluster_item (INT_MAX, INT_MAX, INT_MAX));
1318
1319 for (unsigned j = 0; j < i; j++)
1320 {
1321 if (min[j].m_count + 1 < min[i].m_count
1322 && can_be_handled (clusters, j, i - 1))
1323 min[i] = min_cluster_item (min[j].m_count + 1, j, INT_MAX);
1324 }
1325
1326 gcc_checking_assert (min[i].m_count != INT_MAX);
1327 }
1328
1329 /* No result. */
1330 if (min[l].m_count == l)
1331 return clusters.copy ();
1332
1333 vec<cluster *> output;
1334 output.create (4);
1335
1336 /* Find and build the clusters. */
1337 for (unsigned end = l;;)
1338 {
1339 int start = min[end].m_start;
1340
1341 if (is_beneficial (clusters, start, end - 1))
1342 {
1343 bool entire = start == 0 && end == clusters.length ();
1344 output.safe_push (new bit_test_cluster (clusters, start, end - 1,
1345 entire));
1346 }
1347 else
1348 for (int i = end - 1; i >= start; i--)
1349 output.safe_push (clusters[i]);
1350
1351 end = start;
1352
1353 if (start <= 0)
1354 break;
1355 }
1356
1357 output.reverse ();
1358 return output;
1359 }
1360
1361 /* Return true when RANGE of case values with UNIQ labels
1362 can build a bit test. */
1363
1364 bool
1365 bit_test_cluster::can_be_handled (unsigned HOST_WIDE_INT range,
1366 unsigned int uniq)
1367 {
1368 /* Check overflow. */
1369 if (range == 0)
1370 return 0;
1371
1372 if (range >= GET_MODE_BITSIZE (word_mode))
1373 return false;
1374
1375 return uniq <= 3;
1376 }
1377
1378 /* Return true when cluster starting at START and ending at END (inclusive)
1379 can build a bit test. */
1380
1381 bool
1382 bit_test_cluster::can_be_handled (const vec<cluster *> &clusters,
1383 unsigned start, unsigned end)
1384 {
1385 /* For algorithm correctness, bit test for a single case must return
1386 true. We bail out in is_beneficial if it's called just for
1387 a single case. */
1388 if (start == end)
1389 return true;
1390
1391 unsigned HOST_WIDE_INT range = get_range (clusters[start]->get_low (),
1392 clusters[end]->get_high ());
1393 auto_bitmap dest_bbs;
1394
1395 for (unsigned i = start; i <= end; i++)
1396 {
1397 simple_cluster *sc = static_cast<simple_cluster *> (clusters[i]);
1398 bitmap_set_bit (dest_bbs, sc->m_case_bb->index);
1399 }
1400
1401 return can_be_handled (range, bitmap_count_bits (dest_bbs));
1402 }
1403
1404 /* Return true when COUNT of cases of UNIQ labels is beneficial for bit test
1405 transformation. */
1406
1407 bool
1408 bit_test_cluster::is_beneficial (unsigned count, unsigned uniq)
1409 {
1410 return (((uniq == 1 && count >= 3)
1411 || (uniq == 2 && count >= 5)
1412 || (uniq == 3 && count >= 6)));
1413 }
1414
1415 /* Return true if cluster starting at START and ending at END (inclusive)
1416 is a profitable transformation. */
1417
1418 bool
1419 bit_test_cluster::is_beneficial (const vec<cluster *> &clusters,
1420 unsigned start, unsigned end)
1421 {
1422 /* Single case bail out. */
1423 if (start == end)
1424 return false;
1425
1426 auto_bitmap dest_bbs;
1427
1428 for (unsigned i = start; i <= end; i++)
1429 {
1430 simple_cluster *sc = static_cast<simple_cluster *> (clusters[i]);
1431 bitmap_set_bit (dest_bbs, sc->m_case_bb->index);
1432 }
1433
1434 unsigned uniq = bitmap_count_bits (dest_bbs);
1435 unsigned count = end - start + 1;
1436 return is_beneficial (count, uniq);
1437 }
1438
1439 /* Comparison function for qsort to order bit tests by decreasing
1440 probability of execution. */
1441
1442 int
1443 case_bit_test::cmp (const void *p1, const void *p2)
1444 {
1445 const case_bit_test *const d1 = (const case_bit_test *) p1;
1446 const case_bit_test *const d2 = (const case_bit_test *) p2;
1447
1448 if (d2->bits != d1->bits)
1449 return d2->bits - d1->bits;
1450
1451 /* Stabilize the sort. */
1452 return (LABEL_DECL_UID (CASE_LABEL (d2->label))
1453 - LABEL_DECL_UID (CASE_LABEL (d1->label)));
1454 }
1455
1456 /* Expand a switch statement by a short sequence of bit-wise
1457 comparisons. "switch(x)" is effectively converted into
1458 "if ((1 << (x-MINVAL)) & CST)" where CST and MINVAL are
1459 integer constants.
1460
1461 INDEX_EXPR is the value being switched on.
1462
1463 MINVAL is the lowest case value in the case nodes,
1464 and RANGE is highest value minus MINVAL. MINVAL and RANGE
1465 are not guaranteed to be of the same type as INDEX_EXPR
1466 (the gimplifier doesn't change the type of case label values,
1467 and MINVAL and RANGE are derived from those values).
1468 MAXVAL is MINVAL + RANGE.
1469
1470 There *MUST* be max_case_bit_tests or fewer unique case
1471 node targets. */
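/* A small illustration: for

     switch (x) { case 0: case 2: case 4: goto A;
                  case 1: case 3: goto B; }

   MINVAL is 0, RANGE is 4, target A gets mask 0b10101 and target B gets
   mask 0b01010, and the emitted code is roughly

     csui = (word type) 1 << (x - MINVAL);
     if (csui & 0b10101) goto A;
     if (csui & 0b01010) goto B;
     goto default;

   (labels and constants above are purely illustrative). */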
1472
1473 void
1474 bit_test_cluster::emit (tree index_expr, tree index_type,
1475 tree, basic_block default_bb)
1476 {
1477 case_bit_test test[m_max_case_bit_tests] = { {} };
1478 unsigned int i, j, k;
1479 unsigned int count;
1480
1481 tree unsigned_index_type = range_check_type (index_type);
1482
1483 gimple_stmt_iterator gsi;
1484 gassign *shift_stmt;
1485
1486 tree idx, tmp, csui;
1487 tree word_type_node = lang_hooks.types.type_for_mode (word_mode, 1);
1488 tree word_mode_zero = fold_convert (word_type_node, integer_zero_node);
1489 tree word_mode_one = fold_convert (word_type_node, integer_one_node);
1490 int prec = TYPE_PRECISION (word_type_node);
1491 wide_int wone = wi::one (prec);
1492
1493 tree minval = get_low ();
1494 tree maxval = get_high ();
1495 tree range = int_const_binop (MINUS_EXPR, maxval, minval);
1496 unsigned HOST_WIDE_INT bt_range = get_range (minval, maxval);
1497
1498 /* Go through all cases and collect the case labels, profile
1499 counts, and other information we need to build the branch tests. */
1500 count = 0;
1501 for (i = 0; i < m_cases.length (); i++)
1502 {
1503 unsigned int lo, hi;
1504 simple_cluster *n = static_cast<simple_cluster *> (m_cases[i]);
1505 for (k = 0; k < count; k++)
1506 if (n->m_case_bb == test[k].target_bb)
1507 break;
1508
1509 if (k == count)
1510 {
1511 gcc_checking_assert (count < m_max_case_bit_tests);
1512 test[k].mask = wi::zero (prec);
1513 test[k].target_bb = n->m_case_bb;
1514 test[k].label = n->m_case_label_expr;
1515 test[k].bits = 0;
1516 count++;
1517 }
1518
1519 test[k].bits += n->get_range (n->get_low (), n->get_high ());
1520
1521 lo = tree_to_uhwi (int_const_binop (MINUS_EXPR, n->get_low (), minval));
1522 if (n->get_high () == NULL_TREE)
1523 hi = lo;
1524 else
1525 hi = tree_to_uhwi (int_const_binop (MINUS_EXPR, n->get_high (),
1526 minval));
1527
1528 for (j = lo; j <= hi; j++)
1529 test[k].mask |= wi::lshift (wone, j);
1530 }
1531
1532 qsort (test, count, sizeof (*test), case_bit_test::cmp);
1533
1534 /* If all values are in the 0 .. BITS_PER_WORD-1 range, we can get rid of
1535 the minval subtractions, but it might make the mask constants more
1536 expensive. So, compare the costs. */
1537 if (compare_tree_int (minval, 0) > 0
1538 && compare_tree_int (maxval, GET_MODE_BITSIZE (word_mode)) < 0)
1539 {
1540 int cost_diff;
1541 HOST_WIDE_INT m = tree_to_uhwi (minval);
1542 rtx reg = gen_raw_REG (word_mode, 10000);
1543 bool speed_p = optimize_insn_for_speed_p ();
1544 cost_diff = set_src_cost (gen_rtx_PLUS (word_mode, reg,
1545 GEN_INT (-m)),
1546 word_mode, speed_p);
1547 for (i = 0; i < count; i++)
1548 {
1549 rtx r = immed_wide_int_const (test[i].mask, word_mode);
1550 cost_diff += set_src_cost (gen_rtx_AND (word_mode, reg, r),
1551 word_mode, speed_p);
1552 r = immed_wide_int_const (wi::lshift (test[i].mask, m), word_mode);
1553 cost_diff -= set_src_cost (gen_rtx_AND (word_mode, reg, r),
1554 word_mode, speed_p);
1555 }
1556 if (cost_diff > 0)
1557 {
1558 for (i = 0; i < count; i++)
1559 test[i].mask = wi::lshift (test[i].mask, m);
1560 minval = build_zero_cst (TREE_TYPE (minval));
1561 range = maxval;
1562 }
1563 }
1564
1565 /* Now build the test-and-branch code. */
1566
1567 gsi = gsi_last_bb (m_case_bb);
1568
1569 /* idx = (unsigned)x - minval. */
1570 idx = fold_convert (unsigned_index_type, index_expr);
1571 idx = fold_build2 (MINUS_EXPR, unsigned_index_type, idx,
1572 fold_convert (unsigned_index_type, minval));
1573 idx = force_gimple_operand_gsi (&gsi, idx,
1574 /*simple=*/true, NULL_TREE,
1575 /*before=*/true, GSI_SAME_STMT);
1576
1577 if (m_handles_entire_switch)
1578 {
1579 /* if (idx > range) goto default */
1580 range
1581 = force_gimple_operand_gsi (&gsi,
1582 fold_convert (unsigned_index_type, range),
1583 /*simple=*/true, NULL_TREE,
1584 /*before=*/true, GSI_SAME_STMT);
1585 tmp = fold_build2 (GT_EXPR, boolean_type_node, idx, range);
1586 basic_block new_bb
1587 = hoist_edge_and_branch_if_true (&gsi, tmp, default_bb,
1588 profile_probability::unlikely ());
1589 gsi = gsi_last_bb (new_bb);
1590 }
1591
1592 /* csui = (1 << (word_mode) idx) */
1593 csui = make_ssa_name (word_type_node);
1594 tmp = fold_build2 (LSHIFT_EXPR, word_type_node, word_mode_one,
1595 fold_convert (word_type_node, idx));
1596 tmp = force_gimple_operand_gsi (&gsi, tmp,
1597 /*simple=*/false, NULL_TREE,
1598 /*before=*/true, GSI_SAME_STMT);
1599 shift_stmt = gimple_build_assign (csui, tmp);
1600 gsi_insert_before (&gsi, shift_stmt, GSI_SAME_STMT);
1601 update_stmt (shift_stmt);
1602
1603 profile_probability prob = profile_probability::always ();
1604
1605 /* for each unique set of cases:
1606 if (const & csui) goto target */
1607 for (k = 0; k < count; k++)
1608 {
1609 prob = profile_probability::always ().apply_scale (test[k].bits,
1610 bt_range);
1611 bt_range -= test[k].bits;
1612 tmp = wide_int_to_tree (word_type_node, test[k].mask);
1613 tmp = fold_build2 (BIT_AND_EXPR, word_type_node, csui, tmp);
1614 tmp = force_gimple_operand_gsi (&gsi, tmp,
1615 /*simple=*/true, NULL_TREE,
1616 /*before=*/true, GSI_SAME_STMT);
1617 tmp = fold_build2 (NE_EXPR, boolean_type_node, tmp, word_mode_zero);
1618 basic_block new_bb
1619 = hoist_edge_and_branch_if_true (&gsi, tmp, test[k].target_bb, prob);
1620 gsi = gsi_last_bb (new_bb);
1621 }
1622
1623 /* We should have removed all edges now. */
1624 gcc_assert (EDGE_COUNT (gsi_bb (gsi)->succs) == 0);
1625
1626 /* If nothing matched, go to the default label. */
1627 edge e = make_edge (gsi_bb (gsi), default_bb, EDGE_FALLTHRU);
1628 e->probability = profile_probability::always ();
1629 }
1630
1631 /* Split the basic block at the statement pointed to by GSIP, and insert
1632 a branch to the CASE_BB basic block conditional on tree expression
1633 COND, with probability PROB on the new true edge.
1634
1635 A new edge from the to-be-split basic block to CASE_BB is created,
1636 and the profile information is set up for the new conditional
1637 jump.
1638
1639 The CFG is updated. The dominator tree will not be valid after
1640 this transformation.
1643
1644 Returns the newly created basic block. */
1645
1646 basic_block
1647 bit_test_cluster::hoist_edge_and_branch_if_true (gimple_stmt_iterator *gsip,
1648 tree cond, basic_block case_bb,
1649 profile_probability prob)
1650 {
1651 tree tmp;
1652 gcond *cond_stmt;
1653 edge e_false;
1654 basic_block new_bb, split_bb = gsi_bb (*gsip);
1655
1656 edge e_true = make_edge (split_bb, case_bb, EDGE_TRUE_VALUE);
1657 e_true->probability = prob;
1658 gcc_assert (e_true->src == split_bb);
1659
1660 tmp = force_gimple_operand_gsi (gsip, cond, /*simple=*/true, NULL,
1661 /*before=*/true, GSI_SAME_STMT);
1662 cond_stmt = gimple_build_cond_from_tree (tmp, NULL_TREE, NULL_TREE);
1663 gsi_insert_before (gsip, cond_stmt, GSI_SAME_STMT);
1664
1665 e_false = split_block (split_bb, cond_stmt);
1666 new_bb = e_false->dest;
1667 redirect_edge_pred (e_true, split_bb);
1668
1669 e_false->flags &= ~EDGE_FALLTHRU;
1670 e_false->flags |= EDGE_FALSE_VALUE;
1671 e_false->probability = e_true->probability.invert ();
1672 new_bb->count = e_false->count ();
1673
1674 return new_bb;
1675 }
1676
1677 /* Compute the number of case labels that correspond to each outgoing edge of
1678 the switch statement. Record this information in the aux field of the edge. */
1679
1680 void
1681 switch_decision_tree::compute_cases_per_edge ()
1682 {
1683 reset_out_edges_aux (m_switch);
1684 int ncases = gimple_switch_num_labels (m_switch);
1685 for (int i = ncases - 1; i >= 1; --i)
1686 {
1687 edge case_edge = gimple_switch_edge (cfun, m_switch, i);
1688 case_edge->aux = (void *) ((intptr_t) (case_edge->aux) + 1);
1689 }
1690 }
1691
1692 /* Analyze the switch statement and return true when the statement is expanded
1693 as a decision tree. */
1694
1695 bool
1696 switch_decision_tree::analyze_switch_statement ()
1697 {
1698 unsigned l = gimple_switch_num_labels (m_switch);
1699 basic_block bb = gimple_bb (m_switch);
1700 auto_vec<cluster *> clusters;
1701 clusters.create (l - 1);
1702
1703 basic_block default_bb = gimple_switch_default_bb (cfun, m_switch);
1704 m_case_bbs.reserve (l);
1705 m_case_bbs.quick_push (default_bb);
1706
1707 compute_cases_per_edge ();
1708
1709 for (unsigned i = 1; i < l; i++)
1710 {
1711 tree elt = gimple_switch_label (m_switch, i);
1712 tree lab = CASE_LABEL (elt);
1713 basic_block case_bb = label_to_block (cfun, lab);
1714 edge case_edge = find_edge (bb, case_bb);
1715 tree low = CASE_LOW (elt);
1716 tree high = CASE_HIGH (elt);
1717
1718 profile_probability p
1719 = case_edge->probability.apply_scale (1, (intptr_t) (case_edge->aux));
1720 clusters.quick_push (new simple_cluster (low, high, elt, case_edge->dest,
1721 p));
1722 m_case_bbs.quick_push (case_edge->dest);
1723 }
1724
1725 reset_out_edges_aux (m_switch);
1726
1727 /* Find jump table clusters. */
1728 vec<cluster *> output = jump_table_cluster::find_jump_tables (clusters);
1729
1730 /* Find bit test clusters. */
1731 vec<cluster *> output2;
1732 auto_vec<cluster *> tmp;
1733 output2.create (1);
1734 tmp.create (1);
1735
1736 for (unsigned i = 0; i < output.length (); i++)
1737 {
1738 cluster *c = output[i];
1739 if (c->get_type () != SIMPLE_CASE)
1740 {
1741 if (!tmp.is_empty ())
1742 {
1743 vec<cluster *> n = bit_test_cluster::find_bit_tests (tmp);
1744 output2.safe_splice (n);
1745 n.release ();
1746 tmp.truncate (0);
1747 }
1748 output2.safe_push (c);
1749 }
1750 else
1751 tmp.safe_push (c);
1752 }
1753
1754 /* We still can have a temporary vector to test. */
1755 if (!tmp.is_empty ())
1756 {
1757 vec<cluster *> n = bit_test_cluster::find_bit_tests (tmp);
1758 output2.safe_splice (n);
1759 n.release ();
1760 }
1761
1762 if (dump_file)
1763 {
1764 fprintf (dump_file, ";; GIMPLE switch case clusters: ");
1765 for (unsigned i = 0; i < output2.length (); i++)
1766 output2[i]->dump (dump_file, dump_flags & TDF_DETAILS);
1767 fprintf (dump_file, "\n");
1768 }
1769
1770 output.release ();
1771
1772 bool expanded = try_switch_expansion (output2);
1773
1774 for (unsigned i = 0; i < output2.length (); i++)
1775 delete output2[i];
1776
1777 output2.release ();
1778
1779 return expanded;
1780 }
1781
1782 /* Attempt to expand CLUSTERS as a decision tree. Return true when
1783 expanded. */
1784
1785 bool
1786 switch_decision_tree::try_switch_expansion (vec<cluster *> &clusters)
1787 {
1788 tree index_expr = gimple_switch_index (m_switch);
1789 tree index_type = TREE_TYPE (index_expr);
1790 basic_block bb = gimple_bb (m_switch);
1791
1792 if (gimple_switch_num_labels (m_switch) == 1
1793 || range_check_type (index_type) == NULL_TREE)
1794 return false;
1795
1796 /* Find the default case target label. */
1797 edge default_edge = gimple_switch_default_edge (cfun, m_switch);
1798 m_default_bb = default_edge->dest;
1799
1800 /* Do the insertion of a case label into m_case_list. The labels are
1801 fed to us in descending order from the sorted vector of case labels used
1802 in the tree part of the middle end. So the list we construct is
1803 sorted in ascending order. */
1804
1805 for (int i = clusters.length () - 1; i >= 0; i--)
1806 {
1807 case_tree_node *r = m_case_list;
1808 m_case_list = m_case_node_pool.allocate ();
1809 m_case_list->m_right = r;
1810 m_case_list->m_c = clusters[i];
1811 }
1812
1813 record_phi_operand_mapping ();
1814
1815 /* Split basic block that contains the gswitch statement. */
1816 gimple_stmt_iterator gsi = gsi_last_bb (bb);
1817 edge e;
1818 if (gsi_end_p (gsi))
1819 e = split_block_after_labels (bb);
1820 else
1821 {
1822 gsi_prev (&gsi);
1823 e = split_block (bb, gsi_stmt (gsi));
1824 }
1825 bb = split_edge (e);
1826
1827 /* Create new basic blocks for non-case clusters where specific expansion
1828 needs to happen. */
1829 for (unsigned i = 0; i < clusters.length (); i++)
1830 if (clusters[i]->get_type () != SIMPLE_CASE)
1831 {
1832 clusters[i]->m_case_bb = create_empty_bb (bb);
1833 clusters[i]->m_case_bb->count = bb->count;
1834 clusters[i]->m_case_bb->loop_father = bb->loop_father;
1835 }
1836
1837   /* Do not do extra work for a single cluster.  */
1838 if (clusters.length () == 1
1839 && clusters[0]->get_type () != SIMPLE_CASE)
1840 {
1841 cluster *c = clusters[0];
1842 c->emit (index_expr, index_type,
1843 gimple_switch_default_label (m_switch), m_default_bb);
1844 redirect_edge_succ (single_succ_edge (bb), c->m_case_bb);
1845 }
1846 else
1847 {
1848 emit (bb, index_expr, default_edge->probability, index_type);
1849
1850 /* Emit cluster-specific switch handling. */
1851 for (unsigned i = 0; i < clusters.length (); i++)
1852 if (clusters[i]->get_type () != SIMPLE_CASE)
1853 clusters[i]->emit (index_expr, index_type,
1854 gimple_switch_default_label (m_switch),
1855 m_default_bb);
1856 }
1857
1858 fix_phi_operands_for_edges ();
1859
1860 return true;
1861 }
1862
1863 /* Before switch transformation, record all SSA_NAMEs defined in switch BB
1864 and used in a label basic block. */
1865
1866 void
1867 switch_decision_tree::record_phi_operand_mapping ()
1868 {
1869 basic_block switch_bb = gimple_bb (m_switch);
1870 /* Record all PHI nodes that have to be fixed after conversion. */
1871 for (unsigned i = 0; i < m_case_bbs.length (); i++)
1872 {
1873 gphi_iterator gsi;
1874 basic_block bb = m_case_bbs[i];
1875 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1876 {
1877 gphi *phi = gsi.phi ();
1878
1879 for (unsigned i = 0; i < gimple_phi_num_args (phi); i++)
1880 {
1881 basic_block phi_src_bb = gimple_phi_arg_edge (phi, i)->src;
1882 if (phi_src_bb == switch_bb)
1883 {
1884 tree def = gimple_phi_arg_def (phi, i);
1885 tree result = gimple_phi_result (phi);
1886 m_phi_mapping.put (result, def);
1887 break;
1888 }
1889 }
1890 }
1891 }
1892 }
1893
1894 /* Append operands to PHI statements for the new edges to case labels
1895    introduced by the switch expansion.  */
1896
1897 void
1898 switch_decision_tree::fix_phi_operands_for_edges ()
1899 {
1900 gphi_iterator gsi;
1901
1902 for (unsigned i = 0; i < m_case_bbs.length (); i++)
1903 {
1904 basic_block bb = m_case_bbs[i];
1905 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1906 {
1907 gphi *phi = gsi.phi ();
1908 for (unsigned j = 0; j < gimple_phi_num_args (phi); j++)
1909 {
1910 tree def = gimple_phi_arg_def (phi, j);
1911 if (def == NULL_TREE)
1912 {
1913 edge e = gimple_phi_arg_edge (phi, j);
1914 tree *definition
1915 = m_phi_mapping.get (gimple_phi_result (phi));
1916 gcc_assert (definition);
1917 add_phi_arg (phi, *definition, e, UNKNOWN_LOCATION);
1918 }
1919 }
1920 }
1921 }
1922 }
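
/* As an illustration (SSA names invented for exposition): if a case block
   contains x_3 = PHI <a_1 (switch_bb), b_2 (other_bb)>, the mapping recorded
   by record_phi_operand_mapping is x_3 -> a_1.  After expansion the edge
   from switch_bb is replaced by an edge from one of the emitted test blocks;
   the PHI argument for that new edge starts out empty (NULL_TREE), and
   fix_phi_operands_for_edges fills it in with the recorded a_1.  */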
1923
1924 /* Generate a decision tree, switching on INDEX_EXPR and jumping to
1925 one of the labels in CASE_LIST or to the DEFAULT_LABEL.
1926
1927 We generate a binary decision tree to select the appropriate target
1928 code. */
1929
1930 void
1931 switch_decision_tree::emit (basic_block bb, tree index_expr,
1932 profile_probability default_prob, tree index_type)
1933 {
1934 balance_case_nodes (&m_case_list, NULL);
1935
1936 if (dump_file)
1937 dump_function_to_file (current_function_decl, dump_file, dump_flags);
1938 if (dump_file && (dump_flags & TDF_DETAILS))
1939 {
1940 int indent_step = ceil_log2 (TYPE_PRECISION (index_type)) + 2;
1941 fprintf (dump_file, ";; Expanding GIMPLE switch as decision tree:\n");
1942 gcc_assert (m_case_list != NULL);
1943 dump_case_nodes (dump_file, m_case_list, indent_step, 0);
1944 }
1945
1946 bb = emit_case_nodes (bb, index_expr, m_case_list, default_prob, index_type,
1947 gimple_location (m_switch));
1948
1949 if (bb)
1950 emit_jump (bb, m_default_bb);
1951
1952   /* Remove the original GIMPLE_SWITCH statement and delete its basic block.  */
1953 bb = gimple_bb (m_switch);
1954 gimple_stmt_iterator gsi = gsi_last_bb (bb);
1955 gsi_remove (&gsi, true);
1956
1957 delete_basic_block (bb);
1958 }
1959
1960 /* Take an ordered list of case nodes
1961    and transform them into a near-optimal binary tree,
1962    choosing each split point so that the two halves carry roughly
1963    equal total execution probability.
1964
1965    The transformation is performed by splitting the ordered
1966    list into two sections around a pivot.  The parts are
1967    then attached to the pivot as left and right branches.  Each
1968    branch is then transformed recursively.  */
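
/* As a worked illustration (numbers invented for exposition): with seven
   equally likely cases 1 .. 7 the probability midpoint is reached at 4,
   which becomes the pivot:

		  4
		/   \
	 {1,2,3}     {5,6,7}

   and each half is then balanced recursively.  With skewed probabilities
   such as 10%, 20%, 40%, 20%, 10%, the running probability first drops
   below half of the total while standing on the 40% node, so that node
   becomes the pivot and the hottest case is tested at the root of the
   tree.  */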
1969
1970 void
1971 switch_decision_tree::balance_case_nodes (case_tree_node **head,
1972 case_tree_node *parent)
1973 {
1974 case_tree_node *np;
1975
1976 np = *head;
1977 if (np)
1978 {
1979 int i = 0;
1980 int ranges = 0;
1981 case_tree_node **npp;
1982 case_tree_node *left;
1983 profile_probability prob = profile_probability::never ();
1984
1985       /* Count the number of entries in the branch.  Also count the ranges.  */
1986
1987 while (np)
1988 {
1989 if (!tree_int_cst_equal (np->m_c->get_low (), np->m_c->get_high ()))
1990 ranges++;
1991
1992 i++;
1993 prob += np->m_c->m_prob;
1994 np = np->m_right;
1995 }
1996
1997 if (i > 2)
1998 {
1999 /* Split this list if it is long enough for that to help. */
2000 npp = head;
2001 left = *npp;
2002 profile_probability pivot_prob = prob.apply_scale (1, 2);
2003
2004 	  /* Find the place in the list that bisects the list's total
2005 	     execution probability.  */
2006 while (1)
2007 {
2008 /* Skip nodes while their probability does not reach
2009 that amount. */
2010 prob -= (*npp)->m_c->m_prob;
2011 if ((prob.initialized_p () && prob < pivot_prob)
2012 || ! (*npp)->m_right)
2013 break;
2014 npp = &(*npp)->m_right;
2015 }
2016
2017 np = *npp;
2018 *npp = 0;
2019 *head = np;
2020 np->m_parent = parent;
2021 np->m_left = left == np ? NULL : left;
2022
2023 /* Optimize each of the two split parts. */
2024 balance_case_nodes (&np->m_left, np);
2025 balance_case_nodes (&np->m_right, np);
2026 np->m_c->m_subtree_prob = np->m_c->m_prob;
2027 if (np->m_left)
2028 np->m_c->m_subtree_prob += np->m_left->m_c->m_subtree_prob;
2029 if (np->m_right)
2030 np->m_c->m_subtree_prob += np->m_right->m_c->m_subtree_prob;
2031 }
2032 else
2033 {
2034 /* Else leave this branch as one level,
2035 but fill in `parent' fields. */
2036 np = *head;
2037 np->m_parent = parent;
2038 np->m_c->m_subtree_prob = np->m_c->m_prob;
2039 for (; np->m_right; np = np->m_right)
2040 {
2041 np->m_right->m_parent = np;
2042 (*head)->m_c->m_subtree_prob += np->m_right->m_c->m_subtree_prob;
2043 }
2044 }
2045 }
2046 }
2047
2048 /* Dump ROOT, a list or tree of case nodes, to file F.  */
2049
2050 void
2051 switch_decision_tree::dump_case_nodes (FILE *f, case_tree_node *root,
2052 int indent_step, int indent_level)
2053 {
2054 if (root == 0)
2055 return;
2056 indent_level++;
2057
2058 dump_case_nodes (f, root->m_left, indent_step, indent_level);
2059
2060 fputs (";; ", f);
2061 fprintf (f, "%*s", indent_step * indent_level, "");
2062 root->m_c->dump (f);
2063 root->m_c->m_prob.dump (f);
2064   fputs (" (subtree: ", f);
2065 root->m_c->m_subtree_prob.dump (f);
2066 fputs (")\n", f);
2067
2068 dump_case_nodes (f, root->m_right, indent_step, indent_level);
2069 }
2070
2071
2072 /* Add an unconditional jump to CASE_BB that happens in basic block BB. */
2073
2074 void
2075 switch_decision_tree::emit_jump (basic_block bb, basic_block case_bb)
2076 {
2077 edge e = single_succ_edge (bb);
2078 redirect_edge_succ (e, case_bb);
2079 }
2080
2081 /* Emit a GIMPLE_COND in BB comparing OP0 with OP1 and jump to LABEL_BB
2082    when the condition is true; return the block on the false edge.
2083    COMPARISON is the GIMPLE comparison code (EQ_EXPR, GT_EXPR, etc.).
2084    PROB is the probability of jumping to LABEL_BB.  */
2085
2086 basic_block
2087 switch_decision_tree::emit_cmp_and_jump_insns (basic_block bb, tree op0,
2088 tree op1, tree_code comparison,
2089 basic_block label_bb,
2090 profile_probability prob,
2091 location_t loc)
2092 {
2093   // TODO: this is called once with lhs != index; see emit_case_nodes.
2094 op1 = fold_convert (TREE_TYPE (op0), op1);
2095
2096 gcond *cond = gimple_build_cond (comparison, op0, op1, NULL_TREE, NULL_TREE);
2097 gimple_set_location (cond, loc);
2098 gimple_stmt_iterator gsi = gsi_last_bb (bb);
2099 gsi_insert_after (&gsi, cond, GSI_NEW_STMT);
2100
2101 gcc_assert (single_succ_p (bb));
2102
2103   /* Make a new basic block where the false branch will take place.  */
2104 edge false_edge = split_block (bb, cond);
2105 false_edge->flags = EDGE_FALSE_VALUE;
2106 false_edge->probability = prob.invert ();
2107
2108 edge true_edge = make_edge (bb, label_bb, EDGE_TRUE_VALUE);
2109 true_edge->probability = prob;
2110
2111 return false_edge->dest;
2112 }
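
/* Sketch of the control flow produced above (block names invented for
   exposition):

     BB:      if (OP0 <COMPARISON> OP1)    <- the new GIMPLE_COND
		goto LABEL_BB;             <- true edge, probability PROB
	      else
		goto NEW_BB;               <- false edge, probability 1 - PROB

   NEW_BB is the empty block created by split_block and is what the function
   returns, so callers can chain further tests onto it.  */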
2113
2114 /* Generate code to jump to LABEL_BB if OP0 and OP1 are equal.
2115    PROB is the probability of jumping to LABEL_BB.
2116    BB is the basic block where the new condition will be placed.  */
2117
2118 basic_block
2119 switch_decision_tree::do_jump_if_equal (basic_block bb, tree op0, tree op1,
2120 basic_block label_bb,
2121 profile_probability prob,
2122 location_t loc)
2123 {
2124 op1 = fold_convert (TREE_TYPE (op0), op1);
2125
2126 gcond *cond = gimple_build_cond (EQ_EXPR, op0, op1, NULL_TREE, NULL_TREE);
2127 gimple_set_location (cond, loc);
2128 gimple_stmt_iterator gsi = gsi_last_bb (bb);
2129 gsi_insert_before (&gsi, cond, GSI_SAME_STMT);
2130
2131 gcc_assert (single_succ_p (bb));
2132
2133   /* Make a new basic block where the false branch will take place.  */
2134 edge false_edge = split_block (bb, cond);
2135 false_edge->flags = EDGE_FALSE_VALUE;
2136 false_edge->probability = prob.invert ();
2137
2138 edge true_edge = make_edge (bb, label_bb, EDGE_TRUE_VALUE);
2139 true_edge->probability = prob;
2140
2141 return false_edge->dest;
2142 }
2143
2144 /* Emit step-by-step code to select a case for the value of INDEX.
2145    The generated decision tree follows the form of the case-node
2146    binary tree NODE, whose nodes represent test conditions.
2147    DEFAULT_PROB is the probability of cases leading to the default BB.
2148    INDEX_TYPE is the type of the switch index.  */
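
/* As an illustration (cases invented for exposition): for a balanced tree
   built from the single-valued cases 1, 2, 3, 4 and 5, with 3 as the pivot,
   the emitted tests look roughly like

     if (index == 3) goto case_3;
     if (index > 3) goto test_right;
     if (index == 2) goto case_2;
     if (index == 1) goto case_1;
     goto default;
   test_right:
     if (index == 4) goto case_4;
     if (index == 5) goto case_5;
     goto default;

   Range cases use a single unsigned range check instead of a pair of
   ordered comparisons (see the note next to generate_range_test below).  */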
2149
2150 basic_block
2151 switch_decision_tree::emit_case_nodes (basic_block bb, tree index,
2152 case_tree_node *node,
2153 profile_probability default_prob,
2154 tree index_type, location_t loc)
2155 {
2156 profile_probability p;
2157
2158 /* If node is null, we are done. */
2159 if (node == NULL)
2160 return bb;
2161
2162 /* Single value case. */
2163 if (node->m_c->is_single_value_p ())
2164 {
2165 /* Node is single valued. First see if the index expression matches
2166 this node and then check our children, if any. */
2167 p = node->m_c->m_prob / (node->m_c->m_subtree_prob + default_prob);
2168 bb = do_jump_if_equal (bb, index, node->m_c->get_low (),
2169 node->m_c->m_case_bb, p, loc);
2170       /* This case has been handled at this point, so subtract its
2171 	 probability from the subtree probability.  */
2172 node->m_c->m_subtree_prob -= p;
2173
2174 if (node->m_left != NULL && node->m_right != NULL)
2175 {
2176 /* 1) the node has both children
2177
2178 If both children are single-valued cases with no
2179 children, finish up all the work. This way, we can save
2180 one ordered comparison. */
2181
2182 if (!node->m_left->has_child ()
2183 && node->m_left->m_c->is_single_value_p ()
2184 && !node->m_right->has_child ()
2185 && node->m_right->m_c->is_single_value_p ())
2186 {
2187 p = (node->m_right->m_c->m_prob
2188 / (node->m_c->m_subtree_prob + default_prob));
2189 bb = do_jump_if_equal (bb, index, node->m_right->m_c->get_low (),
2190 node->m_right->m_c->m_case_bb, p, loc);
2191
2192 p = (node->m_left->m_c->m_prob
2193 / (node->m_c->m_subtree_prob + default_prob));
2194 bb = do_jump_if_equal (bb, index, node->m_left->m_c->get_low (),
2195 node->m_left->m_c->m_case_bb, p, loc);
2196 }
2197 else
2198 {
2199 	      /* Branch to a block where the right-hand subtree is handled later.  */
2200 basic_block test_bb = split_edge (single_succ_edge (bb));
2201 redirect_edge_succ (single_pred_edge (test_bb),
2202 single_succ_edge (bb)->dest);
2203
2204 p = ((node->m_right->m_c->m_subtree_prob
2205 + default_prob.apply_scale (1, 2))
2206 / (node->m_c->m_subtree_prob + default_prob));
2207 bb = emit_cmp_and_jump_insns (bb, index, node->m_c->get_high (),
2208 GT_EXPR, test_bb, p, loc);
2209 default_prob = default_prob.apply_scale (1, 2);
2210
2211 /* Handle the left-hand subtree. */
2212 bb = emit_case_nodes (bb, index, node->m_left,
2213 default_prob, index_type, loc);
2214
2215 /* If the left-hand subtree fell through,
2216 don't let it fall into the right-hand subtree. */
2217 if (bb && m_default_bb)
2218 emit_jump (bb, m_default_bb);
2219
2220 bb = emit_case_nodes (test_bb, index, node->m_right,
2221 default_prob, index_type, loc);
2222 }
2223 }
2224 else if (node->m_left == NULL && node->m_right != NULL)
2225 {
2226 	  /* 2) the node has only a right child.  */
2227
2228 /* Here we have a right child but no left so we issue a conditional
2229 branch to default and process the right child.
2230
2231 Omit the conditional branch to default if the right child
2232 does not have any children and is single valued; it would
2233 cost too much space to save so little time. */
2234
2235 if (node->m_right->has_child ()
2236 || !node->m_right->m_c->is_single_value_p ())
2237 {
2238 p = (default_prob.apply_scale (1, 2)
2239 / (node->m_c->m_subtree_prob + default_prob));
2240 bb = emit_cmp_and_jump_insns (bb, index, node->m_c->get_low (),
2241 LT_EXPR, m_default_bb, p, loc);
2242 default_prob = default_prob.apply_scale (1, 2);
2243
2244 bb = emit_case_nodes (bb, index, node->m_right, default_prob,
2245 index_type, loc);
2246 }
2247 else
2248 {
2249 /* We cannot process node->right normally
2250 since we haven't ruled out the numbers less than
2251 this node's value. So handle node->right explicitly. */
2252 p = (node->m_right->m_c->m_subtree_prob
2253 / (node->m_c->m_subtree_prob + default_prob));
2254 bb = do_jump_if_equal (bb, index, node->m_right->m_c->get_low (),
2255 node->m_right->m_c->m_case_bb, p, loc);
2256 }
2257 }
2258 else if (node->m_left != NULL && node->m_right == NULL)
2259 {
2260 	  /* 3) just one subtree, on the left.  Similar to the previous case.  */
2261
2262 if (node->m_left->has_child ()
2263 || !node->m_left->m_c->is_single_value_p ())
2264 {
2265 p = (default_prob.apply_scale (1, 2)
2266 / (node->m_c->m_subtree_prob + default_prob));
2267 bb = emit_cmp_and_jump_insns (bb, index, node->m_c->get_high (),
2268 GT_EXPR, m_default_bb, p, loc);
2269 default_prob = default_prob.apply_scale (1, 2);
2270
2271 bb = emit_case_nodes (bb, index, node->m_left, default_prob,
2272 index_type, loc);
2273 }
2274 else
2275 {
2276 	      /* We cannot process node->left normally
2277 		 since we haven't ruled out the numbers greater than
2278 		 this node's value.  So handle node->left explicitly.  */
2279 p = (node->m_left->m_c->m_subtree_prob
2280 / (node->m_c->m_subtree_prob + default_prob));
2281 bb = do_jump_if_equal (bb, index, node->m_left->m_c->get_low (),
2282 node->m_left->m_c->m_case_bb, p, loc);
2283 }
2284 }
2285 }
2286 else
2287 {
2288 /* Node is a range. These cases are very similar to those for a single
2289 value, except that we do not start by testing whether this node
2290 is the one to branch to. */
2291 if (node->has_child () || node->m_c->get_type () != SIMPLE_CASE)
2292 {
2293 	  /* Branch to a block where the right-hand subtree is handled later.  */
2294 basic_block test_bb = split_edge (single_succ_edge (bb));
2295 redirect_edge_succ (single_pred_edge (test_bb),
2296 single_succ_edge (bb)->dest);
2297
2299 profile_probability right_prob = profile_probability::never ();
2300 if (node->m_right)
2301 right_prob = node->m_right->m_c->m_subtree_prob;
2302 p = ((right_prob + default_prob.apply_scale (1, 2))
2303 / (node->m_c->m_subtree_prob + default_prob));
2304
2305 bb = emit_cmp_and_jump_insns (bb, index, node->m_c->get_high (),
2306 GT_EXPR, test_bb, p, loc);
2307 default_prob = default_prob.apply_scale (1, 2);
2308
2309 /* Value belongs to this node or to the left-hand subtree. */
2310 p = node->m_c->m_prob / (node->m_c->m_subtree_prob + default_prob);
2311 bb = emit_cmp_and_jump_insns (bb, index, node->m_c->get_low (),
2312 GE_EXPR, node->m_c->m_case_bb, p, loc);
2313
2314 /* Handle the left-hand subtree. */
2315 bb = emit_case_nodes (bb, index, node->m_left,
2316 default_prob, index_type, loc);
2317
2318 /* If the left-hand subtree fell through,
2319 don't let it fall into the right-hand subtree. */
2320 if (bb && m_default_bb)
2321 emit_jump (bb, m_default_bb);
2322
2323 bb = emit_case_nodes (test_bb, index, node->m_right,
2324 default_prob, index_type, loc);
2325 }
2326 else
2327 {
2328 	  /* Node has no children, so check whether INDEX lies within the
2329 	     node's [low, high] range with a single range test; values
2330 	     outside the range go to the default block.  */
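	  /* For illustration (the case range is invented): generate_range_test
	     rewrites LOW <= INDEX && INDEX <= HIGH into a single unsigned
	     comparison, so for "case 3 ... 7" it yields operands equivalent to

	       lhs = (unsigned) INDEX - 3;  rhs = 4;

	     and the GT_EXPR branch below sends every out-of-range value to
	     the default block, while the fall-through jumps to the case.  */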
2331 tree lhs, rhs;
2332 generate_range_test (bb, index, node->m_c->get_low (),
2333 node->m_c->get_high (), &lhs, &rhs);
2334 p = default_prob / (node->m_c->m_subtree_prob + default_prob);
2335
2336 bb = emit_cmp_and_jump_insns (bb, lhs, rhs, GT_EXPR,
2337 m_default_bb, p, loc);
2338
2339 emit_jump (bb, node->m_c->m_case_bb);
2340 return NULL;
2341 }
2342 }
2343
2344 return bb;
2345 }
2346
2347 /* The main function of the pass scans statements for switches and invokes
2348    switch conversion on them.  */
2349
2350 namespace {
2351
2352 const pass_data pass_data_convert_switch =
2353 {
2354 GIMPLE_PASS, /* type */
2355 "switchconv", /* name */
2356 OPTGROUP_NONE, /* optinfo_flags */
2357 TV_TREE_SWITCH_CONVERSION, /* tv_id */
2358 ( PROP_cfg | PROP_ssa ), /* properties_required */
2359 0, /* properties_provided */
2360 0, /* properties_destroyed */
2361 0, /* todo_flags_start */
2362 TODO_update_ssa, /* todo_flags_finish */
2363 };
2364
2365 class pass_convert_switch : public gimple_opt_pass
2366 {
2367 public:
2368 pass_convert_switch (gcc::context *ctxt)
2369 : gimple_opt_pass (pass_data_convert_switch, ctxt)
2370 {}
2371
2372 /* opt_pass methods: */
2373 virtual bool gate (function *) { return flag_tree_switch_conversion != 0; }
2374 virtual unsigned int execute (function *);
2375
2376 }; // class pass_convert_switch
2377
2378 unsigned int
2379 pass_convert_switch::execute (function *fun)
2380 {
2381 basic_block bb;
2382 bool cfg_altered = false;
2383
2384 FOR_EACH_BB_FN (bb, fun)
2385 {
2386 gimple *stmt = last_stmt (bb);
2387 if (stmt && gimple_code (stmt) == GIMPLE_SWITCH)
2388 {
2389 if (dump_file)
2390 {
2391 expanded_location loc = expand_location (gimple_location (stmt));
2392
2393 fprintf (dump_file, "beginning to process the following "
2394 "SWITCH statement (%s:%d) : ------- \n",
2395 loc.file, loc.line);
2396 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2397 putc ('\n', dump_file);
2398 }
2399
2400 switch_conversion sconv;
2401 sconv.expand (as_a <gswitch *> (stmt));
2402 cfg_altered |= sconv.m_cfg_altered;
2403 if (!sconv.m_reason)
2404 {
2405 if (dump_file)
2406 {
2407 fputs ("Switch converted\n", dump_file);
2408 fputs ("--------------------------------\n", dump_file);
2409 }
2410
2411 /* Make no effort to update the post-dominator tree.
2412 It is actually not that hard for the transformations
2413 we have performed, but it is not supported
2414 by iterate_fix_dominators. */
2415 free_dominance_info (CDI_POST_DOMINATORS);
2416 }
2417 else
2418 {
2419 if (dump_file)
2420 {
2421 fputs ("Bailing out - ", dump_file);
2422 fputs (sconv.m_reason, dump_file);
2423 fputs ("\n--------------------------------\n", dump_file);
2424 }
2425 }
2426 }
2427 }
2428
2429   return cfg_altered ? TODO_cleanup_cfg : 0;
2430 }
2431
2432 } // anon namespace
2433
2434 gimple_opt_pass *
2435 make_pass_convert_switch (gcc::context *ctxt)
2436 {
2437 return new pass_convert_switch (ctxt);
2438 }
2439
2440 /* The main function of the pass scans statements for switches and lowers
2441    them into a decision tree, bit tests or a jump table.  */
2442
2443 namespace {
2444
2445 template <bool O0> class pass_lower_switch: public gimple_opt_pass
2446 {
2447 public:
2448 pass_lower_switch (gcc::context *ctxt) : gimple_opt_pass (data, ctxt) {}
2449
2450 static const pass_data data;
2451 opt_pass *
2452 clone ()
2453 {
2454 return new pass_lower_switch<O0> (m_ctxt);
2455 }
2456
2457 virtual bool
2458 gate (function *)
2459 {
2460 return !O0 || !optimize;
2461 }
2462
2463 virtual unsigned int execute (function *fun);
2464 }; // class pass_lower_switch
2465
2466 template <bool O0>
2467 const pass_data pass_lower_switch<O0>::data = {
2468 GIMPLE_PASS, /* type */
2469 O0 ? "switchlower_O0" : "switchlower", /* name */
2470 OPTGROUP_NONE, /* optinfo_flags */
2471 TV_TREE_SWITCH_LOWERING, /* tv_id */
2472 ( PROP_cfg | PROP_ssa ), /* properties_required */
2473 0, /* properties_provided */
2474 0, /* properties_destroyed */
2475 0, /* todo_flags_start */
2476 TODO_update_ssa | TODO_cleanup_cfg, /* todo_flags_finish */
2477 };
2478
2479 template <bool O0>
2480 unsigned int
2481 pass_lower_switch<O0>::execute (function *fun)
2482 {
2483 basic_block bb;
2484 bool expanded = false;
2485
2486 auto_vec<gimple *> switch_statements;
2487 switch_statements.create (1);
2488
2489 FOR_EACH_BB_FN (bb, fun)
2490 {
2491 gimple *stmt = last_stmt (bb);
2492 gswitch *swtch;
2493 if (stmt && (swtch = dyn_cast<gswitch *> (stmt)))
2494 {
2495 if (!O0)
2496 group_case_labels_stmt (swtch);
2497 switch_statements.safe_push (swtch);
2498 }
2499 }
2500
2501 for (unsigned i = 0; i < switch_statements.length (); i++)
2502 {
2503 gimple *stmt = switch_statements[i];
2504 if (dump_file)
2505 {
2506 expanded_location loc = expand_location (gimple_location (stmt));
2507
2508 fprintf (dump_file, "beginning to process the following "
2509 "SWITCH statement (%s:%d) : ------- \n",
2510 loc.file, loc.line);
2511 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2512 putc ('\n', dump_file);
2513 }
2514
2515 gswitch *swtch = dyn_cast<gswitch *> (stmt);
2516 if (swtch)
2517 {
2518 switch_decision_tree dt (swtch);
2519 expanded |= dt.analyze_switch_statement ();
2520 }
2521 }
2522
2523 if (expanded)
2524 {
2525 free_dominance_info (CDI_DOMINATORS);
2526 free_dominance_info (CDI_POST_DOMINATORS);
2527 mark_virtual_operands_for_renaming (cfun);
2528 }
2529
2530 return 0;
2531 }
2532
2533 } // anon namespace
2534
2535 gimple_opt_pass *
2536 make_pass_lower_switch_O0 (gcc::context *ctxt)
2537 {
2538 return new pass_lower_switch<true> (ctxt);
2539 }
2540 gimple_opt_pass *
2541 make_pass_lower_switch (gcc::context *ctxt)
2542 {
2543 return new pass_lower_switch<false> (ctxt);
2544 }
2545
2546