1 /* Lower GIMPLE_SWITCH expressions to something more efficient than
2 a jump table.
3 Copyright (C) 2006-2019 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by the
9 Free Software Foundation; either version 3, or (at your option) any
10 later version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not, write to the Free
19 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
20 02110-1301, USA. */
21
22 /* This file handles the lowering of GIMPLE_SWITCH to an indexed
23 load, or a series of bit-test-and-branch expressions. */
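/* As an illustrative sketch (the input function and the chosen constants are
   hypothetical), a switch whose cases only select constant values, such as

	int f (int argc)
	{
	  int result;
	  switch (argc)
	    {
	    case 1: result = 10; break;
	    case 2: result = 20; break;
	    case 3: result = 5; break;
	    default: result = 0; break;
	    }
	  return result;
	}

   may be converted into a load from a static array (conventionally named
   CSWTCH.<N>), roughly equivalent to

	static const int CSWTCH[3] = { 10, 20, 5 };
	...
	unsigned idx = (unsigned) argc - 1;
	result = idx <= 2 ? CSWTCH[idx] : 0;

   where the out-of-range branch falls back to the default values.  */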
24
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "backend.h"
29 #include "insn-codes.h"
30 #include "rtl.h"
31 #include "tree.h"
32 #include "gimple.h"
33 #include "cfghooks.h"
34 #include "tree-pass.h"
35 #include "ssa.h"
36 #include "optabs-tree.h"
37 #include "cgraph.h"
38 #include "gimple-pretty-print.h"
39 #include "params.h"
40 #include "fold-const.h"
41 #include "varasm.h"
42 #include "stor-layout.h"
43 #include "cfganal.h"
44 #include "gimplify.h"
45 #include "gimple-iterator.h"
46 #include "gimplify-me.h"
47 #include "gimple-fold.h"
48 #include "tree-cfg.h"
49 #include "cfgloop.h"
50 #include "alloc-pool.h"
51 #include "target.h"
52 #include "tree-into-ssa.h"
53 #include "omp-general.h"
54
55 /* ??? For lang_hooks.types.type_for_mode, but is there a word_mode
56 type in the GIMPLE type system that is language-independent? */
57 #include "langhooks.h"
58
59 #include "tree-switch-conversion.h"
60 \f
61 using namespace tree_switch_conversion;
62
63 /* Constructor. */
64
65 switch_conversion::switch_conversion (): m_final_bb (NULL), m_other_count (),
66 m_constructors (NULL), m_default_values (NULL),
67 m_arr_ref_first (NULL), m_arr_ref_last (NULL),
68 m_reason (NULL), m_default_case_nonstandard (false), m_cfg_altered (false)
69 {
70 }
71
72 /* Collect information about the SWTCH statement. */
73
74 void
75 switch_conversion::collect (gswitch *swtch)
76 {
77 unsigned int branch_num = gimple_switch_num_labels (swtch);
78 tree min_case, max_case;
79 unsigned int i;
80 edge e, e_default, e_first;
81 edge_iterator ei;
82
83 m_switch = swtch;
84
85 /* The gimplifier has already sorted the cases by CASE_LOW and ensured there
86 is a default label which is the first in the vector.
87 Collect the bits we can deduce from the CFG. */
88 m_index_expr = gimple_switch_index (swtch);
89 m_switch_bb = gimple_bb (swtch);
90 e_default = gimple_switch_default_edge (cfun, swtch);
91 m_default_bb = e_default->dest;
92 m_default_prob = e_default->probability;
93 m_default_count = e_default->count ();
94 FOR_EACH_EDGE (e, ei, m_switch_bb->succs)
95 if (e != e_default)
96 m_other_count += e->count ();
97
98 /* Get upper and lower bounds of case values, and the covered range. */
99 min_case = gimple_switch_label (swtch, 1);
100 max_case = gimple_switch_label (swtch, branch_num - 1);
101
102 m_range_min = CASE_LOW (min_case);
103 if (CASE_HIGH (max_case) != NULL_TREE)
104 m_range_max = CASE_HIGH (max_case);
105 else
106 m_range_max = CASE_LOW (max_case);
107
108 m_contiguous_range = true;
109 tree last = CASE_HIGH (min_case) ? CASE_HIGH (min_case) : m_range_min;
110 for (i = 2; i < branch_num; i++)
111 {
112 tree elt = gimple_switch_label (swtch, i);
113 if (wi::to_wide (last) + 1 != wi::to_wide (CASE_LOW (elt)))
114 {
115 m_contiguous_range = false;
116 break;
117 }
118 last = CASE_HIGH (elt) ? CASE_HIGH (elt) : CASE_LOW (elt);
119 }
120
121 if (m_contiguous_range)
122 e_first = gimple_switch_edge (cfun, swtch, 1);
123 else
124 e_first = e_default;
125
126 /* See if there is one common successor block for all branch
 127 targets. If it exists, record it in FINAL_BB.
 128 As the initial guess, take the destination of the first non-default
 129 case if the range is contiguous and of the default case otherwise,
 130 or that block's successor if it is a forwarder block. */
131 if (! single_pred_p (e_first->dest))
132 m_final_bb = e_first->dest;
133 else if (single_succ_p (e_first->dest)
134 && ! single_pred_p (single_succ (e_first->dest)))
135 m_final_bb = single_succ (e_first->dest);
136 /* Require that all switch destinations are either that common
137 FINAL_BB or a forwarder to it, except for the default
138 case if contiguous range. */
139 if (m_final_bb)
140 FOR_EACH_EDGE (e, ei, m_switch_bb->succs)
141 {
142 if (e->dest == m_final_bb)
143 continue;
144
145 if (single_pred_p (e->dest)
146 && single_succ_p (e->dest)
147 && single_succ (e->dest) == m_final_bb)
148 continue;
149
150 if (e == e_default && m_contiguous_range)
151 {
152 m_default_case_nonstandard = true;
153 continue;
154 }
155
156 m_final_bb = NULL;
157 break;
158 }
159
160 m_range_size
161 = int_const_binop (MINUS_EXPR, m_range_max, m_range_min);
162
163 /* Get a count of the number of case labels. Single-valued case labels
164 simply count as one, but a case range counts double, since it may
165 require two compares if it gets lowered as a branching tree. */
166 m_count = 0;
167 for (i = 1; i < branch_num; i++)
168 {
169 tree elt = gimple_switch_label (swtch, i);
170 m_count++;
171 if (CASE_HIGH (elt)
172 && ! tree_int_cst_equal (CASE_LOW (elt), CASE_HIGH (elt)))
173 m_count++;
174 }
175
176 /* Get the number of unique non-default targets out of the GIMPLE_SWITCH
 177 block. Assuming a CFG cleanup has already removed degenerate
 178 switch statements, this allows us to just use EDGE_COUNT. */
179 m_uniq = EDGE_COUNT (gimple_bb (swtch)->succs) - 1;
180 }
181
182 /* Checks whether the range given by the individual case statements of the
 183 switch statement isn't too big and whether the number of branches actually
 184 justifies the size of the new array. */
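/* For example (hypothetical numbers, assuming the default value 8 of the
   --param switch-conversion-max-branch-ratio knob): a switch with four
   single-valued case labels has m_count == 4, so any covered range larger
   than 32 values is rejected here.  */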
185
186 bool
187 switch_conversion::check_range ()
188 {
189 gcc_assert (m_range_size);
190 if (!tree_fits_uhwi_p (m_range_size))
191 {
192 m_reason = "index range way too large or otherwise unusable";
193 return false;
194 }
195
196 if (tree_to_uhwi (m_range_size)
197 > ((unsigned) m_count * SWITCH_CONVERSION_BRANCH_RATIO))
198 {
199 m_reason = "the maximum range-branch ratio exceeded";
200 return false;
201 }
202
203 return true;
204 }
205
206 /* Checks whether all basic blocks other than the final BB are empty. */
207
208 bool
209 switch_conversion::check_all_empty_except_final ()
210 {
211 edge e, e_default = find_edge (m_switch_bb, m_default_bb);
212 edge_iterator ei;
213
214 FOR_EACH_EDGE (e, ei, m_switch_bb->succs)
215 {
216 if (e->dest == m_final_bb)
217 continue;
218
219 if (!empty_block_p (e->dest))
220 {
221 if (m_contiguous_range && e == e_default)
222 {
223 m_default_case_nonstandard = true;
224 continue;
225 }
226
227 m_reason = "bad case - a non-final BB not empty";
228 return false;
229 }
230 }
231
232 return true;
233 }
234
235 /* This function checks whether all required values in phi nodes in final_bb
236 are constants. Required values are those that correspond to a basic block
237 which is a part of the examined switch statement. It returns true if the
238 phi nodes are OK, otherwise false. */
239
240 bool
241 switch_conversion::check_final_bb ()
242 {
243 gphi_iterator gsi;
244
245 m_phi_count = 0;
246 for (gsi = gsi_start_phis (m_final_bb); !gsi_end_p (gsi); gsi_next (&gsi))
247 {
248 gphi *phi = gsi.phi ();
249 unsigned int i;
250
251 if (virtual_operand_p (gimple_phi_result (phi)))
252 continue;
253
254 m_phi_count++;
255
256 for (i = 0; i < gimple_phi_num_args (phi); i++)
257 {
258 basic_block bb = gimple_phi_arg_edge (phi, i)->src;
259
260 if (bb == m_switch_bb
261 || (single_pred_p (bb)
262 && single_pred (bb) == m_switch_bb
263 && (!m_default_case_nonstandard
264 || empty_block_p (bb))))
265 {
266 tree reloc, val;
267 const char *reason = NULL;
268
269 val = gimple_phi_arg_def (phi, i);
270 if (!is_gimple_ip_invariant (val))
271 reason = "non-invariant value from a case";
272 else
273 {
274 reloc = initializer_constant_valid_p (val, TREE_TYPE (val));
275 if ((flag_pic && reloc != null_pointer_node)
276 || (!flag_pic && reloc == NULL_TREE))
277 {
278 if (reloc)
279 reason
280 = "value from a case would need runtime relocations";
281 else
282 reason
283 = "value from a case is not a valid initializer";
284 }
285 }
286 if (reason)
287 {
288 /* For contiguous range, we can allow non-constant
289 or one that needs relocation, as long as it is
290 only reachable from the default case. */
291 if (bb == m_switch_bb)
292 bb = m_final_bb;
293 if (!m_contiguous_range || bb != m_default_bb)
294 {
295 m_reason = reason;
296 return false;
297 }
298
299 unsigned int branch_num = gimple_switch_num_labels (m_switch);
300 for (unsigned int i = 1; i < branch_num; i++)
301 {
302 if (gimple_switch_label_bb (cfun, m_switch, i) == bb)
303 {
304 m_reason = reason;
305 return false;
306 }
307 }
308 m_default_case_nonstandard = true;
309 }
310 }
311 }
312 }
313
314 return true;
315 }
316
317 /* The following function allocates default_values, target_{in,out}_names and
318 constructors arrays. The last one is also populated with pointers to
319 vectors that will become constructors of new arrays. */
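/* Note that the single allocation of 3 * m_phi_count trees below is
   partitioned into the m_default_values, m_target_inbound_names and
   m_target_outbound_names arrays.  */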
320
321 void
322 switch_conversion::create_temp_arrays ()
323 {
324 int i;
325
326 m_default_values = XCNEWVEC (tree, m_phi_count * 3);
327 /* ??? Macros do not support multi argument templates in their
328 argument list. We create a typedef to work around that problem. */
329 typedef vec<constructor_elt, va_gc> *vec_constructor_elt_gc;
330 m_constructors = XCNEWVEC (vec_constructor_elt_gc, m_phi_count);
331 m_target_inbound_names = m_default_values + m_phi_count;
332 m_target_outbound_names = m_target_inbound_names + m_phi_count;
333 for (i = 0; i < m_phi_count; i++)
334 vec_alloc (m_constructors[i], tree_to_uhwi (m_range_size) + 1);
335 }
336
337 /* Populate the array of default values in the order of phi nodes.
338 DEFAULT_CASE is the CASE_LABEL_EXPR for the default switch branch
339 if the range is non-contiguous or the default case has standard
340 structure, otherwise it is the first non-default case instead. */
341
342 void
343 switch_conversion::gather_default_values (tree default_case)
344 {
345 gphi_iterator gsi;
346 basic_block bb = label_to_block (cfun, CASE_LABEL (default_case));
347 edge e;
348 int i = 0;
349
350 gcc_assert (CASE_LOW (default_case) == NULL_TREE
351 || m_default_case_nonstandard);
352
353 if (bb == m_final_bb)
354 e = find_edge (m_switch_bb, bb);
355 else
356 e = single_succ_edge (bb);
357
358 for (gsi = gsi_start_phis (m_final_bb); !gsi_end_p (gsi); gsi_next (&gsi))
359 {
360 gphi *phi = gsi.phi ();
361 if (virtual_operand_p (gimple_phi_result (phi)))
362 continue;
363 tree val = PHI_ARG_DEF_FROM_EDGE (phi, e);
364 gcc_assert (val);
365 m_default_values[i++] = val;
366 }
367 }
368
369 /* The following function populates the vectors in the constructors array with
370 future contents of the static arrays. The vectors are populated in the
371 order of phi nodes. */
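/* For instance (hypothetical case values), with case labels 1 and 3 but no
   case 2, the array slot for value 2 is filled with the corresponding default
   value, so every index in the covered range ends up with an entry.  */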
372
373 void
374 switch_conversion::build_constructors ()
375 {
376 unsigned i, branch_num = gimple_switch_num_labels (m_switch);
377 tree pos = m_range_min;
378 tree pos_one = build_int_cst (TREE_TYPE (pos), 1);
379
380 for (i = 1; i < branch_num; i++)
381 {
382 tree cs = gimple_switch_label (m_switch, i);
383 basic_block bb = label_to_block (cfun, CASE_LABEL (cs));
384 edge e;
385 tree high;
386 gphi_iterator gsi;
387 int j;
388
389 if (bb == m_final_bb)
390 e = find_edge (m_switch_bb, bb);
391 else
392 e = single_succ_edge (bb);
393 gcc_assert (e);
394
395 while (tree_int_cst_lt (pos, CASE_LOW (cs)))
396 {
397 int k;
398 for (k = 0; k < m_phi_count; k++)
399 {
400 constructor_elt elt;
401
402 elt.index = int_const_binop (MINUS_EXPR, pos, m_range_min);
403 elt.value
404 = unshare_expr_without_location (m_default_values[k]);
405 m_constructors[k]->quick_push (elt);
406 }
407
408 pos = int_const_binop (PLUS_EXPR, pos, pos_one);
409 }
410 gcc_assert (tree_int_cst_equal (pos, CASE_LOW (cs)));
411
412 j = 0;
413 if (CASE_HIGH (cs))
414 high = CASE_HIGH (cs);
415 else
416 high = CASE_LOW (cs);
417 for (gsi = gsi_start_phis (m_final_bb);
418 !gsi_end_p (gsi); gsi_next (&gsi))
419 {
420 gphi *phi = gsi.phi ();
421 if (virtual_operand_p (gimple_phi_result (phi)))
422 continue;
423 tree val = PHI_ARG_DEF_FROM_EDGE (phi, e);
424 tree low = CASE_LOW (cs);
425 pos = CASE_LOW (cs);
426
427 do
428 {
429 constructor_elt elt;
430
431 elt.index = int_const_binop (MINUS_EXPR, pos, m_range_min);
432 elt.value = unshare_expr_without_location (val);
433 m_constructors[j]->quick_push (elt);
434
435 pos = int_const_binop (PLUS_EXPR, pos, pos_one);
436 } while (!tree_int_cst_lt (high, pos)
437 && tree_int_cst_lt (low, pos));
438 j++;
439 }
440 }
441 }
442
443 /* If all values in the constructor vector are values of a linear function
 444 a * x + b, then return true. When true, COEFF_A and COEFF_B are the
 445 coefficients of the linear function. Note that equal values are a special
 446 case of a linear function with a equal to zero. */
447
448 bool
449 switch_conversion::contains_linear_function_p (vec<constructor_elt, va_gc> *vec,
450 wide_int *coeff_a,
451 wide_int *coeff_b)
452 {
453 unsigned int i;
454 constructor_elt *elt;
455
456 gcc_assert (vec->length () >= 2);
457
458 /* Let's try to find any linear function a * x + b that fits the
 459 given values. 'a' can be calculated as follows:
460
461 a = (y2 - y1) / (x2 - x1) where x2 - x1 = 1 (consecutive case indices)
462 a = y2 - y1
463
464 and
465
466 b = y2 - a * x2
467
468 */
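/* A worked example (hypothetical constructor contents): for case values
   5, 6, 7 and 8 with corresponding values 10, 13, 16 and 19 we get
   a = 13 - 10 = 3 and b = 13 - 3 * 6 = -5, and 3 * x - 5 indeed reproduces
   every entry, so the array can be replaced by this linear expression.  */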
469
470 tree elt0 = (*vec)[0].value;
471 tree elt1 = (*vec)[1].value;
472
473 if (TREE_CODE (elt0) != INTEGER_CST || TREE_CODE (elt1) != INTEGER_CST)
474 return false;
475
476 wide_int range_min = wi::to_wide (fold_convert (TREE_TYPE (elt0),
477 m_range_min));
478 wide_int y1 = wi::to_wide (elt0);
479 wide_int y2 = wi::to_wide (elt1);
480 wide_int a = y2 - y1;
481 wide_int b = y2 - a * (range_min + 1);
482
483 /* Verify that all values fulfill the linear function. */
484 FOR_EACH_VEC_SAFE_ELT (vec, i, elt)
485 {
486 if (TREE_CODE (elt->value) != INTEGER_CST)
487 return false;
488
489 wide_int value = wi::to_wide (elt->value);
490 if (a * range_min + b != value)
491 return false;
492
493 ++range_min;
494 }
495
496 *coeff_a = a;
497 *coeff_b = b;
498
499 return true;
500 }
501
502 /* Return the type which should be used for array elements, either TYPE's
503 main variant or, for integral types, some smaller integral type
504 that can still hold all the constants. */
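/* For example (an illustrative case), constants that all fit into 0 .. 255
   but were gathered from an "int" typed PHI can be stored in an
   "unsigned char" array, shrinking the emitted CSWTCH table to a quarter of
   its size on typical targets.  */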
505
506 tree
507 switch_conversion::array_value_type (tree type, int num)
508 {
509 unsigned int i, len = vec_safe_length (m_constructors[num]);
510 constructor_elt *elt;
511 int sign = 0;
512 tree smaller_type;
513
514 /* Types with alignments greater than their size can reach here, e.g. out of
 515 SRA. We can't use these as an array component type, so get back to the
 516 main variant first, which, for our purposes, is fine for other types as
 517 well. */
518
519 type = TYPE_MAIN_VARIANT (type);
520
521 if (!INTEGRAL_TYPE_P (type))
522 return type;
523
524 scalar_int_mode type_mode = SCALAR_INT_TYPE_MODE (type);
525 scalar_int_mode mode = get_narrowest_mode (type_mode);
526 if (GET_MODE_SIZE (type_mode) <= GET_MODE_SIZE (mode))
527 return type;
528
529 if (len < (optimize_bb_for_size_p (gimple_bb (m_switch)) ? 2 : 32))
530 return type;
531
532 FOR_EACH_VEC_SAFE_ELT (m_constructors[num], i, elt)
533 {
534 wide_int cst;
535
536 if (TREE_CODE (elt->value) != INTEGER_CST)
537 return type;
538
539 cst = wi::to_wide (elt->value);
540 while (1)
541 {
542 unsigned int prec = GET_MODE_BITSIZE (mode);
543 if (prec > HOST_BITS_PER_WIDE_INT)
544 return type;
545
546 if (sign >= 0 && cst == wi::zext (cst, prec))
547 {
548 if (sign == 0 && cst == wi::sext (cst, prec))
549 break;
550 sign = 1;
551 break;
552 }
553 if (sign <= 0 && cst == wi::sext (cst, prec))
554 {
555 sign = -1;
556 break;
557 }
558
559 if (sign == 1)
560 sign = 0;
561
562 if (!GET_MODE_WIDER_MODE (mode).exists (&mode)
563 || GET_MODE_SIZE (mode) >= GET_MODE_SIZE (type_mode))
564 return type;
565 }
566 }
567
568 if (sign == 0)
569 sign = TYPE_UNSIGNED (type) ? 1 : -1;
570 smaller_type = lang_hooks.types.type_for_mode (mode, sign >= 0);
571 if (GET_MODE_SIZE (type_mode)
572 <= GET_MODE_SIZE (SCALAR_INT_TYPE_MODE (smaller_type)))
573 return type;
574
575 return smaller_type;
576 }
577
578 /* Create an appropriate array type and declaration and assemble a static
 579 array variable. Also create a load statement that initializes
 580 the variable in question with a value from the static array.
 581 NUM is the index to the arrays of constructors, default values
 582 and target SSA names for this particular array.
 583 ARR_INDEX_TYPE is the type of the index of the new array,
 584 PHI is the phi node of the final BB that corresponds
 585 to the value that will be loaded from the created array. TIDX
 586 is an ssa name of a temporary variable holding the index for loads from the
 587 new array. */
588
589 void
590 switch_conversion::build_one_array (int num, tree arr_index_type,
591 gphi *phi, tree tidx)
592 {
593 tree name;
594 gimple *load;
595 gimple_stmt_iterator gsi = gsi_for_stmt (m_switch);
596 location_t loc = gimple_location (m_switch);
597
598 gcc_assert (m_default_values[num]);
599
600 name = copy_ssa_name (PHI_RESULT (phi));
601 m_target_inbound_names[num] = name;
602
603 wide_int coeff_a, coeff_b;
604 bool linear_p = contains_linear_function_p (m_constructors[num], &coeff_a,
605 &coeff_b);
606 if (linear_p)
607 {
608 if (dump_file && coeff_a.to_uhwi () > 0)
609 fprintf (dump_file, "Linear transformation with A = %" PRId64
610 " and B = %" PRId64 "\n", coeff_a.to_shwi (),
611 coeff_b.to_shwi ());
612
613 tree t = unsigned_type_for (TREE_TYPE (m_index_expr));
614 gimple_seq seq = NULL;
615 tree tmp = gimple_convert (&seq, t, m_index_expr);
616 tree tmp2 = gimple_build (&seq, MULT_EXPR, t,
617 wide_int_to_tree (t, coeff_a), tmp);
618 tree tmp3 = gimple_build (&seq, PLUS_EXPR, t, tmp2,
619 wide_int_to_tree (t, coeff_b));
620 tree tmp4 = gimple_convert (&seq, TREE_TYPE (name), tmp3);
621 gsi_insert_seq_before (&gsi, seq, GSI_SAME_STMT);
622 load = gimple_build_assign (name, tmp4);
623 }
624 else
625 {
626 tree array_type, ctor, decl, value_type, fetch, default_type;
627
628 default_type = TREE_TYPE (m_default_values[num]);
629 value_type = array_value_type (default_type, num);
630 array_type = build_array_type (value_type, arr_index_type);
631 if (default_type != value_type)
632 {
633 unsigned int i;
634 constructor_elt *elt;
635
636 FOR_EACH_VEC_SAFE_ELT (m_constructors[num], i, elt)
637 elt->value = fold_convert (value_type, elt->value);
638 }
639 ctor = build_constructor (array_type, m_constructors[num]);
640 TREE_CONSTANT (ctor) = true;
641 TREE_STATIC (ctor) = true;
642
643 decl = build_decl (loc, VAR_DECL, NULL_TREE, array_type);
644 TREE_STATIC (decl) = 1;
645 DECL_INITIAL (decl) = ctor;
646
647 DECL_NAME (decl) = create_tmp_var_name ("CSWTCH");
648 DECL_ARTIFICIAL (decl) = 1;
649 DECL_IGNORED_P (decl) = 1;
650 TREE_CONSTANT (decl) = 1;
651 TREE_READONLY (decl) = 1;
652 DECL_IGNORED_P (decl) = 1;
653 if (offloading_function_p (cfun->decl))
654 DECL_ATTRIBUTES (decl)
655 = tree_cons (get_identifier ("omp declare target"), NULL_TREE,
656 NULL_TREE);
657 varpool_node::finalize_decl (decl);
658
659 fetch = build4 (ARRAY_REF, value_type, decl, tidx, NULL_TREE,
660 NULL_TREE);
661 if (default_type != value_type)
662 {
663 fetch = fold_convert (default_type, fetch);
664 fetch = force_gimple_operand_gsi (&gsi, fetch, true, NULL_TREE,
665 true, GSI_SAME_STMT);
666 }
667 load = gimple_build_assign (name, fetch);
668 }
669
670 gsi_insert_before (&gsi, load, GSI_SAME_STMT);
671 update_stmt (load);
672 m_arr_ref_last = load;
673 }
674
675 /* Builds static arrays initialized with the values gathered from
 676 the switch statement. Also creates statements that load values from
 677 them. */
678
679 void
680 switch_conversion::build_arrays ()
681 {
682 tree arr_index_type;
683 tree tidx, sub, utype;
684 gimple *stmt;
685 gimple_stmt_iterator gsi;
686 gphi_iterator gpi;
687 int i;
688 location_t loc = gimple_location (m_switch);
689
690 gsi = gsi_for_stmt (m_switch);
691
692 /* Make sure we do not generate arithmetic in a subrange type. */
693 utype = TREE_TYPE (m_index_expr);
694 if (TREE_TYPE (utype))
695 utype = lang_hooks.types.type_for_mode (TYPE_MODE (TREE_TYPE (utype)), 1);
696 else
697 utype = lang_hooks.types.type_for_mode (TYPE_MODE (utype), 1);
698
699 arr_index_type = build_index_type (m_range_size);
700 tidx = make_ssa_name (utype);
701 sub = fold_build2_loc (loc, MINUS_EXPR, utype,
702 fold_convert_loc (loc, utype, m_index_expr),
703 fold_convert_loc (loc, utype, m_range_min));
704 sub = force_gimple_operand_gsi (&gsi, sub,
705 false, NULL, true, GSI_SAME_STMT);
706 stmt = gimple_build_assign (tidx, sub);
707
708 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
709 update_stmt (stmt);
710 m_arr_ref_first = stmt;
711
712 for (gpi = gsi_start_phis (m_final_bb), i = 0;
713 !gsi_end_p (gpi); gsi_next (&gpi))
714 {
715 gphi *phi = gpi.phi ();
716 if (!virtual_operand_p (gimple_phi_result (phi)))
717 build_one_array (i++, arr_index_type, phi, tidx);
718 else
719 {
720 edge e;
721 edge_iterator ei;
722 FOR_EACH_EDGE (e, ei, m_switch_bb->succs)
723 {
724 if (e->dest == m_final_bb)
725 break;
726 if (!m_default_case_nonstandard
727 || e->dest != m_default_bb)
728 {
729 e = single_succ_edge (e->dest);
730 break;
731 }
732 }
733 gcc_assert (e && e->dest == m_final_bb);
734 m_target_vop = PHI_ARG_DEF_FROM_EDGE (phi, e);
735 }
736 }
737 }
738
739 /* Generates and appropriately inserts loads of default values at the position
740 given by GSI. Returns the last inserted statement. */
741
742 gassign *
743 switch_conversion::gen_def_assigns (gimple_stmt_iterator *gsi)
744 {
745 int i;
746 gassign *assign = NULL;
747
748 for (i = 0; i < m_phi_count; i++)
749 {
750 tree name = copy_ssa_name (m_target_inbound_names[i]);
751 m_target_outbound_names[i] = name;
752 assign = gimple_build_assign (name, m_default_values[i]);
753 gsi_insert_before (gsi, assign, GSI_SAME_STMT);
754 update_stmt (assign);
755 }
756 return assign;
757 }
758
759 /* Deletes the unused bbs and edges that now contain the switch statement and
 760 its empty branch bbs. BBD is the now dead BB containing
 761 the original switch statement, FINAL is the last BB of the converted
 762 switch statement (in terms of succession), and DEFAULT_BB, if non-NULL,
 is the default target block, which must also be kept. */
763
764 void
765 switch_conversion::prune_bbs (basic_block bbd, basic_block final,
766 basic_block default_bb)
767 {
768 edge_iterator ei;
769 edge e;
770
771 for (ei = ei_start (bbd->succs); (e = ei_safe_edge (ei)); )
772 {
773 basic_block bb;
774 bb = e->dest;
775 remove_edge (e);
776 if (bb != final && bb != default_bb)
777 delete_basic_block (bb);
778 }
779 delete_basic_block (bbd);
780 }
781
782 /* Add values to phi nodes in final_bb for the two new edges. E1F is the edge
783 from the basic block loading values from an array and E2F from the basic
784 block loading default values. BBF is the last switch basic block (see the
785 bbf description in the comment below). */
786
787 void
788 switch_conversion::fix_phi_nodes (edge e1f, edge e2f, basic_block bbf)
789 {
790 gphi_iterator gsi;
791 int i;
792
793 for (gsi = gsi_start_phis (bbf), i = 0;
794 !gsi_end_p (gsi); gsi_next (&gsi))
795 {
796 gphi *phi = gsi.phi ();
797 tree inbound, outbound;
798 if (virtual_operand_p (gimple_phi_result (phi)))
799 inbound = outbound = m_target_vop;
800 else
801 {
802 inbound = m_target_inbound_names[i];
803 outbound = m_target_outbound_names[i++];
804 }
805 add_phi_arg (phi, inbound, e1f, UNKNOWN_LOCATION);
806 if (!m_default_case_nonstandard)
807 add_phi_arg (phi, outbound, e2f, UNKNOWN_LOCATION);
808 }
809 }
810
811 /* Creates a check whether the switch expression value actually falls into the
812 range given by all the cases. If it does not, the temporaries are loaded
813 with default values instead. */
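/* A sketch of the control flow built below (block names are illustrative
   only):

	bb0:  if (tidx <= range_size) goto bb1; else goto bb2;
	bb2:  load the default values into fresh SSA names;
	bb1:  load the CSWTCH array values;
	bbF:  PHI nodes merge the array loads with the default values.

   When the default case is nonstandard, bb2 is the original default block
   and no default-value assignments are emitted.  */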
814
815 void
816 switch_conversion::gen_inbound_check ()
817 {
818 tree label_decl1 = create_artificial_label (UNKNOWN_LOCATION);
819 tree label_decl2 = create_artificial_label (UNKNOWN_LOCATION);
820 tree label_decl3 = create_artificial_label (UNKNOWN_LOCATION);
821 glabel *label1, *label2, *label3;
822 tree utype, tidx;
823 tree bound;
824
825 gcond *cond_stmt;
826
827 gassign *last_assign = NULL;
828 gimple_stmt_iterator gsi;
829 basic_block bb0, bb1, bb2, bbf, bbd;
830 edge e01 = NULL, e02, e21, e1d, e1f, e2f;
831 location_t loc = gimple_location (m_switch);
832
833 gcc_assert (m_default_values);
834
835 bb0 = gimple_bb (m_switch);
836
837 tidx = gimple_assign_lhs (m_arr_ref_first);
838 utype = TREE_TYPE (tidx);
839
840 /* (end of) block 0 */
841 gsi = gsi_for_stmt (m_arr_ref_first);
842 gsi_next (&gsi);
843
844 bound = fold_convert_loc (loc, utype, m_range_size);
845 cond_stmt = gimple_build_cond (LE_EXPR, tidx, bound, NULL_TREE, NULL_TREE);
846 gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT);
847 update_stmt (cond_stmt);
848
849 /* block 2 */
850 if (!m_default_case_nonstandard)
851 {
852 label2 = gimple_build_label (label_decl2);
853 gsi_insert_before (&gsi, label2, GSI_SAME_STMT);
854 last_assign = gen_def_assigns (&gsi);
855 }
856
857 /* block 1 */
858 label1 = gimple_build_label (label_decl1);
859 gsi_insert_before (&gsi, label1, GSI_SAME_STMT);
860
861 /* block F */
862 gsi = gsi_start_bb (m_final_bb);
863 label3 = gimple_build_label (label_decl3);
864 gsi_insert_before (&gsi, label3, GSI_SAME_STMT);
865
866 /* cfg fix */
867 e02 = split_block (bb0, cond_stmt);
868 bb2 = e02->dest;
869
870 if (m_default_case_nonstandard)
871 {
872 bb1 = bb2;
873 bb2 = m_default_bb;
874 e01 = e02;
875 e01->flags = EDGE_TRUE_VALUE;
876 e02 = make_edge (bb0, bb2, EDGE_FALSE_VALUE);
877 edge e_default = find_edge (bb1, bb2);
878 for (gphi_iterator gsi = gsi_start_phis (bb2);
879 !gsi_end_p (gsi); gsi_next (&gsi))
880 {
881 gphi *phi = gsi.phi ();
882 tree arg = PHI_ARG_DEF_FROM_EDGE (phi, e_default);
883 add_phi_arg (phi, arg, e02,
884 gimple_phi_arg_location_from_edge (phi, e_default));
885 }
886 /* Partially fix the dominator tree, if it is available. */
887 if (dom_info_available_p (CDI_DOMINATORS))
888 redirect_immediate_dominators (CDI_DOMINATORS, bb1, bb0);
889 }
890 else
891 {
892 e21 = split_block (bb2, last_assign);
893 bb1 = e21->dest;
894 remove_edge (e21);
895 }
896
897 e1d = split_block (bb1, m_arr_ref_last);
898 bbd = e1d->dest;
899 remove_edge (e1d);
900
901 /* Flags and profiles of the edge for in-range values. */
902 if (!m_default_case_nonstandard)
903 e01 = make_edge (bb0, bb1, EDGE_TRUE_VALUE);
904 e01->probability = m_default_prob.invert ();
905
906 /* Flags and profiles of the edge taking care of out-of-range values. */
907 e02->flags &= ~EDGE_FALLTHRU;
908 e02->flags |= EDGE_FALSE_VALUE;
909 e02->probability = m_default_prob;
910
911 bbf = m_final_bb;
912
913 e1f = make_edge (bb1, bbf, EDGE_FALLTHRU);
914 e1f->probability = profile_probability::always ();
915
916 if (m_default_case_nonstandard)
917 e2f = NULL;
918 else
919 {
920 e2f = make_edge (bb2, bbf, EDGE_FALLTHRU);
921 e2f->probability = profile_probability::always ();
922 }
923
924 /* frequencies of the new BBs */
925 bb1->count = e01->count ();
926 bb2->count = e02->count ();
927 if (!m_default_case_nonstandard)
928 bbf->count = e1f->count () + e2f->count ();
929
930 /* Tidy blocks that have become unreachable. */
931 prune_bbs (bbd, m_final_bb,
932 m_default_case_nonstandard ? m_default_bb : NULL);
933
934 /* Fixup the PHI nodes in bbF. */
935 fix_phi_nodes (e1f, e2f, bbf);
936
937 /* Fix the dominator tree, if it is available. */
938 if (dom_info_available_p (CDI_DOMINATORS))
939 {
940 vec<basic_block> bbs_to_fix_dom;
941
942 set_immediate_dominator (CDI_DOMINATORS, bb1, bb0);
943 if (!m_default_case_nonstandard)
944 set_immediate_dominator (CDI_DOMINATORS, bb2, bb0);
945 if (! get_immediate_dominator (CDI_DOMINATORS, bbf))
946 /* If bbD was the immediate dominator ... */
947 set_immediate_dominator (CDI_DOMINATORS, bbf, bb0);
948
949 bbs_to_fix_dom.create (3 + (bb2 != bbf));
950 bbs_to_fix_dom.quick_push (bb0);
951 bbs_to_fix_dom.quick_push (bb1);
952 if (bb2 != bbf)
953 bbs_to_fix_dom.quick_push (bb2);
954 bbs_to_fix_dom.quick_push (bbf);
955
956 iterate_fix_dominators (CDI_DOMINATORS, bbs_to_fix_dom, true);
957 bbs_to_fix_dom.release ();
958 }
959 }
960
961 /* The following function is invoked on every switch statement (the current
962 one is given in SWTCH) and runs the individual phases of switch
963 conversion on it one after another until one fails or the conversion
964 is completed. On success, m_reason is NULL; otherwise it points
965 to a string with the reason why the conversion failed. */
966
967 void
968 switch_conversion::expand (gswitch *swtch)
969 {
970 /* Group case labels so that we get the right results from the heuristics
971 that decide on the code generation approach for this switch. */
972 m_cfg_altered |= group_case_labels_stmt (swtch);
973
974 /* If this switch is now a degenerate case with only a default label,
975 there is nothing left for us to do. */
976 if (gimple_switch_num_labels (swtch) < 2)
977 {
978 m_reason = "switch is a degenerate case";
979 return;
980 }
981
982 collect (swtch);
983
984 /* No error markers should reach here (they should be filtered out
985 during gimplification). */
986 gcc_checking_assert (TREE_TYPE (m_index_expr) != error_mark_node);
987
988 /* A switch on a constant should have been optimized in tree-cfg-cleanup. */
989 gcc_checking_assert (!TREE_CONSTANT (m_index_expr));
990
991 /* Prefer bit test if possible. */
992 if (tree_fits_uhwi_p (m_range_size)
993 && bit_test_cluster::can_be_handled (tree_to_uhwi (m_range_size), m_uniq)
994 && bit_test_cluster::is_beneficial (m_count, m_uniq))
995 {
996 m_reason = "expanding as bit test is preferable";
997 return;
998 }
999
1000 if (m_uniq <= 2)
1001 {
1002 /* This will be expanded as a decision tree. */
1003 m_reason = "expanding as jumps is preferable";
1004 return;
1005 }
1006
1007 /* If there is no common successor, we cannot do the transformation. */
1008 if (!m_final_bb)
1009 {
1010 m_reason = "no common successor to all case label target blocks found";
1011 return;
1012 }
1013
1014 /* Check that the case label values are within a reasonable range. */
1015 if (!check_range ())
1016 {
1017 gcc_assert (m_reason);
1018 return;
1019 }
1020
1021 /* For all the cases, see whether they are empty, whether the assignments
 1022 they represent are constant and so on... */
1023 if (!check_all_empty_except_final ())
1024 {
1025 gcc_assert (m_reason);
1026 return;
1027 }
1028 if (!check_final_bb ())
1029 {
1030 gcc_assert (m_reason);
1031 return;
1032 }
1033
1034 /* At this point all checks have passed and we can proceed with the
1035 transformation. */
1036
1037 create_temp_arrays ();
1038 gather_default_values (m_default_case_nonstandard
1039 ? gimple_switch_label (swtch, 1)
1040 : gimple_switch_default_label (swtch));
1041 build_constructors ();
1042
1043 build_arrays (); /* Build the static arrays and assignments. */
1044 gen_inbound_check (); /* Build the bounds check. */
1045
1046 m_cfg_altered = true;
1047 }
1048
1049 /* Destructor. */
1050
1051 switch_conversion::~switch_conversion ()
1052 {
1053 XDELETEVEC (m_constructors);
1054 XDELETEVEC (m_default_values);
1055 }
1056
1057 /* Constructor. */
1058
1059 group_cluster::group_cluster (vec<cluster *> &clusters,
1060 unsigned start, unsigned end)
1061 {
1062 gcc_checking_assert (end - start + 1 >= 1);
1063 m_prob = profile_probability::never ();
1064 m_cases.create (end - start + 1);
1065 for (unsigned i = start; i <= end; i++)
1066 {
1067 m_cases.quick_push (static_cast<simple_cluster *> (clusters[i]));
1068 m_prob += clusters[i]->m_prob;
1069 }
1070 m_subtree_prob = m_prob;
1071 }
1072
1073 /* Destructor. */
1074
1075 group_cluster::~group_cluster ()
1076 {
1077 for (unsigned i = 0; i < m_cases.length (); i++)
1078 delete m_cases[i];
1079
1080 m_cases.release ();
1081 }
1082
1083 /* Dump content of a cluster. */
1084
1085 void
1086 group_cluster::dump (FILE *f, bool details)
1087 {
1088 unsigned total_values = 0;
1089 for (unsigned i = 0; i < m_cases.length (); i++)
1090 total_values += m_cases[i]->get_range (m_cases[i]->get_low (),
1091 m_cases[i]->get_high ());
1092
1093 unsigned comparison_count = 0;
1094 for (unsigned i = 0; i < m_cases.length (); i++)
1095 {
1096 simple_cluster *sc = static_cast<simple_cluster *> (m_cases[i]);
1097 comparison_count += sc->m_range_p ? 2 : 1;
1098 }
1099
1100 unsigned HOST_WIDE_INT range = get_range (get_low (), get_high ());
1101 fprintf (f, "%s", get_type () == JUMP_TABLE ? "JT" : "BT");
1102
1103 if (details)
1104 fprintf (f, "(values:%d comparisons:%d range:" HOST_WIDE_INT_PRINT_DEC
1105 " density: %.2f%%)", total_values, comparison_count, range,
1106 100.0f * comparison_count / range);
1107
1108 fprintf (f, ":");
1109 PRINT_CASE (f, get_low ());
1110 fprintf (f, "-");
1111 PRINT_CASE (f, get_high ());
1112 fprintf (f, " ");
1113 }
1114
1115 /* Emit GIMPLE code to handle the cluster. */
1116
1117 void
1118 jump_table_cluster::emit (tree index_expr, tree,
1119 tree default_label_expr, basic_block default_bb)
1120 {
1121 unsigned HOST_WIDE_INT range = get_range (get_low (), get_high ());
1122 unsigned HOST_WIDE_INT nondefault_range = 0;
1123
1124 /* For a jump table we just emit a new gswitch statement that will
 1125 later be lowered to a jump table. */
1126 auto_vec <tree> labels;
1127 labels.create (m_cases.length ());
1128
1129 make_edge (m_case_bb, default_bb, 0);
1130 for (unsigned i = 0; i < m_cases.length (); i++)
1131 {
1132 labels.quick_push (unshare_expr (m_cases[i]->m_case_label_expr));
1133 make_edge (m_case_bb, m_cases[i]->m_case_bb, 0);
1134 }
1135
1136 gswitch *s = gimple_build_switch (index_expr,
1137 unshare_expr (default_label_expr), labels);
1138 gimple_stmt_iterator gsi = gsi_start_bb (m_case_bb);
1139 gsi_insert_after (&gsi, s, GSI_NEW_STMT);
1140
1141 /* Compute the number of values covered by each case edge; even per-value
 probabilities are derived from these counts below. */
1142 for (unsigned i = 0; i < m_cases.length (); i++)
1143 {
1144 simple_cluster *sc = static_cast<simple_cluster *> (m_cases[i]);
1145 edge case_edge = find_edge (m_case_bb, sc->m_case_bb);
1146 unsigned HOST_WIDE_INT case_range
1147 = sc->get_range (sc->get_low (), sc->get_high ());
1148 nondefault_range += case_range;
1149
1150 /* case_edge->aux is the number of values in the jump table that are
 1151 covered by the case_edge. */
1152 case_edge->aux = (void *) ((intptr_t) (case_edge->aux) + case_range);
1153 }
1154
1155 edge default_edge = gimple_switch_default_edge (cfun, s);
1156 default_edge->probability = profile_probability::never ();
1157
1158 for (unsigned i = 0; i < m_cases.length (); i++)
1159 {
1160 simple_cluster *sc = static_cast<simple_cluster *> (m_cases[i]);
1161 edge case_edge = find_edge (m_case_bb, sc->m_case_bb);
1162 case_edge->probability
1163 = profile_probability::always ().apply_scale ((intptr_t)case_edge->aux,
1164 range);
1165 }
1166
1167 /* The default edge gets the complement: the probability of all values
 not covered by any non-default case. */
1168 default_edge->probability
1169 += profile_probability::always ().apply_scale (nondefault_range,
1170 range).invert ();
1171
1172 switch_decision_tree::reset_out_edges_aux (s);
1173 }
1174
1175 /* Find jump tables of given CLUSTERS, where all members of the vector
1176 are of type simple_cluster. New clusters are returned. */
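/* The search below is a small dynamic programming pass: min[i] records the
   smallest number of clusters (together with the split point m_start) needed
   to cover the first i simple clusters, trying every candidate jump table
   clusters[j] .. clusters[i - 1] that can_be_handled accepts.  */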
1177
1178 vec<cluster *>
1179 jump_table_cluster::find_jump_tables (vec<cluster *> &clusters)
1180 {
1181 if (!is_enabled ())
1182 return clusters.copy ();
1183
1184 unsigned l = clusters.length ();
1185 auto_vec<min_cluster_item> min;
1186 min.reserve (l + 1);
1187
1188 min.quick_push (min_cluster_item (0, 0, 0));
1189
1190 for (unsigned i = 1; i <= l; i++)
1191 {
1192 /* Set minimal # of clusters with i-th item to infinite. */
1193 min.quick_push (min_cluster_item (INT_MAX, INT_MAX, INT_MAX));
1194
1195 for (unsigned j = 0; j < i; j++)
1196 {
1197 unsigned HOST_WIDE_INT s = min[j].m_non_jt_cases;
1198 if (i - j < case_values_threshold ())
1199 s += i - j;
1200
1201 /* Prefer the partitioning with fewer clusters; break ties in favor of
 fewer cases handled outside of jump tables. */
1202 if ((min[j].m_count + 1 < min[i].m_count
1203 || (min[j].m_count + 1 == min[i].m_count
1204 && s < min[i].m_non_jt_cases))
1205 && can_be_handled (clusters, j, i - 1))
1206 min[i] = min_cluster_item (min[j].m_count + 1, j, s);
1207 }
1208
1209 gcc_checking_assert (min[i].m_count != INT_MAX);
1210 }
1211
1212 /* No result. */
1213 if (min[l].m_count == INT_MAX)
1214 return clusters.copy ();
1215
1216 vec<cluster *> output;
1217 output.create (4);
1218
1219 /* Find and build the clusters. */
1220 for (int end = l;;)
1221 {
1222 int start = min[end].m_start;
1223
1224 /* Do not allow clusters with small number of cases. */
1225 if (is_beneficial (clusters, start, end - 1))
1226 output.safe_push (new jump_table_cluster (clusters, start, end - 1));
1227 else
1228 for (int i = end - 1; i >= start; i--)
1229 output.safe_push (clusters[i]);
1230
1231 end = start;
1232
1233 if (start <= 0)
1234 break;
1235 }
1236
1237 output.reverse ();
1238 return output;
1239 }
1240
1241 /* Return true when cluster starting at START and ending at END (inclusive)
1242 can build a jump-table. */
1243
1244 bool
1245 jump_table_cluster::can_be_handled (const vec<cluster *> &clusters,
1246 unsigned start, unsigned end)
1247 {
1248 /* If the switch is relatively small such that the cost of one
1249 indirect jump on the target are higher than the cost of a
1250 decision tree, go with the decision tree.
1251
1252 If range of values is much bigger than number of values,
1253 or if it is too large to represent in a HOST_WIDE_INT,
1254 make a sequence of conditional branches instead of a dispatch.
1255
1256 The definition of "much bigger" depends on whether we are
1257 optimizing for size or for speed. */
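/* As a numeric illustration (hypothetical values): with a ratio of 8 and a
   candidate covering the values 1, 2, 3 and 10 as four single-valued cases,
   comparison_count is 4 and the range is 10, so 10 <= 8 * 4 holds and a
   jump table is allowed; spreading the same four cases over a range of 100
   values would fail the test.  */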
1258 if (!flag_jump_tables)
1259 return false;
1260
1261 /* For algorithm correctness, jump table for a single case must return
1262 true. We bail out in is_beneficial if it's called just for
1263 a single case. */
1264 if (start == end)
1265 return true;
1266
1267 unsigned HOST_WIDE_INT max_ratio
1268 = optimize_insn_for_size_p () ? max_ratio_for_size : max_ratio_for_speed;
1269 unsigned HOST_WIDE_INT range = get_range (clusters[start]->get_low (),
1270 clusters[end]->get_high ());
1271 /* Check overflow. */
1272 if (range == 0)
1273 return false;
1274
1275 unsigned HOST_WIDE_INT comparison_count = 0;
1276 for (unsigned i = start; i <= end; i++)
1277 {
1278 simple_cluster *sc = static_cast<simple_cluster *> (clusters[i]);
1279 comparison_count += sc->m_range_p ? 2 : 1;
1280 }
1281
1282 return range <= max_ratio * comparison_count;
1283 }
1284
1285 /* Return true if the cluster starting at START and ending at END (inclusive)
 1286 is a profitable transformation. */
1287
1288 bool
1289 jump_table_cluster::is_beneficial (const vec<cluster *> &,
1290 unsigned start, unsigned end)
1291 {
1292 /* Single case bail out. */
1293 if (start == end)
1294 return false;
1295
1296 return end - start + 1 >= case_values_threshold ();
1297 }
1298
1299 /* Definition of jump_table_cluster constants. */
1300
1301 const unsigned HOST_WIDE_INT jump_table_cluster::max_ratio_for_size;
1302 const unsigned HOST_WIDE_INT jump_table_cluster::max_ratio_for_speed;
1303
1304 /* Find bit tests of given CLUSTERS, where all members of the vector
1305 are of type simple_cluster. New clusters are returned. */
1306
1307 vec<cluster *>
1308 bit_test_cluster::find_bit_tests (vec<cluster *> &clusters)
1309 {
1310 vec<cluster *> output;
1311 output.create (4);
1312
1313 unsigned l = clusters.length ();
1314 auto_vec<min_cluster_item> min;
1315 min.reserve (l + 1);
1316
1317 min.quick_push (min_cluster_item (0, 0, 0));
1318
1319 for (unsigned i = 1; i <= l; i++)
1320 {
1321 /* Set minimal # of clusters with i-th item to infinite. */
1322 min.quick_push (min_cluster_item (INT_MAX, INT_MAX, INT_MAX));
1323
1324 for (unsigned j = 0; j < i; j++)
1325 {
1326 if (min[j].m_count + 1 < min[i].m_count
1327 && can_be_handled (clusters, j, i - 1))
1328 min[i] = min_cluster_item (min[j].m_count + 1, j, INT_MAX);
1329 }
1330
1331 gcc_checking_assert (min[i].m_count != INT_MAX);
1332 }
1333
1334 /* No result. */
1335 if (min[l].m_count == INT_MAX)
1336 return clusters.copy ();
1337
1338 /* Find and build the clusters. */
1339 for (unsigned end = l;;)
1340 {
1341 int start = min[end].m_start;
1342
1343 if (is_beneficial (clusters, start, end - 1))
1344 {
1345 bool entire = start == 0 && end == clusters.length ();
1346 output.safe_push (new bit_test_cluster (clusters, start, end - 1,
1347 entire));
1348 }
1349 else
1350 for (int i = end - 1; i >= start; i--)
1351 output.safe_push (clusters[i]);
1352
1353 end = start;
1354
1355 if (start <= 0)
1356 break;
1357 }
1358
1359 output.reverse ();
1360 return output;
1361 }
1362
1363 /* Return true when RANGE of case values with UNIQ labels
1364 can build a bit test. */
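/* For instance, on a target with a 64-bit word any cluster spanning fewer
   than 64 values and branching to at most three distinct blocks qualifies.  */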
1365
1366 bool
1367 bit_test_cluster::can_be_handled (unsigned HOST_WIDE_INT range,
1368 unsigned int uniq)
1369 {
1370 /* Check overflow. */
1371 if (range == 0)
1372 return 0;
1373
1374 if (range >= GET_MODE_BITSIZE (word_mode))
1375 return false;
1376
1377 return uniq <= 3;
1378 }
1379
1380 /* Return true when cluster starting at START and ending at END (inclusive)
1381 can build a bit test. */
1382
1383 bool
1384 bit_test_cluster::can_be_handled (const vec<cluster *> &clusters,
1385 unsigned start, unsigned end)
1386 {
1387 /* For algorithm correctness, bit test for a single case must return
1388 true. We bail out in is_beneficial if it's called just for
1389 a single case. */
1390 if (start == end)
1391 return true;
1392
1393 unsigned HOST_WIDE_INT range = get_range (clusters[start]->get_low (),
1394 clusters[end]->get_high ());
1395 auto_bitmap dest_bbs;
1396
1397 for (unsigned i = start; i <= end; i++)
1398 {
1399 simple_cluster *sc = static_cast<simple_cluster *> (clusters[i]);
1400 bitmap_set_bit (dest_bbs, sc->m_case_bb->index);
1401 }
1402
1403 return can_be_handled (range, bitmap_count_bits (dest_bbs));
1404 }
1405
1406 /* Return true when a switch with COUNT cases and UNIQ distinct targets is
 1407 beneficial for the bit test transformation. */
1408
1409 bool
1410 bit_test_cluster::is_beneficial (unsigned count, unsigned uniq)
1411 {
1412 return (((uniq == 1 && count >= 3)
1413 || (uniq == 2 && count >= 5)
1414 || (uniq == 3 && count >= 6)));
1415 }
1416
1417 /* Return true if the cluster starting at START and ending at END (inclusive)
 1418 is a profitable transformation. */
1419
1420 bool
1421 bit_test_cluster::is_beneficial (const vec<cluster *> &clusters,
1422 unsigned start, unsigned end)
1423 {
1424 /* Single case bail out. */
1425 if (start == end)
1426 return false;
1427
1428 auto_bitmap dest_bbs;
1429
1430 for (unsigned i = start; i <= end; i++)
1431 {
1432 simple_cluster *sc = static_cast<simple_cluster *> (clusters[i]);
1433 bitmap_set_bit (dest_bbs, sc->m_case_bb->index);
1434 }
1435
1436 unsigned uniq = bitmap_count_bits (dest_bbs);
1437 unsigned count = end - start + 1;
1438 return is_beneficial (count, uniq);
1439 }
1440
1441 /* Comparison function for qsort to order bit tests by decreasing
1442 probability of execution. */
1443
1444 int
1445 case_bit_test::cmp (const void *p1, const void *p2)
1446 {
1447 const struct case_bit_test *const d1 = (const struct case_bit_test *) p1;
1448 const struct case_bit_test *const d2 = (const struct case_bit_test *) p2;
1449
1450 if (d2->bits != d1->bits)
1451 return d2->bits - d1->bits;
1452
1453 /* Stabilize the sort. */
1454 return (LABEL_DECL_UID (CASE_LABEL (d2->label))
1455 - LABEL_DECL_UID (CASE_LABEL (d1->label)));
1456 }
1457
1458 /* Expand a switch statement by a short sequence of bit-wise
1459 comparisons. "switch(x)" is effectively converted into
1460 "if ((1 << (x-MINVAL)) & CST)" where CST and MINVAL are
1461 integer constants.
1462
1463 INDEX_EXPR is the value being switched on.
1464
1465 MINVAL is the lowest case value in the case nodes,
1466 and RANGE is highest value minus MINVAL. MINVAL and RANGE
1467 are not guaranteed to be of the same type as INDEX_EXPR
1468 (the gimplifier doesn't change the type of case label values,
1469 and MINVAL and RANGE are derived from those values).
1470 MAXVAL is MINVAL + RANGE.
1471
1472 There *MUST* be max_case_bit_tests or fewer unique case
 1473 node targets. */
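/* A small worked example (hypothetical case values): cases 1, 3 and 5 that
   all branch to the same label, with MINVAL == 1, produce the mask
   (1 << 0) | (1 << 2) | (1 << 4) == 0x15, so the emitted dispatch is roughly

	csui = (word type) 1 << (x - 1);
	if (csui & 0x15) goto <case label>;
	goto <default label>;
 */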
1474
1475 void
1476 bit_test_cluster::emit (tree index_expr, tree index_type,
1477 tree, basic_block default_bb)
1478 {
1479 struct case_bit_test test[m_max_case_bit_tests] = { {} };
1480 unsigned int i, j, k;
1481 unsigned int count;
1482
1483 tree unsigned_index_type = unsigned_type_for (index_type);
1484
1485 gimple_stmt_iterator gsi;
1486 gassign *shift_stmt;
1487
1488 tree idx, tmp, csui;
1489 tree word_type_node = lang_hooks.types.type_for_mode (word_mode, 1);
1490 tree word_mode_zero = fold_convert (word_type_node, integer_zero_node);
1491 tree word_mode_one = fold_convert (word_type_node, integer_one_node);
1492 int prec = TYPE_PRECISION (word_type_node);
1493 wide_int wone = wi::one (prec);
1494
1495 tree minval = get_low ();
1496 tree maxval = get_high ();
1497 tree range = int_const_binop (MINUS_EXPR, maxval, minval);
1498 unsigned HOST_WIDE_INT bt_range = get_range (minval, maxval);
1499
1500 /* Go through all case labels, and collect the case labels, profile
1501 counts, and other information we need to build the branch tests. */
1502 count = 0;
1503 for (i = 0; i < m_cases.length (); i++)
1504 {
1505 unsigned int lo, hi;
1506 simple_cluster *n = static_cast<simple_cluster *> (m_cases[i]);
1507 for (k = 0; k < count; k++)
1508 if (n->m_case_bb == test[k].target_bb)
1509 break;
1510
1511 if (k == count)
1512 {
1513 gcc_checking_assert (count < m_max_case_bit_tests);
1514 test[k].mask = wi::zero (prec);
1515 test[k].target_bb = n->m_case_bb;
1516 test[k].label = n->m_case_label_expr;
1517 test[k].bits = 0;
1518 count++;
1519 }
1520
1521 test[k].bits += n->get_range (n->get_low (), n->get_high ());
1522
1523 lo = tree_to_uhwi (int_const_binop (MINUS_EXPR, n->get_low (), minval));
1524 if (n->get_high () == NULL_TREE)
1525 hi = lo;
1526 else
1527 hi = tree_to_uhwi (int_const_binop (MINUS_EXPR, n->get_high (),
1528 minval));
1529
1530 for (j = lo; j <= hi; j++)
1531 test[k].mask |= wi::lshift (wone, j);
1532 }
1533
1534 qsort (test, count, sizeof (*test), case_bit_test::cmp);
1535
1536 /* If all values are in the 0 .. BITS_PER_WORD-1 range, we can get rid of
1537 the minval subtractions, but it might make the mask constants more
1538 expensive. So, compare the costs. */
1539 if (compare_tree_int (minval, 0) > 0
1540 && compare_tree_int (maxval, GET_MODE_BITSIZE (word_mode)) < 0)
1541 {
1542 int cost_diff;
1543 HOST_WIDE_INT m = tree_to_uhwi (minval);
1544 rtx reg = gen_raw_REG (word_mode, 10000);
1545 bool speed_p = optimize_insn_for_speed_p ();
1546 cost_diff = set_rtx_cost (gen_rtx_PLUS (word_mode, reg,
1547 GEN_INT (-m)), speed_p);
1548 for (i = 0; i < count; i++)
1549 {
1550 rtx r = immed_wide_int_const (test[i].mask, word_mode);
1551 cost_diff += set_src_cost (gen_rtx_AND (word_mode, reg, r),
1552 word_mode, speed_p);
1553 r = immed_wide_int_const (wi::lshift (test[i].mask, m), word_mode);
1554 cost_diff -= set_src_cost (gen_rtx_AND (word_mode, reg, r),
1555 word_mode, speed_p);
1556 }
1557 if (cost_diff > 0)
1558 {
1559 for (i = 0; i < count; i++)
1560 test[i].mask = wi::lshift (test[i].mask, m);
1561 minval = build_zero_cst (TREE_TYPE (minval));
1562 range = maxval;
1563 }
1564 }
1565
1566 /* Now build the test-and-branch code. */
1567
1568 gsi = gsi_last_bb (m_case_bb);
1569
1570 /* idx = (unsigned)x - minval. */
1571 idx = fold_convert (unsigned_index_type, index_expr);
1572 idx = fold_build2 (MINUS_EXPR, unsigned_index_type, idx,
1573 fold_convert (unsigned_index_type, minval));
1574 idx = force_gimple_operand_gsi (&gsi, idx,
1575 /*simple=*/true, NULL_TREE,
1576 /*before=*/true, GSI_SAME_STMT);
1577
1578 if (m_handles_entire_switch)
1579 {
1580 /* if (idx > range) goto default */
1581 range
1582 = force_gimple_operand_gsi (&gsi,
1583 fold_convert (unsigned_index_type, range),
1584 /*simple=*/true, NULL_TREE,
1585 /*before=*/true, GSI_SAME_STMT);
1586 tmp = fold_build2 (GT_EXPR, boolean_type_node, idx, range);
1587 basic_block new_bb
1588 = hoist_edge_and_branch_if_true (&gsi, tmp, default_bb,
1589 profile_probability::unlikely ());
1590 gsi = gsi_last_bb (new_bb);
1591 }
1592
1593 /* csui = (1 << (word_mode) idx) */
1594 csui = make_ssa_name (word_type_node);
1595 tmp = fold_build2 (LSHIFT_EXPR, word_type_node, word_mode_one,
1596 fold_convert (word_type_node, idx));
1597 tmp = force_gimple_operand_gsi (&gsi, tmp,
1598 /*simple=*/false, NULL_TREE,
1599 /*before=*/true, GSI_SAME_STMT);
1600 shift_stmt = gimple_build_assign (csui, tmp);
1601 gsi_insert_before (&gsi, shift_stmt, GSI_SAME_STMT);
1602 update_stmt (shift_stmt);
1603
1604 profile_probability prob = profile_probability::always ();
1605
1606 /* for each unique set of cases:
1607 if (const & csui) goto target */
1608 for (k = 0; k < count; k++)
1609 {
1610 prob = profile_probability::always ().apply_scale (test[k].bits,
1611 bt_range);
1612 bt_range -= test[k].bits;
1613 tmp = wide_int_to_tree (word_type_node, test[k].mask);
1614 tmp = fold_build2 (BIT_AND_EXPR, word_type_node, csui, tmp);
1615 tmp = force_gimple_operand_gsi (&gsi, tmp,
1616 /*simple=*/true, NULL_TREE,
1617 /*before=*/true, GSI_SAME_STMT);
1618 tmp = fold_build2 (NE_EXPR, boolean_type_node, tmp, word_mode_zero);
1619 basic_block new_bb
1620 = hoist_edge_and_branch_if_true (&gsi, tmp, test[k].target_bb, prob);
1621 gsi = gsi_last_bb (new_bb);
1622 }
1623
1624 /* We should have removed all edges now. */
1625 gcc_assert (EDGE_COUNT (gsi_bb (gsi)->succs) == 0);
1626
1627 /* If nothing matched, go to the default label. */
1628 edge e = make_edge (gsi_bb (gsi), default_bb, EDGE_FALLTHRU);
1629 e->probability = profile_probability::always ();
1630 }
1631
1632 /* Split the basic block at the statement pointed to by GSIP, and insert
 1633 a branch to the target basic block CASE_BB conditional on the tree
 1634 expression COND.
 1635
 1636 A new true edge from the to-be-split basic block to CASE_BB is
 1637 created with probability PROB; the false edge leads to the newly
 1638 created block, whose profile count is derived from the inverted
 1639 probability.
 1640
 1641 The CFG is updated. The dominator tree will not be valid after
 1642 this transformation.
 1643
 1644 Returns the newly created basic block. */
1646
1647 basic_block
1648 bit_test_cluster::hoist_edge_and_branch_if_true (gimple_stmt_iterator *gsip,
1649 tree cond, basic_block case_bb,
1650 profile_probability prob)
1651 {
1652 tree tmp;
1653 gcond *cond_stmt;
1654 edge e_false;
1655 basic_block new_bb, split_bb = gsi_bb (*gsip);
1656
1657 edge e_true = make_edge (split_bb, case_bb, EDGE_TRUE_VALUE);
1658 e_true->probability = prob;
1659 gcc_assert (e_true->src == split_bb);
1660
1661 tmp = force_gimple_operand_gsi (gsip, cond, /*simple=*/true, NULL,
1662 /*before=*/true, GSI_SAME_STMT);
1663 cond_stmt = gimple_build_cond_from_tree (tmp, NULL_TREE, NULL_TREE);
1664 gsi_insert_before (gsip, cond_stmt, GSI_SAME_STMT);
1665
1666 e_false = split_block (split_bb, cond_stmt);
1667 new_bb = e_false->dest;
1668 redirect_edge_pred (e_true, split_bb);
1669
1670 e_false->flags &= ~EDGE_FALLTHRU;
1671 e_false->flags |= EDGE_FALSE_VALUE;
1672 e_false->probability = e_true->probability.invert ();
1673 new_bb->count = e_false->count ();
1674
1675 return new_bb;
1676 }
1677
1678 /* Compute the number of case labels that correspond to each outgoing edge of
1679 switch statement. Record this information in the aux field of the edge. */
1680
1681 void
1682 switch_decision_tree::compute_cases_per_edge ()
1683 {
1684 reset_out_edges_aux (m_switch);
1685 int ncases = gimple_switch_num_labels (m_switch);
1686 for (int i = ncases - 1; i >= 1; --i)
1687 {
1688 edge case_edge = gimple_switch_edge (cfun, m_switch, i);
1689 case_edge->aux = (void *) ((intptr_t) (case_edge->aux) + 1);
1690 }
1691 }
1692
1693 /* Analyze the switch statement and return true when the statement is expanded
 1694 as a decision tree. */
1695
1696 bool
1697 switch_decision_tree::analyze_switch_statement ()
1698 {
1699 unsigned l = gimple_switch_num_labels (m_switch);
1700 basic_block bb = gimple_bb (m_switch);
1701 auto_vec<cluster *> clusters;
1702 clusters.create (l - 1);
1703
1704 basic_block default_bb = gimple_switch_default_bb (cfun, m_switch);
1705 m_case_bbs.reserve (l);
1706 m_case_bbs.quick_push (default_bb);
1707
1708 compute_cases_per_edge ();
1709
1710 for (unsigned i = 1; i < l; i++)
1711 {
1712 tree elt = gimple_switch_label (m_switch, i);
1713 tree lab = CASE_LABEL (elt);
1714 basic_block case_bb = label_to_block (cfun, lab);
1715 edge case_edge = find_edge (bb, case_bb);
1716 tree low = CASE_LOW (elt);
1717 tree high = CASE_HIGH (elt);
1718
1719 profile_probability p
1720 = case_edge->probability.apply_scale (1, (intptr_t) (case_edge->aux));
1721 clusters.quick_push (new simple_cluster (low, high, elt, case_edge->dest,
1722 p));
1723 m_case_bbs.quick_push (case_edge->dest);
1724 }
1725
1726 reset_out_edges_aux (m_switch);
1727
1728 /* Find jump table clusters. */
1729 vec<cluster *> output = jump_table_cluster::find_jump_tables (clusters);
1730
1731 /* Find bit test clusters. */
1732 vec<cluster *> output2;
1733 auto_vec<cluster *> tmp;
1734 output2.create (1);
1735 tmp.create (1);
1736
1737 for (unsigned i = 0; i < output.length (); i++)
1738 {
1739 cluster *c = output[i];
1740 if (c->get_type () != SIMPLE_CASE)
1741 {
1742 if (!tmp.is_empty ())
1743 {
1744 vec<cluster *> n = bit_test_cluster::find_bit_tests (tmp);
1745 output2.safe_splice (n);
1746 n.release ();
1747 tmp.truncate (0);
1748 }
1749 output2.safe_push (c);
1750 }
1751 else
1752 tmp.safe_push (c);
1753 }
1754
1755 /* The temporary vector may still hold simple clusters left to process. */
1756 if (!tmp.is_empty ())
1757 {
1758 vec<cluster *> n = bit_test_cluster::find_bit_tests (tmp);
1759 output2.safe_splice (n);
1760 n.release ();
1761 }
1762
1763 if (dump_file)
1764 {
1765 fprintf (dump_file, ";; GIMPLE switch case clusters: ");
1766 for (unsigned i = 0; i < output2.length (); i++)
1767 output2[i]->dump (dump_file, dump_flags & TDF_DETAILS);
1768 fprintf (dump_file, "\n");
1769 }
1770
1771 output.release ();
1772
1773 bool expanded = try_switch_expansion (output2);
1774
1775 for (unsigned i = 0; i < output2.length (); i++)
1776 delete output2[i];
1777
1778 output2.release ();
1779
1780 return expanded;
1781 }
1782
1783 /* Attempt to expand CLUSTERS as a decision tree. Return true when
1784 expanded. */
1785
1786 bool
1787 switch_decision_tree::try_switch_expansion (vec<cluster *> &clusters)
1788 {
1789 tree index_expr = gimple_switch_index (m_switch);
1790 tree index_type = TREE_TYPE (index_expr);
1791 basic_block bb = gimple_bb (m_switch);
1792
1793 if (gimple_switch_num_labels (m_switch) == 1)
1794 return false;
1795
1796 /* Find the default case target label. */
1797 edge default_edge = gimple_switch_default_edge (cfun, m_switch);
1798 m_default_bb = default_edge->dest;
1799
1800 /* Do the insertion of a case label into m_case_list. The labels are
1801 fed to us in descending order from the sorted vector of case labels used
1802 in the tree part of the middle end. So the list we construct is
1803 sorted in ascending order. */
1804
1805 for (int i = clusters.length () - 1; i >= 0; i--)
1806 {
1807 case_tree_node *r = m_case_list;
1808 m_case_list = m_case_node_pool.allocate ();
1809 m_case_list->m_right = r;
1810 m_case_list->m_c = clusters[i];
1811 }
1812
1813 record_phi_operand_mapping ();
1814
1815 /* Split basic block that contains the gswitch statement. */
1816 gimple_stmt_iterator gsi = gsi_last_bb (bb);
1817 edge e;
1818 if (gsi_end_p (gsi))
1819 e = split_block_after_labels (bb);
1820 else
1821 {
1822 gsi_prev (&gsi);
1823 e = split_block (bb, gsi_stmt (gsi));
1824 }
1825 bb = split_edge (e);
1826
1827 /* Create new basic blocks for non-case clusters where specific expansion
1828 needs to happen. */
1829 for (unsigned i = 0; i < clusters.length (); i++)
1830 if (clusters[i]->get_type () != SIMPLE_CASE)
1831 {
1832 clusters[i]->m_case_bb = create_empty_bb (bb);
1833 clusters[i]->m_case_bb->loop_father = bb->loop_father;
1834 }
1835
1836 /* Do not do extra work for a single cluster. */
1837 if (clusters.length () == 1
1838 && clusters[0]->get_type () != SIMPLE_CASE)
1839 {
1840 cluster *c = clusters[0];
1841 c->emit (index_expr, index_type,
1842 gimple_switch_default_label (m_switch), m_default_bb);
1843 redirect_edge_succ (single_succ_edge (bb), c->m_case_bb);
1844 }
1845 else
1846 {
1847 emit (bb, index_expr, default_edge->probability, index_type);
1848
1849 /* Emit cluster-specific switch handling. */
1850 for (unsigned i = 0; i < clusters.length (); i++)
1851 if (clusters[i]->get_type () != SIMPLE_CASE)
1852 clusters[i]->emit (index_expr, index_type,
1853 gimple_switch_default_label (m_switch),
1854 m_default_bb);
1855 }
1856
1857 fix_phi_operands_for_edges ();
1858
1859 return true;
1860 }
1861
1862 /* Before switch transformation, record all SSA_NAMEs defined in the switch BB
1863 and used in a label basic block. */
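/* A hypothetical example: a case basic block may contain

     # x_3 = PHI <x_1 (switch_bb), x_2 (other_bb)>

   where x_1 is defined in the block holding the gswitch.  The mapping
   x_3 -> x_1 recorded here lets fix_phi_operands_for_edges supply x_1 as
   the PHI argument on any new edge the lowering adds into that block.  */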
1864
1865 void
1866 switch_decision_tree::record_phi_operand_mapping ()
1867 {
1868 basic_block switch_bb = gimple_bb (m_switch);
1869 /* Record all PHI nodes that have to be fixed after conversion. */
1870 for (unsigned i = 0; i < m_case_bbs.length (); i++)
1871 {
1872 gphi_iterator gsi;
1873 basic_block bb = m_case_bbs[i];
1874 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1875 {
1876 gphi *phi = gsi.phi ();
1877
1878 for (unsigned i = 0; i < gimple_phi_num_args (phi); i++)
1879 {
1880 basic_block phi_src_bb = gimple_phi_arg_edge (phi, i)->src;
1881 if (phi_src_bb == switch_bb)
1882 {
1883 tree def = gimple_phi_arg_def (phi, i);
1884 tree result = gimple_phi_result (phi);
1885 m_phi_mapping.put (result, def);
1886 break;
1887 }
1888 }
1889 }
1890 }
1891 }
1892
1893 /* Append new operands to PHI statements that were introduced due to
1894 addition of new edges to case labels. */
1895
1896 void
1897 switch_decision_tree::fix_phi_operands_for_edges ()
1898 {
1899 gphi_iterator gsi;
1900
1901 for (unsigned i = 0; i < m_case_bbs.length (); i++)
1902 {
1903 basic_block bb = m_case_bbs[i];
1904 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1905 {
1906 gphi *phi = gsi.phi ();
1907 for (unsigned j = 0; j < gimple_phi_num_args (phi); j++)
1908 {
1909 tree def = gimple_phi_arg_def (phi, j);
1910 if (def == NULL_TREE)
1911 {
1912 edge e = gimple_phi_arg_edge (phi, j);
1913 tree *definition
1914 = m_phi_mapping.get (gimple_phi_result (phi));
1915 gcc_assert (definition);
1916 add_phi_arg (phi, *definition, e, UNKNOWN_LOCATION);
1917 }
1918 }
1919 }
1920 }
1921 }
1922
1923 /* Generate a decision tree, switching on INDEX_EXPR and jumping to
1924 one of the case labels in m_case_list or to the default basic block.
1925
1926 We generate a binary decision tree to select the appropriate target
1927 code. */
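/* A rough sketch (probabilities, blocks and edges omitted): for

     switch (i) { case 1: ... case 3: ... case 5: ... case 7: ... case 9: ... }

   with 5 as the root of the balanced tree, the emitted comparisons are
   roughly

     if (i == 5) goto case_5;
     if (i > 5) goto higher;
     if (i == 1) goto case_1;
     if (i == 3) goto case_3;
     goto default;
   higher:
     if (i == 7) goto case_7;
     if (i == 9) goto case_9;
     goto default;  */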
1928
1929 void
1930 switch_decision_tree::emit (basic_block bb, tree index_expr,
1931 profile_probability default_prob, tree index_type)
1932 {
1933 balance_case_nodes (&m_case_list, NULL);
1934
1935 if (dump_file)
1936 dump_function_to_file (current_function_decl, dump_file, dump_flags);
1937 if (dump_file && (dump_flags & TDF_DETAILS))
1938 {
1939 int indent_step = ceil_log2 (TYPE_PRECISION (index_type)) + 2;
1940 fprintf (dump_file, ";; Expanding GIMPLE switch as decision tree:\n");
1941 gcc_assert (m_case_list != NULL);
1942 dump_case_nodes (dump_file, m_case_list, indent_step, 0);
1943 }
1944
1945 bb = emit_case_nodes (bb, index_expr, m_case_list, default_prob, index_type,
1946 gimple_location (m_switch));
1947
1948 if (bb)
1949 emit_jump (bb, m_default_bb);
1950
1951 /* Remove the GIMPLE switch statement and the basic block that held it. */
1952 bb = gimple_bb (m_switch);
1953 gimple_stmt_iterator gsi = gsi_last_bb (bb);
1954 gsi_remove (&gsi, true);
1955
1956 delete_basic_block (bb);
1957 }
1958
1959 /* Take an ordered list of case nodes
1960 and transform them into a near optimal binary tree,
1961 using the estimated execution probability of each case to decide
1962 where to split.
1963
1964 The transformation is performed by splitting the ordered
1965 list into two sections of roughly equal probability plus a pivot. The
1966 parts are then attached to the pivot as left and right branches. Each
1967 branch is then transformed recursively. */
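/* A small illustration (uniform probabilities assumed): the ordered list
   1 -> 3 -> 5 -> 7 -> 9 is split at 5, giving

        5
       / \
      1   7
       \   \
        3   9

   where the two-node sub-lists { 1, 3 } and { 7, 9 } are short enough to
   stay as single-level chains linked through m_right.  */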
1968
1969 void
1970 switch_decision_tree::balance_case_nodes (case_tree_node **head,
1971 case_tree_node *parent)
1972 {
1973 case_tree_node *np;
1974
1975 np = *head;
1976 if (np)
1977 {
1978 int i = 0;
1979 int ranges = 0;
1980 case_tree_node **npp;
1981 case_tree_node *left;
1982 profile_probability prob = profile_probability::never ();
1983
1984 /* Count the number of entries in the branch. Also count the ranges. */
1985
1986 while (np)
1987 {
1988 if (!tree_int_cst_equal (np->m_c->get_low (), np->m_c->get_high ()))
1989 ranges++;
1990
1991 i++;
1992 prob += np->m_c->m_prob;
1993 np = np->m_right;
1994 }
1995
1996 if (i > 2)
1997 {
1998 /* Split this list if it is long enough for that to help. */
1999 npp = head;
2000 left = *npp;
2001 profile_probability pivot_prob = prob.apply_scale (1, 2);
2002
2003 /* Find the place in the list that bisects the list's total estimated
2004 probability. */
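/* For instance (hypothetical probabilities): with nodes carrying 10%, 40%,
   20% and 30%, the scan below subtracts 10% and 40% without stopping (90%
   and 50% are still >= the 50% pivot) and stops after subtracting the 20%
   node, which becomes the pivot with { 10%, 40% } as its left part and
   { 30% } as its right part.  */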
2005 while (1)
2006 {
2007 /* Skip nodes while their probability does not reach
2008 that amount. */
2009 prob -= (*npp)->m_c->m_prob;
2010 if ((prob.initialized_p () && prob < pivot_prob)
2011 || ! (*npp)->m_right)
2012 break;
2013 npp = &(*npp)->m_right;
2014 }
2015
2016 np = *npp;
2017 *npp = 0;
2018 *head = np;
2019 np->m_parent = parent;
2020 np->m_left = left == np ? NULL : left;
2021
2022 /* Optimize each of the two split parts. */
2023 balance_case_nodes (&np->m_left, np);
2024 balance_case_nodes (&np->m_right, np);
2025 np->m_c->m_subtree_prob = np->m_c->m_prob;
2026 if (np->m_left)
2027 np->m_c->m_subtree_prob += np->m_left->m_c->m_subtree_prob;
2028 if (np->m_right)
2029 np->m_c->m_subtree_prob += np->m_right->m_c->m_subtree_prob;
2030 }
2031 else
2032 {
2033 /* Else leave this branch as one level,
2034 but fill in `parent' fields. */
2035 np = *head;
2036 np->m_parent = parent;
2037 np->m_c->m_subtree_prob = np->m_c->m_prob;
2038 for (; np->m_right; np = np->m_right)
2039 {
2040 np->m_right->m_parent = np;
2041 (*head)->m_c->m_subtree_prob += np->m_right->m_c->m_subtree_prob;
2042 }
2043 }
2044 }
2045 }
2046
2047 /* Dump ROOT, a list or tree of case nodes, to file. */
2048
2049 void
2050 switch_decision_tree::dump_case_nodes (FILE *f, case_tree_node *root,
2051 int indent_step, int indent_level)
2052 {
2053 if (root == 0)
2054 return;
2055 indent_level++;
2056
2057 dump_case_nodes (f, root->m_left, indent_step, indent_level);
2058
2059 fputs (";; ", f);
2060 fprintf (f, "%*s", indent_step * indent_level, "");
2061 root->m_c->dump (f);
2062 root->m_c->m_prob.dump (f);
2063 fputs (" subtree: ", f);
2064 root->m_c->m_subtree_prob.dump (f);
2065 fputs (")\n", f);
2066
2067 dump_case_nodes (f, root->m_right, indent_step, indent_level);
2068 }
2069
2070
2071 /* Add an unconditional jump to CASE_BB that happens in basic block BB. */
2072
2073 void
2074 switch_decision_tree::emit_jump (basic_block bb, basic_block case_bb)
2075 {
2076 edge e = single_succ_edge (bb);
2077 redirect_edge_succ (e, case_bb);
2078 }
2079
2080 /* Generate code to compare OP0 with OP1 and to jump to LABEL_BB if the
2081 comparison is true.
2082 COMPARISON is the GIMPLE comparison (EQ, NE, GT, etc.).
2083 PROB is the probability of jumping to LABEL_BB. */
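/* Shape of the result (illustrative):

     BB:      ...
              if (OP0 <COMPARISON> OP1) goto LABEL_BB; else goto NEW_BB;
     NEW_BB:  inherits BB's former successor; returned to the caller

   so callers can keep chaining further tests onto the returned block.  */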
2084
2085 basic_block
2086 switch_decision_tree::emit_cmp_and_jump_insns (basic_block bb, tree op0,
2087 tree op1, tree_code comparison,
2088 basic_block label_bb,
2089 profile_probability prob,
2090 location_t loc)
2091 {
2092 // TODO: in one place this is called with lhs != index.
2093 op1 = fold_convert (TREE_TYPE (op0), op1);
2094
2095 gcond *cond = gimple_build_cond (comparison, op0, op1, NULL_TREE, NULL_TREE);
2096 gimple_set_location (cond, loc);
2097 gimple_stmt_iterator gsi = gsi_last_bb (bb);
2098 gsi_insert_after (&gsi, cond, GSI_NEW_STMT);
2099
2100 gcc_assert (single_succ_p (bb));
2101
2102 /* Make a new basic block where the false branch will take place. */
2103 edge false_edge = split_block (bb, cond);
2104 false_edge->flags = EDGE_FALSE_VALUE;
2105 false_edge->probability = prob.invert ();
2106
2107 edge true_edge = make_edge (bb, label_bb, EDGE_TRUE_VALUE);
2108 true_edge->probability = prob;
2109
2110 return false_edge->dest;
2111 }
2112
2113 /* Generate code to jump to LABEL_BB if OP0 and OP1 are equal.
2114 PROB is the probability of jumping to LABEL_BB.
2115 BB is a basic block where the new condition will be placed. */
2116
2117 basic_block
2118 switch_decision_tree::do_jump_if_equal (basic_block bb, tree op0, tree op1,
2119 basic_block label_bb,
2120 profile_probability prob,
2121 location_t loc)
2122 {
2123 op1 = fold_convert (TREE_TYPE (op0), op1);
2124
2125 gcond *cond = gimple_build_cond (EQ_EXPR, op0, op1, NULL_TREE, NULL_TREE);
2126 gimple_set_location (cond, loc);
2127 gimple_stmt_iterator gsi = gsi_last_bb (bb);
2128 gsi_insert_before (&gsi, cond, GSI_SAME_STMT);
2129
2130 gcc_assert (single_succ_p (bb));
2131
2132 /* Make a new basic block where the false branch will take place. */
2133 edge false_edge = split_block (bb, cond);
2134 false_edge->flags = EDGE_FALSE_VALUE;
2135 false_edge->probability = prob.invert ();
2136
2137 edge true_edge = make_edge (bb, label_bb, EDGE_TRUE_VALUE);
2138 true_edge->probability = prob;
2139
2140 return false_edge->dest;
2141 }
2142
2143 /* Emit step-by-step code to select a case for the value of INDEX.
2144 The decision tree generated here follows the form of the
2145 case-node binary tree NODE, whose nodes represent test conditions.
2146 DEFAULT_PROB is probability of cases leading to default BB.
2147 INDEX_TYPE is the type of the index of the switch. */
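/* Probability bookkeeping sketch: the branch probability handed to
   do_jump_if_equal for a single-value node N is
   N->m_prob / (N->m_subtree_prob + DEFAULT_PROB), i.e. the chance of
   hitting exactly this case among everything still reachable here, and
   each split towards a right subtree leaves half of the remaining
   DEFAULT_PROB on either side.  */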
2148
2149 basic_block
2150 switch_decision_tree::emit_case_nodes (basic_block bb, tree index,
2151 case_tree_node *node,
2152 profile_probability default_prob,
2153 tree index_type, location_t loc)
2154 {
2155 profile_probability p;
2156
2157 /* If node is null, we are done. */
2158 if (node == NULL)
2159 return bb;
2160
2161 /* Single value case. */
2162 if (node->m_c->is_single_value_p ())
2163 {
2164 /* Node is single valued. First see if the index expression matches
2165 this node and then check our children, if any. */
2166 p = node->m_c->m_prob / (node->m_c->m_subtree_prob + default_prob);
2167 bb = do_jump_if_equal (bb, index, node->m_c->get_low (),
2168 node->m_c->m_case_bb, p, loc);
2169 /* The test for this case has been emitted at this point, so remove its
2170 weight from the subtree probability. */
2171 node->m_c->m_subtree_prob -= p;
2172
2173 if (node->m_left != NULL && node->m_right != NULL)
2174 {
2175 /* 1) the node has both children
2176
2177 If both children are single-valued cases with no
2178 children, finish up all the work. This way, we can save
2179 one ordered comparison. */
2180
2181 if (!node->m_left->has_child ()
2182 && node->m_left->m_c->is_single_value_p ()
2183 && !node->m_right->has_child ()
2184 && node->m_right->m_c->is_single_value_p ())
2185 {
2186 p = (node->m_right->m_c->m_prob
2187 / (node->m_c->m_subtree_prob + default_prob));
2188 bb = do_jump_if_equal (bb, index, node->m_right->m_c->get_low (),
2189 node->m_right->m_c->m_case_bb, p, loc);
2190
2191 p = (node->m_left->m_c->m_prob
2192 / (node->m_c->m_subtree_prob + default_prob));
2193 bb = do_jump_if_equal (bb, index, node->m_left->m_c->get_low (),
2194 node->m_left->m_c->m_case_bb, p, loc);
2195 }
2196 else
2197 {
2198 /* Branch to a label where we will handle it later. */
2199 basic_block test_bb = split_edge (single_succ_edge (bb));
2200 redirect_edge_succ (single_pred_edge (test_bb),
2201 single_succ_edge (bb)->dest);
2202
2203 p = ((node->m_right->m_c->m_subtree_prob
2204 + default_prob.apply_scale (1, 2))
2205 / (node->m_c->m_subtree_prob + default_prob));
2206 bb = emit_cmp_and_jump_insns (bb, index, node->m_c->get_high (),
2207 GT_EXPR, test_bb, p, loc);
2208 default_prob = default_prob.apply_scale (1, 2);
2209
2210 /* Handle the left-hand subtree. */
2211 bb = emit_case_nodes (bb, index, node->m_left,
2212 default_prob, index_type, loc);
2213
2214 /* If the left-hand subtree fell through,
2215 don't let it fall into the right-hand subtree. */
2216 if (bb && m_default_bb)
2217 emit_jump (bb, m_default_bb);
2218
2219 bb = emit_case_nodes (test_bb, index, node->m_right,
2220 default_prob, index_type, loc);
2221 }
2222 }
2223 else if (node->m_left == NULL && node->m_right != NULL)
2224 {
2225 /* 2) the node has only right child. */
2226
2227 /* Here we have a right child but no left so we issue a conditional
2228 branch to default and process the right child.
2229
2230 Omit the conditional branch to default if the right child
2231 does not have any children and is single valued; it would
2232 cost too much space to save so little time. */
2233
2234 if (node->m_right->has_child ()
2235 || !node->m_right->m_c->is_single_value_p ())
2236 {
2237 p = (default_prob.apply_scale (1, 2)
2238 / (node->m_c->m_subtree_prob + default_prob));
2239 bb = emit_cmp_and_jump_insns (bb, index, node->m_c->get_low (),
2240 LT_EXPR, m_default_bb, p, loc);
2241 default_prob = default_prob.apply_scale (1, 2);
2242
2243 bb = emit_case_nodes (bb, index, node->m_right, default_prob,
2244 index_type, loc);
2245 }
2246 else
2247 {
2248 /* We cannot process node->right normally
2249 since we haven't ruled out the numbers less than
2250 this node's value. So handle node->right explicitly. */
2251 p = (node->m_right->m_c->m_subtree_prob
2252 / (node->m_c->m_subtree_prob + default_prob));
2253 bb = do_jump_if_equal (bb, index, node->m_right->m_c->get_low (),
2254 node->m_right->m_c->m_case_bb, p, loc);
2255 }
2256 }
2257 else if (node->m_left != NULL && node->m_right == NULL)
2258 {
2259 /* 3) just one subtree, on the left. Similar case as previous. */
2260
2261 if (node->m_left->has_child ()
2262 || !node->m_left->m_c->is_single_value_p ())
2263 {
2264 p = (default_prob.apply_scale (1, 2)
2265 / (node->m_c->m_subtree_prob + default_prob));
2266 bb = emit_cmp_and_jump_insns (bb, index, node->m_c->get_high (),
2267 GT_EXPR, m_default_bb, p, loc);
2268 default_prob = default_prob.apply_scale (1, 2);
2269
2270 bb = emit_case_nodes (bb, index, node->m_left, default_prob,
2271 index_type, loc);
2272 }
2273 else
2274 {
2275 /* We cannot process node->left normally
2276 since we haven't ruled out the numbers greater than
2277 this node's value. So handle node->left explicitly. */
2278 p = (node->m_left->m_c->m_subtree_prob
2279 / (node->m_c->m_subtree_prob + default_prob));
2280 bb = do_jump_if_equal (bb, index, node->m_left->m_c->get_low (),
2281 node->m_left->m_c->m_case_bb, p, loc);
2282 }
2283 }
2284 }
2285 else
2286 {
2287 /* Node is a range. These cases are very similar to those for a single
2288 value, except that we do not start by testing whether this node
2289 is the one to branch to. */
2290 if (node->has_child () || node->m_c->get_type () != SIMPLE_CASE)
2291 {
2292 /* Branch to a label where we will handle it later. */
2293 basic_block test_bb = split_edge (single_succ_edge (bb));
2294 redirect_edge_succ (single_pred_edge (test_bb),
2295 single_succ_edge (bb)->dest);
2296
2297
2298 profile_probability right_prob = profile_probability::never ();
2299 if (node->m_right)
2300 right_prob = node->m_right->m_c->m_subtree_prob;
2301 p = ((right_prob + default_prob.apply_scale (1, 2))
2302 / (node->m_c->m_subtree_prob + default_prob));
2303
2304 bb = emit_cmp_and_jump_insns (bb, index, node->m_c->get_high (),
2305 GT_EXPR, test_bb, p, loc);
2306 default_prob = default_prob.apply_scale (1, 2);
2307
2308 /* Value belongs to this node or to the left-hand subtree. */
2309 p = node->m_c->m_prob / (node->m_c->m_subtree_prob + default_prob);
2310 bb = emit_cmp_and_jump_insns (bb, index, node->m_c->get_low (),
2311 GE_EXPR, node->m_c->m_case_bb, p, loc);
2312
2313 /* Handle the left-hand subtree. */
2314 bb = emit_case_nodes (bb, index, node->m_left,
2315 default_prob, index_type, loc);
2316
2317 /* If the left-hand subtree fell through,
2318 don't let it fall into the right-hand subtree. */
2319 if (bb && m_default_bb)
2320 emit_jump (bb, m_default_bb);
2321
2322 bb = emit_case_nodes (test_bb, index, node->m_right,
2323 default_prob, index_type, loc);
2324 }
2325 else
2326 {
2327 /* Node has no children so we check low and high bounds to remove
2328 redundant tests. Only one of the bounds can exist,
2329 since otherwise this node is bounded--a case tested already. */
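/* For instance, for a leaf node covering 10 ... 20 the call below is
   expected to produce roughly LHS = (unsigned type) (INDEX - 10) and
   RHS = 10, so one unsigned GT comparison sends every out-of-range value
   to the default block (an illustrative sketch; the exact expressions
   generate_range_test builds may differ).  */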
2330 tree lhs, rhs;
2331 generate_range_test (bb, index, node->m_c->get_low (),
2332 node->m_c->get_high (), &lhs, &rhs);
2333 p = default_prob / (node->m_c->m_subtree_prob + default_prob);
2334
2335 bb = emit_cmp_and_jump_insns (bb, lhs, rhs, GT_EXPR,
2336 m_default_bb, p, loc);
2337
2338 emit_jump (bb, node->m_c->m_case_bb);
2339 return NULL;
2340 }
2341 }
2342
2343 return bb;
2344 }
2345
2346 /* The main function of the pass scans statements for switches and attempts
2347 switch conversion on them. */
2348
2349 namespace {
2350
2351 const pass_data pass_data_convert_switch =
2352 {
2353 GIMPLE_PASS, /* type */
2354 "switchconv", /* name */
2355 OPTGROUP_NONE, /* optinfo_flags */
2356 TV_TREE_SWITCH_CONVERSION, /* tv_id */
2357 ( PROP_cfg | PROP_ssa ), /* properties_required */
2358 0, /* properties_provided */
2359 0, /* properties_destroyed */
2360 0, /* todo_flags_start */
2361 TODO_update_ssa, /* todo_flags_finish */
2362 };
2363
2364 class pass_convert_switch : public gimple_opt_pass
2365 {
2366 public:
2367 pass_convert_switch (gcc::context *ctxt)
2368 : gimple_opt_pass (pass_data_convert_switch, ctxt)
2369 {}
2370
2371 /* opt_pass methods: */
2372 virtual bool gate (function *) { return flag_tree_switch_conversion != 0; }
2373 virtual unsigned int execute (function *);
2374
2375 }; // class pass_convert_switch
2376
2377 unsigned int
2378 pass_convert_switch::execute (function *fun)
2379 {
2380 basic_block bb;
2381 bool cfg_altered = false;
2382
2383 FOR_EACH_BB_FN (bb, fun)
2384 {
2385 gimple *stmt = last_stmt (bb);
2386 if (stmt && gimple_code (stmt) == GIMPLE_SWITCH)
2387 {
2388 if (dump_file)
2389 {
2390 expanded_location loc = expand_location (gimple_location (stmt));
2391
2392 fprintf (dump_file, "beginning to process the following "
2393 "SWITCH statement (%s:%d) : ------- \n",
2394 loc.file, loc.line);
2395 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2396 putc ('\n', dump_file);
2397 }
2398
2399 switch_conversion sconv;
2400 sconv.expand (as_a <gswitch *> (stmt));
2401 cfg_altered |= sconv.m_cfg_altered;
2402 if (!sconv.m_reason)
2403 {
2404 if (dump_file)
2405 {
2406 fputs ("Switch converted\n", dump_file);
2407 fputs ("--------------------------------\n", dump_file);
2408 }
2409
2410 /* Make no effort to update the post-dominator tree.
2411 It is actually not that hard for the transformations
2412 we have performed, but it is not supported
2413 by iterate_fix_dominators. */
2414 free_dominance_info (CDI_POST_DOMINATORS);
2415 }
2416 else
2417 {
2418 if (dump_file)
2419 {
2420 fputs ("Bailing out - ", dump_file);
2421 fputs (sconv.m_reason, dump_file);
2422 fputs ("\n--------------------------------\n", dump_file);
2423 }
2424 }
2425 }
2426 }
2427
2428 return cfg_altered ? TODO_cleanup_cfg : 0;
2429 }
2430
2431 } // anon namespace
2432
2433 gimple_opt_pass *
2434 make_pass_convert_switch (gcc::context *ctxt)
2435 {
2436 return new pass_convert_switch (ctxt);
2437 }
2438
2439 /* The main function of the pass scans statements for switches and lowers
2440 them to a jump table, a series of bit tests, or a decision tree. */
2441
2442 namespace {
2443
2444 template <bool O0> class pass_lower_switch: public gimple_opt_pass
2445 {
2446 public:
2447 pass_lower_switch (gcc::context *ctxt) : gimple_opt_pass (data, ctxt) {}
2448
2449 static const pass_data data;
2450 opt_pass *
2451 clone ()
2452 {
2453 return new pass_lower_switch<O0> (m_ctxt);
2454 }
2455
2456 virtual bool
2457 gate (function *)
2458 {
2459 return !O0 || !optimize;
2460 }
2461
2462 virtual unsigned int execute (function *fun);
2463 }; // class pass_lower_switch
2464
2465 template <bool O0>
2466 const pass_data pass_lower_switch<O0>::data = {
2467 GIMPLE_PASS, /* type */
2468 O0 ? "switchlower_O0" : "switchlower", /* name */
2469 OPTGROUP_NONE, /* optinfo_flags */
2470 TV_TREE_SWITCH_LOWERING, /* tv_id */
2471 ( PROP_cfg | PROP_ssa ), /* properties_required */
2472 0, /* properties_provided */
2473 0, /* properties_destroyed */
2474 0, /* todo_flags_start */
2475 TODO_update_ssa | TODO_cleanup_cfg, /* todo_flags_finish */
2476 };
2477
2478 template <bool O0>
2479 unsigned int
2480 pass_lower_switch<O0>::execute (function *fun)
2481 {
2482 basic_block bb;
2483 bool expanded = false;
2484
2485 auto_vec<gimple *> switch_statements;
2486 switch_statements.create (1);
2487
2488 FOR_EACH_BB_FN (bb, fun)
2489 {
2490 gimple *stmt = last_stmt (bb);
2491 gswitch *swtch;
2492 if (stmt && (swtch = dyn_cast<gswitch *> (stmt)))
2493 {
2494 if (!O0)
2495 group_case_labels_stmt (swtch);
2496 switch_statements.safe_push (swtch);
2497 }
2498 }
2499
2500 for (unsigned i = 0; i < switch_statements.length (); i++)
2501 {
2502 gimple *stmt = switch_statements[i];
2503 if (dump_file)
2504 {
2505 expanded_location loc = expand_location (gimple_location (stmt));
2506
2507 fprintf (dump_file, "beginning to process the following "
2508 "SWITCH statement (%s:%d) : ------- \n",
2509 loc.file, loc.line);
2510 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2511 putc ('\n', dump_file);
2512 }
2513
2514 gswitch *swtch = dyn_cast<gswitch *> (stmt);
2515 if (swtch)
2516 {
2517 switch_decision_tree dt (swtch);
2518 expanded |= dt.analyze_switch_statement ();
2519 }
2520 }
2521
2522 if (expanded)
2523 {
2524 free_dominance_info (CDI_DOMINATORS);
2525 free_dominance_info (CDI_POST_DOMINATORS);
2526 mark_virtual_operands_for_renaming (cfun);
2527 }
2528
2529 return 0;
2530 }
2531
2532 } // anon namespace
2533
2534 gimple_opt_pass *
2535 make_pass_lower_switch_O0 (gcc::context *ctxt)
2536 {
2537 return new pass_lower_switch<true> (ctxt);
2538 }
2539 gimple_opt_pass *
2540 make_pass_lower_switch (gcc::context *ctxt)
2541 {
2542 return new pass_lower_switch<false> (ctxt);
2543 }
2544
2545