Revert "switch lowering: limit number of cluster attemps"
[gcc.git] / gcc / tree-switch-conversion.c
1 /* Lower GIMPLE_SWITCH expressions to something more efficient than
2 a jump table.
3 Copyright (C) 2006-2020 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by the
9 Free Software Foundation; either version 3, or (at your option) any
10 later version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not, write to the Free
19 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
20 02110-1301, USA. */
21
22 /* This file handles the lowering of GIMPLE_SWITCH to an indexed
23 load, or a series of bit-test-and-branch expressions. */
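
/* As an illustrative sketch (not a literal dump of the generated GIMPLE)
   of the switch-conversion part of this file, a dense switch assigning
   constants, e.g.

     switch (i)
       {
       case 0: x = 10; break;
       case 1: x = 11; break;
       case 2: x = 12; break;
       case 3: x = 13; break;
       default: x = 0; break;
       }

   can be replaced by a lookup in a static array (named CSWTCH.* by this
   pass) guarded by a range check, roughly:

     static const int CSWTCH[] = { 10, 11, 12, 13 };
     if (i <= 3)
       x = CSWTCH[i];
     else
       x = 0;

   The exact shape of the emitted code differs, but this captures the idea.  */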
24
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "backend.h"
29 #include "insn-codes.h"
30 #include "rtl.h"
31 #include "tree.h"
32 #include "gimple.h"
33 #include "cfghooks.h"
34 #include "tree-pass.h"
35 #include "ssa.h"
36 #include "optabs-tree.h"
37 #include "cgraph.h"
38 #include "gimple-pretty-print.h"
39 #include "fold-const.h"
40 #include "varasm.h"
41 #include "stor-layout.h"
42 #include "cfganal.h"
43 #include "gimplify.h"
44 #include "gimple-iterator.h"
45 #include "gimplify-me.h"
46 #include "gimple-fold.h"
47 #include "tree-cfg.h"
48 #include "cfgloop.h"
49 #include "alloc-pool.h"
50 #include "target.h"
51 #include "tree-into-ssa.h"
52 #include "omp-general.h"
53
54 /* ??? For lang_hooks.types.type_for_mode, but is there a word_mode
55 type in the GIMPLE type system that is language-independent? */
56 #include "langhooks.h"
57
58 #include "tree-switch-conversion.h"
59 \f
60 using namespace tree_switch_conversion;
61
62 /* Constructor. */
63
64 switch_conversion::switch_conversion (): m_final_bb (NULL),
65 m_constructors (NULL), m_default_values (NULL),
66 m_arr_ref_first (NULL), m_arr_ref_last (NULL),
67 m_reason (NULL), m_default_case_nonstandard (false), m_cfg_altered (false)
68 {
69 }
70
71 /* Collect information about the SWTCH statement. */
72
73 void
74 switch_conversion::collect (gswitch *swtch)
75 {
76 unsigned int branch_num = gimple_switch_num_labels (swtch);
77 tree min_case, max_case;
78 unsigned int i;
79 edge e, e_default, e_first;
80 edge_iterator ei;
81
82 m_switch = swtch;
83
84 /* The gimplifier has already sorted the cases by CASE_LOW and ensured there
85 is a default label which is the first in the vector.
86 Collect the bits we can deduce from the CFG. */
87 m_index_expr = gimple_switch_index (swtch);
88 m_switch_bb = gimple_bb (swtch);
89 e_default = gimple_switch_default_edge (cfun, swtch);
90 m_default_bb = e_default->dest;
91 m_default_prob = e_default->probability;
92
93 /* Get upper and lower bounds of case values, and the covered range. */
94 min_case = gimple_switch_label (swtch, 1);
95 max_case = gimple_switch_label (swtch, branch_num - 1);
96
97 m_range_min = CASE_LOW (min_case);
98 if (CASE_HIGH (max_case) != NULL_TREE)
99 m_range_max = CASE_HIGH (max_case);
100 else
101 m_range_max = CASE_LOW (max_case);
102
103 m_contiguous_range = true;
104 tree last = CASE_HIGH (min_case) ? CASE_HIGH (min_case) : m_range_min;
105 for (i = 2; i < branch_num; i++)
106 {
107 tree elt = gimple_switch_label (swtch, i);
108 if (wi::to_wide (last) + 1 != wi::to_wide (CASE_LOW (elt)))
109 {
110 m_contiguous_range = false;
111 break;
112 }
113 last = CASE_HIGH (elt) ? CASE_HIGH (elt) : CASE_LOW (elt);
114 }
115
116 if (m_contiguous_range)
117 e_first = gimple_switch_edge (cfun, swtch, 1);
118 else
119 e_first = e_default;
120
121 /* See if there is one common successor block for all branch
122 targets. If it exists, record it in FINAL_BB.
123 As an initial guess, start with the destination of the first
124 non-default case if the range is contiguous, or of the default case
125 otherwise, or with that block's successor if it is a forwarder block. */
126 if (! single_pred_p (e_first->dest))
127 m_final_bb = e_first->dest;
128 else if (single_succ_p (e_first->dest)
129 && ! single_pred_p (single_succ (e_first->dest)))
130 m_final_bb = single_succ (e_first->dest);
131 /* Require that all switch destinations are either that common
132 FINAL_BB or a forwarder to it, except for the default
133 case if contiguous range. */
134 if (m_final_bb)
135 FOR_EACH_EDGE (e, ei, m_switch_bb->succs)
136 {
137 if (e->dest == m_final_bb)
138 continue;
139
140 if (single_pred_p (e->dest)
141 && single_succ_p (e->dest)
142 && single_succ (e->dest) == m_final_bb)
143 continue;
144
145 if (e == e_default && m_contiguous_range)
146 {
147 m_default_case_nonstandard = true;
148 continue;
149 }
150
151 m_final_bb = NULL;
152 break;
153 }
154
155 m_range_size
156 = int_const_binop (MINUS_EXPR, m_range_max, m_range_min);
157
158 /* Get a count of the number of case labels. Single-valued case labels
159 simply count as one, but a case range counts double, since it may
160 require two compares if it gets lowered as a branching tree. */
161 m_count = 0;
162 for (i = 1; i < branch_num; i++)
163 {
164 tree elt = gimple_switch_label (swtch, i);
165 m_count++;
166 if (CASE_HIGH (elt)
167 && ! tree_int_cst_equal (CASE_LOW (elt), CASE_HIGH (elt)))
168 m_count++;
169 }
170
171 /* Get the number of unique non-default targets out of the GIMPLE_SWITCH
172 block. Assume a CFG cleanup would have already removed degenerate
173 switch statements; this allows us to just use EDGE_COUNT. */
174 m_uniq = EDGE_COUNT (gimple_bb (swtch)->succs) - 1;
175 }
176
177 /* Checks whether the range given by the individual case statements of the
178 switch statement isn't too big and whether the number of branches actually
179 satisfies the size of the new array. */
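/* For example, with a switch-conversion branch ratio of 8, a switch whose
   m_count is 10 is accepted only if its value range (max minus min) does
   not exceed 80; anything sparser is rejected here.  */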
180
181 bool
182 switch_conversion::check_range ()
183 {
184 gcc_assert (m_range_size);
185 if (!tree_fits_uhwi_p (m_range_size))
186 {
187 m_reason = "index range way too large or otherwise unusable";
188 return false;
189 }
190
191 if (tree_to_uhwi (m_range_size)
192 > ((unsigned) m_count * param_switch_conversion_branch_ratio))
193 {
194 m_reason = "the maximum range-branch ratio exceeded";
195 return false;
196 }
197
198 return true;
199 }
200
201 /* Checks whether all basic blocks other than the final BB are empty. */
202
203 bool
204 switch_conversion::check_all_empty_except_final ()
205 {
206 edge e, e_default = find_edge (m_switch_bb, m_default_bb);
207 edge_iterator ei;
208
209 FOR_EACH_EDGE (e, ei, m_switch_bb->succs)
210 {
211 if (e->dest == m_final_bb)
212 continue;
213
214 if (!empty_block_p (e->dest))
215 {
216 if (m_contiguous_range && e == e_default)
217 {
218 m_default_case_nonstandard = true;
219 continue;
220 }
221
222 m_reason = "bad case - a non-final BB not empty";
223 return false;
224 }
225 }
226
227 return true;
228 }
229
230 /* This function checks whether all required values in phi nodes in final_bb
231 are constants. Required values are those that correspond to a basic block
232 which is a part of the examined switch statement. It returns true if the
233 phi nodes are OK, otherwise false. */
234
235 bool
236 switch_conversion::check_final_bb ()
237 {
238 gphi_iterator gsi;
239
240 m_phi_count = 0;
241 for (gsi = gsi_start_phis (m_final_bb); !gsi_end_p (gsi); gsi_next (&gsi))
242 {
243 gphi *phi = gsi.phi ();
244 unsigned int i;
245
246 if (virtual_operand_p (gimple_phi_result (phi)))
247 continue;
248
249 m_phi_count++;
250
251 for (i = 0; i < gimple_phi_num_args (phi); i++)
252 {
253 basic_block bb = gimple_phi_arg_edge (phi, i)->src;
254
255 if (bb == m_switch_bb
256 || (single_pred_p (bb)
257 && single_pred (bb) == m_switch_bb
258 && (!m_default_case_nonstandard
259 || empty_block_p (bb))))
260 {
261 tree reloc, val;
262 const char *reason = NULL;
263
264 val = gimple_phi_arg_def (phi, i);
265 if (!is_gimple_ip_invariant (val))
266 reason = "non-invariant value from a case";
267 else
268 {
269 reloc = initializer_constant_valid_p (val, TREE_TYPE (val));
270 if ((flag_pic && reloc != null_pointer_node)
271 || (!flag_pic && reloc == NULL_TREE))
272 {
273 if (reloc)
274 reason
275 = "value from a case would need runtime relocations";
276 else
277 reason
278 = "value from a case is not a valid initializer";
279 }
280 }
281 if (reason)
282 {
283 /* For a contiguous range, we can allow a non-constant value
284 or one that needs relocation, as long as it is
285 only reachable from the default case. */
286 if (bb == m_switch_bb)
287 bb = m_final_bb;
288 if (!m_contiguous_range || bb != m_default_bb)
289 {
290 m_reason = reason;
291 return false;
292 }
293
294 unsigned int branch_num = gimple_switch_num_labels (m_switch);
295 for (unsigned int i = 1; i < branch_num; i++)
296 {
297 if (gimple_switch_label_bb (cfun, m_switch, i) == bb)
298 {
299 m_reason = reason;
300 return false;
301 }
302 }
303 m_default_case_nonstandard = true;
304 }
305 }
306 }
307 }
308
309 return true;
310 }
311
312 /* The following function allocates default_values, target_{in,out}_names and
313 constructors arrays. The last one is also populated with pointers to
314 vectors that will become constructors of new arrays. */
315
316 void
317 switch_conversion::create_temp_arrays ()
318 {
319 int i;
320
321 m_default_values = XCNEWVEC (tree, m_phi_count * 3);
322 /* ??? Macros do not support multi argument templates in their
323 argument list. We create a typedef to work around that problem. */
324 typedef vec<constructor_elt, va_gc> *vec_constructor_elt_gc;
325 m_constructors = XCNEWVEC (vec_constructor_elt_gc, m_phi_count);
326 m_target_inbound_names = m_default_values + m_phi_count;
327 m_target_outbound_names = m_target_inbound_names + m_phi_count;
328 for (i = 0; i < m_phi_count; i++)
329 vec_alloc (m_constructors[i], tree_to_uhwi (m_range_size) + 1);
330 }
331
332 /* Populate the array of default values in the order of phi nodes.
333 DEFAULT_CASE is the CASE_LABEL_EXPR for the default switch branch
334 if the range is non-contiguous or the default case has standard
335 structure, otherwise it is the first non-default case instead. */
336
337 void
338 switch_conversion::gather_default_values (tree default_case)
339 {
340 gphi_iterator gsi;
341 basic_block bb = label_to_block (cfun, CASE_LABEL (default_case));
342 edge e;
343 int i = 0;
344
345 gcc_assert (CASE_LOW (default_case) == NULL_TREE
346 || m_default_case_nonstandard);
347
348 if (bb == m_final_bb)
349 e = find_edge (m_switch_bb, bb);
350 else
351 e = single_succ_edge (bb);
352
353 for (gsi = gsi_start_phis (m_final_bb); !gsi_end_p (gsi); gsi_next (&gsi))
354 {
355 gphi *phi = gsi.phi ();
356 if (virtual_operand_p (gimple_phi_result (phi)))
357 continue;
358 tree val = PHI_ARG_DEF_FROM_EDGE (phi, e);
359 gcc_assert (val);
360 m_default_values[i++] = val;
361 }
362 }
363
364 /* The following function populates the vectors in the constructors array with
365 future contents of the static arrays. The vectors are populated in the
366 order of phi nodes. */
367
368 void
369 switch_conversion::build_constructors ()
370 {
371 unsigned i, branch_num = gimple_switch_num_labels (m_switch);
372 tree pos = m_range_min;
373 tree pos_one = build_int_cst (TREE_TYPE (pos), 1);
374
375 for (i = 1; i < branch_num; i++)
376 {
377 tree cs = gimple_switch_label (m_switch, i);
378 basic_block bb = label_to_block (cfun, CASE_LABEL (cs));
379 edge e;
380 tree high;
381 gphi_iterator gsi;
382 int j;
383
384 if (bb == m_final_bb)
385 e = find_edge (m_switch_bb, bb);
386 else
387 e = single_succ_edge (bb);
388 gcc_assert (e);
389
390 while (tree_int_cst_lt (pos, CASE_LOW (cs)))
391 {
392 int k;
393 for (k = 0; k < m_phi_count; k++)
394 {
395 constructor_elt elt;
396
397 elt.index = int_const_binop (MINUS_EXPR, pos, m_range_min);
398 elt.value
399 = unshare_expr_without_location (m_default_values[k]);
400 m_constructors[k]->quick_push (elt);
401 }
402
403 pos = int_const_binop (PLUS_EXPR, pos, pos_one);
404 }
405 gcc_assert (tree_int_cst_equal (pos, CASE_LOW (cs)));
406
407 j = 0;
408 if (CASE_HIGH (cs))
409 high = CASE_HIGH (cs);
410 else
411 high = CASE_LOW (cs);
412 for (gsi = gsi_start_phis (m_final_bb);
413 !gsi_end_p (gsi); gsi_next (&gsi))
414 {
415 gphi *phi = gsi.phi ();
416 if (virtual_operand_p (gimple_phi_result (phi)))
417 continue;
418 tree val = PHI_ARG_DEF_FROM_EDGE (phi, e);
419 tree low = CASE_LOW (cs);
420 pos = CASE_LOW (cs);
421
422 do
423 {
424 constructor_elt elt;
425
426 elt.index = int_const_binop (MINUS_EXPR, pos, m_range_min);
427 elt.value = unshare_expr_without_location (val);
428 m_constructors[j]->quick_push (elt);
429
430 pos = int_const_binop (PLUS_EXPR, pos, pos_one);
431 } while (!tree_int_cst_lt (high, pos)
432 && tree_int_cst_lt (low, pos));
433 j++;
434 }
435 }
436 }
437
438 /* If all values in the constructor vector are values of a linear function
439 a * x + b, then return true. When true, COEFF_A and COEFF_B are the
440 coefficients of the linear function. Note that equal values are a special
441 case of a linear function with a equal to zero. */
442
443 bool
444 switch_conversion::contains_linear_function_p (vec<constructor_elt, va_gc> *vec,
445 wide_int *coeff_a,
446 wide_int *coeff_b)
447 {
448 unsigned int i;
449 constructor_elt *elt;
450
451 gcc_assert (vec->length () >= 2);
452
453 /* Let's try to find any linear function a * x + b that can apply to
454 given values. 'a' can be calculated as follows:
455
456 a = (y2 - y1) / (x2 - x1) where x2 - x1 = 1 (consecutive case indices)
457 a = y2 - y1
458
459 and
460
461 b = y2 - a * x2
462
463 */
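/* For instance, with m_range_min == 0 and constructor values
   { 10, 12, 14, 16 } we get a = 12 - 10 = 2 and b = 12 - 2 * 1 = 10,
   and every entry indeed satisfies 2 * x + 10.  */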
464
465 tree elt0 = (*vec)[0].value;
466 tree elt1 = (*vec)[1].value;
467
468 if (TREE_CODE (elt0) != INTEGER_CST || TREE_CODE (elt1) != INTEGER_CST)
469 return false;
470
471 wide_int range_min
472 = wide_int::from (wi::to_wide (m_range_min),
473 TYPE_PRECISION (TREE_TYPE (elt0)),
474 TYPE_SIGN (TREE_TYPE (m_range_min)));
475 wide_int y1 = wi::to_wide (elt0);
476 wide_int y2 = wi::to_wide (elt1);
477 wide_int a = y2 - y1;
478 wide_int b = y2 - a * (range_min + 1);
479
480 /* Verify that all values fulfill the linear function. */
481 FOR_EACH_VEC_SAFE_ELT (vec, i, elt)
482 {
483 if (TREE_CODE (elt->value) != INTEGER_CST)
484 return false;
485
486 wide_int value = wi::to_wide (elt->value);
487 if (a * range_min + b != value)
488 return false;
489
490 ++range_min;
491 }
492
493 *coeff_a = a;
494 *coeff_b = b;
495
496 return true;
497 }
498
499 /* Return type which should be used for array elements, either TYPE's
500 main variant or, for integral types, some smaller integral type
501 that can still hold all the constants. */
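/* For instance, when every constructor value fits in eight bits and the
   switch is large enough, an array that would otherwise use a 32-bit
   element type can be narrowed to a QImode element type, shrinking the
   emitted CSWTCH table to roughly a quarter of its size.  */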
502
503 tree
504 switch_conversion::array_value_type (tree type, int num)
505 {
506 unsigned int i, len = vec_safe_length (m_constructors[num]);
507 constructor_elt *elt;
508 int sign = 0;
509 tree smaller_type;
510
511 /* Types with alignments greater than their size can reach here, e.g. out of
512 SRA. We couldn't use these as an array component type so get back to the
513 main variant first, which, for our purposes, is fine for other types as
514 well. */
515
516 type = TYPE_MAIN_VARIANT (type);
517
518 if (!INTEGRAL_TYPE_P (type))
519 return type;
520
521 scalar_int_mode type_mode = SCALAR_INT_TYPE_MODE (type);
522 scalar_int_mode mode = get_narrowest_mode (type_mode);
523 if (GET_MODE_SIZE (type_mode) <= GET_MODE_SIZE (mode))
524 return type;
525
526 if (len < (optimize_bb_for_size_p (gimple_bb (m_switch)) ? 2 : 32))
527 return type;
528
529 FOR_EACH_VEC_SAFE_ELT (m_constructors[num], i, elt)
530 {
531 wide_int cst;
532
533 if (TREE_CODE (elt->value) != INTEGER_CST)
534 return type;
535
536 cst = wi::to_wide (elt->value);
537 while (1)
538 {
539 unsigned int prec = GET_MODE_BITSIZE (mode);
540 if (prec > HOST_BITS_PER_WIDE_INT)
541 return type;
542
543 if (sign >= 0 && cst == wi::zext (cst, prec))
544 {
545 if (sign == 0 && cst == wi::sext (cst, prec))
546 break;
547 sign = 1;
548 break;
549 }
550 if (sign <= 0 && cst == wi::sext (cst, prec))
551 {
552 sign = -1;
553 break;
554 }
555
556 if (sign == 1)
557 sign = 0;
558
559 if (!GET_MODE_WIDER_MODE (mode).exists (&mode)
560 || GET_MODE_SIZE (mode) >= GET_MODE_SIZE (type_mode))
561 return type;
562 }
563 }
564
565 if (sign == 0)
566 sign = TYPE_UNSIGNED (type) ? 1 : -1;
567 smaller_type = lang_hooks.types.type_for_mode (mode, sign >= 0);
568 if (GET_MODE_SIZE (type_mode)
569 <= GET_MODE_SIZE (SCALAR_INT_TYPE_MODE (smaller_type)))
570 return type;
571
572 return smaller_type;
573 }
574
575 /* Create an appropriate array type and declaration and assemble a static
576 array variable. Also create a load statement that initializes
577 the variable in question with a value from the static array. NUM is
578 the index into the arrays of constructors, default values and target
579 SSA names for this particular array. ARR_INDEX_TYPE is the type of the
580 index of the new array, PHI is the phi node of the final BB that
581 corresponds to the value that will be loaded from the created array.
582 TIDX is an SSA name of a temporary variable holding the index for loads
583 from the new array. */
585
586 void
587 switch_conversion::build_one_array (int num, tree arr_index_type,
588 gphi *phi, tree tidx)
589 {
590 tree name;
591 gimple *load;
592 gimple_stmt_iterator gsi = gsi_for_stmt (m_switch);
593 location_t loc = gimple_location (m_switch);
594
595 gcc_assert (m_default_values[num]);
596
597 name = copy_ssa_name (PHI_RESULT (phi));
598 m_target_inbound_names[num] = name;
599
600 vec<constructor_elt, va_gc> *constructor = m_constructors[num];
601 wide_int coeff_a, coeff_b;
602 bool linear_p = contains_linear_function_p (constructor, &coeff_a, &coeff_b);
603 tree type;
604 if (linear_p
605 && (type = range_check_type (TREE_TYPE ((*constructor)[0].value))))
606 {
607 if (dump_file && coeff_a.to_uhwi () > 0)
608 fprintf (dump_file, "Linear transformation with A = %" PRId64
609 " and B = %" PRId64 "\n", coeff_a.to_shwi (),
610 coeff_b.to_shwi ());
611
612 /* We must use type of constructor values. */
613 gimple_seq seq = NULL;
614 tree tmp = gimple_convert (&seq, type, m_index_expr);
615 tree tmp2 = gimple_build (&seq, MULT_EXPR, type,
616 wide_int_to_tree (type, coeff_a), tmp);
617 tree tmp3 = gimple_build (&seq, PLUS_EXPR, type, tmp2,
618 wide_int_to_tree (type, coeff_b));
619 tree tmp4 = gimple_convert (&seq, TREE_TYPE (name), tmp3);
620 gsi_insert_seq_before (&gsi, seq, GSI_SAME_STMT);
621 load = gimple_build_assign (name, tmp4);
622 }
623 else
624 {
625 tree array_type, ctor, decl, value_type, fetch, default_type;
626
627 default_type = TREE_TYPE (m_default_values[num]);
628 value_type = array_value_type (default_type, num);
629 array_type = build_array_type (value_type, arr_index_type);
630 if (default_type != value_type)
631 {
632 unsigned int i;
633 constructor_elt *elt;
634
635 FOR_EACH_VEC_SAFE_ELT (constructor, i, elt)
636 elt->value = fold_convert (value_type, elt->value);
637 }
638 ctor = build_constructor (array_type, constructor);
639 TREE_CONSTANT (ctor) = true;
640 TREE_STATIC (ctor) = true;
641
642 decl = build_decl (loc, VAR_DECL, NULL_TREE, array_type);
643 TREE_STATIC (decl) = 1;
644 DECL_INITIAL (decl) = ctor;
645
646 DECL_NAME (decl) = create_tmp_var_name ("CSWTCH");
647 DECL_ARTIFICIAL (decl) = 1;
648 DECL_IGNORED_P (decl) = 1;
649 TREE_CONSTANT (decl) = 1;
650 TREE_READONLY (decl) = 1;
651 DECL_IGNORED_P (decl) = 1;
652 if (offloading_function_p (cfun->decl))
653 DECL_ATTRIBUTES (decl)
654 = tree_cons (get_identifier ("omp declare target"), NULL_TREE,
655 NULL_TREE);
656 varpool_node::finalize_decl (decl);
657
658 fetch = build4 (ARRAY_REF, value_type, decl, tidx, NULL_TREE,
659 NULL_TREE);
660 if (default_type != value_type)
661 {
662 fetch = fold_convert (default_type, fetch);
663 fetch = force_gimple_operand_gsi (&gsi, fetch, true, NULL_TREE,
664 true, GSI_SAME_STMT);
665 }
666 load = gimple_build_assign (name, fetch);
667 }
668
669 gsi_insert_before (&gsi, load, GSI_SAME_STMT);
670 update_stmt (load);
671 m_arr_ref_last = load;
672 }
673
674 /* Builds and initializes static arrays initialized with values gathered from
675 the switch statement. Also creates statements that load values from
676 them. */
677
678 void
679 switch_conversion::build_arrays ()
680 {
681 tree arr_index_type;
682 tree tidx, sub, utype;
683 gimple *stmt;
684 gimple_stmt_iterator gsi;
685 gphi_iterator gpi;
686 int i;
687 location_t loc = gimple_location (m_switch);
688
689 gsi = gsi_for_stmt (m_switch);
690
691 /* Make sure we do not generate arithmetic in a subrange type. */
692 utype = TREE_TYPE (m_index_expr);
693 if (TREE_TYPE (utype))
694 utype = lang_hooks.types.type_for_mode (TYPE_MODE (TREE_TYPE (utype)), 1);
695 else
696 utype = lang_hooks.types.type_for_mode (TYPE_MODE (utype), 1);
697
698 arr_index_type = build_index_type (m_range_size);
699 tidx = make_ssa_name (utype);
700 sub = fold_build2_loc (loc, MINUS_EXPR, utype,
701 fold_convert_loc (loc, utype, m_index_expr),
702 fold_convert_loc (loc, utype, m_range_min));
703 sub = force_gimple_operand_gsi (&gsi, sub,
704 false, NULL, true, GSI_SAME_STMT);
705 stmt = gimple_build_assign (tidx, sub);
706
707 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
708 update_stmt (stmt);
709 m_arr_ref_first = stmt;
710
711 for (gpi = gsi_start_phis (m_final_bb), i = 0;
712 !gsi_end_p (gpi); gsi_next (&gpi))
713 {
714 gphi *phi = gpi.phi ();
715 if (!virtual_operand_p (gimple_phi_result (phi)))
716 build_one_array (i++, arr_index_type, phi, tidx);
717 else
718 {
719 edge e;
720 edge_iterator ei;
721 FOR_EACH_EDGE (e, ei, m_switch_bb->succs)
722 {
723 if (e->dest == m_final_bb)
724 break;
725 if (!m_default_case_nonstandard
726 || e->dest != m_default_bb)
727 {
728 e = single_succ_edge (e->dest);
729 break;
730 }
731 }
732 gcc_assert (e && e->dest == m_final_bb);
733 m_target_vop = PHI_ARG_DEF_FROM_EDGE (phi, e);
734 }
735 }
736 }
737
738 /* Generates and appropriately inserts loads of default values at the position
739 given by GSI. Returns the last inserted statement. */
740
741 gassign *
742 switch_conversion::gen_def_assigns (gimple_stmt_iterator *gsi)
743 {
744 int i;
745 gassign *assign = NULL;
746
747 for (i = 0; i < m_phi_count; i++)
748 {
749 tree name = copy_ssa_name (m_target_inbound_names[i]);
750 m_target_outbound_names[i] = name;
751 assign = gimple_build_assign (name, m_default_values[i]);
752 gsi_insert_before (gsi, assign, GSI_SAME_STMT);
753 update_stmt (assign);
754 }
755 return assign;
756 }
757
758 /* Deletes the unused bbs and edges that now contain the switch statement and
759 its empty branch bbs. BBD is the now dead BB containing
760 the original switch statement, FINAL is the last BB of the converted
761 switch statement (in terms of succession). */
762
763 void
764 switch_conversion::prune_bbs (basic_block bbd, basic_block final,
765 basic_block default_bb)
766 {
767 edge_iterator ei;
768 edge e;
769
770 for (ei = ei_start (bbd->succs); (e = ei_safe_edge (ei)); )
771 {
772 basic_block bb;
773 bb = e->dest;
774 remove_edge (e);
775 if (bb != final && bb != default_bb)
776 delete_basic_block (bb);
777 }
778 delete_basic_block (bbd);
779 }
780
781 /* Add values to phi nodes in final_bb for the two new edges. E1F is the edge
782 from the basic block loading values from an array and E2F from the basic
783 block loading default values. BBF is the last switch basic block (see the
784 bbf description in the comment below). */
785
786 void
787 switch_conversion::fix_phi_nodes (edge e1f, edge e2f, basic_block bbf)
788 {
789 gphi_iterator gsi;
790 int i;
791
792 for (gsi = gsi_start_phis (bbf), i = 0;
793 !gsi_end_p (gsi); gsi_next (&gsi))
794 {
795 gphi *phi = gsi.phi ();
796 tree inbound, outbound;
797 if (virtual_operand_p (gimple_phi_result (phi)))
798 inbound = outbound = m_target_vop;
799 else
800 {
801 inbound = m_target_inbound_names[i];
802 outbound = m_target_outbound_names[i++];
803 }
804 add_phi_arg (phi, inbound, e1f, UNKNOWN_LOCATION);
805 if (!m_default_case_nonstandard)
806 add_phi_arg (phi, outbound, e2f, UNKNOWN_LOCATION);
807 }
808 }
809
810 /* Creates a check whether the switch expression value actually falls into the
811 range given by all the cases. If it does not, the temporaries are loaded
812 with default values instead. */
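/* Roughly, the code below splits the original switch block (bb0) so that it
   ends in "if (tidx <= range)", with the in-range path leading to a block
   (bb1) that loads from the CSWTCH arrays and falls through to the former
   final block (bbF), and the out-of-range path leading either to a block
   (bb2) that loads the default values or, for a nonstandard default case,
   straight to the original default block; the now dead block holding the
   old switch (bbD) is pruned and bbF's PHIs are rewired.  */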
813
814 void
815 switch_conversion::gen_inbound_check ()
816 {
817 tree label_decl1 = create_artificial_label (UNKNOWN_LOCATION);
818 tree label_decl2 = create_artificial_label (UNKNOWN_LOCATION);
819 tree label_decl3 = create_artificial_label (UNKNOWN_LOCATION);
820 glabel *label1, *label2, *label3;
821 tree utype, tidx;
822 tree bound;
823
824 gcond *cond_stmt;
825
826 gassign *last_assign = NULL;
827 gimple_stmt_iterator gsi;
828 basic_block bb0, bb1, bb2, bbf, bbd;
829 edge e01 = NULL, e02, e21, e1d, e1f, e2f;
830 location_t loc = gimple_location (m_switch);
831
832 gcc_assert (m_default_values);
833
834 bb0 = gimple_bb (m_switch);
835
836 tidx = gimple_assign_lhs (m_arr_ref_first);
837 utype = TREE_TYPE (tidx);
838
839 /* (end of) block 0 */
840 gsi = gsi_for_stmt (m_arr_ref_first);
841 gsi_next (&gsi);
842
843 bound = fold_convert_loc (loc, utype, m_range_size);
844 cond_stmt = gimple_build_cond (LE_EXPR, tidx, bound, NULL_TREE, NULL_TREE);
845 gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT);
846 update_stmt (cond_stmt);
847
848 /* block 2 */
849 if (!m_default_case_nonstandard)
850 {
851 label2 = gimple_build_label (label_decl2);
852 gsi_insert_before (&gsi, label2, GSI_SAME_STMT);
853 last_assign = gen_def_assigns (&gsi);
854 }
855
856 /* block 1 */
857 label1 = gimple_build_label (label_decl1);
858 gsi_insert_before (&gsi, label1, GSI_SAME_STMT);
859
860 /* block F */
861 gsi = gsi_start_bb (m_final_bb);
862 label3 = gimple_build_label (label_decl3);
863 gsi_insert_before (&gsi, label3, GSI_SAME_STMT);
864
865 /* cfg fix */
866 e02 = split_block (bb0, cond_stmt);
867 bb2 = e02->dest;
868
869 if (m_default_case_nonstandard)
870 {
871 bb1 = bb2;
872 bb2 = m_default_bb;
873 e01 = e02;
874 e01->flags = EDGE_TRUE_VALUE;
875 e02 = make_edge (bb0, bb2, EDGE_FALSE_VALUE);
876 edge e_default = find_edge (bb1, bb2);
877 for (gphi_iterator gsi = gsi_start_phis (bb2);
878 !gsi_end_p (gsi); gsi_next (&gsi))
879 {
880 gphi *phi = gsi.phi ();
881 tree arg = PHI_ARG_DEF_FROM_EDGE (phi, e_default);
882 add_phi_arg (phi, arg, e02,
883 gimple_phi_arg_location_from_edge (phi, e_default));
884 }
885 /* Partially fix the dominator tree, if it is available. */
886 if (dom_info_available_p (CDI_DOMINATORS))
887 redirect_immediate_dominators (CDI_DOMINATORS, bb1, bb0);
888 }
889 else
890 {
891 e21 = split_block (bb2, last_assign);
892 bb1 = e21->dest;
893 remove_edge (e21);
894 }
895
896 e1d = split_block (bb1, m_arr_ref_last);
897 bbd = e1d->dest;
898 remove_edge (e1d);
899
900 /* Flags and profiles of the edge for in-range values. */
901 if (!m_default_case_nonstandard)
902 e01 = make_edge (bb0, bb1, EDGE_TRUE_VALUE);
903 e01->probability = m_default_prob.invert ();
904
905 /* Flags and profiles of the edge taking care of out-of-range values. */
906 e02->flags &= ~EDGE_FALLTHRU;
907 e02->flags |= EDGE_FALSE_VALUE;
908 e02->probability = m_default_prob;
909
910 bbf = m_final_bb;
911
912 e1f = make_edge (bb1, bbf, EDGE_FALLTHRU);
913 e1f->probability = profile_probability::always ();
914
915 if (m_default_case_nonstandard)
916 e2f = NULL;
917 else
918 {
919 e2f = make_edge (bb2, bbf, EDGE_FALLTHRU);
920 e2f->probability = profile_probability::always ();
921 }
922
923 /* frequencies of the new BBs */
924 bb1->count = e01->count ();
925 bb2->count = e02->count ();
926 if (!m_default_case_nonstandard)
927 bbf->count = e1f->count () + e2f->count ();
928
929 /* Tidy blocks that have become unreachable. */
930 prune_bbs (bbd, m_final_bb,
931 m_default_case_nonstandard ? m_default_bb : NULL);
932
933 /* Fixup the PHI nodes in bbF. */
934 fix_phi_nodes (e1f, e2f, bbf);
935
936 /* Fix the dominator tree, if it is available. */
937 if (dom_info_available_p (CDI_DOMINATORS))
938 {
939 vec<basic_block> bbs_to_fix_dom;
940
941 set_immediate_dominator (CDI_DOMINATORS, bb1, bb0);
942 if (!m_default_case_nonstandard)
943 set_immediate_dominator (CDI_DOMINATORS, bb2, bb0);
944 if (! get_immediate_dominator (CDI_DOMINATORS, bbf))
945 /* If bbD was the immediate dominator ... */
946 set_immediate_dominator (CDI_DOMINATORS, bbf, bb0);
947
948 bbs_to_fix_dom.create (3 + (bb2 != bbf));
949 bbs_to_fix_dom.quick_push (bb0);
950 bbs_to_fix_dom.quick_push (bb1);
951 if (bb2 != bbf)
952 bbs_to_fix_dom.quick_push (bb2);
953 bbs_to_fix_dom.quick_push (bbf);
954
955 iterate_fix_dominators (CDI_DOMINATORS, bbs_to_fix_dom, true);
956 bbs_to_fix_dom.release ();
957 }
958 }
959
960 /* The following function is invoked on every switch statement (the current
961 one is given in SWTCH) and runs the individual phases of switch
962 conversion on it one after another until one fails or the conversion
963 is completed. On success, m_reason is NULL, otherwise it points
964 to a string with the reason why the conversion failed. */
965
966 void
967 switch_conversion::expand (gswitch *swtch)
968 {
969 /* Group case labels so that we get the right results from the heuristics
970 that decide on the code generation approach for this switch. */
971 m_cfg_altered |= group_case_labels_stmt (swtch);
972
973 /* If this switch is now a degenerate case with only a default label,
974 there is nothing left for us to do. */
975 if (gimple_switch_num_labels (swtch) < 2)
976 {
977 m_reason = "switch is a degenerate case";
978 return;
979 }
980
981 collect (swtch);
982
983 /* No error markers should reach here (they should be filtered out
984 during gimplification). */
985 gcc_checking_assert (TREE_TYPE (m_index_expr) != error_mark_node);
986
987 /* Prefer bit test if possible. */
988 if (tree_fits_uhwi_p (m_range_size)
989 && bit_test_cluster::can_be_handled (tree_to_uhwi (m_range_size), m_uniq)
990 && bit_test_cluster::is_beneficial (m_count, m_uniq))
991 {
992 m_reason = "expanding as bit test is preferable";
993 return;
994 }
995
996 if (m_uniq <= 2)
997 {
998 /* This will be expanded as a decision tree. */
999 m_reason = "expanding as jumps is preferable";
1000 return;
1001 }
1002
1003 /* If there is no common successor, we cannot do the transformation. */
1004 if (!m_final_bb)
1005 {
1006 m_reason = "no common successor to all case label target blocks found";
1007 return;
1008 }
1009
1010 /* Check the case label values are within reasonable range: */
1011 if (!check_range ())
1012 {
1013 gcc_assert (m_reason);
1014 return;
1015 }
1016
1017 /* For all the cases, see whether they are empty, whether the assignments
1018 they represent are constant, and so on... */
1019 if (!check_all_empty_except_final ())
1020 {
1021 gcc_assert (m_reason);
1022 return;
1023 }
1024 if (!check_final_bb ())
1025 {
1026 gcc_assert (m_reason);
1027 return;
1028 }
1029
1030 /* At this point all checks have passed and we can proceed with the
1031 transformation. */
1032
1033 create_temp_arrays ();
1034 gather_default_values (m_default_case_nonstandard
1035 ? gimple_switch_label (swtch, 1)
1036 : gimple_switch_default_label (swtch));
1037 build_constructors ();
1038
1039 build_arrays (); /* Build the static arrays and assignments. */
1040 gen_inbound_check (); /* Build the bounds check. */
1041
1042 m_cfg_altered = true;
1043 }
1044
1045 /* Destructor. */
1046
1047 switch_conversion::~switch_conversion ()
1048 {
1049 XDELETEVEC (m_constructors);
1050 XDELETEVEC (m_default_values);
1051 }
1052
1053 /* Constructor. */
1054
1055 group_cluster::group_cluster (vec<cluster *> &clusters,
1056 unsigned start, unsigned end)
1057 {
1058 gcc_checking_assert (end - start + 1 >= 1);
1059 m_prob = profile_probability::never ();
1060 m_cases.create (end - start + 1);
1061 for (unsigned i = start; i <= end; i++)
1062 {
1063 m_cases.quick_push (static_cast<simple_cluster *> (clusters[i]));
1064 m_prob += clusters[i]->m_prob;
1065 }
1066 m_subtree_prob = m_prob;
1067 }
1068
1069 /* Destructor. */
1070
1071 group_cluster::~group_cluster ()
1072 {
1073 for (unsigned i = 0; i < m_cases.length (); i++)
1074 delete m_cases[i];
1075
1076 m_cases.release ();
1077 }
1078
1079 /* Dump content of a cluster. */
1080
1081 void
1082 group_cluster::dump (FILE *f, bool details)
1083 {
1084 unsigned total_values = 0;
1085 for (unsigned i = 0; i < m_cases.length (); i++)
1086 total_values += m_cases[i]->get_range (m_cases[i]->get_low (),
1087 m_cases[i]->get_high ());
1088
1089 unsigned comparison_count = 0;
1090 for (unsigned i = 0; i < m_cases.length (); i++)
1091 {
1092 simple_cluster *sc = static_cast<simple_cluster *> (m_cases[i]);
1093 comparison_count += sc->m_range_p ? 2 : 1;
1094 }
1095
1096 unsigned HOST_WIDE_INT range = get_range (get_low (), get_high ());
1097 fprintf (f, "%s", get_type () == JUMP_TABLE ? "JT" : "BT");
1098
1099 if (details)
1100 fprintf (f, "(values:%d comparisons:%d range:" HOST_WIDE_INT_PRINT_DEC
1101 " density: %.2f%%)", total_values, comparison_count, range,
1102 100.0f * comparison_count / range);
1103
1104 fprintf (f, ":");
1105 PRINT_CASE (f, get_low ());
1106 fprintf (f, "-");
1107 PRINT_CASE (f, get_high ());
1108 fprintf (f, " ");
1109 }
1110
1111 /* Emit GIMPLE code to handle the cluster. */
1112
1113 void
1114 jump_table_cluster::emit (tree index_expr, tree,
1115 tree default_label_expr, basic_block default_bb)
1116 {
1117 unsigned HOST_WIDE_INT range = get_range (get_low (), get_high ());
1118 unsigned HOST_WIDE_INT nondefault_range = 0;
1119
1120 /* For a jump table we just emit a new gswitch statement that will
1121 be lowered later to a jump table. */
1122 auto_vec <tree> labels;
1123 labels.create (m_cases.length ());
1124
1125 make_edge (m_case_bb, default_bb, 0);
1126 for (unsigned i = 0; i < m_cases.length (); i++)
1127 {
1128 labels.quick_push (unshare_expr (m_cases[i]->m_case_label_expr));
1129 make_edge (m_case_bb, m_cases[i]->m_case_bb, 0);
1130 }
1131
1132 gswitch *s = gimple_build_switch (index_expr,
1133 unshare_expr (default_label_expr), labels);
1134 gimple_stmt_iterator gsi = gsi_start_bb (m_case_bb);
1135 gsi_insert_after (&gsi, s, GSI_NEW_STMT);
1136
1137 /* Set up even probabilities for all cases. */
1138 for (unsigned i = 0; i < m_cases.length (); i++)
1139 {
1140 simple_cluster *sc = static_cast<simple_cluster *> (m_cases[i]);
1141 edge case_edge = find_edge (m_case_bb, sc->m_case_bb);
1142 unsigned HOST_WIDE_INT case_range
1143 = sc->get_range (sc->get_low (), sc->get_high ());
1144 nondefault_range += case_range;
1145
1146 /* case_edge->aux is number of values in a jump-table that are covered
1147 by the case_edge. */
1148 case_edge->aux = (void *) ((intptr_t) (case_edge->aux) + case_range);
1149 }
1150
1151 edge default_edge = gimple_switch_default_edge (cfun, s);
1152 default_edge->probability = profile_probability::never ();
1153
1154 for (unsigned i = 0; i < m_cases.length (); i++)
1155 {
1156 simple_cluster *sc = static_cast<simple_cluster *> (m_cases[i]);
1157 edge case_edge = find_edge (m_case_bb, sc->m_case_bb);
1158 case_edge->probability
1159 = profile_probability::always ().apply_scale ((intptr_t)case_edge->aux,
1160 range);
1161 }
1162
1163 /* The default edge gets the complement of the non-default values' share of the range. */
1164 default_edge->probability
1165 += profile_probability::always ().apply_scale (nondefault_range,
1166 range).invert ();
1167
1168 switch_decision_tree::reset_out_edges_aux (s);
1169 }
1170
1171 /* Find jump tables of given CLUSTERS, where all members of the vector
1172 are of type simple_cluster. New clusters are returned. */
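/* A sketch of the algorithm below: min[i] records, for the prefix of the
   first i simple clusters, the smallest achievable number of resulting
   clusters together with the start of the last cluster in that solution.
   For every i we try each split point j and ask whether clusters j .. i-1
   could form one jump table; the table of min_cluster_item entries is then
   walked backwards to materialize the chosen clusters.  */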
1173
1174 vec<cluster *>
1175 jump_table_cluster::find_jump_tables (vec<cluster *> &clusters)
1176 {
1177 if (!is_enabled ())
1178 return clusters.copy ();
1179
1180 unsigned l = clusters.length ();
1181 auto_vec<min_cluster_item> min;
1182 min.reserve (l + 1);
1183
1184 min.quick_push (min_cluster_item (0, 0, 0));
1185
1186 for (unsigned i = 1; i <= l; i++)
1187 {
1188 /* Set minimal # of clusters with i-th item to infinity. */
1189 min.quick_push (min_cluster_item (INT_MAX, INT_MAX, INT_MAX));
1190
1191 for (unsigned j = 0; j < i; j++)
1192 {
1193 unsigned HOST_WIDE_INT s = min[j].m_non_jt_cases;
1194 if (i - j < case_values_threshold ())
1195 s += i - j;
1196
1197 /* On equal cluster counts, prefer the candidate that leaves fewer cases outside of jump tables. */
1198 if ((min[j].m_count + 1 < min[i].m_count
1199 || (min[j].m_count + 1 == min[i].m_count
1200 && s < min[i].m_non_jt_cases))
1201 && can_be_handled (clusters, j, i - 1))
1202 min[i] = min_cluster_item (min[j].m_count + 1, j, s);
1203 }
1204
1205 gcc_checking_assert (min[i].m_count != INT_MAX);
1206 }
1207
1208 /* No result. */
1209 if (min[l].m_count == l)
1210 return clusters.copy ();
1211
1212 vec<cluster *> output;
1213 output.create (4);
1214
1215 /* Find and build the clusters. */
1216 for (unsigned int end = l;;)
1217 {
1218 int start = min[end].m_start;
1219
1220 /* Do not allow clusters with a small number of cases. */
1221 if (is_beneficial (clusters, start, end - 1))
1222 output.safe_push (new jump_table_cluster (clusters, start, end - 1));
1223 else
1224 for (int i = end - 1; i >= start; i--)
1225 output.safe_push (clusters[i]);
1226
1227 end = start;
1228
1229 if (start <= 0)
1230 break;
1231 }
1232
1233 output.reverse ();
1234 return output;
1235 }
1236
1237 /* Return true when cluster starting at START and ending at END (inclusive)
1238 can build a jump-table. */
1239
1240 bool
1241 jump_table_cluster::can_be_handled (const vec<cluster *> &clusters,
1242 unsigned start, unsigned end)
1243 {
1244 /* If the switch is relatively small such that the cost of one
1245 indirect jump on the target is higher than the cost of a
1246 decision tree, go with the decision tree.
1247
1248 If range of values is much bigger than number of values,
1249 or if it is too large to represent in a HOST_WIDE_INT,
1250 make a sequence of conditional branches instead of a dispatch.
1251
1252 The definition of "much bigger" depends on whether we are
1253 optimizing for size or for speed.
1254
1255 For algorithm correctness, jump table for a single case must return
1256 true. We bail out in is_beneficial if it's called just for
1257 a single case. */
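/* As a worked example of the density test below: with a growth-ratio
   parameter of 800 (i.e. 8x), a candidate spanning a range of 64 values is
   accepted as long as it contributes at least 8 comparisons, since
   100 * 64 <= 800 * 8.  */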
1258 if (start == end)
1259 return true;
1260
1261 unsigned HOST_WIDE_INT max_ratio
1262 = (optimize_insn_for_size_p ()
1263 ? param_jump_table_max_growth_ratio_for_size
1264 : param_jump_table_max_growth_ratio_for_speed);
1265 unsigned HOST_WIDE_INT range = get_range (clusters[start]->get_low (),
1266 clusters[end]->get_high ());
1267 /* Check overflow. */
1268 if (range == 0)
1269 return false;
1270
1271 unsigned HOST_WIDE_INT comparison_count = 0;
1272 for (unsigned i = start; i <= end; i++)
1273 {
1274 simple_cluster *sc = static_cast<simple_cluster *> (clusters[i]);
1275 comparison_count += sc->m_range_p ? 2 : 1;
1276 }
1277
1278 unsigned HOST_WIDE_INT lhs = 100 * range;
1279 if (lhs < range)
1280 return false;
1281
1282 return lhs <= max_ratio * comparison_count;
1283 }
1284
1285 /* Return true if cluster starting at START and ending at END (inclusive)
1286 is a profitable transformation. */
1287
1288 bool
1289 jump_table_cluster::is_beneficial (const vec<cluster *> &,
1290 unsigned start, unsigned end)
1291 {
1292 /* Single case bail out. */
1293 if (start == end)
1294 return false;
1295
1296 return end - start + 1 >= case_values_threshold ();
1297 }
1298
1299 /* Find bit tests of given CLUSTERS, where all members of the vector
1300 are of type simple_cluster. New clusters are returned. */
1301
1302 vec<cluster *>
1303 bit_test_cluster::find_bit_tests (vec<cluster *> &clusters)
1304 {
1305 unsigned l = clusters.length ();
1306 auto_vec<min_cluster_item> min;
1307 min.reserve (l + 1);
1308
1309 min.quick_push (min_cluster_item (0, 0, 0));
1310
1311 for (unsigned i = 1; i <= l; i++)
1312 {
1313 /* Set minimal # of clusters with i-th item to infinite. */
1314 min.quick_push (min_cluster_item (INT_MAX, INT_MAX, INT_MAX));
1315
1316 for (unsigned j = 0; j < i; j++)
1317 {
1318 if (min[j].m_count + 1 < min[i].m_count
1319 && can_be_handled (clusters, j, i - 1))
1320 min[i] = min_cluster_item (min[j].m_count + 1, j, INT_MAX);
1321 }
1322
1323 gcc_checking_assert (min[i].m_count != INT_MAX);
1324 }
1325
1326 /* No result. */
1327 if (min[l].m_count == l)
1328 return clusters.copy ();
1329
1330 vec<cluster *> output;
1331 output.create (4);
1332
1333 /* Find and build the clusters. */
1334 for (unsigned end = l;;)
1335 {
1336 int start = min[end].m_start;
1337
1338 if (is_beneficial (clusters, start, end - 1))
1339 {
1340 bool entire = start == 0 && end == clusters.length ();
1341 output.safe_push (new bit_test_cluster (clusters, start, end - 1,
1342 entire));
1343 }
1344 else
1345 for (int i = end - 1; i >= start; i--)
1346 output.safe_push (clusters[i]);
1347
1348 end = start;
1349
1350 if (start <= 0)
1351 break;
1352 }
1353
1354 output.reverse ();
1355 return output;
1356 }
1357
1358 /* Return true when RANGE of case values with UNIQ labels
1359 can build a bit test. */
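/* For example, with a 64-bit word_mode, a cluster covering the values
   0 .. 31 that branches to at most three distinct destinations qualifies,
   while any cluster whose range reaches the word size, or one with four or
   more distinct destinations, is rejected.  */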
1360
1361 bool
1362 bit_test_cluster::can_be_handled (unsigned HOST_WIDE_INT range,
1363 unsigned int uniq)
1364 {
1365 /* Check overflow. */
1366 if (range == 0)
1367 return false;
1368
1369 if (range >= GET_MODE_BITSIZE (word_mode))
1370 return false;
1371
1372 return uniq <= 3;
1373 }
1374
1375 /* Return true when cluster starting at START and ending at END (inclusive)
1376 can build a bit test. */
1377
1378 bool
1379 bit_test_cluster::can_be_handled (const vec<cluster *> &clusters,
1380 unsigned start, unsigned end)
1381 {
1382 /* For algorithm correctness, bit test for a single case must return
1383 true. We bail out in is_beneficial if it's called just for
1384 a single case. */
1385 if (start == end)
1386 return true;
1387
1388 unsigned HOST_WIDE_INT range = get_range (clusters[start]->get_low (),
1389 clusters[end]->get_high ());
1390 auto_bitmap dest_bbs;
1391
1392 for (unsigned i = start; i <= end; i++)
1393 {
1394 simple_cluster *sc = static_cast<simple_cluster *> (clusters[i]);
1395 bitmap_set_bit (dest_bbs, sc->m_case_bb->index);
1396 }
1397
1398 return can_be_handled (range, bitmap_count_bits (dest_bbs));
1399 }
1400
1401 /* Return true when COUNT cases with UNIQ distinct target labels make the
1402 bit test transformation beneficial. */
1403
1404 bool
1405 bit_test_cluster::is_beneficial (unsigned count, unsigned uniq)
1406 {
1407 return (((uniq == 1 && count >= 3)
1408 || (uniq == 2 && count >= 5)
1409 || (uniq == 3 && count >= 6)));
1410 }
1411
1412 /* Return true if cluster starting at START and ending at END (inclusive)
1413 is a profitable transformation. */
1414
1415 bool
1416 bit_test_cluster::is_beneficial (const vec<cluster *> &clusters,
1417 unsigned start, unsigned end)
1418 {
1419 /* Single case bail out. */
1420 if (start == end)
1421 return false;
1422
1423 auto_bitmap dest_bbs;
1424
1425 for (unsigned i = start; i <= end; i++)
1426 {
1427 simple_cluster *sc = static_cast<simple_cluster *> (clusters[i]);
1428 bitmap_set_bit (dest_bbs, sc->m_case_bb->index);
1429 }
1430
1431 unsigned uniq = bitmap_count_bits (dest_bbs);
1432 unsigned count = end - start + 1;
1433 return is_beneficial (count, uniq);
1434 }
1435
1436 /* Comparison function for qsort to order bit tests by decreasing
1437 probability of execution. */
1438
1439 int
1440 case_bit_test::cmp (const void *p1, const void *p2)
1441 {
1442 const case_bit_test *const d1 = (const case_bit_test *) p1;
1443 const case_bit_test *const d2 = (const case_bit_test *) p2;
1444
1445 if (d2->bits != d1->bits)
1446 return d2->bits - d1->bits;
1447
1448 /* Stabilize the sort. */
1449 return (LABEL_DECL_UID (CASE_LABEL (d2->label))
1450 - LABEL_DECL_UID (CASE_LABEL (d1->label)));
1451 }
1452
1453 /* Expand a switch statement by a short sequence of bit-wise
1454 comparisons. "switch(x)" is effectively converted into
1455 "if ((1 << (x-MINVAL)) & CST)" where CST and MINVAL are
1456 integer constants.
1457
1458 INDEX_EXPR is the value being switched on.
1459
1460 MINVAL is the lowest case value in the case nodes,
1461 and RANGE is highest value minus MINVAL. MINVAL and RANGE
1462 are not guaranteed to be of the same type as INDEX_EXPR
1463 (the gimplifier doesn't change the type of case label values,
1464 and MINVAL and RANGE are derived from those values).
1465 MAXVAL is MINVAL + RANGE.
1466
1467 There *MUST* be max_case_bit_tests or fewer unique case
1468 node targets. */
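/* A small illustration of the shape of the emitted test: for case values
   1, 3 and 5 all branching to the same label, MINVAL is 1, so the pass
   builds a mask with bits 0, 2 and 4 set and emits, in effect,
   "if ((1 << (x - 1)) & 0x15) goto target;".  */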
1469
1470 void
1471 bit_test_cluster::emit (tree index_expr, tree index_type,
1472 tree, basic_block default_bb)
1473 {
1474 case_bit_test test[m_max_case_bit_tests] = { {} };
1475 unsigned int i, j, k;
1476 unsigned int count;
1477
1478 tree unsigned_index_type = range_check_type (index_type);
1479
1480 gimple_stmt_iterator gsi;
1481 gassign *shift_stmt;
1482
1483 tree idx, tmp, csui;
1484 tree word_type_node = lang_hooks.types.type_for_mode (word_mode, 1);
1485 tree word_mode_zero = fold_convert (word_type_node, integer_zero_node);
1486 tree word_mode_one = fold_convert (word_type_node, integer_one_node);
1487 int prec = TYPE_PRECISION (word_type_node);
1488 wide_int wone = wi::one (prec);
1489
1490 tree minval = get_low ();
1491 tree maxval = get_high ();
1492 tree range = int_const_binop (MINUS_EXPR, maxval, minval);
1493 unsigned HOST_WIDE_INT bt_range = get_range (minval, maxval);
1494
1495 /* Go through all case labels, and collect the case labels, profile
1496 counts, and other information we need to build the branch tests. */
1497 count = 0;
1498 for (i = 0; i < m_cases.length (); i++)
1499 {
1500 unsigned int lo, hi;
1501 simple_cluster *n = static_cast<simple_cluster *> (m_cases[i]);
1502 for (k = 0; k < count; k++)
1503 if (n->m_case_bb == test[k].target_bb)
1504 break;
1505
1506 if (k == count)
1507 {
1508 gcc_checking_assert (count < m_max_case_bit_tests);
1509 test[k].mask = wi::zero (prec);
1510 test[k].target_bb = n->m_case_bb;
1511 test[k].label = n->m_case_label_expr;
1512 test[k].bits = 0;
1513 count++;
1514 }
1515
1516 test[k].bits += n->get_range (n->get_low (), n->get_high ());
1517
1518 lo = tree_to_uhwi (int_const_binop (MINUS_EXPR, n->get_low (), minval));
1519 if (n->get_high () == NULL_TREE)
1520 hi = lo;
1521 else
1522 hi = tree_to_uhwi (int_const_binop (MINUS_EXPR, n->get_high (),
1523 minval));
1524
1525 for (j = lo; j <= hi; j++)
1526 test[k].mask |= wi::lshift (wone, j);
1527 }
1528
1529 qsort (test, count, sizeof (*test), case_bit_test::cmp);
1530
1531 /* If all values are in the 0 .. BITS_PER_WORD-1 range, we can get rid of
1532 the minval subtractions, but it might make the mask constants more
1533 expensive. So, compare the costs. */
1534 if (compare_tree_int (minval, 0) > 0
1535 && compare_tree_int (maxval, GET_MODE_BITSIZE (word_mode)) < 0)
1536 {
1537 int cost_diff;
1538 HOST_WIDE_INT m = tree_to_uhwi (minval);
1539 rtx reg = gen_raw_REG (word_mode, 10000);
1540 bool speed_p = optimize_insn_for_speed_p ();
1541 cost_diff = set_src_cost (gen_rtx_PLUS (word_mode, reg,
1542 GEN_INT (-m)),
1543 word_mode, speed_p);
1544 for (i = 0; i < count; i++)
1545 {
1546 rtx r = immed_wide_int_const (test[i].mask, word_mode);
1547 cost_diff += set_src_cost (gen_rtx_AND (word_mode, reg, r),
1548 word_mode, speed_p);
1549 r = immed_wide_int_const (wi::lshift (test[i].mask, m), word_mode);
1550 cost_diff -= set_src_cost (gen_rtx_AND (word_mode, reg, r),
1551 word_mode, speed_p);
1552 }
1553 if (cost_diff > 0)
1554 {
1555 for (i = 0; i < count; i++)
1556 test[i].mask = wi::lshift (test[i].mask, m);
1557 minval = build_zero_cst (TREE_TYPE (minval));
1558 range = maxval;
1559 }
1560 }
1561
1562 /* Now build the test-and-branch code. */
1563
1564 gsi = gsi_last_bb (m_case_bb);
1565
1566 /* idx = (unsigned)x - minval. */
1567 idx = fold_convert (unsigned_index_type, index_expr);
1568 idx = fold_build2 (MINUS_EXPR, unsigned_index_type, idx,
1569 fold_convert (unsigned_index_type, minval));
1570 idx = force_gimple_operand_gsi (&gsi, idx,
1571 /*simple=*/true, NULL_TREE,
1572 /*before=*/true, GSI_SAME_STMT);
1573
1574 if (m_handles_entire_switch)
1575 {
1576 /* if (idx > range) goto default */
1577 range
1578 = force_gimple_operand_gsi (&gsi,
1579 fold_convert (unsigned_index_type, range),
1580 /*simple=*/true, NULL_TREE,
1581 /*before=*/true, GSI_SAME_STMT);
1582 tmp = fold_build2 (GT_EXPR, boolean_type_node, idx, range);
1583 basic_block new_bb
1584 = hoist_edge_and_branch_if_true (&gsi, tmp, default_bb,
1585 profile_probability::unlikely ());
1586 gsi = gsi_last_bb (new_bb);
1587 }
1588
1589 /* csui = (1 << (word_mode) idx) */
1590 csui = make_ssa_name (word_type_node);
1591 tmp = fold_build2 (LSHIFT_EXPR, word_type_node, word_mode_one,
1592 fold_convert (word_type_node, idx));
1593 tmp = force_gimple_operand_gsi (&gsi, tmp,
1594 /*simple=*/false, NULL_TREE,
1595 /*before=*/true, GSI_SAME_STMT);
1596 shift_stmt = gimple_build_assign (csui, tmp);
1597 gsi_insert_before (&gsi, shift_stmt, GSI_SAME_STMT);
1598 update_stmt (shift_stmt);
1599
1600 profile_probability prob = profile_probability::always ();
1601
1602 /* for each unique set of cases:
1603 if (const & csui) goto target */
1604 for (k = 0; k < count; k++)
1605 {
1606 prob = profile_probability::always ().apply_scale (test[k].bits,
1607 bt_range);
1608 bt_range -= test[k].bits;
1609 tmp = wide_int_to_tree (word_type_node, test[k].mask);
1610 tmp = fold_build2 (BIT_AND_EXPR, word_type_node, csui, tmp);
1611 tmp = force_gimple_operand_gsi (&gsi, tmp,
1612 /*simple=*/true, NULL_TREE,
1613 /*before=*/true, GSI_SAME_STMT);
1614 tmp = fold_build2 (NE_EXPR, boolean_type_node, tmp, word_mode_zero);
1615 basic_block new_bb
1616 = hoist_edge_and_branch_if_true (&gsi, tmp, test[k].target_bb, prob);
1617 gsi = gsi_last_bb (new_bb);
1618 }
1619
1620 /* We should have removed all edges now. */
1621 gcc_assert (EDGE_COUNT (gsi_bb (gsi)->succs) == 0);
1622
1623 /* If nothing matched, go to the default label. */
1624 edge e = make_edge (gsi_bb (gsi), default_bb, EDGE_FALLTHRU);
1625 e->probability = profile_probability::always ();
1626 }
1627
1628 /* Split the basic block at the statement pointed to by GSIP, and insert
1629 a branch to the target basic block of E_TRUE conditional on tree
1630 expression COND.
1631
1632 It is assumed that there is already an edge from the to-be-split
1633 basic block to E_TRUE->dest block. This edge is removed, and the
1634 profile information on the edge is re-used for the new conditional
1635 jump.
1636
1637 The CFG is updated. The dominator tree will not be valid after
1638 this transformation, but the immediate dominators are updated if
1639 UPDATE_DOMINATORS is true.
1640
1641 Returns the newly created basic block. */
1642
1643 basic_block
1644 bit_test_cluster::hoist_edge_and_branch_if_true (gimple_stmt_iterator *gsip,
1645 tree cond, basic_block case_bb,
1646 profile_probability prob)
1647 {
1648 tree tmp;
1649 gcond *cond_stmt;
1650 edge e_false;
1651 basic_block new_bb, split_bb = gsi_bb (*gsip);
1652
1653 edge e_true = make_edge (split_bb, case_bb, EDGE_TRUE_VALUE);
1654 e_true->probability = prob;
1655 gcc_assert (e_true->src == split_bb);
1656
1657 tmp = force_gimple_operand_gsi (gsip, cond, /*simple=*/true, NULL,
1658 /*before=*/true, GSI_SAME_STMT);
1659 cond_stmt = gimple_build_cond_from_tree (tmp, NULL_TREE, NULL_TREE);
1660 gsi_insert_before (gsip, cond_stmt, GSI_SAME_STMT);
1661
1662 e_false = split_block (split_bb, cond_stmt);
1663 new_bb = e_false->dest;
1664 redirect_edge_pred (e_true, split_bb);
1665
1666 e_false->flags &= ~EDGE_FALLTHRU;
1667 e_false->flags |= EDGE_FALSE_VALUE;
1668 e_false->probability = e_true->probability.invert ();
1669 new_bb->count = e_false->count ();
1670
1671 return new_bb;
1672 }
1673
1674 /* Compute the number of case labels that correspond to each outgoing edge of
1675 the switch statement. Record this information in the aux field of the edge. */
1676
1677 void
1678 switch_decision_tree::compute_cases_per_edge ()
1679 {
1680 reset_out_edges_aux (m_switch);
1681 int ncases = gimple_switch_num_labels (m_switch);
1682 for (int i = ncases - 1; i >= 1; --i)
1683 {
1684 edge case_edge = gimple_switch_edge (cfun, m_switch, i);
1685 case_edge->aux = (void *) ((intptr_t) (case_edge->aux) + 1);
1686 }
1687 }
1688
1689 /* Analyze the switch statement and return true when the statement is
1690 expanded as a decision tree. */
1691
1692 bool
1693 switch_decision_tree::analyze_switch_statement ()
1694 {
1695 unsigned l = gimple_switch_num_labels (m_switch);
1696 basic_block bb = gimple_bb (m_switch);
1697 auto_vec<cluster *> clusters;
1698 clusters.create (l - 1);
1699
1700 basic_block default_bb = gimple_switch_default_bb (cfun, m_switch);
1701 m_case_bbs.reserve (l);
1702 m_case_bbs.quick_push (default_bb);
1703
1704 compute_cases_per_edge ();
1705
1706 for (unsigned i = 1; i < l; i++)
1707 {
1708 tree elt = gimple_switch_label (m_switch, i);
1709 tree lab = CASE_LABEL (elt);
1710 basic_block case_bb = label_to_block (cfun, lab);
1711 edge case_edge = find_edge (bb, case_bb);
1712 tree low = CASE_LOW (elt);
1713 tree high = CASE_HIGH (elt);
1714
1715 profile_probability p
1716 = case_edge->probability.apply_scale (1, (intptr_t) (case_edge->aux));
1717 clusters.quick_push (new simple_cluster (low, high, elt, case_edge->dest,
1718 p));
1719 m_case_bbs.quick_push (case_edge->dest);
1720 }
1721
1722 reset_out_edges_aux (m_switch);
1723
1724 /* Find jump table clusters. */
1725 vec<cluster *> output = jump_table_cluster::find_jump_tables (clusters);
1726
1727 /* Find bit test clusters. */
1728 vec<cluster *> output2;
1729 auto_vec<cluster *> tmp;
1730 output2.create (1);
1731 tmp.create (1);
1732
1733 for (unsigned i = 0; i < output.length (); i++)
1734 {
1735 cluster *c = output[i];
1736 if (c->get_type () != SIMPLE_CASE)
1737 {
1738 if (!tmp.is_empty ())
1739 {
1740 vec<cluster *> n = bit_test_cluster::find_bit_tests (tmp);
1741 output2.safe_splice (n);
1742 n.release ();
1743 tmp.truncate (0);
1744 }
1745 output2.safe_push (c);
1746 }
1747 else
1748 tmp.safe_push (c);
1749 }
1750
1751 /* We still can have a temporary vector to test. */
1752 if (!tmp.is_empty ())
1753 {
1754 vec<cluster *> n = bit_test_cluster::find_bit_tests (tmp);
1755 output2.safe_splice (n);
1756 n.release ();
1757 }
1758
1759 if (dump_file)
1760 {
1761 fprintf (dump_file, ";; GIMPLE switch case clusters: ");
1762 for (unsigned i = 0; i < output2.length (); i++)
1763 output2[i]->dump (dump_file, dump_flags & TDF_DETAILS);
1764 fprintf (dump_file, "\n");
1765 }
1766
1767 output.release ();
1768
1769 bool expanded = try_switch_expansion (output2);
1770
1771 for (unsigned i = 0; i < output2.length (); i++)
1772 delete output2[i];
1773
1774 output2.release ();
1775
1776 return expanded;
1777 }
1778
1779 /* Attempt to expand CLUSTERS as a decision tree. Return true when
1780 expanded. */
1781
1782 bool
1783 switch_decision_tree::try_switch_expansion (vec<cluster *> &clusters)
1784 {
1785 tree index_expr = gimple_switch_index (m_switch);
1786 tree index_type = TREE_TYPE (index_expr);
1787 basic_block bb = gimple_bb (m_switch);
1788
1789 if (gimple_switch_num_labels (m_switch) == 1
1790 || range_check_type (index_type) == NULL_TREE)
1791 return false;
1792
1793 /* Find the default case target label. */
1794 edge default_edge = gimple_switch_default_edge (cfun, m_switch);
1795 m_default_bb = default_edge->dest;
1796
1797 /* Do the insertion of a case label into m_case_list. The labels are
1798 fed to us in descending order from the sorted vector of case labels used
1799 in the tree part of the middle end. So the list we construct is
1800 sorted in ascending order. */
1801
1802 for (int i = clusters.length () - 1; i >= 0; i--)
1803 {
1804 case_tree_node *r = m_case_list;
1805 m_case_list = m_case_node_pool.allocate ();
1806 m_case_list->m_right = r;
1807 m_case_list->m_c = clusters[i];
1808 }
1809
1810 record_phi_operand_mapping ();
1811
1812 /* Split basic block that contains the gswitch statement. */
1813 gimple_stmt_iterator gsi = gsi_last_bb (bb);
1814 edge e;
1815 if (gsi_end_p (gsi))
1816 e = split_block_after_labels (bb);
1817 else
1818 {
1819 gsi_prev (&gsi);
1820 e = split_block (bb, gsi_stmt (gsi));
1821 }
1822 bb = split_edge (e);
1823
1824 /* Create new basic blocks for non-case clusters where specific expansion
1825 needs to happen. */
1826 for (unsigned i = 0; i < clusters.length (); i++)
1827 if (clusters[i]->get_type () != SIMPLE_CASE)
1828 {
1829 clusters[i]->m_case_bb = create_empty_bb (bb);
1830 clusters[i]->m_case_bb->count = bb->count;
1831 clusters[i]->m_case_bb->loop_father = bb->loop_father;
1832 }
1833
1834 /* Do not do extra work for a single cluster. */
1835 if (clusters.length () == 1
1836 && clusters[0]->get_type () != SIMPLE_CASE)
1837 {
1838 cluster *c = clusters[0];
1839 c->emit (index_expr, index_type,
1840 gimple_switch_default_label (m_switch), m_default_bb);
1841 redirect_edge_succ (single_succ_edge (bb), c->m_case_bb);
1842 }
1843 else
1844 {
1845 emit (bb, index_expr, default_edge->probability, index_type);
1846
1847 /* Emit cluster-specific switch handling. */
1848 for (unsigned i = 0; i < clusters.length (); i++)
1849 if (clusters[i]->get_type () != SIMPLE_CASE)
1850 clusters[i]->emit (index_expr, index_type,
1851 gimple_switch_default_label (m_switch),
1852 m_default_bb);
1853 }
1854
1855 fix_phi_operands_for_edges ();
1856
1857 return true;
1858 }
1859
1860 /* Before switch transformation, record all SSA_NAMEs defined in switch BB
1861 and used in a label basic block. */
1862
1863 void
1864 switch_decision_tree::record_phi_operand_mapping ()
1865 {
1866 basic_block switch_bb = gimple_bb (m_switch);
1867 /* Record all PHI nodes that have to be fixed after conversion. */
1868 for (unsigned i = 0; i < m_case_bbs.length (); i++)
1869 {
1870 gphi_iterator gsi;
1871 basic_block bb = m_case_bbs[i];
1872 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1873 {
1874 gphi *phi = gsi.phi ();
1875
1876 for (unsigned i = 0; i < gimple_phi_num_args (phi); i++)
1877 {
1878 basic_block phi_src_bb = gimple_phi_arg_edge (phi, i)->src;
1879 if (phi_src_bb == switch_bb)
1880 {
1881 tree def = gimple_phi_arg_def (phi, i);
1882 tree result = gimple_phi_result (phi);
1883 m_phi_mapping.put (result, def);
1884 break;
1885 }
1886 }
1887 }
1888 }
1889 }
1890
1891 /* Append new operands to PHI statements that were introduced due to
1892 addition of new edges to case labels. */
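/* Illustrative example (SSA names made up): if a case block contains

     x_3 = PHI <x_1 (switch_bb), x_2 (other_bb)>

   then record_phi_operand_mapping stored x_3 -> x_1, and every new edge
   into that block created by the expansion gets x_1 appended here as the
   corresponding PHI argument.  */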
1893
1894 void
1895 switch_decision_tree::fix_phi_operands_for_edges ()
1896 {
1897 gphi_iterator gsi;
1898
1899 for (unsigned i = 0; i < m_case_bbs.length (); i++)
1900 {
1901 basic_block bb = m_case_bbs[i];
1902 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1903 {
1904 gphi *phi = gsi.phi ();
1905 for (unsigned j = 0; j < gimple_phi_num_args (phi); j++)
1906 {
1907 tree def = gimple_phi_arg_def (phi, j);
1908 if (def == NULL_TREE)
1909 {
1910 edge e = gimple_phi_arg_edge (phi, j);
1911 tree *definition
1912 = m_phi_mapping.get (gimple_phi_result (phi));
1913 gcc_assert (definition);
1914 add_phi_arg (phi, *definition, e, UNKNOWN_LOCATION);
1915 }
1916 }
1917 }
1918 }
1919 }
1920
1921 /* Generate a decision tree, switching on INDEX_EXPR and jumping to
1922 one of the case labels in m_case_list or to the default basic block.
1923 
1924 We generate a binary decision tree to select the appropriate target
1925 code.  */
1926
1927 void
1928 switch_decision_tree::emit (basic_block bb, tree index_expr,
1929 profile_probability default_prob, tree index_type)
1930 {
1931 balance_case_nodes (&m_case_list, NULL);
1932
1933 if (dump_file)
1934 dump_function_to_file (current_function_decl, dump_file, dump_flags);
1935 if (dump_file && (dump_flags & TDF_DETAILS))
1936 {
1937 int indent_step = ceil_log2 (TYPE_PRECISION (index_type)) + 2;
1938 fprintf (dump_file, ";; Expanding GIMPLE switch as decision tree:\n");
1939 gcc_assert (m_case_list != NULL);
1940 dump_case_nodes (dump_file, m_case_list, indent_step, 0);
1941 }
1942
1943 bb = emit_case_nodes (bb, index_expr, m_case_list, default_prob, index_type,
1944 gimple_location (m_switch));
1945
1946 if (bb)
1947 emit_jump (bb, m_default_bb);
1948
1949 /* The original gswitch statement is not needed anymore; remove it and delete its basic block.  */
1950 bb = gimple_bb (m_switch);
1951 gimple_stmt_iterator gsi = gsi_last_bb (bb);
1952 gsi_remove (&gsi, true);
1953
1954 delete_basic_block (bb);
1955 }
1956
1957 /* Take an ordered list of case nodes
1958 and transform them into a near optimal binary tree,
1959 on the assumption that any target code selection value is as
1960 likely as any other.
1961
1962 The transformation is performed by splitting the ordered
1963 list into two equal sections plus a pivot. The parts are
1964 then attached to the pivot as left and right branches. Each
1965 branch is then transformed recursively. */
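/* As an illustrative example, assuming five cases with equal
   probabilities, the ordered list 1 -> 2 -> 3 -> 4 -> 5 is split at 3:

	3
       / \
      1   4
       \   \
	2   5

   i.e. 1 and 4 keep 2 and 5 as right children once the two halves are
   balanced recursively (lists of two or fewer nodes are left as a
   right-leaning chain).  */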
1966
1967 void
1968 switch_decision_tree::balance_case_nodes (case_tree_node **head,
1969 case_tree_node *parent)
1970 {
1971 case_tree_node *np;
1972
1973 np = *head;
1974 if (np)
1975 {
1976 int i = 0;
1977 int ranges = 0;
1978 case_tree_node **npp;
1979 case_tree_node *left;
1980 profile_probability prob = profile_probability::never ();
1981
1982 /* Count the number of entries on the branch.  Also count the ranges.  */
1983
1984 while (np)
1985 {
1986 if (!tree_int_cst_equal (np->m_c->get_low (), np->m_c->get_high ()))
1987 ranges++;
1988
1989 i++;
1990 prob += np->m_c->m_prob;
1991 np = np->m_right;
1992 }
1993
1994 if (i > 2)
1995 {
1996 /* Split this list if it is long enough for that to help. */
1997 npp = head;
1998 left = *npp;
1999 profile_probability pivot_prob = prob.apply_scale (1, 2);
2000
2001 /* Find the place in the list that bisects the list's total
2002 probability.  */
2003 while (1)
2004 {
2005 /* Skip nodes while their probability does not reach
2006 that amount. */
2007 prob -= (*npp)->m_c->m_prob;
2008 if ((prob.initialized_p () && prob < pivot_prob)
2009 || ! (*npp)->m_right)
2010 break;
2011 npp = &(*npp)->m_right;
2012 }
2013
2014 np = *npp;
2015 *npp = 0;
2016 *head = np;
2017 np->m_parent = parent;
2018 np->m_left = left == np ? NULL : left;
2019
2020 /* Optimize each of the two split parts. */
2021 balance_case_nodes (&np->m_left, np);
2022 balance_case_nodes (&np->m_right, np);
2023 np->m_c->m_subtree_prob = np->m_c->m_prob;
2024 if (np->m_left)
2025 np->m_c->m_subtree_prob += np->m_left->m_c->m_subtree_prob;
2026 if (np->m_right)
2027 np->m_c->m_subtree_prob += np->m_right->m_c->m_subtree_prob;
2028 }
2029 else
2030 {
2031 /* Else leave this branch as one level,
2032 but fill in `parent' fields. */
2033 np = *head;
2034 np->m_parent = parent;
2035 np->m_c->m_subtree_prob = np->m_c->m_prob;
2036 for (; np->m_right; np = np->m_right)
2037 {
2038 np->m_right->m_parent = np;
2039 (*head)->m_c->m_subtree_prob += np->m_right->m_c->m_subtree_prob;
2040 }
2041 }
2042 }
2043 }
2044
2045 /* Dump ROOT, a list or tree of case nodes, to file. */
2046
2047 void
2048 switch_decision_tree::dump_case_nodes (FILE *f, case_tree_node *root,
2049 int indent_step, int indent_level)
2050 {
2051 if (root == 0)
2052 return;
2053 indent_level++;
2054
2055 dump_case_nodes (f, root->m_left, indent_step, indent_level);
2056
2057 fputs (";; ", f);
2058 fprintf (f, "%*s", indent_step * indent_level, "");
2059 root->m_c->dump (f);
2060 root->m_c->m_prob.dump (f);
2061 fputs (" subtree: ", f);
2062 root->m_c->m_subtree_prob.dump (f);
2063 fputs (")\n", f);
2064
2065 dump_case_nodes (f, root->m_right, indent_step, indent_level);
2066 }
2067
2068
2069 /* Make basic block BB end with an unconditional jump to CASE_BB.  */
2070
2071 void
2072 switch_decision_tree::emit_jump (basic_block bb, basic_block case_bb)
2073 {
2074 edge e = single_succ_edge (bb);
2075 redirect_edge_succ (e, case_bb);
2076 }
2077
2078 /* Generate code in basic block BB that compares OP0 with OP1 and jumps
2079 to LABEL_BB when the comparison is true; return the block reached when
2080 it is false.  COMPARISON is the GIMPLE comparison code (EQ, NE, GT,
2081 etc.).  PROB is the probability of jumping to LABEL_BB.  */
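/* Illustratively, the emitted shape is

     BB:  ...
	  if (OP0 <COMPARISON> OP1) goto LABEL_BB; else goto NEW_BB;

   where NEW_BB is the fresh block created by splitting BB after the
   condition; NEW_BB is returned so that callers can continue emitting
   code on the false path.  */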
2082
2083 basic_block
2084 switch_decision_tree::emit_cmp_and_jump_insns (basic_block bb, tree op0,
2085 tree op1, tree_code comparison,
2086 basic_block label_bb,
2087 profile_probability prob,
2088 location_t loc)
2089 {
2090 // TODO: this is called once with lhs != index (from the range test in emit_case_nodes).
2091 op1 = fold_convert (TREE_TYPE (op0), op1);
2092
2093 gcond *cond = gimple_build_cond (comparison, op0, op1, NULL_TREE, NULL_TREE);
2094 gimple_set_location (cond, loc);
2095 gimple_stmt_iterator gsi = gsi_last_bb (bb);
2096 gsi_insert_after (&gsi, cond, GSI_NEW_STMT);
2097
2098 gcc_assert (single_succ_p (bb));
2099
2100 /* Make a new basic block where the false branch will take place.  */
2101 edge false_edge = split_block (bb, cond);
2102 false_edge->flags = EDGE_FALSE_VALUE;
2103 false_edge->probability = prob.invert ();
2104
2105 edge true_edge = make_edge (bb, label_bb, EDGE_TRUE_VALUE);
2106 true_edge->probability = prob;
2107
2108 return false_edge->dest;
2109 }
2110
2111 /* Generate code to jump to LABEL_BB if OP0 and OP1 are equal.
2112 PROB is the probability of jumping to LABEL_BB.  BB is the basic block
2113 where the new condition is placed.  */
2114
2115 basic_block
2116 switch_decision_tree::do_jump_if_equal (basic_block bb, tree op0, tree op1,
2117 basic_block label_bb,
2118 profile_probability prob,
2119 location_t loc)
2120 {
2121 op1 = fold_convert (TREE_TYPE (op0), op1);
2122
2123 gcond *cond = gimple_build_cond (EQ_EXPR, op0, op1, NULL_TREE, NULL_TREE);
2124 gimple_set_location (cond, loc);
2125 gimple_stmt_iterator gsi = gsi_last_bb (bb);
2126 gsi_insert_before (&gsi, cond, GSI_SAME_STMT);
2127
2128 gcc_assert (single_succ_p (bb));
2129
2130 /* Make a new basic block where the false branch will take place.  */
2131 edge false_edge = split_block (bb, cond);
2132 false_edge->flags = EDGE_FALSE_VALUE;
2133 false_edge->probability = prob.invert ();
2134
2135 edge true_edge = make_edge (bb, label_bb, EDGE_TRUE_VALUE);
2136 true_edge->probability = prob;
2137
2138 return false_edge->dest;
2139 }
2140
2141 /* Emit step-by-step code to select a case for the value of INDEX.
2142 The generated decision tree follows the form of the case-node binary
2143 tree NODE, whose nodes represent test conditions.
2144 DEFAULT_PROB is the probability of the cases leading to the default BB.
2145 INDEX_TYPE is the type of the index of the switch.  */
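/* As an illustrative, made-up example, for the balanced tree over the
   cases 1, 2, 3, 4 and 5 from the balance_case_nodes example above, the
   emitted code is roughly

     if (INDEX == 3) goto case_3;
     if (INDEX > 3) goto test_right;
     if (INDEX == 1) goto case_1;
     if (INDEX == 2) goto case_2;
     goto default;
   test_right:
     if (INDEX == 4) goto case_4;
     if (INDEX == 5) goto case_5;
     goto default;  */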
2146
2147 basic_block
2148 switch_decision_tree::emit_case_nodes (basic_block bb, tree index,
2149 case_tree_node *node,
2150 profile_probability default_prob,
2151 tree index_type, location_t loc)
2152 {
2153 profile_probability p;
2154
2155 /* If node is null, we are done. */
2156 if (node == NULL)
2157 return bb;
2158
2159 /* Single value case. */
2160 if (node->m_c->is_single_value_p ())
2161 {
2162 /* Node is single valued. First see if the index expression matches
2163 this node and then check our children, if any. */
2164 p = node->m_c->m_prob / (node->m_c->m_subtree_prob + default_prob);
2165 bb = do_jump_if_equal (bb, index, node->m_c->get_low (),
2166 node->m_c->m_case_bb, p, loc);
2167 /* Since this case has been handled at this point, subtract its
2168 probability from the subtree probability.  */
2169 node->m_c->m_subtree_prob -= p;
2170
2171 if (node->m_left != NULL && node->m_right != NULL)
2172 {
2173 /* 1) the node has both children
2174
2175 If both children are single-valued cases with no
2176 children, finish up all the work. This way, we can save
2177 one ordered comparison. */
2178
2179 if (!node->m_left->has_child ()
2180 && node->m_left->m_c->is_single_value_p ()
2181 && !node->m_right->has_child ()
2182 && node->m_right->m_c->is_single_value_p ())
2183 {
2184 p = (node->m_right->m_c->m_prob
2185 / (node->m_c->m_subtree_prob + default_prob));
2186 bb = do_jump_if_equal (bb, index, node->m_right->m_c->get_low (),
2187 node->m_right->m_c->m_case_bb, p, loc);
2188
2189 p = (node->m_left->m_c->m_prob
2190 / (node->m_c->m_subtree_prob + default_prob));
2191 bb = do_jump_if_equal (bb, index, node->m_left->m_c->get_low (),
2192 node->m_left->m_c->m_case_bb, p, loc);
2193 }
2194 else
2195 {
2196 /* Branch to a label where we will handle it later. */
2197 basic_block test_bb = split_edge (single_succ_edge (bb));
2198 redirect_edge_succ (single_pred_edge (test_bb),
2199 single_succ_edge (bb)->dest);
2200
2201 p = ((node->m_right->m_c->m_subtree_prob
2202 + default_prob.apply_scale (1, 2))
2203 / (node->m_c->m_subtree_prob + default_prob));
2204 bb = emit_cmp_and_jump_insns (bb, index, node->m_c->get_high (),
2205 GT_EXPR, test_bb, p, loc);
2206 default_prob = default_prob.apply_scale (1, 2);
2207
2208 /* Handle the left-hand subtree. */
2209 bb = emit_case_nodes (bb, index, node->m_left,
2210 default_prob, index_type, loc);
2211
2212 /* If the left-hand subtree fell through,
2213 don't let it fall into the right-hand subtree. */
2214 if (bb && m_default_bb)
2215 emit_jump (bb, m_default_bb);
2216
2217 bb = emit_case_nodes (test_bb, index, node->m_right,
2218 default_prob, index_type, loc);
2219 }
2220 }
2221 else if (node->m_left == NULL && node->m_right != NULL)
2222 {
2223 /* 2) the node has only right child. */
2224
2225 /* Here we have a right child but no left so we issue a conditional
2226 branch to default and process the right child.
2227
2228 Omit the conditional branch to default if the right child
2229 does not have any children and is single valued; it would
2230 cost too much space to save so little time. */
2231
2232 if (node->m_right->has_child ()
2233 || !node->m_right->m_c->is_single_value_p ())
2234 {
2235 p = (default_prob.apply_scale (1, 2)
2236 / (node->m_c->m_subtree_prob + default_prob));
2237 bb = emit_cmp_and_jump_insns (bb, index, node->m_c->get_low (),
2238 LT_EXPR, m_default_bb, p, loc);
2239 default_prob = default_prob.apply_scale (1, 2);
2240
2241 bb = emit_case_nodes (bb, index, node->m_right, default_prob,
2242 index_type, loc);
2243 }
2244 else
2245 {
2246 /* We cannot process node->right normally
2247 since we haven't ruled out the numbers less than
2248 this node's value. So handle node->right explicitly. */
2249 p = (node->m_right->m_c->m_subtree_prob
2250 / (node->m_c->m_subtree_prob + default_prob));
2251 bb = do_jump_if_equal (bb, index, node->m_right->m_c->get_low (),
2252 node->m_right->m_c->m_case_bb, p, loc);
2253 }
2254 }
2255 else if (node->m_left != NULL && node->m_right == NULL)
2256 {
2257 /* 3) just one subtree, on the left. Similar case as previous. */
2258
2259 if (node->m_left->has_child ()
2260 || !node->m_left->m_c->is_single_value_p ())
2261 {
2262 p = (default_prob.apply_scale (1, 2)
2263 / (node->m_c->m_subtree_prob + default_prob));
2264 bb = emit_cmp_and_jump_insns (bb, index, node->m_c->get_high (),
2265 GT_EXPR, m_default_bb, p, loc);
2266 default_prob = default_prob.apply_scale (1, 2);
2267
2268 bb = emit_case_nodes (bb, index, node->m_left, default_prob,
2269 index_type, loc);
2270 }
2271 else
2272 {
2273 /* We cannot process node->left normally
2274 since we haven't ruled out the numbers greater than
2275 this node's value.  So handle node->left explicitly.  */
2276 p = (node->m_left->m_c->m_subtree_prob
2277 / (node->m_c->m_subtree_prob + default_prob));
2278 bb = do_jump_if_equal (bb, index, node->m_left->m_c->get_low (),
2279 node->m_left->m_c->m_case_bb, p, loc);
2280 }
2281 }
2282 }
2283 else
2284 {
2285 /* Node is a range. These cases are very similar to those for a single
2286 value, except that we do not start by testing whether this node
2287 is the one to branch to. */
2288 if (node->has_child () || node->m_c->get_type () != SIMPLE_CASE)
2289 {
2290 /* Branch to a label where we will handle it later. */
2291 basic_block test_bb = split_edge (single_succ_edge (bb));
2292 redirect_edge_succ (single_pred_edge (test_bb),
2293 single_succ_edge (bb)->dest);
2294
2295
2296 profile_probability right_prob = profile_probability::never ();
2297 if (node->m_right)
2298 right_prob = node->m_right->m_c->m_subtree_prob;
2299 p = ((right_prob + default_prob.apply_scale (1, 2))
2300 / (node->m_c->m_subtree_prob + default_prob));
2301
2302 bb = emit_cmp_and_jump_insns (bb, index, node->m_c->get_high (),
2303 GT_EXPR, test_bb, p, loc);
2304 default_prob = default_prob.apply_scale (1, 2);
2305
2306 /* Value belongs to this node or to the left-hand subtree. */
2307 p = node->m_c->m_prob / (node->m_c->m_subtree_prob + default_prob);
2308 bb = emit_cmp_and_jump_insns (bb, index, node->m_c->get_low (),
2309 GE_EXPR, node->m_c->m_case_bb, p, loc);
2310
2311 /* Handle the left-hand subtree. */
2312 bb = emit_case_nodes (bb, index, node->m_left,
2313 default_prob, index_type, loc);
2314
2315 /* If the left-hand subtree fell through,
2316 don't let it fall into the right-hand subtree. */
2317 if (bb && m_default_bb)
2318 emit_jump (bb, m_default_bb);
2319
2320 bb = emit_case_nodes (test_bb, index, node->m_right,
2321 default_prob, index_type, loc);
2322 }
2323 else
2324 {
2325 /* Node has no children, so test the whole range [low, high] at once:
2326 a single range test decides between this node's case label and the
2327 default label.  */
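/* Conceptually (illustrative only), generate_range_test produces operands
   such that the single comparison below behaves like

     if ((unsigned) (INDEX - low) > (unsigned) (high - low))
       goto default;
     goto case_bb;  */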
2328 tree lhs, rhs;
2329 generate_range_test (bb, index, node->m_c->get_low (),
2330 node->m_c->get_high (), &lhs, &rhs);
2331 p = default_prob / (node->m_c->m_subtree_prob + default_prob);
2332
2333 bb = emit_cmp_and_jump_insns (bb, lhs, rhs, GT_EXPR,
2334 m_default_bb, p, loc);
2335
2336 emit_jump (bb, node->m_c->m_case_bb);
2337 return NULL;
2338 }
2339 }
2340
2341 return bb;
2342 }
2343
2344 /* The main function of the pass scans the function for switches and
2345 attempts to convert them with switch_conversion::expand.  */
2346
2347 namespace {
2348
2349 const pass_data pass_data_convert_switch =
2350 {
2351 GIMPLE_PASS, /* type */
2352 "switchconv", /* name */
2353 OPTGROUP_NONE, /* optinfo_flags */
2354 TV_TREE_SWITCH_CONVERSION, /* tv_id */
2355 ( PROP_cfg | PROP_ssa ), /* properties_required */
2356 0, /* properties_provided */
2357 0, /* properties_destroyed */
2358 0, /* todo_flags_start */
2359 TODO_update_ssa, /* todo_flags_finish */
2360 };
2361
2362 class pass_convert_switch : public gimple_opt_pass
2363 {
2364 public:
2365 pass_convert_switch (gcc::context *ctxt)
2366 : gimple_opt_pass (pass_data_convert_switch, ctxt)
2367 {}
2368
2369 /* opt_pass methods: */
2370 virtual bool gate (function *) { return flag_tree_switch_conversion != 0; }
2371 virtual unsigned int execute (function *);
2372
2373 }; // class pass_convert_switch
2374
2375 unsigned int
2376 pass_convert_switch::execute (function *fun)
2377 {
2378 basic_block bb;
2379 bool cfg_altered = false;
2380
2381 FOR_EACH_BB_FN (bb, fun)
2382 {
2383 gimple *stmt = last_stmt (bb);
2384 if (stmt && gimple_code (stmt) == GIMPLE_SWITCH)
2385 {
2386 if (dump_file)
2387 {
2388 expanded_location loc = expand_location (gimple_location (stmt));
2389
2390 fprintf (dump_file, "beginning to process the following "
2391 "SWITCH statement (%s:%d) : ------- \n",
2392 loc.file, loc.line);
2393 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2394 putc ('\n', dump_file);
2395 }
2396
2397 switch_conversion sconv;
2398 sconv.expand (as_a <gswitch *> (stmt));
2399 cfg_altered |= sconv.m_cfg_altered;
2400 if (!sconv.m_reason)
2401 {
2402 if (dump_file)
2403 {
2404 fputs ("Switch converted\n", dump_file);
2405 fputs ("--------------------------------\n", dump_file);
2406 }
2407
2408 /* Make no effort to update the post-dominator tree.
2409 It is actually not that hard for the transformations
2410 we have performed, but it is not supported
2411 by iterate_fix_dominators. */
2412 free_dominance_info (CDI_POST_DOMINATORS);
2413 }
2414 else
2415 {
2416 if (dump_file)
2417 {
2418 fputs ("Bailing out - ", dump_file);
2419 fputs (sconv.m_reason, dump_file);
2420 fputs ("\n--------------------------------\n", dump_file);
2421 }
2422 }
2423 }
2424 }
2425
2426 return cfg_altered ? TODO_cleanup_cfg : 0;
2427 }
2428
2429 } // anon namespace
2430
2431 gimple_opt_pass *
2432 make_pass_convert_switch (gcc::context *ctxt)
2433 {
2434 return new pass_convert_switch (ctxt);
2435 }
2436
2437 /* The main function of the pass scans the function for switches and
2438 lowers them to decision trees, bit tests or jump tables.  */
2439
2440 namespace {
2441
2442 template <bool O0> class pass_lower_switch: public gimple_opt_pass
2443 {
2444 public:
2445 pass_lower_switch (gcc::context *ctxt) : gimple_opt_pass (data, ctxt) {}
2446
2447 static const pass_data data;
2448 opt_pass *
2449 clone ()
2450 {
2451 return new pass_lower_switch<O0> (m_ctxt);
2452 }
2453
2454 virtual bool
2455 gate (function *)
2456 {
2457 return !O0 || !optimize;
2458 }
2459
2460 virtual unsigned int execute (function *fun);
2461 }; // class pass_lower_switch
2462
2463 template <bool O0>
2464 const pass_data pass_lower_switch<O0>::data = {
2465 GIMPLE_PASS, /* type */
2466 O0 ? "switchlower_O0" : "switchlower", /* name */
2467 OPTGROUP_NONE, /* optinfo_flags */
2468 TV_TREE_SWITCH_LOWERING, /* tv_id */
2469 ( PROP_cfg | PROP_ssa ), /* properties_required */
2470 0, /* properties_provided */
2471 0, /* properties_destroyed */
2472 0, /* todo_flags_start */
2473 TODO_update_ssa | TODO_cleanup_cfg, /* todo_flags_finish */
2474 };
2475
2476 template <bool O0>
2477 unsigned int
2478 pass_lower_switch<O0>::execute (function *fun)
2479 {
2480 basic_block bb;
2481 bool expanded = false;
2482
2483 auto_vec<gimple *> switch_statements;
2484 switch_statements.create (1);
2485
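/* Collect the switch statements first: the lowering below splits blocks
   and rewires edges, which must not happen while walking the CFG.  */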
2486 FOR_EACH_BB_FN (bb, fun)
2487 {
2488 gimple *stmt = last_stmt (bb);
2489 gswitch *swtch;
2490 if (stmt && (swtch = dyn_cast<gswitch *> (stmt)))
2491 {
2492 if (!O0)
2493 group_case_labels_stmt (swtch);
2494 switch_statements.safe_push (swtch);
2495 }
2496 }
2497
2498 for (unsigned i = 0; i < switch_statements.length (); i++)
2499 {
2500 gimple *stmt = switch_statements[i];
2501 if (dump_file)
2502 {
2503 expanded_location loc = expand_location (gimple_location (stmt));
2504
2505 fprintf (dump_file, "beginning to process the following "
2506 "SWITCH statement (%s:%d) : ------- \n",
2507 loc.file, loc.line);
2508 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2509 putc ('\n', dump_file);
2510 }
2511
2512 gswitch *swtch = dyn_cast<gswitch *> (stmt);
2513 if (swtch)
2514 {
2515 switch_decision_tree dt (swtch);
2516 expanded |= dt.analyze_switch_statement ();
2517 }
2518 }
2519
2520 if (expanded)
2521 {
2522 free_dominance_info (CDI_DOMINATORS);
2523 free_dominance_info (CDI_POST_DOMINATORS);
2524 mark_virtual_operands_for_renaming (cfun);
2525 }
2526
2527 return 0;
2528 }
2529
2530 } // anon namespace
2531
2532 gimple_opt_pass *
2533 make_pass_lower_switch_O0 (gcc::context *ctxt)
2534 {
2535 return new pass_lower_switch<true> (ctxt);
2536 }
2537 gimple_opt_pass *
2538 make_pass_lower_switch (gcc::context *ctxt)
2539 {
2540 return new pass_lower_switch<false> (ctxt);
2541 }
2542
2543