gcc/tree-switch-conversion.c
1 /* Lower GIMPLE_SWITCH expressions to something more efficient than
2 a jump table.
3 Copyright (C) 2006-2019 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by the
9 Free Software Foundation; either version 3, or (at your option) any
10 later version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not, write to the Free
19 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
20 02110-1301, USA. */
21
22 /* This file handles the lowering of GIMPLE_SWITCH to an indexed
23 load, or a series of bit-test-and-branch expressions. */
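/* As an illustrative sketch (hypothetical input, not a literal dump of the
   pass), a switch like

     int f (int i)
     {
       switch (i)
         {
         case 0: return 10;
         case 1: return 15;
         case 2: return 21;
         default: return 0;
         }
     }

   can be converted into a lookup in a static array, roughly

     static const int CSWTCH[] = { 10, 15, 21 };

     int f (int i)
     {
       if ((unsigned) i <= 2)
         return CSWTCH[i];
       return 0;
     }

   CSWTCH is the name used for the generated arrays below; the pass really
   works on GIMPLE, so the actual output differs from this source-level
   sketch.  */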
24
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "backend.h"
29 #include "insn-codes.h"
30 #include "rtl.h"
31 #include "tree.h"
32 #include "gimple.h"
33 #include "cfghooks.h"
34 #include "tree-pass.h"
35 #include "ssa.h"
36 #include "optabs-tree.h"
37 #include "cgraph.h"
38 #include "gimple-pretty-print.h"
39 #include "params.h"
40 #include "fold-const.h"
41 #include "varasm.h"
42 #include "stor-layout.h"
43 #include "cfganal.h"
44 #include "gimplify.h"
45 #include "gimple-iterator.h"
46 #include "gimplify-me.h"
47 #include "gimple-fold.h"
48 #include "tree-cfg.h"
49 #include "cfgloop.h"
50 #include "alloc-pool.h"
51 #include "target.h"
52 #include "tree-into-ssa.h"
53 #include "omp-general.h"
54
55 /* ??? For lang_hooks.types.type_for_mode, but is there a word_mode
56 type in the GIMPLE type system that is language-independent? */
57 #include "langhooks.h"
58
59 #include "tree-switch-conversion.h"
60 \f
61 using namespace tree_switch_conversion;
62
63 /* Constructor. */
64
65 switch_conversion::switch_conversion (): m_final_bb (NULL), m_other_count (),
66 m_constructors (NULL), m_default_values (NULL),
67 m_arr_ref_first (NULL), m_arr_ref_last (NULL),
68 m_reason (NULL), m_default_case_nonstandard (false), m_cfg_altered (false)
69 {
70 }
71
72 /* Collect information about the SWTCH statement. */
73
74 void
75 switch_conversion::collect (gswitch *swtch)
76 {
77 unsigned int branch_num = gimple_switch_num_labels (swtch);
78 tree min_case, max_case;
79 unsigned int i;
80 edge e, e_default, e_first;
81 edge_iterator ei;
82
83 m_switch = swtch;
84
85 /* The gimplifier has already sorted the cases by CASE_LOW and ensured there
86 is a default label which is the first in the vector.
87 Collect the bits we can deduce from the CFG. */
88 m_index_expr = gimple_switch_index (swtch);
89 m_switch_bb = gimple_bb (swtch);
90 e_default = gimple_switch_default_edge (cfun, swtch);
91 m_default_bb = e_default->dest;
92 m_default_prob = e_default->probability;
93 m_default_count = e_default->count ();
94 FOR_EACH_EDGE (e, ei, m_switch_bb->succs)
95 if (e != e_default)
96 m_other_count += e->count ();
97
98 /* Get upper and lower bounds of case values, and the covered range. */
99 min_case = gimple_switch_label (swtch, 1);
100 max_case = gimple_switch_label (swtch, branch_num - 1);
101
102 m_range_min = CASE_LOW (min_case);
103 if (CASE_HIGH (max_case) != NULL_TREE)
104 m_range_max = CASE_HIGH (max_case);
105 else
106 m_range_max = CASE_LOW (max_case);
107
108 m_contiguous_range = true;
109 tree last = CASE_HIGH (min_case) ? CASE_HIGH (min_case) : m_range_min;
110 for (i = 2; i < branch_num; i++)
111 {
112 tree elt = gimple_switch_label (swtch, i);
113 if (wi::to_wide (last) + 1 != wi::to_wide (CASE_LOW (elt)))
114 {
115 m_contiguous_range = false;
116 break;
117 }
118 last = CASE_HIGH (elt) ? CASE_HIGH (elt) : CASE_LOW (elt);
119 }
120
121 if (m_contiguous_range)
122 e_first = gimple_switch_edge (cfun, swtch, 1);
123 else
124 e_first = e_default;
125
126 /* See if there is one common successor block for all branch
127 targets. If it exists, record it in FINAL_BB.
128 As an initial guess, use the destination of the first non-default case
129 if the range is contiguous and of the default case otherwise, or that
130 block's single successor if it is a forwarder block. */
131 if (! single_pred_p (e_first->dest))
132 m_final_bb = e_first->dest;
133 else if (single_succ_p (e_first->dest)
134 && ! single_pred_p (single_succ (e_first->dest)))
135 m_final_bb = single_succ (e_first->dest);
136 /* Require that all switch destinations are either that common
137 FINAL_BB or a forwarder to it, except for the default
138 case if contiguous range. */
139 if (m_final_bb)
140 FOR_EACH_EDGE (e, ei, m_switch_bb->succs)
141 {
142 if (e->dest == m_final_bb)
143 continue;
144
145 if (single_pred_p (e->dest)
146 && single_succ_p (e->dest)
147 && single_succ (e->dest) == m_final_bb)
148 continue;
149
150 if (e == e_default && m_contiguous_range)
151 {
152 m_default_case_nonstandard = true;
153 continue;
154 }
155
156 m_final_bb = NULL;
157 break;
158 }
159
160 m_range_size
161 = int_const_binop (MINUS_EXPR, m_range_max, m_range_min);
162
163 /* Get a count of the number of case labels. Single-valued case labels
164 simply count as one, but a case range counts double, since it may
165 require two compares if it gets lowered as a branching tree. */
166 m_count = 0;
167 for (i = 1; i < branch_num; i++)
168 {
169 tree elt = gimple_switch_label (swtch, i);
170 m_count++;
171 if (CASE_HIGH (elt)
172 && ! tree_int_cst_equal (CASE_LOW (elt), CASE_HIGH (elt)))
173 m_count++;
174 }
175
176 /* Get the number of unique non-default targets out of the GIMPLE_SWITCH
177 block. Assume a CFG cleanup would have already removed degenerate
178 switch statements; this allows us to just use EDGE_COUNT. */
179 m_uniq = EDGE_COUNT (gimple_bb (swtch)->succs) - 1;
180 }
181
182 /* Checks whether the range covered by the case labels of the switch
183 statement isn't too big and whether the number of branches is
184 sufficient relative to the size of the new array. */
185
186 bool
187 switch_conversion::check_range ()
188 {
189 gcc_assert (m_range_size);
190 if (!tree_fits_uhwi_p (m_range_size))
191 {
192 m_reason = "index range way too large or otherwise unusable";
193 return false;
194 }
195
196 if (tree_to_uhwi (m_range_size)
197 > ((unsigned) m_count * SWITCH_CONVERSION_BRANCH_RATIO))
198 {
199 m_reason = "the maximum range-branch ratio exceeded";
200 return false;
201 }
202
203 return true;
204 }
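/* Worked example for the check above (illustrative numbers only): a switch
   with the three cases 0, 7 and 1000 has m_count == 3 and a range size of
   1000; with SWITCH_CONVERSION_BRANCH_RATIO == 8 the limit is 3 * 8 == 24,
   so such a switch is rejected here.  */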
205
206 /* Checks whether all basic blocks, except the final one, are empty. */
207
208 bool
209 switch_conversion::check_all_empty_except_final ()
210 {
211 edge e, e_default = find_edge (m_switch_bb, m_default_bb);
212 edge_iterator ei;
213
214 FOR_EACH_EDGE (e, ei, m_switch_bb->succs)
215 {
216 if (e->dest == m_final_bb)
217 continue;
218
219 if (!empty_block_p (e->dest))
220 {
221 if (m_contiguous_range && e == e_default)
222 {
223 m_default_case_nonstandard = true;
224 continue;
225 }
226
227 m_reason = "bad case - a non-final BB not empty";
228 return false;
229 }
230 }
231
232 return true;
233 }
234
235 /* This function checks whether all required values in phi nodes in final_bb
236 are constants. Required values are those that correspond to a basic block
237 which is a part of the examined switch statement. It returns true if the
238 phi nodes are OK, otherwise false. */
239
240 bool
241 switch_conversion::check_final_bb ()
242 {
243 gphi_iterator gsi;
244
245 m_phi_count = 0;
246 for (gsi = gsi_start_phis (m_final_bb); !gsi_end_p (gsi); gsi_next (&gsi))
247 {
248 gphi *phi = gsi.phi ();
249 unsigned int i;
250
251 if (virtual_operand_p (gimple_phi_result (phi)))
252 continue;
253
254 m_phi_count++;
255
256 for (i = 0; i < gimple_phi_num_args (phi); i++)
257 {
258 basic_block bb = gimple_phi_arg_edge (phi, i)->src;
259
260 if (bb == m_switch_bb
261 || (single_pred_p (bb)
262 && single_pred (bb) == m_switch_bb
263 && (!m_default_case_nonstandard
264 || empty_block_p (bb))))
265 {
266 tree reloc, val;
267 const char *reason = NULL;
268
269 val = gimple_phi_arg_def (phi, i);
270 if (!is_gimple_ip_invariant (val))
271 reason = "non-invariant value from a case";
272 else
273 {
274 reloc = initializer_constant_valid_p (val, TREE_TYPE (val));
275 if ((flag_pic && reloc != null_pointer_node)
276 || (!flag_pic && reloc == NULL_TREE))
277 {
278 if (reloc)
279 reason
280 = "value from a case would need runtime relocations";
281 else
282 reason
283 = "value from a case is not a valid initializer";
284 }
285 }
286 if (reason)
287 {
288 /* For a contiguous range, we can allow a non-constant value
289 or one that needs relocation, as long as it is
290 only reachable from the default case. */
291 if (bb == m_switch_bb)
292 bb = m_final_bb;
293 if (!m_contiguous_range || bb != m_default_bb)
294 {
295 m_reason = reason;
296 return false;
297 }
298
299 unsigned int branch_num = gimple_switch_num_labels (m_switch);
300 for (unsigned int i = 1; i < branch_num; i++)
301 {
302 if (gimple_switch_label_bb (cfun, m_switch, i) == bb)
303 {
304 m_reason = reason;
305 return false;
306 }
307 }
308 m_default_case_nonstandard = true;
309 }
310 }
311 }
312 }
313
314 return true;
315 }
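/* A hypothetical example of what the check above tolerates: in

     switch (i)
       {
       case 0: x = 16; break;
       case 1: x = 32; break;
       default: x = foo ();
       }

   the values reaching the PHI node for x from the case edges are constants,
   while the default edge provides a non-invariant value.  Because the case
   range {0, 1} is contiguous and the problematic value is only reachable
   through the default case, the conversion can still go ahead with
   m_default_case_nonstandard set (foo is just a placeholder here).  */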
316
317 /* The following function allocates default_values, target_{in,out}_names and
318 constructors arrays. The last one is also populated with pointers to
319 vectors that will become constructors of new arrays. */
320
321 void
322 switch_conversion::create_temp_arrays ()
323 {
324 int i;
325
326 m_default_values = XCNEWVEC (tree, m_phi_count * 3);
327 /* ??? Macros do not support multi argument templates in their
328 argument list. We create a typedef to work around that problem. */
329 typedef vec<constructor_elt, va_gc> *vec_constructor_elt_gc;
330 m_constructors = XCNEWVEC (vec_constructor_elt_gc, m_phi_count);
331 m_target_inbound_names = m_default_values + m_phi_count;
332 m_target_outbound_names = m_target_inbound_names + m_phi_count;
333 for (i = 0; i < m_phi_count; i++)
334 vec_alloc (m_constructors[i], tree_to_uhwi (m_range_size) + 1);
335 }
336
337 /* Populate the array of default values in the order of phi nodes.
338 DEFAULT_CASE is the CASE_LABEL_EXPR for the default switch branch
339 if the range is non-contiguous or the default case has standard
340 structure, otherwise it is the first non-default case instead. */
341
342 void
343 switch_conversion::gather_default_values (tree default_case)
344 {
345 gphi_iterator gsi;
346 basic_block bb = label_to_block (cfun, CASE_LABEL (default_case));
347 edge e;
348 int i = 0;
349
350 gcc_assert (CASE_LOW (default_case) == NULL_TREE
351 || m_default_case_nonstandard);
352
353 if (bb == m_final_bb)
354 e = find_edge (m_switch_bb, bb);
355 else
356 e = single_succ_edge (bb);
357
358 for (gsi = gsi_start_phis (m_final_bb); !gsi_end_p (gsi); gsi_next (&gsi))
359 {
360 gphi *phi = gsi.phi ();
361 if (virtual_operand_p (gimple_phi_result (phi)))
362 continue;
363 tree val = PHI_ARG_DEF_FROM_EDGE (phi, e);
364 gcc_assert (val);
365 m_default_values[i++] = val;
366 }
367 }
368
369 /* The following function populates the vectors in the constructors array with
370 future contents of the static arrays. The vectors are populated in the
371 order of phi nodes. */
372
373 void
374 switch_conversion::build_constructors ()
375 {
376 unsigned i, branch_num = gimple_switch_num_labels (m_switch);
377 tree pos = m_range_min;
378 tree pos_one = build_int_cst (TREE_TYPE (pos), 1);
379
380 for (i = 1; i < branch_num; i++)
381 {
382 tree cs = gimple_switch_label (m_switch, i);
383 basic_block bb = label_to_block (cfun, CASE_LABEL (cs));
384 edge e;
385 tree high;
386 gphi_iterator gsi;
387 int j;
388
389 if (bb == m_final_bb)
390 e = find_edge (m_switch_bb, bb);
391 else
392 e = single_succ_edge (bb);
393 gcc_assert (e);
394
395 while (tree_int_cst_lt (pos, CASE_LOW (cs)))
396 {
397 int k;
398 for (k = 0; k < m_phi_count; k++)
399 {
400 constructor_elt elt;
401
402 elt.index = int_const_binop (MINUS_EXPR, pos, m_range_min);
403 elt.value
404 = unshare_expr_without_location (m_default_values[k]);
405 m_constructors[k]->quick_push (elt);
406 }
407
408 pos = int_const_binop (PLUS_EXPR, pos, pos_one);
409 }
410 gcc_assert (tree_int_cst_equal (pos, CASE_LOW (cs)));
411
412 j = 0;
413 if (CASE_HIGH (cs))
414 high = CASE_HIGH (cs);
415 else
416 high = CASE_LOW (cs);
417 for (gsi = gsi_start_phis (m_final_bb);
418 !gsi_end_p (gsi); gsi_next (&gsi))
419 {
420 gphi *phi = gsi.phi ();
421 if (virtual_operand_p (gimple_phi_result (phi)))
422 continue;
423 tree val = PHI_ARG_DEF_FROM_EDGE (phi, e);
424 tree low = CASE_LOW (cs);
425 pos = CASE_LOW (cs);
426
427 do
428 {
429 constructor_elt elt;
430
431 elt.index = int_const_binop (MINUS_EXPR, pos, m_range_min);
432 elt.value = unshare_expr_without_location (val);
433 m_constructors[j]->quick_push (elt);
434
435 pos = int_const_binop (PLUS_EXPR, pos, pos_one);
436 } while (!tree_int_cst_lt (high, pos)
437 && tree_int_cst_lt (low, pos));
438 j++;
439 }
440 }
441 }
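/* Illustrative sketch of the filling logic above: for cases 1, 2 and 4 with
   PHI values 10, 20 and 40 and a default value of 0, the constructor vector
   for that PHI becomes

     array index:  0   1   2   3      (case value minus m_range_min == 1)
     value:       10  20   0  40

   i.e. the hole at case value 3 is plugged with the default value.  */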
442
443 /* If all values in the constructor vector are values of a linear function
444 a * x + b, then return true. When true, COEFF_A and COEFF_B are set to the
445 coefficients of the linear function. Note that constant (equal) values are
446 a special case of a linear function with A equal to zero. */
447
448 bool
449 switch_conversion::contains_linear_function_p (vec<constructor_elt, va_gc> *vec,
450 wide_int *coeff_a,
451 wide_int *coeff_b)
452 {
453 unsigned int i;
454 constructor_elt *elt;
455
456 gcc_assert (vec->length () >= 2);
457
458 /* Let's try to find a linear function a * x + b that can apply to
459 the given values. 'a' can be calculated as follows:
460
461 a = (y2 - y1) / (x2 - x1) where x2 - x1 = 1 (consecutive case indices)
462 a = y2 - y1
463
464 and
465
466 b = y2 - a * x2
467
468 */
469
470 tree elt0 = (*vec)[0].value;
471 tree elt1 = (*vec)[1].value;
472
473 if (TREE_CODE (elt0) != INTEGER_CST || TREE_CODE (elt1) != INTEGER_CST)
474 return false;
475
476 wide_int range_min
477 = wide_int::from (wi::to_wide (m_range_min),
478 TYPE_PRECISION (TREE_TYPE (elt0)),
479 TYPE_SIGN (TREE_TYPE (m_range_min)));
480 wide_int y1 = wi::to_wide (elt0);
481 wide_int y2 = wi::to_wide (elt1);
482 wide_int a = y2 - y1;
483 wide_int b = y2 - a * (range_min + 1);
484
485 /* Verify that all values fulfill the linear function. */
486 FOR_EACH_VEC_SAFE_ELT (vec, i, elt)
487 {
488 if (TREE_CODE (elt->value) != INTEGER_CST)
489 return false;
490
491 wide_int value = wi::to_wide (elt->value);
492 if (a * range_min + b != value)
493 return false;
494
495 ++range_min;
496 }
497
498 *coeff_a = a;
499 *coeff_b = b;
500
501 return true;
502 }
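/* Worked example (illustrative only): for case values 0, 1 and 2 with
   constructor values 10, 13 and 16, the code above computes
   a = 13 - 10 = 3 and b = 13 - 3 * 1 = 10, and every entry satisfies
   3 * x + 10, so build_one_array can emit the arithmetic
   value = 3 * index + 10 instead of materializing a CSWTCH array.  */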
503
504 /* Return type which should be used for array elements, either TYPE's
505 main variant or, for integral types, some smaller integral type
506 that can still hold all the constants. */
507
508 tree
509 switch_conversion::array_value_type (tree type, int num)
510 {
511 unsigned int i, len = vec_safe_length (m_constructors[num]);
512 constructor_elt *elt;
513 int sign = 0;
514 tree smaller_type;
515
516 /* Types with alignments greater than their size can reach here, e.g. out of
517 SRA. We couldn't use these as an array component type so get back to the
518 main variant first, which, for our purposes, is fine for other types as
519 well. */
520
521 type = TYPE_MAIN_VARIANT (type);
522
523 if (!INTEGRAL_TYPE_P (type))
524 return type;
525
526 scalar_int_mode type_mode = SCALAR_INT_TYPE_MODE (type);
527 scalar_int_mode mode = get_narrowest_mode (type_mode);
528 if (GET_MODE_SIZE (type_mode) <= GET_MODE_SIZE (mode))
529 return type;
530
531 if (len < (optimize_bb_for_size_p (gimple_bb (m_switch)) ? 2 : 32))
532 return type;
533
534 FOR_EACH_VEC_SAFE_ELT (m_constructors[num], i, elt)
535 {
536 wide_int cst;
537
538 if (TREE_CODE (elt->value) != INTEGER_CST)
539 return type;
540
541 cst = wi::to_wide (elt->value);
542 while (1)
543 {
544 unsigned int prec = GET_MODE_BITSIZE (mode);
545 if (prec > HOST_BITS_PER_WIDE_INT)
546 return type;
547
548 if (sign >= 0 && cst == wi::zext (cst, prec))
549 {
550 if (sign == 0 && cst == wi::sext (cst, prec))
551 break;
552 sign = 1;
553 break;
554 }
555 if (sign <= 0 && cst == wi::sext (cst, prec))
556 {
557 sign = -1;
558 break;
559 }
560
561 if (sign == 1)
562 sign = 0;
563
564 if (!GET_MODE_WIDER_MODE (mode).exists (&mode)
565 || GET_MODE_SIZE (mode) >= GET_MODE_SIZE (type_mode))
566 return type;
567 }
568 }
569
570 if (sign == 0)
571 sign = TYPE_UNSIGNED (type) ? 1 : -1;
572 smaller_type = lang_hooks.types.type_for_mode (mode, sign >= 0);
573 if (GET_MODE_SIZE (type_mode)
574 <= GET_MODE_SIZE (SCALAR_INT_TYPE_MODE (smaller_type)))
575 return type;
576
577 return smaller_type;
578 }
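/* For instance (assuming a target where int is wider than QImode): if every
   constructor value of an int-typed PHI fits into QImode, the CSWTCH array
   can be emitted with the QImode element type and the loaded value converted
   back to int, shrinking the table correspondingly.  */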
579
580 /* Create an appropriate array type and declaration and assemble a static
581 array variable. Also create a load statement that initializes
582 the variable in question with a value from the static array. SWTCH is
583 the switch statement being converted, NUM is the index to
584 arrays of constructors, default values and target SSA names
585 for this particular array. ARR_INDEX_TYPE is the type of the index
586 of the new array, PHI is the phi node of the final BB that corresponds
587 to the value that will be loaded from the created array. TIDX
588 is an ssa name of a temporary variable holding the index for loads from the
589 new array. */
590
591 void
592 switch_conversion::build_one_array (int num, tree arr_index_type,
593 gphi *phi, tree tidx)
594 {
595 tree name;
596 gimple *load;
597 gimple_stmt_iterator gsi = gsi_for_stmt (m_switch);
598 location_t loc = gimple_location (m_switch);
599
600 gcc_assert (m_default_values[num]);
601
602 name = copy_ssa_name (PHI_RESULT (phi));
603 m_target_inbound_names[num] = name;
604
605 vec<constructor_elt, va_gc> *constructor = m_constructors[num];
606 wide_int coeff_a, coeff_b;
607 bool linear_p = contains_linear_function_p (constructor, &coeff_a, &coeff_b);
608 if (linear_p)
609 {
610 if (dump_file && coeff_a.to_uhwi () > 0)
611 fprintf (dump_file, "Linear transformation with A = %" PRId64
612 " and B = %" PRId64 "\n", coeff_a.to_shwi (),
613 coeff_b.to_shwi ());
614
615 /* We must use the type of the constructor values. */
616 tree t = unsigned_type_for (TREE_TYPE ((*constructor)[0].value));
617 gimple_seq seq = NULL;
618 tree tmp = gimple_convert (&seq, t, m_index_expr);
619 tree tmp2 = gimple_build (&seq, MULT_EXPR, t,
620 wide_int_to_tree (t, coeff_a), tmp);
621 tree tmp3 = gimple_build (&seq, PLUS_EXPR, t, tmp2,
622 wide_int_to_tree (t, coeff_b));
623 tree tmp4 = gimple_convert (&seq, TREE_TYPE (name), tmp3);
624 gsi_insert_seq_before (&gsi, seq, GSI_SAME_STMT);
625 load = gimple_build_assign (name, tmp4);
626 }
627 else
628 {
629 tree array_type, ctor, decl, value_type, fetch, default_type;
630
631 default_type = TREE_TYPE (m_default_values[num]);
632 value_type = array_value_type (default_type, num);
633 array_type = build_array_type (value_type, arr_index_type);
634 if (default_type != value_type)
635 {
636 unsigned int i;
637 constructor_elt *elt;
638
639 FOR_EACH_VEC_SAFE_ELT (constructor, i, elt)
640 elt->value = fold_convert (value_type, elt->value);
641 }
642 ctor = build_constructor (array_type, constructor);
643 TREE_CONSTANT (ctor) = true;
644 TREE_STATIC (ctor) = true;
645
646 decl = build_decl (loc, VAR_DECL, NULL_TREE, array_type);
647 TREE_STATIC (decl) = 1;
648 DECL_INITIAL (decl) = ctor;
649
650 DECL_NAME (decl) = create_tmp_var_name ("CSWTCH");
651 DECL_ARTIFICIAL (decl) = 1;
652 DECL_IGNORED_P (decl) = 1;
653 TREE_CONSTANT (decl) = 1;
654 TREE_READONLY (decl) = 1;
655 DECL_IGNORED_P (decl) = 1;
656 if (offloading_function_p (cfun->decl))
657 DECL_ATTRIBUTES (decl)
658 = tree_cons (get_identifier ("omp declare target"), NULL_TREE,
659 NULL_TREE);
660 varpool_node::finalize_decl (decl);
661
662 fetch = build4 (ARRAY_REF, value_type, decl, tidx, NULL_TREE,
663 NULL_TREE);
664 if (default_type != value_type)
665 {
666 fetch = fold_convert (default_type, fetch);
667 fetch = force_gimple_operand_gsi (&gsi, fetch, true, NULL_TREE,
668 true, GSI_SAME_STMT);
669 }
670 load = gimple_build_assign (name, fetch);
671 }
672
673 gsi_insert_before (&gsi, load, GSI_SAME_STMT);
674 update_stmt (load);
675 m_arr_ref_last = load;
676 }
677
678 /* Builds static arrays initialized with values gathered from the switch
679 statement. Also creates statements that load values from
680 them. */
681
682 void
683 switch_conversion::build_arrays ()
684 {
685 tree arr_index_type;
686 tree tidx, sub, utype;
687 gimple *stmt;
688 gimple_stmt_iterator gsi;
689 gphi_iterator gpi;
690 int i;
691 location_t loc = gimple_location (m_switch);
692
693 gsi = gsi_for_stmt (m_switch);
694
695 /* Make sure we do not generate arithmetic in a subrange. */
696 utype = TREE_TYPE (m_index_expr);
697 if (TREE_TYPE (utype))
698 utype = lang_hooks.types.type_for_mode (TYPE_MODE (TREE_TYPE (utype)), 1);
699 else
700 utype = lang_hooks.types.type_for_mode (TYPE_MODE (utype), 1);
701
702 arr_index_type = build_index_type (m_range_size);
703 tidx = make_ssa_name (utype);
704 sub = fold_build2_loc (loc, MINUS_EXPR, utype,
705 fold_convert_loc (loc, utype, m_index_expr),
706 fold_convert_loc (loc, utype, m_range_min));
707 sub = force_gimple_operand_gsi (&gsi, sub,
708 false, NULL, true, GSI_SAME_STMT);
709 stmt = gimple_build_assign (tidx, sub);
710
711 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
712 update_stmt (stmt);
713 m_arr_ref_first = stmt;
714
715 for (gpi = gsi_start_phis (m_final_bb), i = 0;
716 !gsi_end_p (gpi); gsi_next (&gpi))
717 {
718 gphi *phi = gpi.phi ();
719 if (!virtual_operand_p (gimple_phi_result (phi)))
720 build_one_array (i++, arr_index_type, phi, tidx);
721 else
722 {
723 edge e;
724 edge_iterator ei;
725 FOR_EACH_EDGE (e, ei, m_switch_bb->succs)
726 {
727 if (e->dest == m_final_bb)
728 break;
729 if (!m_default_case_nonstandard
730 || e->dest != m_default_bb)
731 {
732 e = single_succ_edge (e->dest);
733 break;
734 }
735 }
736 gcc_assert (e && e->dest == m_final_bb);
737 m_target_vop = PHI_ARG_DEF_FROM_EDGE (phi, e);
738 }
739 }
740 }
741
742 /* Generates and appropriately inserts loads of default values at the position
743 given by GSI. Returns the last inserted statement. */
744
745 gassign *
746 switch_conversion::gen_def_assigns (gimple_stmt_iterator *gsi)
747 {
748 int i;
749 gassign *assign = NULL;
750
751 for (i = 0; i < m_phi_count; i++)
752 {
753 tree name = copy_ssa_name (m_target_inbound_names[i]);
754 m_target_outbound_names[i] = name;
755 assign = gimple_build_assign (name, m_default_values[i]);
756 gsi_insert_before (gsi, assign, GSI_SAME_STMT);
757 update_stmt (assign);
758 }
759 return assign;
760 }
761
762 /* Deletes the unused bbs and edges that now contain the switch statement and
763 its empty branch bbs. BBD is the now dead BB containing
764 the original switch statement, FINAL is the last BB of the converted
765 switch statement (in terms of succession). */
766
767 void
768 switch_conversion::prune_bbs (basic_block bbd, basic_block final,
769 basic_block default_bb)
770 {
771 edge_iterator ei;
772 edge e;
773
774 for (ei = ei_start (bbd->succs); (e = ei_safe_edge (ei)); )
775 {
776 basic_block bb;
777 bb = e->dest;
778 remove_edge (e);
779 if (bb != final && bb != default_bb)
780 delete_basic_block (bb);
781 }
782 delete_basic_block (bbd);
783 }
784
785 /* Add values to phi nodes in final_bb for the two new edges. E1F is the edge
786 from the basic block loading values from an array and E2F from the basic
787 block loading default values. BBF is the last switch basic block (see the
788 bbf description in the comment below). */
789
790 void
791 switch_conversion::fix_phi_nodes (edge e1f, edge e2f, basic_block bbf)
792 {
793 gphi_iterator gsi;
794 int i;
795
796 for (gsi = gsi_start_phis (bbf), i = 0;
797 !gsi_end_p (gsi); gsi_next (&gsi))
798 {
799 gphi *phi = gsi.phi ();
800 tree inbound, outbound;
801 if (virtual_operand_p (gimple_phi_result (phi)))
802 inbound = outbound = m_target_vop;
803 else
804 {
805 inbound = m_target_inbound_names[i];
806 outbound = m_target_outbound_names[i++];
807 }
808 add_phi_arg (phi, inbound, e1f, UNKNOWN_LOCATION);
809 if (!m_default_case_nonstandard)
810 add_phi_arg (phi, outbound, e2f, UNKNOWN_LOCATION);
811 }
812 }
813
814 /* Creates a check whether the switch expression value actually falls into the
815 range given by all the cases. If it does not, the temporaries are loaded
816 with default values instead. */
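/* A rough source-level sketch of the shape produced here and by the helpers
   above (the real output is GIMPLE spread over the blocks bb0, bb1, bb2 and
   bbF created below; names other than tidx and CSWTCH are illustrative):

     tidx = (unsigned) index - range_min;
     if (tidx <= range_size)
       value = CSWTCH[tidx];       // loads built by build_arrays
     else
       value = default_value;      // assignments built by gen_def_assigns
     // the final block merges the two values through a PHI node.  */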
817
818 void
819 switch_conversion::gen_inbound_check ()
820 {
821 tree label_decl1 = create_artificial_label (UNKNOWN_LOCATION);
822 tree label_decl2 = create_artificial_label (UNKNOWN_LOCATION);
823 tree label_decl3 = create_artificial_label (UNKNOWN_LOCATION);
824 glabel *label1, *label2, *label3;
825 tree utype, tidx;
826 tree bound;
827
828 gcond *cond_stmt;
829
830 gassign *last_assign = NULL;
831 gimple_stmt_iterator gsi;
832 basic_block bb0, bb1, bb2, bbf, bbd;
833 edge e01 = NULL, e02, e21, e1d, e1f, e2f;
834 location_t loc = gimple_location (m_switch);
835
836 gcc_assert (m_default_values);
837
838 bb0 = gimple_bb (m_switch);
839
840 tidx = gimple_assign_lhs (m_arr_ref_first);
841 utype = TREE_TYPE (tidx);
842
843 /* (end of) block 0 */
844 gsi = gsi_for_stmt (m_arr_ref_first);
845 gsi_next (&gsi);
846
847 bound = fold_convert_loc (loc, utype, m_range_size);
848 cond_stmt = gimple_build_cond (LE_EXPR, tidx, bound, NULL_TREE, NULL_TREE);
849 gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT);
850 update_stmt (cond_stmt);
851
852 /* block 2 */
853 if (!m_default_case_nonstandard)
854 {
855 label2 = gimple_build_label (label_decl2);
856 gsi_insert_before (&gsi, label2, GSI_SAME_STMT);
857 last_assign = gen_def_assigns (&gsi);
858 }
859
860 /* block 1 */
861 label1 = gimple_build_label (label_decl1);
862 gsi_insert_before (&gsi, label1, GSI_SAME_STMT);
863
864 /* block F */
865 gsi = gsi_start_bb (m_final_bb);
866 label3 = gimple_build_label (label_decl3);
867 gsi_insert_before (&gsi, label3, GSI_SAME_STMT);
868
869 /* cfg fix */
870 e02 = split_block (bb0, cond_stmt);
871 bb2 = e02->dest;
872
873 if (m_default_case_nonstandard)
874 {
875 bb1 = bb2;
876 bb2 = m_default_bb;
877 e01 = e02;
878 e01->flags = EDGE_TRUE_VALUE;
879 e02 = make_edge (bb0, bb2, EDGE_FALSE_VALUE);
880 edge e_default = find_edge (bb1, bb2);
881 for (gphi_iterator gsi = gsi_start_phis (bb2);
882 !gsi_end_p (gsi); gsi_next (&gsi))
883 {
884 gphi *phi = gsi.phi ();
885 tree arg = PHI_ARG_DEF_FROM_EDGE (phi, e_default);
886 add_phi_arg (phi, arg, e02,
887 gimple_phi_arg_location_from_edge (phi, e_default));
888 }
889 /* Partially fix the dominator tree, if it is available. */
890 if (dom_info_available_p (CDI_DOMINATORS))
891 redirect_immediate_dominators (CDI_DOMINATORS, bb1, bb0);
892 }
893 else
894 {
895 e21 = split_block (bb2, last_assign);
896 bb1 = e21->dest;
897 remove_edge (e21);
898 }
899
900 e1d = split_block (bb1, m_arr_ref_last);
901 bbd = e1d->dest;
902 remove_edge (e1d);
903
904 /* Flags and profiles of the edge for in-range values. */
905 if (!m_default_case_nonstandard)
906 e01 = make_edge (bb0, bb1, EDGE_TRUE_VALUE);
907 e01->probability = m_default_prob.invert ();
908
909 /* Flags and profiles of the edge taking care of out-of-range values. */
910 e02->flags &= ~EDGE_FALLTHRU;
911 e02->flags |= EDGE_FALSE_VALUE;
912 e02->probability = m_default_prob;
913
914 bbf = m_final_bb;
915
916 e1f = make_edge (bb1, bbf, EDGE_FALLTHRU);
917 e1f->probability = profile_probability::always ();
918
919 if (m_default_case_nonstandard)
920 e2f = NULL;
921 else
922 {
923 e2f = make_edge (bb2, bbf, EDGE_FALLTHRU);
924 e2f->probability = profile_probability::always ();
925 }
926
927 /* frequencies of the new BBs */
928 bb1->count = e01->count ();
929 bb2->count = e02->count ();
930 if (!m_default_case_nonstandard)
931 bbf->count = e1f->count () + e2f->count ();
932
933 /* Tidy blocks that have become unreachable. */
934 prune_bbs (bbd, m_final_bb,
935 m_default_case_nonstandard ? m_default_bb : NULL);
936
937 /* Fixup the PHI nodes in bbF. */
938 fix_phi_nodes (e1f, e2f, bbf);
939
940 /* Fix the dominator tree, if it is available. */
941 if (dom_info_available_p (CDI_DOMINATORS))
942 {
943 vec<basic_block> bbs_to_fix_dom;
944
945 set_immediate_dominator (CDI_DOMINATORS, bb1, bb0);
946 if (!m_default_case_nonstandard)
947 set_immediate_dominator (CDI_DOMINATORS, bb2, bb0);
948 if (! get_immediate_dominator (CDI_DOMINATORS, bbf))
949 /* If bbD was the immediate dominator ... */
950 set_immediate_dominator (CDI_DOMINATORS, bbf, bb0);
951
952 bbs_to_fix_dom.create (3 + (bb2 != bbf));
953 bbs_to_fix_dom.quick_push (bb0);
954 bbs_to_fix_dom.quick_push (bb1);
955 if (bb2 != bbf)
956 bbs_to_fix_dom.quick_push (bb2);
957 bbs_to_fix_dom.quick_push (bbf);
958
959 iterate_fix_dominators (CDI_DOMINATORS, bbs_to_fix_dom, true);
960 bbs_to_fix_dom.release ();
961 }
962 }
963
964 /* The following function is invoked on every switch statement (the current
965 one is given in SWTCH) and runs the individual phases of switch
966 conversion on it one after another until one fails or the conversion
967 is completed. On success, m_reason is NULL; otherwise it points
968 to a string with the reason why the conversion failed. */
969
970 void
971 switch_conversion::expand (gswitch *swtch)
972 {
973 /* Group case labels so that we get the right results from the heuristics
974 that decide on the code generation approach for this switch. */
975 m_cfg_altered |= group_case_labels_stmt (swtch);
976
977 /* If this switch is now a degenerate case with only a default label,
978 there is nothing left for us to do. */
979 if (gimple_switch_num_labels (swtch) < 2)
980 {
981 m_reason = "switch is a degenerate case";
982 return;
983 }
984
985 collect (swtch);
986
987 /* No error markers should reach here (they should be filtered out
988 during gimplification). */
989 gcc_checking_assert (TREE_TYPE (m_index_expr) != error_mark_node);
990
991 /* A switch on a constant should have been optimized in tree-cfg-cleanup. */
992 gcc_checking_assert (!TREE_CONSTANT (m_index_expr));
993
994 /* Prefer bit test if possible. */
995 if (tree_fits_uhwi_p (m_range_size)
996 && bit_test_cluster::can_be_handled (tree_to_uhwi (m_range_size), m_uniq)
997 && bit_test_cluster::is_beneficial (m_count, m_uniq))
998 {
999 m_reason = "expanding as bit test is preferable";
1000 return;
1001 }
1002
1003 if (m_uniq <= 2)
1004 {
1005 /* This will be expanded as a decision tree. */
1006 m_reason = "expanding as jumps is preferable";
1007 return;
1008 }
1009
1010 /* If there is no common successor, we cannot do the transformation. */
1011 if (!m_final_bb)
1012 {
1013 m_reason = "no common successor to all case label target blocks found";
1014 return;
1015 }
1016
1017 /* Check that the case label values are within a reasonable range. */
1018 if (!check_range ())
1019 {
1020 gcc_assert (m_reason);
1021 return;
1022 }
1023
1024 /* For all the cases, see whether their target blocks are empty, whether
1025 the values they assign are constant, and so on. */
1026 if (!check_all_empty_except_final ())
1027 {
1028 gcc_assert (m_reason);
1029 return;
1030 }
1031 if (!check_final_bb ())
1032 {
1033 gcc_assert (m_reason);
1034 return;
1035 }
1036
1037 /* At this point all checks have passed and we can proceed with the
1038 transformation. */
1039
1040 create_temp_arrays ();
1041 gather_default_values (m_default_case_nonstandard
1042 ? gimple_switch_label (swtch, 1)
1043 : gimple_switch_default_label (swtch));
1044 build_constructors ();
1045
1046 build_arrays (); /* Build the static arrays and assignments. */
1047 gen_inbound_check (); /* Build the bounds check. */
1048
1049 m_cfg_altered = true;
1050 }
1051
1052 /* Destructor. */
1053
1054 switch_conversion::~switch_conversion ()
1055 {
1056 XDELETEVEC (m_constructors);
1057 XDELETEVEC (m_default_values);
1058 }
1059
1060 /* Constructor. */
1061
1062 group_cluster::group_cluster (vec<cluster *> &clusters,
1063 unsigned start, unsigned end)
1064 {
1065 gcc_checking_assert (end - start + 1 >= 1);
1066 m_prob = profile_probability::never ();
1067 m_cases.create (end - start + 1);
1068 for (unsigned i = start; i <= end; i++)
1069 {
1070 m_cases.quick_push (static_cast<simple_cluster *> (clusters[i]));
1071 m_prob += clusters[i]->m_prob;
1072 }
1073 m_subtree_prob = m_prob;
1074 }
1075
1076 /* Destructor. */
1077
1078 group_cluster::~group_cluster ()
1079 {
1080 for (unsigned i = 0; i < m_cases.length (); i++)
1081 delete m_cases[i];
1082
1083 m_cases.release ();
1084 }
1085
1086 /* Dump content of a cluster. */
1087
1088 void
1089 group_cluster::dump (FILE *f, bool details)
1090 {
1091 unsigned total_values = 0;
1092 for (unsigned i = 0; i < m_cases.length (); i++)
1093 total_values += m_cases[i]->get_range (m_cases[i]->get_low (),
1094 m_cases[i]->get_high ());
1095
1096 unsigned comparison_count = 0;
1097 for (unsigned i = 0; i < m_cases.length (); i++)
1098 {
1099 simple_cluster *sc = static_cast<simple_cluster *> (m_cases[i]);
1100 comparison_count += sc->m_range_p ? 2 : 1;
1101 }
1102
1103 unsigned HOST_WIDE_INT range = get_range (get_low (), get_high ());
1104 fprintf (f, "%s", get_type () == JUMP_TABLE ? "JT" : "BT");
1105
1106 if (details)
1107 fprintf (f, "(values:%d comparisons:%d range:" HOST_WIDE_INT_PRINT_DEC
1108 " density: %.2f%%)", total_values, comparison_count, range,
1109 100.0f * comparison_count / range);
1110
1111 fprintf (f, ":");
1112 PRINT_CASE (f, get_low ());
1113 fprintf (f, "-");
1114 PRINT_CASE (f, get_high ());
1115 fprintf (f, " ");
1116 }
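/* For example, a cluster printed as "JT:10-20" denotes a jump table covering
   case values 10 through 20, and "BT:..." marks a bit-test cluster; with
   TDF_DETAILS the number of values, the comparison count and the density are
   printed as well.  */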
1117
1118 /* Emit GIMPLE code to handle the cluster. */
1119
1120 void
1121 jump_table_cluster::emit (tree index_expr, tree,
1122 tree default_label_expr, basic_block default_bb)
1123 {
1124 unsigned HOST_WIDE_INT range = get_range (get_low (), get_high ());
1125 unsigned HOST_WIDE_INT nondefault_range = 0;
1126
1127 /* For a jump table we just emit a new gswitch statement that will
1128 later be lowered to a jump table. */
1129 auto_vec <tree> labels;
1130 labels.create (m_cases.length ());
1131
1132 make_edge (m_case_bb, default_bb, 0);
1133 for (unsigned i = 0; i < m_cases.length (); i++)
1134 {
1135 labels.quick_push (unshare_expr (m_cases[i]->m_case_label_expr));
1136 make_edge (m_case_bb, m_cases[i]->m_case_bb, 0);
1137 }
1138
1139 gswitch *s = gimple_build_switch (index_expr,
1140 unshare_expr (default_label_expr), labels);
1141 gimple_stmt_iterator gsi = gsi_start_bb (m_case_bb);
1142 gsi_insert_after (&gsi, s, GSI_NEW_STMT);
1143
1144 /* Set up even probabilities for all cases. */
1145 for (unsigned i = 0; i < m_cases.length (); i++)
1146 {
1147 simple_cluster *sc = static_cast<simple_cluster *> (m_cases[i]);
1148 edge case_edge = find_edge (m_case_bb, sc->m_case_bb);
1149 unsigned HOST_WIDE_INT case_range
1150 = sc->get_range (sc->get_low (), sc->get_high ());
1151 nondefault_range += case_range;
1152
1153 /* case_edge->aux is number of values in a jump-table that are covered
1154 by the case_edge. */
1155 case_edge->aux = (void *) ((intptr_t) (case_edge->aux) + case_range);
1156 }
1157
1158 edge default_edge = gimple_switch_default_edge (cfun, s);
1159 default_edge->probability = profile_probability::never ();
1160
1161 for (unsigned i = 0; i < m_cases.length (); i++)
1162 {
1163 simple_cluster *sc = static_cast<simple_cluster *> (m_cases[i]);
1164 edge case_edge = find_edge (m_case_bb, sc->m_case_bb);
1165 case_edge->probability
1166 = profile_probability::always ().apply_scale ((intptr_t)case_edge->aux,
1167 range);
1168 }
1169
1170 /* The default edge gets the fraction of the range not covered by the cases. */
1171 default_edge->probability
1172 += profile_probability::always ().apply_scale (nondefault_range,
1173 range).invert ();
1174
1175 switch_decision_tree::reset_out_edges_aux (s);
1176 }
1177
1178 /* Find jump tables of given CLUSTERS, where all members of the vector
1179 are of type simple_cluster. New clusters are returned. */
1180
1181 vec<cluster *>
1182 jump_table_cluster::find_jump_tables (vec<cluster *> &clusters)
1183 {
1184 if (!is_enabled ())
1185 return clusters.copy ();
1186
1187 unsigned l = clusters.length ();
1188 auto_vec<min_cluster_item> min;
1189 min.reserve (l + 1);
1190
1191 min.quick_push (min_cluster_item (0, 0, 0));
1192
1193 for (unsigned i = 1; i <= l; i++)
1194 {
1195 /* Set minimal # of clusters with i-th item to infinite. */
1196 min.quick_push (min_cluster_item (INT_MAX, INT_MAX, INT_MAX));
1197
1198 for (unsigned j = 0; j < i; j++)
1199 {
1200 unsigned HOST_WIDE_INT s = min[j].m_non_jt_cases;
1201 if (i - j < case_values_threshold ())
1202 s += i - j;
1203
1204 /* Prefer fewer clusters, breaking ties by fewer cases outside jump tables. */
1205 if ((min[j].m_count + 1 < min[i].m_count
1206 || (min[j].m_count + 1 == min[i].m_count
1207 && s < min[i].m_non_jt_cases))
1208 && can_be_handled (clusters, j, i - 1))
1209 min[i] = min_cluster_item (min[j].m_count + 1, j, s);
1210 }
1211
1212 gcc_checking_assert (min[i].m_count != INT_MAX);
1213 }
1214
1215 /* No result. */
1216 if (min[l].m_count == INT_MAX)
1217 return clusters.copy ();
1218
1219 vec<cluster *> output;
1220 output.create (4);
1221
1222 /* Find and build the clusters. */
1223 for (int end = l;;)
1224 {
1225 int start = min[end].m_start;
1226
1227 /* Do not allow clusters with small number of cases. */
1228 if (is_beneficial (clusters, start, end - 1))
1229 output.safe_push (new jump_table_cluster (clusters, start, end - 1));
1230 else
1231 for (int i = end - 1; i >= start; i--)
1232 output.safe_push (clusters[i]);
1233
1234 end = start;
1235
1236 if (start <= 0)
1237 break;
1238 }
1239
1240 output.reverse ();
1241 return output;
1242 }
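/* Illustrative run (hypothetical numbers): for simple clusters covering the
   values 0, 1, 2, 3 and 100, the minimization above may settle on two
   clusters, a jump table for 0-3 and a plain simple_cluster for 100,
   provided the 0-3 group passes can_be_handled and is_beneficial for the
   target's case_values_threshold.  */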
1243
1244 /* Return true when cluster starting at START and ending at END (inclusive)
1245 can build a jump-table. */
1246
1247 bool
1248 jump_table_cluster::can_be_handled (const vec<cluster *> &clusters,
1249 unsigned start, unsigned end)
1250 {
1251 /* If the switch is relatively small such that the cost of one
1252 indirect jump on the target is higher than the cost of a
1253 decision tree, go with the decision tree.
1254
1255 If range of values is much bigger than number of values,
1256 or if it is too large to represent in a HOST_WIDE_INT,
1257 make a sequence of conditional branches instead of a dispatch.
1258
1259 The definition of "much bigger" depends on whether we are
1260 optimizing for size or for speed. */
1261 if (!flag_jump_tables)
1262 return false;
1263
1264 /* For algorithm correctness, a jump table for a single case must return
1265 true. We bail out in is_beneficial if it's called just for
1266 a single case. */
1267 if (start == end)
1268 return true;
1269
1270 unsigned HOST_WIDE_INT max_ratio
1271 = optimize_insn_for_size_p () ? max_ratio_for_size : max_ratio_for_speed;
1272 unsigned HOST_WIDE_INT range = get_range (clusters[start]->get_low (),
1273 clusters[end]->get_high ());
1274 /* Check overflow. */
1275 if (range == 0)
1276 return false;
1277
1278 unsigned HOST_WIDE_INT comparison_count = 0;
1279 for (unsigned i = start; i <= end; i++)
1280 {
1281 simple_cluster *sc = static_cast<simple_cluster *> (clusters[i]);
1282 comparison_count += sc->m_range_p ? 2 : 1;
1283 }
1284
1285 return range <= max_ratio * comparison_count;
1286 }
1287
1288 /* Return true if cluster starting at START and ending at END (inclusive)
1289 is a profitable transformation. */
1290
1291 bool
1292 jump_table_cluster::is_beneficial (const vec<cluster *> &,
1293 unsigned start, unsigned end)
1294 {
1295 /* Single case bail out. */
1296 if (start == end)
1297 return false;
1298
1299 return end - start + 1 >= case_values_threshold ();
1300 }
1301
1302 /* Definition of jump_table_cluster constants. */
1303
1304 const unsigned HOST_WIDE_INT jump_table_cluster::max_ratio_for_size;
1305 const unsigned HOST_WIDE_INT jump_table_cluster::max_ratio_for_speed;
1306
1307 /* Find bit tests of given CLUSTERS, where all members of the vector
1308 are of type simple_cluster. New clusters are returned. */
1309
1310 vec<cluster *>
1311 bit_test_cluster::find_bit_tests (vec<cluster *> &clusters)
1312 {
1313 vec<cluster *> output;
1314 output.create (4);
1315
1316 unsigned l = clusters.length ();
1317 auto_vec<min_cluster_item> min;
1318 min.reserve (l + 1);
1319
1320 min.quick_push (min_cluster_item (0, 0, 0));
1321
1322 for (unsigned i = 1; i <= l; i++)
1323 {
1324 /* Set minimal # of clusters with i-th item to infinite. */
1325 min.quick_push (min_cluster_item (INT_MAX, INT_MAX, INT_MAX));
1326
1327 for (unsigned j = 0; j < i; j++)
1328 {
1329 if (min[j].m_count + 1 < min[i].m_count
1330 && can_be_handled (clusters, j, i - 1))
1331 min[i] = min_cluster_item (min[j].m_count + 1, j, INT_MAX);
1332 }
1333
1334 gcc_checking_assert (min[i].m_count != INT_MAX);
1335 }
1336
1337 /* No result. */
1338 if (min[l].m_count == INT_MAX)
1339 return clusters.copy ();
1340
1341 /* Find and build the clusters. */
1342 for (unsigned end = l;;)
1343 {
1344 int start = min[end].m_start;
1345
1346 if (is_beneficial (clusters, start, end - 1))
1347 {
1348 bool entire = start == 0 && end == clusters.length ();
1349 output.safe_push (new bit_test_cluster (clusters, start, end - 1,
1350 entire));
1351 }
1352 else
1353 for (int i = end - 1; i >= start; i--)
1354 output.safe_push (clusters[i]);
1355
1356 end = start;
1357
1358 if (start <= 0)
1359 break;
1360 }
1361
1362 output.reverse ();
1363 return output;
1364 }
1365
1366 /* Return true when RANGE of case values with UNIQ labels
1367 can build a bit test. */
1368
1369 bool
1370 bit_test_cluster::can_be_handled (unsigned HOST_WIDE_INT range,
1371 unsigned int uniq)
1372 {
1373 /* Check overflow. */
1374 if (range == 0)
1375 return 0;
1376
1377 if (range >= GET_MODE_BITSIZE (word_mode))
1378 return false;
1379
1380 return uniq <= 3;
1381 }
1382
1383 /* Return true when cluster starting at START and ending at END (inclusive)
1384 can build a bit test. */
1385
1386 bool
1387 bit_test_cluster::can_be_handled (const vec<cluster *> &clusters,
1388 unsigned start, unsigned end)
1389 {
1390 /* For algorithm correctness, a bit test for a single case must return
1391 true. We bail out in is_beneficial if it's called just for
1392 a single case. */
1393 if (start == end)
1394 return true;
1395
1396 unsigned HOST_WIDE_INT range = get_range (clusters[start]->get_low (),
1397 clusters[end]->get_high ());
1398 auto_bitmap dest_bbs;
1399
1400 for (unsigned i = start; i <= end; i++)
1401 {
1402 simple_cluster *sc = static_cast<simple_cluster *> (clusters[i]);
1403 bitmap_set_bit (dest_bbs, sc->m_case_bb->index);
1404 }
1405
1406 return can_be_handled (range, bitmap_count_bits (dest_bbs));
1407 }
1408
1409 /* Return true when COUNT of cases of UNIQ labels is beneficial for bit test
1410 transformation. */
1411
1412 bool
1413 bit_test_cluster::is_beneficial (unsigned count, unsigned uniq)
1414 {
1415 return (((uniq == 1 && count >= 3)
1416 || (uniq == 2 && count >= 5)
1417 || (uniq == 3 && count >= 6)));
1418 }
1419
1420 /* Return true if cluster starting at START and ending at END (inclusive)
1421 is a profitable transformation. */
1422
1423 bool
1424 bit_test_cluster::is_beneficial (const vec<cluster *> &clusters,
1425 unsigned start, unsigned end)
1426 {
1427 /* Single case bail out. */
1428 if (start == end)
1429 return false;
1430
1431 auto_bitmap dest_bbs;
1432
1433 for (unsigned i = start; i <= end; i++)
1434 {
1435 simple_cluster *sc = static_cast<simple_cluster *> (clusters[i]);
1436 bitmap_set_bit (dest_bbs, sc->m_case_bb->index);
1437 }
1438
1439 unsigned uniq = bitmap_count_bits (dest_bbs);
1440 unsigned count = end - start + 1;
1441 return is_beneficial (count, uniq);
1442 }
1443
1444 /* Comparison function for qsort to order bit tests by decreasing
1445 probability of execution. */
1446
1447 int
1448 case_bit_test::cmp (const void *p1, const void *p2)
1449 {
1450 const struct case_bit_test *const d1 = (const struct case_bit_test *) p1;
1451 const struct case_bit_test *const d2 = (const struct case_bit_test *) p2;
1452
1453 if (d2->bits != d1->bits)
1454 return d2->bits - d1->bits;
1455
1456 /* Stabilize the sort. */
1457 return (LABEL_DECL_UID (CASE_LABEL (d2->label))
1458 - LABEL_DECL_UID (CASE_LABEL (d1->label)));
1459 }
1460
1461 /* Expand a switch statement by a short sequence of bit-wise
1462 comparisons. "switch(x)" is effectively converted into
1463 "if ((1 << (x-MINVAL)) & CST)" where CST and MINVAL are
1464 integer constants.
1465
1466 INDEX_EXPR is the value being switched on.
1467
1468 MINVAL is the lowest case value in the case nodes,
1469 and RANGE is highest value minus MINVAL. MINVAL and RANGE
1470 are not guaranteed to be of the same type as INDEX_EXPR
1471 (the gimplifier doesn't change the type of case label values,
1472 and MINVAL and RANGE are derived from those values).
1473 MAXVAL is MINVAL + RANGE.
1474
1475 There *MUST* be max_case_bit_tests or less unique case
1476 node targets. */
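/* A classic illustration (hypothetical input, not taken from this file):

     switch (c)
       {
       case 'a': case 'e': case 'i': case 'o': case 'u':
         return 1;
       default:
         return 0;
       }

   has one unique target for five cases spanning 21 values, so it can be
   emitted roughly as

     if (((1 << (c - 'a')) & 0x104111) != 0)
       return 1;
     return 0;

   where the mask has bits 0, 4, 8, 14 and 20 set, i.e. 'a', 'e', 'i', 'o'
   and 'u' relative to MINVAL == 'a'.  */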
1477
1478 void
1479 bit_test_cluster::emit (tree index_expr, tree index_type,
1480 tree, basic_block default_bb)
1481 {
1482 struct case_bit_test test[m_max_case_bit_tests] = { {} };
1483 unsigned int i, j, k;
1484 unsigned int count;
1485
1486 tree unsigned_index_type = unsigned_type_for (index_type);
1487
1488 gimple_stmt_iterator gsi;
1489 gassign *shift_stmt;
1490
1491 tree idx, tmp, csui;
1492 tree word_type_node = lang_hooks.types.type_for_mode (word_mode, 1);
1493 tree word_mode_zero = fold_convert (word_type_node, integer_zero_node);
1494 tree word_mode_one = fold_convert (word_type_node, integer_one_node);
1495 int prec = TYPE_PRECISION (word_type_node);
1496 wide_int wone = wi::one (prec);
1497
1498 tree minval = get_low ();
1499 tree maxval = get_high ();
1500 tree range = int_const_binop (MINUS_EXPR, maxval, minval);
1501 unsigned HOST_WIDE_INT bt_range = get_range (minval, maxval);
1502
1503 /* Go through all case labels, and collect the case labels, profile
1504 counts, and other information we need to build the branch tests. */
1505 count = 0;
1506 for (i = 0; i < m_cases.length (); i++)
1507 {
1508 unsigned int lo, hi;
1509 simple_cluster *n = static_cast<simple_cluster *> (m_cases[i]);
1510 for (k = 0; k < count; k++)
1511 if (n->m_case_bb == test[k].target_bb)
1512 break;
1513
1514 if (k == count)
1515 {
1516 gcc_checking_assert (count < m_max_case_bit_tests);
1517 test[k].mask = wi::zero (prec);
1518 test[k].target_bb = n->m_case_bb;
1519 test[k].label = n->m_case_label_expr;
1520 test[k].bits = 0;
1521 count++;
1522 }
1523
1524 test[k].bits += n->get_range (n->get_low (), n->get_high ());
1525
1526 lo = tree_to_uhwi (int_const_binop (MINUS_EXPR, n->get_low (), minval));
1527 if (n->get_high () == NULL_TREE)
1528 hi = lo;
1529 else
1530 hi = tree_to_uhwi (int_const_binop (MINUS_EXPR, n->get_high (),
1531 minval));
1532
1533 for (j = lo; j <= hi; j++)
1534 test[k].mask |= wi::lshift (wone, j);
1535 }
1536
1537 qsort (test, count, sizeof (*test), case_bit_test::cmp);
1538
1539 /* If all values are in the 0 .. BITS_PER_WORD-1 range, we can get rid of
1540 the minval subtractions, but it might make the mask constants more
1541 expensive. So, compare the costs. */
1542 if (compare_tree_int (minval, 0) > 0
1543 && compare_tree_int (maxval, GET_MODE_BITSIZE (word_mode)) < 0)
1544 {
1545 int cost_diff;
1546 HOST_WIDE_INT m = tree_to_uhwi (minval);
1547 rtx reg = gen_raw_REG (word_mode, 10000);
1548 bool speed_p = optimize_insn_for_speed_p ();
1549 cost_diff = set_rtx_cost (gen_rtx_PLUS (word_mode, reg,
1550 GEN_INT (-m)), speed_p);
1551 for (i = 0; i < count; i++)
1552 {
1553 rtx r = immed_wide_int_const (test[i].mask, word_mode);
1554 cost_diff += set_src_cost (gen_rtx_AND (word_mode, reg, r),
1555 word_mode, speed_p);
1556 r = immed_wide_int_const (wi::lshift (test[i].mask, m), word_mode);
1557 cost_diff -= set_src_cost (gen_rtx_AND (word_mode, reg, r),
1558 word_mode, speed_p);
1559 }
1560 if (cost_diff > 0)
1561 {
1562 for (i = 0; i < count; i++)
1563 test[i].mask = wi::lshift (test[i].mask, m);
1564 minval = build_zero_cst (TREE_TYPE (minval));
1565 range = maxval;
1566 }
1567 }
1568
1569 /* Now build the test-and-branch code. */
1570
1571 gsi = gsi_last_bb (m_case_bb);
1572
1573 /* idx = (unsigned)x - minval. */
1574 idx = fold_convert (unsigned_index_type, index_expr);
1575 idx = fold_build2 (MINUS_EXPR, unsigned_index_type, idx,
1576 fold_convert (unsigned_index_type, minval));
1577 idx = force_gimple_operand_gsi (&gsi, idx,
1578 /*simple=*/true, NULL_TREE,
1579 /*before=*/true, GSI_SAME_STMT);
1580
1581 if (m_handles_entire_switch)
1582 {
1583 /* if (idx > range) goto default */
1584 range
1585 = force_gimple_operand_gsi (&gsi,
1586 fold_convert (unsigned_index_type, range),
1587 /*simple=*/true, NULL_TREE,
1588 /*before=*/true, GSI_SAME_STMT);
1589 tmp = fold_build2 (GT_EXPR, boolean_type_node, idx, range);
1590 basic_block new_bb
1591 = hoist_edge_and_branch_if_true (&gsi, tmp, default_bb,
1592 profile_probability::unlikely ());
1593 gsi = gsi_last_bb (new_bb);
1594 }
1595
1596 /* csui = (1 << (word_mode) idx) */
1597 csui = make_ssa_name (word_type_node);
1598 tmp = fold_build2 (LSHIFT_EXPR, word_type_node, word_mode_one,
1599 fold_convert (word_type_node, idx));
1600 tmp = force_gimple_operand_gsi (&gsi, tmp,
1601 /*simple=*/false, NULL_TREE,
1602 /*before=*/true, GSI_SAME_STMT);
1603 shift_stmt = gimple_build_assign (csui, tmp);
1604 gsi_insert_before (&gsi, shift_stmt, GSI_SAME_STMT);
1605 update_stmt (shift_stmt);
1606
1607 profile_probability prob = profile_probability::always ();
1608
1609 /* for each unique set of cases:
1610 if (const & csui) goto target */
1611 for (k = 0; k < count; k++)
1612 {
1613 prob = profile_probability::always ().apply_scale (test[k].bits,
1614 bt_range);
1615 bt_range -= test[k].bits;
1616 tmp = wide_int_to_tree (word_type_node, test[k].mask);
1617 tmp = fold_build2 (BIT_AND_EXPR, word_type_node, csui, tmp);
1618 tmp = force_gimple_operand_gsi (&gsi, tmp,
1619 /*simple=*/true, NULL_TREE,
1620 /*before=*/true, GSI_SAME_STMT);
1621 tmp = fold_build2 (NE_EXPR, boolean_type_node, tmp, word_mode_zero);
1622 basic_block new_bb
1623 = hoist_edge_and_branch_if_true (&gsi, tmp, test[k].target_bb, prob);
1624 gsi = gsi_last_bb (new_bb);
1625 }
1626
1627 /* We should have removed all edges now. */
1628 gcc_assert (EDGE_COUNT (gsi_bb (gsi)->succs) == 0);
1629
1630 /* If nothing matched, go to the default label. */
1631 edge e = make_edge (gsi_bb (gsi), default_bb, EDGE_FALLTHRU);
1632 e->probability = profile_probability::always ();
1633 }
1634
1635 /* Split the basic block at the statement pointed to by GSIP, and insert
1636 a branch to basic block CASE_BB, conditional on tree expression COND.
1637 The new true edge to CASE_BB gets probability PROB; the false edge
1638 keeps the complementary probability and leads to the newly created
1639 basic block that receives the rest of the original block.
1640
1641 The CFG is updated, but the dominator tree will not be valid after
1642 this transformation.
1643
1644 Returns the newly created basic block. */
1649
1650 basic_block
1651 bit_test_cluster::hoist_edge_and_branch_if_true (gimple_stmt_iterator *gsip,
1652 tree cond, basic_block case_bb,
1653 profile_probability prob)
1654 {
1655 tree tmp;
1656 gcond *cond_stmt;
1657 edge e_false;
1658 basic_block new_bb, split_bb = gsi_bb (*gsip);
1659
1660 edge e_true = make_edge (split_bb, case_bb, EDGE_TRUE_VALUE);
1661 e_true->probability = prob;
1662 gcc_assert (e_true->src == split_bb);
1663
1664 tmp = force_gimple_operand_gsi (gsip, cond, /*simple=*/true, NULL,
1665 /*before=*/true, GSI_SAME_STMT);
1666 cond_stmt = gimple_build_cond_from_tree (tmp, NULL_TREE, NULL_TREE);
1667 gsi_insert_before (gsip, cond_stmt, GSI_SAME_STMT);
1668
1669 e_false = split_block (split_bb, cond_stmt);
1670 new_bb = e_false->dest;
1671 redirect_edge_pred (e_true, split_bb);
1672
1673 e_false->flags &= ~EDGE_FALLTHRU;
1674 e_false->flags |= EDGE_FALSE_VALUE;
1675 e_false->probability = e_true->probability.invert ();
1676 new_bb->count = e_false->count ();
1677
1678 return new_bb;
1679 }
1680
1681 /* Compute the number of case labels that correspond to each outgoing edge of
1682 the switch statement. Record this information in the aux field of the edge. */
1683
1684 void
1685 switch_decision_tree::compute_cases_per_edge ()
1686 {
1687 reset_out_edges_aux (m_switch);
1688 int ncases = gimple_switch_num_labels (m_switch);
1689 for (int i = ncases - 1; i >= 1; --i)
1690 {
1691 edge case_edge = gimple_switch_edge (cfun, m_switch, i);
1692 case_edge->aux = (void *) ((intptr_t) (case_edge->aux) + 1);
1693 }
1694 }
1695
1696 /* Analyze the switch statement and return true when the statement is expanded
1697 as a decision tree. */
1698
1699 bool
1700 switch_decision_tree::analyze_switch_statement ()
1701 {
1702 unsigned l = gimple_switch_num_labels (m_switch);
1703 basic_block bb = gimple_bb (m_switch);
1704 auto_vec<cluster *> clusters;
1705 clusters.create (l - 1);
1706
1707 basic_block default_bb = gimple_switch_default_bb (cfun, m_switch);
1708 m_case_bbs.reserve (l);
1709 m_case_bbs.quick_push (default_bb);
1710
1711 compute_cases_per_edge ();
1712
1713 for (unsigned i = 1; i < l; i++)
1714 {
1715 tree elt = gimple_switch_label (m_switch, i);
1716 tree lab = CASE_LABEL (elt);
1717 basic_block case_bb = label_to_block (cfun, lab);
1718 edge case_edge = find_edge (bb, case_bb);
1719 tree low = CASE_LOW (elt);
1720 tree high = CASE_HIGH (elt);
1721
1722 profile_probability p
1723 = case_edge->probability.apply_scale (1, (intptr_t) (case_edge->aux));
1724 clusters.quick_push (new simple_cluster (low, high, elt, case_edge->dest,
1725 p));
1726 m_case_bbs.quick_push (case_edge->dest);
1727 }
1728
1729 reset_out_edges_aux (m_switch);
1730
1731 /* Find jump table clusters. */
1732 vec<cluster *> output = jump_table_cluster::find_jump_tables (clusters);
1733
1734 /* Find bit test clusters. */
1735 vec<cluster *> output2;
1736 auto_vec<cluster *> tmp;
1737 output2.create (1);
1738 tmp.create (1);
1739
1740 for (unsigned i = 0; i < output.length (); i++)
1741 {
1742 cluster *c = output[i];
1743 if (c->get_type () != SIMPLE_CASE)
1744 {
1745 if (!tmp.is_empty ())
1746 {
1747 vec<cluster *> n = bit_test_cluster::find_bit_tests (tmp);
1748 output2.safe_splice (n);
1749 n.release ();
1750 tmp.truncate (0);
1751 }
1752 output2.safe_push (c);
1753 }
1754 else
1755 tmp.safe_push (c);
1756 }
1757
1758 /* We still can have a temporary vector to test. */
1759 if (!tmp.is_empty ())
1760 {
1761 vec<cluster *> n = bit_test_cluster::find_bit_tests (tmp);
1762 output2.safe_splice (n);
1763 n.release ();
1764 }
1765
1766 if (dump_file)
1767 {
1768 fprintf (dump_file, ";; GIMPLE switch case clusters: ");
1769 for (unsigned i = 0; i < output2.length (); i++)
1770 output2[i]->dump (dump_file, dump_flags & TDF_DETAILS);
1771 fprintf (dump_file, "\n");
1772 }
1773
1774 output.release ();
1775
1776 bool expanded = try_switch_expansion (output2);
1777
1778 for (unsigned i = 0; i < output2.length (); i++)
1779 delete output2[i];
1780
1781 output2.release ();
1782
1783 return expanded;
1784 }
1785
1786 /* Attempt to expand CLUSTERS as a decision tree. Return true when
1787 expanded. */
1788
1789 bool
1790 switch_decision_tree::try_switch_expansion (vec<cluster *> &clusters)
1791 {
1792 tree index_expr = gimple_switch_index (m_switch);
1793 tree index_type = TREE_TYPE (index_expr);
1794 basic_block bb = gimple_bb (m_switch);
1795
1796 if (gimple_switch_num_labels (m_switch) == 1)
1797 return false;
1798
1799 /* Find the default case target label. */
1800 edge default_edge = gimple_switch_default_edge (cfun, m_switch);
1801 m_default_bb = default_edge->dest;
1802
1803 /* Do the insertion of the case labels into m_case_list. The labels are
1804 fed to us in descending order from the sorted vector of case labels used
1805 in the tree part of the middle end. So the list we construct is
1806 sorted in ascending order. */
1807
1808 for (int i = clusters.length () - 1; i >= 0; i--)
1809 {
1810 case_tree_node *r = m_case_list;
1811 m_case_list = m_case_node_pool.allocate ();
1812 m_case_list->m_right = r;
1813 m_case_list->m_c = clusters[i];
1814 }
1815
1816 record_phi_operand_mapping ();
1817
1818 /* Split basic block that contains the gswitch statement. */
1819 gimple_stmt_iterator gsi = gsi_last_bb (bb);
1820 edge e;
1821 if (gsi_end_p (gsi))
1822 e = split_block_after_labels (bb);
1823 else
1824 {
1825 gsi_prev (&gsi);
1826 e = split_block (bb, gsi_stmt (gsi));
1827 }
1828 bb = split_edge (e);
1829
1830 /* Create new basic blocks for non-case clusters where specific expansion
1831 needs to happen. */
1832 for (unsigned i = 0; i < clusters.length (); i++)
1833 if (clusters[i]->get_type () != SIMPLE_CASE)
1834 {
1835 clusters[i]->m_case_bb = create_empty_bb (bb);
1836 clusters[i]->m_case_bb->loop_father = bb->loop_father;
1837 }
1838
1839 /* Do not do extra work for a single cluster. */
1840 if (clusters.length () == 1
1841 && clusters[0]->get_type () != SIMPLE_CASE)
1842 {
1843 cluster *c = clusters[0];
1844 c->emit (index_expr, index_type,
1845 gimple_switch_default_label (m_switch), m_default_bb);
1846 redirect_edge_succ (single_succ_edge (bb), c->m_case_bb);
1847 }
1848 else
1849 {
1850 emit (bb, index_expr, default_edge->probability, index_type);
1851
1852 /* Emit cluster-specific switch handling. */
1853 for (unsigned i = 0; i < clusters.length (); i++)
1854 if (clusters[i]->get_type () != SIMPLE_CASE)
1855 clusters[i]->emit (index_expr, index_type,
1856 gimple_switch_default_label (m_switch),
1857 m_default_bb);
1858 }
1859
1860 fix_phi_operands_for_edges ();
1861
1862 return true;
1863 }
1864
1865 /* Before switch transformation, record all SSA_NAMEs defined in the
1866 switch BB and used in a case label basic block. */
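/* A sketch of what gets recorded (SSA names are invented):

     switch_bb:
       x_5 = ...;
       switch (i_2) <default: Ld, case 1: L1, ...>
     L1 (a case label block):
       # x_7 = PHI <x_5(switch_bb), x_6(other_bb)>

   The pair x_7 -> x_5 is stored in m_phi_mapping so that, once the
   switch edges have been replaced by the decision tree, new edges into
   L1 can be given x_5 as the PHI argument.  */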
1867
1868 void
1869 switch_decision_tree::record_phi_operand_mapping ()
1870 {
1871 basic_block switch_bb = gimple_bb (m_switch);
1872 /* Record all PHI nodes that have to be fixed after conversion. */
1873 for (unsigned i = 0; i < m_case_bbs.length (); i++)
1874 {
1875 gphi_iterator gsi;
1876 basic_block bb = m_case_bbs[i];
1877 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1878 {
1879 gphi *phi = gsi.phi ();
1880
1881 for (unsigned i = 0; i < gimple_phi_num_args (phi); i++)
1882 {
1883 basic_block phi_src_bb = gimple_phi_arg_edge (phi, i)->src;
1884 if (phi_src_bb == switch_bb)
1885 {
1886 tree def = gimple_phi_arg_def (phi, i);
1887 tree result = gimple_phi_result (phi);
1888 m_phi_mapping.put (result, def);
1889 break;
1890 }
1891 }
1892 }
1893 }
1894 }
1895
1896 /* Append operands to PHI statements for the new edges to case label
1897 blocks that were introduced by the switch expansion. */
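/* Continuing the sketch above record_phi_operand_mapping: when the
   decision tree adds a new edge E into a case label block, the PHI
   x_7 gains an argument slot for E that starts out as NULL_TREE; the
   loop below fills it with the recorded definition x_5.  */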
1898
1899 void
1900 switch_decision_tree::fix_phi_operands_for_edges ()
1901 {
1902 gphi_iterator gsi;
1903
1904 for (unsigned i = 0; i < m_case_bbs.length (); i++)
1905 {
1906 basic_block bb = m_case_bbs[i];
1907 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1908 {
1909 gphi *phi = gsi.phi ();
1910 for (unsigned j = 0; j < gimple_phi_num_args (phi); j++)
1911 {
1912 tree def = gimple_phi_arg_def (phi, j);
1913 if (def == NULL_TREE)
1914 {
1915 edge e = gimple_phi_arg_edge (phi, j);
1916 tree *definition
1917 = m_phi_mapping.get (gimple_phi_result (phi));
1918 gcc_assert (definition);
1919 add_phi_arg (phi, *definition, e, UNKNOWN_LOCATION);
1920 }
1921 }
1922 }
1923 }
1924 }
1925
1926 /* Generate a decision tree, switching on INDEX_EXPR and jumping to
1927 one of the case basic blocks in m_case_list or to m_default_bb.
1928
1929 We generate a binary decision tree to select the appropriate target
1930 code. */
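/* Roughly, the steps below are: balance m_case_list into a binary tree
   weighted by probability, optionally dump it, emit comparison code
   for the tree starting in BB, make any remaining fall-through block
   jump to m_default_bb, and finally delete the block that still holds
   the original GIMPLE_SWITCH.  */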
1931
1932 void
1933 switch_decision_tree::emit (basic_block bb, tree index_expr,
1934 profile_probability default_prob, tree index_type)
1935 {
1936 balance_case_nodes (&m_case_list, NULL);
1937
1938 if (dump_file)
1939 dump_function_to_file (current_function_decl, dump_file, dump_flags);
1940 if (dump_file && (dump_flags & TDF_DETAILS))
1941 {
1942 int indent_step = ceil_log2 (TYPE_PRECISION (index_type)) + 2;
1943 fprintf (dump_file, ";; Expanding GIMPLE switch as decision tree:\n");
1944 gcc_assert (m_case_list != NULL);
1945 dump_case_nodes (dump_file, m_case_list, indent_step, 0);
1946 }
1947
1948 bb = emit_case_nodes (bb, index_expr, m_case_list, default_prob, index_type,
1949 gimple_location (m_switch));
1950
1951 if (bb)
1952 emit_jump (bb, m_default_bb);
1953
1954 /* Remove the original GIMPLE_SWITCH statement and delete its basic block. */
1955 bb = gimple_bb (m_switch);
1956 gimple_stmt_iterator gsi = gsi_last_bb (bb);
1957 gsi_remove (&gsi, true);
1958
1959 delete_basic_block (bb);
1960 }
1961
1962 /* Take an ordered list of case nodes
1963 and transform them into a near optimal binary tree,
1964 using the profile probability of each case to decide where
1965 to place the pivot.
1966
1967 The transformation is performed by splitting the ordered
1968 list into two sections of roughly equal probability plus a pivot.
1969 The parts are then attached to the pivot as left and right
1970 branches. Each branch is then transformed recursively. */
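/* A worked example (the probabilities are invented): for the list
   1 (25%) -> 2 (25%) -> 3 (25%) -> 4 (25%) the remaining probability
   first drops below one half of the total at node 3, so 3 becomes the
   pivot; nodes 1 -> 2 form its left branch (short lists stay linear)
   and node 4 its right branch:

             3
            / \
           1   4
            \
             2
   */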
1971
1972 void
1973 switch_decision_tree::balance_case_nodes (case_tree_node **head,
1974 case_tree_node *parent)
1975 {
1976 case_tree_node *np;
1977
1978 np = *head;
1979 if (np)
1980 {
1981 int i = 0;
1982 int ranges = 0;
1983 case_tree_node **npp;
1984 case_tree_node *left;
1985 profile_probability prob = profile_probability::never ();
1986
1987 /* Count the number of entries on this branch. Also count the ranges. */
1988
1989 while (np)
1990 {
1991 if (!tree_int_cst_equal (np->m_c->get_low (), np->m_c->get_high ()))
1992 ranges++;
1993
1994 i++;
1995 prob += np->m_c->m_prob;
1996 np = np->m_right;
1997 }
1998
1999 if (i > 2)
2000 {
2001 /* Split this list if it is long enough for that to help. */
2002 npp = head;
2003 left = *npp;
2004 profile_probability pivot_prob = prob.apply_scale (1, 2);
2005
2006 /* Find the place in the list that bisects the list's total
2007 probability. */
2008 while (1)
2009 {
2010 /* Skip nodes while their probability does not reach
2011 that amount. */
2012 prob -= (*npp)->m_c->m_prob;
2013 if ((prob.initialized_p () && prob < pivot_prob)
2014 || ! (*npp)->m_right)
2015 break;
2016 npp = &(*npp)->m_right;
2017 }
2018
2019 np = *npp;
2020 *npp = 0;
2021 *head = np;
2022 np->m_parent = parent;
2023 np->m_left = left == np ? NULL : left;
2024
2025 /* Optimize each of the two split parts. */
2026 balance_case_nodes (&np->m_left, np);
2027 balance_case_nodes (&np->m_right, np);
2028 np->m_c->m_subtree_prob = np->m_c->m_prob;
2029 if (np->m_left)
2030 np->m_c->m_subtree_prob += np->m_left->m_c->m_subtree_prob;
2031 if (np->m_right)
2032 np->m_c->m_subtree_prob += np->m_right->m_c->m_subtree_prob;
2033 }
2034 else
2035 {
2036 /* Else leave this branch as one level,
2037 but fill in `parent' fields. */
2038 np = *head;
2039 np->m_parent = parent;
2040 np->m_c->m_subtree_prob = np->m_c->m_prob;
2041 for (; np->m_right; np = np->m_right)
2042 {
2043 np->m_right->m_parent = np;
2044 (*head)->m_c->m_subtree_prob += np->m_right->m_c->m_subtree_prob;
2045 }
2046 }
2047 }
2048 }
2049
2050 /* Dump ROOT, a list or tree of case nodes, to file. */
2051
2052 void
2053 switch_decision_tree::dump_case_nodes (FILE *f, case_tree_node *root,
2054 int indent_step, int indent_level)
2055 {
2056 if (root == 0)
2057 return;
2058 indent_level++;
2059
2060 dump_case_nodes (f, root->m_left, indent_step, indent_level);
2061
2062 fputs (";; ", f);
2063 fprintf (f, "%*s", indent_step * indent_level, "");
2064 root->m_c->dump (f);
2065 root->m_c->m_prob.dump (f);
2066 fputs (" subtree: ", f);
2067 root->m_c->m_subtree_prob.dump (f);
2068 fputs (")\n", f);
2069
2070 dump_case_nodes (f, root->m_right, indent_step, indent_level);
2071 }
2072
2073
2074 /* Add an unconditional jump from basic block BB to CASE_BB. */
2075
2076 void
2077 switch_decision_tree::emit_jump (basic_block bb, basic_block case_bb)
2078 {
2079 edge e = single_succ_edge (bb);
2080 redirect_edge_succ (e, case_bb);
2081 }
2082
2083 /* Generate code in basic block BB to compare OP0 with OP1 and jump to
2084 LABEL_BB if the comparison is true.
2085 COMPARISON is the GIMPLE comparison code (EQ_EXPR, NE_EXPR, GT_EXPR, ...).
2086 PROB is the probability of jumping to LABEL_BB. */
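/* Sketch of the emitted control flow (GIMPLE shown informally): given
   a block BB with a single successor NEXT, the call produces

     BB:     if (OP0 <COMPARISON> OP1) goto LABEL_BB; else goto NEW_BB;
     NEW_BB: falls through to NEXT

   and returns NEW_BB, so callers can chain further tests on the false
   path.  */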
2087
2088 basic_block
2089 switch_decision_tree::emit_cmp_and_jump_insns (basic_block bb, tree op0,
2090 tree op1, tree_code comparison,
2091 basic_block label_bb,
2092 profile_probability prob,
2093 location_t loc)
2094 {
2095 // TODO: this is called once with OP0 not being the switch index.
2096 op1 = fold_convert (TREE_TYPE (op0), op1);
2097
2098 gcond *cond = gimple_build_cond (comparison, op0, op1, NULL_TREE, NULL_TREE);
2099 gimple_set_location (cond, loc);
2100 gimple_stmt_iterator gsi = gsi_last_bb (bb);
2101 gsi_insert_after (&gsi, cond, GSI_NEW_STMT);
2102
2103 gcc_assert (single_succ_p (bb));
2104
2105 /* Make a new basic block where the false branch will take place. */
2106 edge false_edge = split_block (bb, cond);
2107 false_edge->flags = EDGE_FALSE_VALUE;
2108 false_edge->probability = prob.invert ();
2109
2110 edge true_edge = make_edge (bb, label_bb, EDGE_TRUE_VALUE);
2111 true_edge->probability = prob;
2112
2113 return false_edge->dest;
2114 }
2115
2116 /* Generate code to jump to LABEL_BB if OP0 and OP1 are equal.
2117 PROB is the probability of jumping to LABEL_BB.
2118 BB is the basic block where the new condition will be placed. */
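/* Informally, this emits the same shape as emit_cmp_and_jump_insns
   above with EQ_EXPR as the comparison:

     BB: if (OP0 == OP1) goto LABEL_BB; else goto NEW_BB;

   and returns NEW_BB for further tests on the not-equal path.  */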
2119
2120 basic_block
2121 switch_decision_tree::do_jump_if_equal (basic_block bb, tree op0, tree op1,
2122 basic_block label_bb,
2123 profile_probability prob,
2124 location_t loc)
2125 {
2126 op1 = fold_convert (TREE_TYPE (op0), op1);
2127
2128 gcond *cond = gimple_build_cond (EQ_EXPR, op0, op1, NULL_TREE, NULL_TREE);
2129 gimple_set_location (cond, loc);
2130 gimple_stmt_iterator gsi = gsi_last_bb (bb);
2131 gsi_insert_before (&gsi, cond, GSI_SAME_STMT);
2132
2133 gcc_assert (single_succ_p (bb));
2134
2135 /* Make a new basic block where the false branch will take place. */
2136 edge false_edge = split_block (bb, cond);
2137 false_edge->flags = EDGE_FALSE_VALUE;
2138 false_edge->probability = prob.invert ();
2139
2140 edge true_edge = make_edge (bb, label_bb, EDGE_TRUE_VALUE);
2141 true_edge->probability = prob;
2142
2143 return false_edge->dest;
2144 }
2145
2146 /* Emit step-by-step code to select a case for the value of INDEX.
2147 The generated decision tree follows the form of the case-node binary
2148 tree NODE, whose nodes represent test conditions.
2149 DEFAULT_PROB is the probability of reaching the default BB.
2150 INDEX_TYPE is the type of the index of the switch. */
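/* For the small balanced tree sketched above balance_case_nodes
   (pivot 3, left subtree 1 -> 2, right leaf 4), the recursion below
   emits roughly

     if (index == 3) goto case_3;
     if (index > 3)  goto test_right;
     if (index == 1) goto case_1;
     if (index == 2) goto case_2;
     goto default;
   test_right:
     if (index == 4) goto case_4;
     goto default;

   The exact shape depends on which children exist and on whether the
   clusters are single values or ranges.  */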
2151
2152 basic_block
2153 switch_decision_tree::emit_case_nodes (basic_block bb, tree index,
2154 case_tree_node *node,
2155 profile_probability default_prob,
2156 tree index_type, location_t loc)
2157 {
2158 profile_probability p;
2159
2160 /* If node is null, we are done. */
2161 if (node == NULL)
2162 return bb;
2163
2164 /* Single value case. */
2165 if (node->m_c->is_single_value_p ())
2166 {
2167 /* Node is single valued. First see if the index expression matches
2168 this node and then check our children, if any. */
2169 p = node->m_c->m_prob / (node->m_c->m_subtree_prob + default_prob);
2170 bb = do_jump_if_equal (bb, index, node->m_c->get_low (),
2171 node->m_c->m_case_bb, p, loc);
2172 /* Since this case has been handled at this point, subtract its
2173 probability from m_subtree_prob. */
2174 node->m_c->m_subtree_prob -= p;
2175
2176 if (node->m_left != NULL && node->m_right != NULL)
2177 {
2178 /* 1) the node has both children
2179
2180 If both children are single-valued cases with no
2181 children, finish up all the work. This way, we can save
2182 one ordered comparison. */
2183
2184 if (!node->m_left->has_child ()
2185 && node->m_left->m_c->is_single_value_p ()
2186 && !node->m_right->has_child ()
2187 && node->m_right->m_c->is_single_value_p ())
2188 {
2189 p = (node->m_right->m_c->m_prob
2190 / (node->m_c->m_subtree_prob + default_prob));
2191 bb = do_jump_if_equal (bb, index, node->m_right->m_c->get_low (),
2192 node->m_right->m_c->m_case_bb, p, loc);
2193
2194 p = (node->m_left->m_c->m_prob
2195 / (node->m_c->m_subtree_prob + default_prob));
2196 bb = do_jump_if_equal (bb, index, node->m_left->m_c->get_low (),
2197 node->m_left->m_c->m_case_bb, p, loc);
2198 }
2199 else
2200 {
2201 /* Branch to a label where we will handle it later. */
2202 basic_block test_bb = split_edge (single_succ_edge (bb));
2203 redirect_edge_succ (single_pred_edge (test_bb),
2204 single_succ_edge (bb)->dest);
2205
2206 p = ((node->m_right->m_c->m_subtree_prob
2207 + default_prob.apply_scale (1, 2))
2208 / (node->m_c->m_subtree_prob + default_prob));
2209 bb = emit_cmp_and_jump_insns (bb, index, node->m_c->get_high (),
2210 GT_EXPR, test_bb, p, loc);
2211 default_prob = default_prob.apply_scale (1, 2);
2212
2213 /* Handle the left-hand subtree. */
2214 bb = emit_case_nodes (bb, index, node->m_left,
2215 default_prob, index_type, loc);
2216
2217 /* If the left-hand subtree fell through,
2218 don't let it fall into the right-hand subtree. */
2219 if (bb && m_default_bb)
2220 emit_jump (bb, m_default_bb);
2221
2222 bb = emit_case_nodes (test_bb, index, node->m_right,
2223 default_prob, index_type, loc);
2224 }
2225 }
2226 else if (node->m_left == NULL && node->m_right != NULL)
2227 {
2228 /* 2) the node has only right child. */
2229
2230 /* Here we have a right child but no left so we issue a conditional
2231 branch to default and process the right child.
2232
2233 Omit the conditional branch to default if the right child
2234 does not have any children and is single valued; it would
2235 cost too much space to save so little time. */
2236
2237 if (node->m_right->has_child ()
2238 || !node->m_right->m_c->is_single_value_p ())
2239 {
2240 p = (default_prob.apply_scale (1, 2)
2241 / (node->m_c->m_subtree_prob + default_prob));
2242 bb = emit_cmp_and_jump_insns (bb, index, node->m_c->get_low (),
2243 LT_EXPR, m_default_bb, p, loc);
2244 default_prob = default_prob.apply_scale (1, 2);
2245
2246 bb = emit_case_nodes (bb, index, node->m_right, default_prob,
2247 index_type, loc);
2248 }
2249 else
2250 {
2251 /* We cannot process node->right normally
2252 since we haven't ruled out the numbers less than
2253 this node's value. So handle node->right explicitly. */
2254 p = (node->m_right->m_c->m_subtree_prob
2255 / (node->m_c->m_subtree_prob + default_prob));
2256 bb = do_jump_if_equal (bb, index, node->m_right->m_c->get_low (),
2257 node->m_right->m_c->m_case_bb, p, loc);
2258 }
2259 }
2260 else if (node->m_left != NULL && node->m_right == NULL)
2261 {
2262 /* 3) just one subtree, on the left. Similar case as previous. */
2263
2264 if (node->m_left->has_child ()
2265 || !node->m_left->m_c->is_single_value_p ())
2266 {
2267 p = (default_prob.apply_scale (1, 2)
2268 / (node->m_c->m_subtree_prob + default_prob));
2269 bb = emit_cmp_and_jump_insns (bb, index, node->m_c->get_high (),
2270 GT_EXPR, m_default_bb, p, loc);
2271 default_prob = default_prob.apply_scale (1, 2);
2272
2273 bb = emit_case_nodes (bb, index, node->m_left, default_prob,
2274 index_type, loc);
2275 }
2276 else
2277 {
2278 /* We cannot process node->left normally
2279 since we haven't ruled out the numbers less than
2280 this node's value. So handle node->left explicitly. */
2281 p = (node->m_left->m_c->m_subtree_prob
2282 / (node->m_c->m_subtree_prob + default_prob));
2283 bb = do_jump_if_equal (bb, index, node->m_left->m_c->get_low (),
2284 node->m_left->m_c->m_case_bb, p, loc);
2285 }
2286 }
2287 }
2288 else
2289 {
2290 /* Node is a range. These cases are very similar to those for a single
2291 value, except that we do not start by testing whether this node
2292 is the one to branch to. */
2293 if (node->has_child () || node->m_c->get_type () != SIMPLE_CASE)
2294 {
2295 /* Branch to a label where we will handle it later. */
2296 basic_block test_bb = split_edge (single_succ_edge (bb));
2297 redirect_edge_succ (single_pred_edge (test_bb),
2298 single_succ_edge (bb)->dest);
2299
2300
2301 profile_probability right_prob = profile_probability::never ();
2302 if (node->m_right)
2303 right_prob = node->m_right->m_c->m_subtree_prob;
2304 p = ((right_prob + default_prob.apply_scale (1, 2))
2305 / (node->m_c->m_subtree_prob + default_prob));
2306
2307 bb = emit_cmp_and_jump_insns (bb, index, node->m_c->get_high (),
2308 GT_EXPR, test_bb, p, loc);
2309 default_prob = default_prob.apply_scale (1, 2);
2310
2311 /* Value belongs to this node or to the left-hand subtree. */
2312 p = node->m_c->m_prob / (node->m_c->m_subtree_prob + default_prob);
2313 bb = emit_cmp_and_jump_insns (bb, index, node->m_c->get_low (),
2314 GE_EXPR, node->m_c->m_case_bb, p, loc);
2315
2316 /* Handle the left-hand subtree. */
2317 bb = emit_case_nodes (bb, index, node->m_left,
2318 default_prob, index_type, loc);
2319
2320 /* If the left-hand subtree fell through,
2321 don't let it fall into the right-hand subtree. */
2322 if (bb && m_default_bb)
2323 emit_jump (bb, m_default_bb);
2324
2325 bb = emit_case_nodes (test_bb, index, node->m_right,
2326 default_prob, index_type, loc);
2327 }
2328 else
2329 {
2330 /* Node has no children so we check low and high bounds to remove
2331 redundant tests. Only one of the bounds can exist,
2332 since otherwise this node is bounded--a case tested already. */
2333 tree lhs, rhs;
2334 generate_range_test (bb, index, node->m_c->get_low (),
2335 node->m_c->get_high (), &lhs, &rhs);
2336 p = default_prob / (node->m_c->m_subtree_prob + default_prob);
2337
2338 bb = emit_cmp_and_jump_insns (bb, lhs, rhs, GT_EXPR,
2339 m_default_bb, p, loc);
2340
2341 emit_jump (bb, node->m_c->m_case_bb);
2342 return NULL;
2343 }
2344 }
2345
2346 return bb;
2347 }
2348
2349 /* The main function of the pass scans statements for switches and invokes
2350 switch_conversion::expand on them. */
2351
2352 namespace {
2353
2354 const pass_data pass_data_convert_switch =
2355 {
2356 GIMPLE_PASS, /* type */
2357 "switchconv", /* name */
2358 OPTGROUP_NONE, /* optinfo_flags */
2359 TV_TREE_SWITCH_CONVERSION, /* tv_id */
2360 ( PROP_cfg | PROP_ssa ), /* properties_required */
2361 0, /* properties_provided */
2362 0, /* properties_destroyed */
2363 0, /* todo_flags_start */
2364 TODO_update_ssa, /* todo_flags_finish */
2365 };
2366
2367 class pass_convert_switch : public gimple_opt_pass
2368 {
2369 public:
2370 pass_convert_switch (gcc::context *ctxt)
2371 : gimple_opt_pass (pass_data_convert_switch, ctxt)
2372 {}
2373
2374 /* opt_pass methods: */
2375 virtual bool gate (function *) { return flag_tree_switch_conversion != 0; }
2376 virtual unsigned int execute (function *);
2377
2378 }; // class pass_convert_switch
2379
2380 unsigned int
2381 pass_convert_switch::execute (function *fun)
2382 {
2383 basic_block bb;
2384 bool cfg_altered = false;
2385
2386 FOR_EACH_BB_FN (bb, fun)
2387 {
2388 gimple *stmt = last_stmt (bb);
2389 if (stmt && gimple_code (stmt) == GIMPLE_SWITCH)
2390 {
2391 if (dump_file)
2392 {
2393 expanded_location loc = expand_location (gimple_location (stmt));
2394
2395 fprintf (dump_file, "beginning to process the following "
2396 "SWITCH statement (%s:%d) : ------- \n",
2397 loc.file, loc.line);
2398 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2399 putc ('\n', dump_file);
2400 }
2401
2402 switch_conversion sconv;
2403 sconv.expand (as_a <gswitch *> (stmt));
2404 cfg_altered |= sconv.m_cfg_altered;
2405 if (!sconv.m_reason)
2406 {
2407 if (dump_file)
2408 {
2409 fputs ("Switch converted\n", dump_file);
2410 fputs ("--------------------------------\n", dump_file);
2411 }
2412
2413 /* Make no effort to update the post-dominator tree.
2414 It is actually not that hard for the transformations
2415 we have performed, but it is not supported
2416 by iterate_fix_dominators. */
2417 free_dominance_info (CDI_POST_DOMINATORS);
2418 }
2419 else
2420 {
2421 if (dump_file)
2422 {
2423 fputs ("Bailing out - ", dump_file);
2424 fputs (sconv.m_reason, dump_file);
2425 fputs ("\n--------------------------------\n", dump_file);
2426 }
2427 }
2428 }
2429 }
2430
2431 return cfg_altered ? TODO_cleanup_cfg : 0;
2432 }
2433
2434 } // anon namespace
2435
2436 gimple_opt_pass *
2437 make_pass_convert_switch (gcc::context *ctxt)
2438 {
2439 return new pass_convert_switch (ctxt);
2440 }
2441
2442 /* The switch lowering pass scans statements for switches and expands each
2443 one into a jump table, a series of bit tests, or a decision tree. */
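/* The pass template below is instantiated twice at the end of this file:
   pass_lower_switch<true> ("switchlower_O0") gates on !optimize, while
   pass_lower_switch<false> ("switchlower") has an always-true gate; where
   each instance runs is determined by its position in the pass pipeline
   rather than by anything in this file.  */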
2444
2445 namespace {
2446
2447 template <bool O0> class pass_lower_switch: public gimple_opt_pass
2448 {
2449 public:
2450 pass_lower_switch (gcc::context *ctxt) : gimple_opt_pass (data, ctxt) {}
2451
2452 static const pass_data data;
2453 opt_pass *
2454 clone ()
2455 {
2456 return new pass_lower_switch<O0> (m_ctxt);
2457 }
2458
2459 virtual bool
2460 gate (function *)
2461 {
2462 return !O0 || !optimize;
2463 }
2464
2465 virtual unsigned int execute (function *fun);
2466 }; // class pass_lower_switch
2467
2468 template <bool O0>
2469 const pass_data pass_lower_switch<O0>::data = {
2470 GIMPLE_PASS, /* type */
2471 O0 ? "switchlower_O0" : "switchlower", /* name */
2472 OPTGROUP_NONE, /* optinfo_flags */
2473 TV_TREE_SWITCH_LOWERING, /* tv_id */
2474 ( PROP_cfg | PROP_ssa ), /* properties_required */
2475 0, /* properties_provided */
2476 0, /* properties_destroyed */
2477 0, /* todo_flags_start */
2478 TODO_update_ssa | TODO_cleanup_cfg, /* todo_flags_finish */
2479 };
2480
2481 template <bool O0>
2482 unsigned int
2483 pass_lower_switch<O0>::execute (function *fun)
2484 {
2485 basic_block bb;
2486 bool expanded = false;
2487
2488 auto_vec<gimple *> switch_statements;
2489 switch_statements.create (1);
2490
2491 FOR_EACH_BB_FN (bb, fun)
2492 {
2493 gimple *stmt = last_stmt (bb);
2494 gswitch *swtch;
2495 if (stmt && (swtch = dyn_cast<gswitch *> (stmt)))
2496 {
2497 if (!O0)
2498 group_case_labels_stmt (swtch);
2499 switch_statements.safe_push (swtch);
2500 }
2501 }
2502
2503 for (unsigned i = 0; i < switch_statements.length (); i++)
2504 {
2505 gimple *stmt = switch_statements[i];
2506 if (dump_file)
2507 {
2508 expanded_location loc = expand_location (gimple_location (stmt));
2509
2510 fprintf (dump_file, "beginning to process the following "
2511 "SWITCH statement (%s:%d) : ------- \n",
2512 loc.file, loc.line);
2513 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2514 putc ('\n', dump_file);
2515 }
2516
2517 gswitch *swtch = dyn_cast<gswitch *> (stmt);
2518 if (swtch)
2519 {
2520 switch_decision_tree dt (swtch);
2521 expanded |= dt.analyze_switch_statement ();
2522 }
2523 }
2524
2525 if (expanded)
2526 {
2527 free_dominance_info (CDI_DOMINATORS);
2528 free_dominance_info (CDI_POST_DOMINATORS);
2529 mark_virtual_operands_for_renaming (cfun);
2530 }
2531
2532 return 0;
2533 }
2534
2535 } // anon namespace
2536
2537 gimple_opt_pass *
2538 make_pass_lower_switch_O0 (gcc::context *ctxt)
2539 {
2540 return new pass_lower_switch<true> (ctxt);
2541 }
2542 gimple_opt_pass *
2543 make_pass_lower_switch (gcc::context *ctxt)
2544 {
2545 return new pass_lower_switch<false> (ctxt);
2546 }
2547
2548